#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
from datetime import datetime as dt
from glob import glob
import cv2
import numpy as np
from cuda import cudart
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
import calibrator
import tensorflow as tf2
import tensorrt as trt
from tensorflow.python.framework.convert_to_constants import \
convert_variables_to_constants_v2
np.random.seed(31193)
tf2.random.set_seed(97)
nTrainBatchSize = 128
nHeight = 28
nWidth = 28
pbFilePath = "./model-NHWC-C2/"
pbFile = "model-NHWC-C2.pb"
onnxFile = "./model-NHWC-C2.onnx"
trtFile = "./model-NHWC-C2.plan"
dataPath = os.path.dirname(os.path.realpath(__file__)) + "/../../00-MNISTData/"
trainFileList = sorted(glob(dataPath + "train/*.jpg"))
testFileList = sorted(glob(dataPath + "test/*.jpg"))
inferenceImage = dataPath + "8.png"
# Two equivalent methods to export the ONNX file: from a single .pb file or from a SavedModel directory
bSinglePbFile = True
# for FP16 mode
bUseFP16Mode = False
# for INT8 mode
bUseINT8Mode = False
nCalibration = 1
cacheFile = "./int8.cache"
calibrationDataPath = dataPath + "test/"
os.system("rm -rf %s ./*.plan ./*.cache" % pbFilePath)
np.set_printoptions(precision=3, linewidth=200, suppress=True)
tf2.config.experimental.set_memory_growth(tf2.config.list_physical_devices("GPU")[0], True)
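# Enabling memory growth lets TensorFlow allocate GPU memory on demand instead of reserving nearly all of it at startup, leaving device memory available for the TensorRT builder later in this script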
cudart.cudaDeviceSynchronize()
# Create network and train model in TensorFlow2 --------------------------------
def getData(fileList):
nSize = len(fileList)
xData = np.zeros([nSize, nHeight, nWidth, 1], dtype=np.float32)
yData = np.zeros([nSize, 10], dtype=np.float32)
for i in range(nSize):
imageName = fileList[i]
data = cv2.imread(imageName, cv2.IMREAD_GRAYSCALE)
label = np.zeros(10, dtype=np.float32)
label[int(imageName[-7])] = 1
xData[i] = data.reshape(nHeight, nWidth, 1).astype(np.float32) / 255
yData[i] = label
return xData, yData
modelInput = tf2.keras.Input(shape=[nHeight, nWidth, 2], dtype=tf2.dtypes.float32)
layerConv1 = tf2.keras.layers.Conv2D(32, [5, 5], strides=[1, 1], padding="same", data_format=None, dilation_rate=[1, 1], groups=1, activation="relu", use_bias=True, kernel_initializer="glorot_uniform", bias_initializer="zeros", kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, bias_constraint=None, name="conv1")
x = layerConv1(modelInput)
layerPool1 = tf2.keras.layers.MaxPool2D(pool_size=[2, 2], strides=[2, 2], padding="same", data_format=None, name="pool1")
x = layerPool1(x)
layerConv2 = tf2.keras.layers.Conv2D(64, [5, 5], strides=[1, 1], padding="same", data_format=None, dilation_rate=[1, 1], groups=1, activation="relu", use_bias=True, kernel_initializer="glorot_uniform", bias_initializer="zeros", kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, bias_constraint=None, name="conv2")
x = layerConv2(x)
layerPool2 = tf2.keras.layers.MaxPool2D(pool_size=[2, 2], strides=[2, 2], padding="same", data_format=None, name="pool2")
x = layerPool2(x)
layerReshape = tf2.keras.layers.Reshape([-1], name="reshape")
x = layerReshape(x)
layerDense1 = tf2.keras.layers.Dense(1024, activation="relu", use_bias=True, kernel_initializer="glorot_uniform", bias_initializer="zeros", kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, bias_constraint=None, name="dense1")
x = layerDense1(x)
layerDense2 = tf2.keras.layers.Dense(10, activation=None, use_bias=True, kernel_initializer="glorot_uniform", bias_initializer="zeros", kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, bias_constraint=None, name="dense2")
x = layerDense2(x)
layerSoftmax = tf2.keras.layers.Softmax(axis=1, name="softmax")
z = layerSoftmax(x)
model = tf2.keras.Model(inputs=modelInput, outputs=z, name="MNISTExample")
model.summary()
model.compile(
loss=tf2.keras.losses.CategoricalCrossentropy(from_logits=False),
optimizer=tf2.keras.optimizers.Adam(),
metrics=["accuracy"],
)
xTrain, yTrain = getData(trainFileList)
xTrain = np.tile(xTrain, [1, 1, 1, 2])  # duplicate the grayscale channel to get the 2-channel NHWC input
history = model.fit(xTrain, yTrain, batch_size=128, epochs=10, validation_split=0.1)
xTest, yTest = getData(testFileList)
xTest = np.tile(xTest, [1, 1, 1, 2])
testScore = model.evaluate(xTest, yTest, verbose=2)
print("%s, loss = %f, accuracy = %f" % (dt.now(), testScore[0], testScore[1]))
tf2.saved_model.save(model, pbFilePath)
if bSinglePbFile:
modelFunction = tf2.function(lambda Input: model(Input)).get_concrete_function(tf2.TensorSpec(model.inputs[0].shape, model.inputs[0].dtype))
frozen_func = convert_variables_to_constants_v2(modelFunction)
frozen_func.graph.as_graph_def()
print("_________________________________________________________________")
print("Frozen model inputs:\n", frozen_func.inputs)
print("Frozen model outputs:\n", frozen_func.outputs)
print("Frozen model layers:")
for op in frozen_func.graph.get_operations():
print(op.name)
tf2.io.write_graph(graph_or_graph_def=frozen_func.graph, logdir=pbFilePath, name=pbFile, as_text=False)
print("Succeeded building model in TensorFlow2!")
# Export model as ONNX file ----------------------------------------------------
if bSinglePbFile:
os.system("python3 -m tf2onnx.convert --input %s --output %s --opset 13 --inputs 'Input:0' --outputs 'Identity:0'" % (pbFilePath + pbFile, onnxFile))
else:
os.system("python3 -m tf2onnx.convert --saved-model %s --output %s --opset 13" % (pbFilePath, onnxFile))
print("Succeeded converting model into ONNX!")
# Parse network, rebuild network and do inference in TensorRT ------------------
logger = trt.Logger(trt.Logger.VERBOSE)
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
profile = builder.create_optimization_profile()
config = builder.create_builder_config()
if bUseFP16Mode:
config.set_flag(trt.BuilderFlag.FP16)
if bUseINT8Mode:
config.set_flag(trt.BuilderFlag.INT8)
config.int8_calibrator = calibrator.MyCalibrator(calibrationDataPath, nCalibration, (1, 1, nHeight, nWidth), cacheFile)
parser = trt.OnnxParser(network, logger)
if not os.path.exists(onnxFile):
print("Failed finding ONNX file!")
exit()
print("Succeeded finding ONNX file!")
with open(onnxFile, "rb") as model:
if not parser.parse(model.read()):
print("Failed parsing .onnx file!")
for error in range(parser.num_errors):
print(parser.get_error(error))
exit()
print("Succeeded parsing .onnx file!")
inputTensor = network.get_input(0)
inputTensor.shape = [-1, nHeight, nWidth, 2]
profile.set_shape(inputTensor.name, [1, nHeight, nWidth, 2], [4, nHeight, nWidth, 2], [8, nHeight, nWidth, 2])
config.add_optimization_profile(profile)
outputTensor = network.get_output(0)
network.unmark_output(outputTensor)
_17 = network.add_topk(outputTensor, trt.TopKOperation.MAX, 1, 1 << 1) # add last ArgMax node
network.mark_output(_17.get_output(1))
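# Note: the last argument of add_topk is an axes bitmask, so 1 << 1 selects axis 1 (the class axis); output 0 of the TopK layer holds the values and output 1 holds the ArgMax indices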
engineString = builder.build_serialized_network(network, config)
if engineString is None:
print("Failed building engine!")
exit()
print("Succeeded building engine!")
with open(trtFile, "wb") as f:
f.write(engineString)
engine = trt.Runtime(logger).deserialize_cuda_engine(engineString)
nIO = engine.num_io_tensors
lTensorName = [engine.get_tensor_name(i) for i in range(nIO)]
nInput = [engine.get_tensor_mode(lTensorName[i]) for i in range(nIO)].count(trt.TensorIOMode.INPUT)
context = engine.create_execution_context()
context.set_input_shape(lTensorName[0], [1, nHeight, nWidth, 2])
for i in range(nIO):
print("[%2d]%s->" % (i, "Input " if i < nInput else "Output"), engine.get_tensor_dtype(lTensorName[i]), engine.get_tensor_shape(lTensorName[i]), context.get_tensor_shape(lTensorName[i]), lTensorName[i])
bufferH = []
for i in range(nIO):
bufferH.append(np.empty(context.get_tensor_shape(lTensorName[i]), dtype=trt.nptype(engine.get_tensor_dtype(lTensorName[i]))))
bufferD = []
for i in range(nIO):
bufferD.append(cudart.cudaMalloc(bufferH[i].nbytes)[1])
data = cv2.imread(inferenceImage, cv2.IMREAD_GRAYSCALE).astype(np.float32).reshape(1, nHeight, nWidth, 1)
bufferH[0] = np.tile(data, [1, 1, 1, 2])
for i in range(nInput):
cudart.cudaMemcpy(bufferD[i], bufferH[i].ctypes.data, bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice)
for i in range(nIO):
context.set_tensor_address(lTensorName[i], int(bufferD[i]))
context.execute_async_v3(0)
for i in range(nInput, nIO):
cudart.cudaMemcpy(bufferH[i].ctypes.data, bufferD[i], bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost)
for i in range(nIO):
print(lTensorName[i])
print(bufferH[i])
for b in bufferD:
cudart.cudaFree(b)
print("Succeeded running model in TensorRT!")
| trt-samples-for-hackathon-cn-master | cookbook/04-BuildEngineByONNXParser/TensorFlow2-ONNX-TensorRT/main-NHWC-C2.py |
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
from datetime import datetime as dt
from glob import glob
from cuda import cudart  # needed by cudart.cudaDeviceSynchronize() below
import cv2
import numpy as np
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
import tensorflow as tf1
np.random.seed(31193)
tf1.compat.v1.set_random_seed(97)
nTrainBatchSize = 128
nHeight = 28
nWidth = 28
ckptFile = "./model.ckpt"
pbFile = "./model.pb"
caffeFile = "./model"
trtFile = "./model.plan"
dataPath = os.path.dirname(os.path.realpath(__file__)) + "/../../00-MNISTData/"
trainFileList = sorted(glob(dataPath + "train/*.jpg"))
testFileList = sorted(glob(dataPath + "test/*.jpg"))
inferenceImage = dataPath + "8.png"
os.system("rm -rf ./*.plan ./*.cache")
np.set_printoptions(precision=3, linewidth=200, suppress=True)
tf1.compat.v1.disable_eager_execution()
cudart.cudaDeviceSynchronize()
# Create network and train model in TensorFlow1 --------------------------------
def getBatch(fileList, nSize=1, isTrain=True):
if isTrain:
indexList = np.random.choice(len(fileList), nSize)
else:
nSize = len(fileList)
indexList = np.arange(nSize)
xData = np.zeros([nSize, nHeight, nWidth, 1], dtype=np.float32)
yData = np.zeros([nSize, 10], dtype=np.float32)
for i, index in enumerate(indexList):
imageName = fileList[index]
data = cv2.imread(imageName, cv2.IMREAD_GRAYSCALE)
label = np.zeros(10, dtype=np.float32)
label[int(imageName[-7])] = 1
xData[i] = data.reshape(nHeight, nWidth, 1).astype(np.float32) / 255
yData[i] = label
return xData, yData
x = tf1.compat.v1.placeholder(tf1.float32, [None, nHeight, nWidth, 1], name="x")
y_ = tf1.compat.v1.placeholder(tf1.float32, [None, 10], name="y_")
w1 = tf1.compat.v1.get_variable("w1", shape=[5, 5, 1, 32], initializer=tf1.truncated_normal_initializer(mean=0, stddev=0.1))
b1 = tf1.compat.v1.get_variable("b1", shape=[32], initializer=tf1.constant_initializer(value=0.1))
h1 = tf1.nn.conv2d(x, w1, strides=[1, 1, 1, 1], padding="SAME")
#h2 = h1 + b1 # Conversion will fail if using bias, see detailed information in result-withBias.txt
h2 = h1
h3 = tf1.nn.relu(h2)
h4 = tf1.nn.max_pool2d(h3, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="SAME")
w2 = tf1.compat.v1.get_variable("w2", shape=[5, 5, 32, 64], initializer=tf1.truncated_normal_initializer(mean=0, stddev=0.1))
b2 = tf1.compat.v1.get_variable("b2", shape=[64], initializer=tf1.constant_initializer(value=0.1))
h5 = tf1.nn.conv2d(h4, w2, strides=[1, 1, 1, 1], padding="SAME")
#h6 = h5 + b2
h6 = h5
h7 = tf1.nn.relu(h6)
h8 = tf1.nn.max_pool2d(h7, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="SAME")
w3 = tf1.compat.v1.get_variable("w3", shape=[7 * 7 * 64, 1024], initializer=tf1.truncated_normal_initializer(mean=0, stddev=0.1))
b3 = tf1.compat.v1.get_variable("b3", shape=[1024], initializer=tf1.constant_initializer(value=0.1))
h9 = tf1.reshape(h8, [-1, 7 * 7 * 64])
h10 = tf1.matmul(h9, w3)
#h11 = h10 + b3
h11 = h10
h12 = tf1.nn.relu(h11)
w4 = tf1.compat.v1.get_variable("w4", shape=[1024, 10], initializer=tf1.truncated_normal_initializer(mean=0, stddev=0.1))
b4 = tf1.compat.v1.get_variable("b4", shape=[10], initializer=tf1.constant_initializer(value=0.1))
h13 = tf1.matmul(h12, w4)
#h14 = h13 + b4
h14 = h13
y = tf1.nn.softmax(h14, name="y")
z = tf1.argmax(y, 1, name="z")
crossEntropy = -tf1.reduce_sum(y_ * tf1.math.log(y))
trainStep = tf1.compat.v1.train.AdamOptimizer(1e-4).minimize(crossEntropy)
accuracy = tf1.reduce_mean(tf1.cast(tf1.equal(z, tf1.argmax(y_, 1)), tf1.float32), name="accuracy")
tfConfig = tf1.compat.v1.ConfigProto()
tfConfig.gpu_options.per_process_gpu_memory_fraction = 0.5
sess = tf1.compat.v1.Session(config=tfConfig)
sess.run(tf1.compat.v1.global_variables_initializer())
for i in range(100):
xSample, ySample = getBatch(trainFileList, nTrainBatchSize, True)
trainStep.run(session=sess, feed_dict={x: xSample, y_: ySample})
if i % 10 == 0:
accuracyValue = accuracy.eval(session=sess, feed_dict={x: xSample, y_: ySample})
print("%s, batch %3d, acc = %f" % (dt.now(), 10 + i, accuracyValue))
if True: # here we use the .ckpt file to convert the model (.pb is also OK, but the mmdnn command should be edited accordingly)
saver = tf1.compat.v1.train.Saver(max_to_keep=1)
saver.save(sess, ckptFile)
else:
constantGraph = tf1.graph_util.convert_variables_to_constants(sess, sess.graph_def, ["y"])
with tf1.gfile.FastGFile(pbFile, mode="wb") as f:
f.write(constantGraph.SerializeToString())
sess.close()
print("Succeeded building model in TensorFlow1!")
| trt-samples-for-hackathon-cn-master | cookbook/04-BuildEngineByONNXParser/TensorFlow1-Caffe-TensorRT/buildModelInTensorFlow1.py |
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
from datetime import datetime as dt
from glob import glob
import cv2
import numpy as np
import tensorrt as trt
from cuda import cudart
np.random.seed(31193)
nTrainBatchSize = 128
nHeight = 28
nWidth = 28
pbFile = "./model-NCHW.pb"
caffePrototxtFile = "./model.prototxt"
caffeModelFile = "./model.caffemodel"
trtFile = "./model.plan"
dataPath = os.path.dirname(os.path.realpath(__file__)) + "/../../00-MNISTData/"
testFileList = sorted(glob(dataPath + "test/*.jpg"))
inferenceImage = dataPath + "8.png"
np.set_printoptions(precision=3, linewidth=200, suppress=True)
cudart.cudaDeviceSynchronize()
# Parse Caffe file, rebuild network and do inference in TensorRT ----------------
logger = trt.Logger(trt.Logger.VERBOSE)
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
config = builder.create_builder_config()
parser = trt.CaffeParser()
if not os.path.exists(caffePrototxtFile) or not os.path.exists(caffeModelFile):
print("Failed finding caffe file!")
exit()
print("Succeeded finding caffe file!")
with open(caffePrototxtFile, "rb") as f0, open(caffeModelFile, "rb") as f1:
net = parser.parse_buffer(f0.read(), f1.read(), network, trt.float32)
if net is None:
print("Failed parsing caffe file!")
exit()
print("Succeeded parsing cafe file!")
outputTensor = net.find("y") # find output layer of the network
squeezeLayer = network.add_reduce(outputTensor, trt.ReduceOperation.SUM, (1 << 2) + (1 << 3), False) # remove the dimension we added manually
argmaxLayer = network.add_topk(squeezeLayer.get_output(0), trt.TopKOperation.MAX, 1, 1 << 1) # add ArgMax layer which Caffe does not support
network.mark_output(argmaxLayer.get_output(1))
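# Both axes arguments above are bitmasks: (1 << 2) + (1 << 3) makes add_reduce sum over axes 2 and 3 (the trailing spatial dimensions of the Caffe output), with keep_dims=False squeezing them away, and 1 << 1 makes add_topk compute ArgMax along axis 1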
engineString = builder.build_serialized_network(network, config)
if engineString is None:
print("Failed building engine!")
exit()
print("Succeeded building engine!")
with open(trtFile, "wb") as f:
f.write(engineString)
engine = trt.Runtime(logger).deserialize_cuda_engine(engineString)
nIO = engine.num_io_tensors
lTensorName = [engine.get_tensor_name(i) for i in range(nIO)]
nInput = [engine.get_tensor_mode(lTensorName[i]) for i in range(nIO)].count(trt.TensorIOMode.INPUT)
context = engine.create_execution_context()
context.set_input_shape(lTensorName[0], [1, 1, nHeight, nWidth])
for i in range(nIO):
print("[%2d]%s->" % (i, "Input " if i < nInput else "Output"), engine.get_tensor_dtype(lTensorName[i]), engine.get_tensor_shape(lTensorName[i]), context.get_tensor_shape(lTensorName[i]), lTensorName[i])
bufferH = []
data = cv2.imread(inferenceImage, cv2.IMREAD_GRAYSCALE).astype(np.float32).reshape(1, 1, nHeight, nWidth)
bufferH.append(np.ascontiguousarray(data))
for i in range(nInput, nIO):
bufferH.append(np.empty(context.get_tensor_shape(lTensorName[i]), dtype=trt.nptype(engine.get_tensor_dtype(lTensorName[i]))))
bufferD = []
for i in range(nIO):
bufferD.append(cudart.cudaMalloc(bufferH[i].nbytes)[1])
for i in range(nInput):
cudart.cudaMemcpy(bufferD[i], bufferH[i].ctypes.data, bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice)
for i in range(nIO):
context.set_tensor_address(lTensorName[i], int(bufferD[i]))
context.execute_async_v3(0)
for i in range(nInput, nIO):
cudart.cudaMemcpy(bufferH[i].ctypes.data, bufferD[i], bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost)
for i in range(nIO):
print(lTensorName[i])
print(bufferH[i])
for b in bufferD:
cudart.cudaFree(b)
print("Succeeded running model in TensorRT!")
| trt-samples-for-hackathon-cn-master | cookbook/04-BuildEngineByONNXParser/TensorFlow1-Caffe-TensorRT/main.py |
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import numpy as np
import tensorrt as trt
from cuda import cudart
# yapf:disable
trtFile = "./model.plan"
data = np.arange(3 * 4 * 5, dtype=np.float32).reshape(3, 4, 5) # input data for inference
def run():
logger = trt.Logger(trt.Logger.ERROR) # create Logger, available levels: VERBOSE, INFO, WARNING, ERROR, INTERNAL_ERROR
if os.path.isfile(trtFile): # load the serialized network and skip the building process if a .plan file exists
with open(trtFile, "rb") as f:
engineString = f.read()
if engineString is None:
print("Failed getting serialized engine!")
return
print("Succeeded getting serialized engine!")
else: # build a serialized network from scratch
builder = trt.Builder(logger) # create Builder
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)) # create Network
profile = builder.create_optimization_profile() # create Optimization Profile if using Dynamic Shape mode
config = builder.create_builder_config() # create BuilderConfig to set metadata of the network
config.set_memory_pool_limit(trt.MemoryPoolType.WORKSPACE, 1 << 30) # set workspace for the optimization process (default value is the total GPU memory)
inputTensor = network.add_input("inputT0", trt.float32, [-1, -1, -1]) # set the input tensor of the network
profile.set_shape(inputTensor.name, [1, 1, 1], [3, 4, 5], [6, 8, 10]) # set the dynamic shape range of the input tensor
config.add_optimization_profile(profile) # add the Optimization Profile into the BuilderConfig
identityLayer = network.add_identity(inputTensor) # our simple network contains only one Identity layer, whose output is exactly equal to its input
network.mark_output(identityLayer.get_output(0)) # mark the output tensor of the network
engineString = builder.build_serialized_network(network, config) # create a serialized network
if engineString is None:
print("Failed building serialized engine!")
return
print("Succeeded building serialized engine!")
with open(trtFile, "wb") as f: # write the serialized netwok into a .plan file
f.write(engineString)
print("Succeeded saving .plan file!")
engine = trt.Runtime(logger).deserialize_cuda_engine(engineString) # create inference Engine using Runtime
if engine is None:
print("Failed building engine!")
return
print("Succeeded building engine!")
nIO = engine.num_io_tensors # since TensorRT 8.5, the concept of Binding is replaced by I/O Tensor, and all APIs with "binding" in their name are deprecated
lTensorName = [engine.get_tensor_name(i) for i in range(nIO)] # get the list of I/O tensor names of the engine, because all I/O tensors in the Engine and Execution Context are indexed by name, not by binding number as in TensorRT 8.4 and before
nInput = [engine.get_tensor_mode(lTensorName[i]) for i in range(nIO)].count(trt.TensorIOMode.INPUT) # get the count of input tensors
#nOutput = [engine.get_tensor_mode(lTensorName[i]) for i in range(nIO)].count(trt.TensorIOMode.OUTPUT) # get the count of output tensors
context = engine.create_execution_context() # create Execution Context from the engine (analogous to a process on the GPU)
context.set_input_shape(lTensorName[0], [3, 4, 5]) # set actual size of input tensor if using Dynamic Shape mode
for i in range(nIO):
print("[%2d]%s->" % (i, "Input " if i < nInput else "Output"), engine.get_tensor_dtype(lTensorName[i]), engine.get_tensor_shape(lTensorName[i]), context.get_tensor_shape(lTensorName[i]), lTensorName[i])
bufferH = [] # prepare the memory buffer on host and device
bufferH.append(np.ascontiguousarray(data))
for i in range(nInput, nIO):
bufferH.append(np.empty(context.get_tensor_shape(lTensorName[i]), dtype=trt.nptype(engine.get_tensor_dtype(lTensorName[i]))))
bufferD = []
for i in range(nIO):
bufferD.append(cudart.cudaMalloc(bufferH[i].nbytes)[1])
for i in range(nInput): # copy input data from host buffer into device buffer
cudart.cudaMemcpy(bufferD[i], bufferH[i].ctypes.data, bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice)
for i in range(nIO):
context.set_tensor_address(lTensorName[i], int(bufferD[i])) # set address of all input and output data in device buffer
context.execute_async_v3(0) # do inference computation
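# The argument of execute_async_v3 is a CUDA stream handle; 0 selects the default stream, and a stream created with cudart.cudaStreamCreate() could be passed instead for truly asynchronous execution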
for i in range(nInput, nIO): # copy output data from device buffer into host buffer
cudart.cudaMemcpy(bufferH[i].ctypes.data, bufferD[i], bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost)
for i in range(nIO):
print(lTensorName[i])
print(bufferH[i])
for b in bufferD: # free the GPU memory buffer after all work
cudart.cudaFree(b)
if __name__ == "__main__":
os.system("rm -rf ./*.plan")
run() # create a serialized network of TensorRT and do inference
run() # load a serialized network of TensorRT and do inference
| trt-samples-for-hackathon-cn-master | cookbook/01-SimpleDemo/TensorRT8.5/main.py |
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import numpy as np
import tensorrt as trt
from cuda import cudart # using CUDA Runtime API
# yapf:disable
trtFile = "./model.plan"
data = np.arange(3 * 4 * 5, dtype=np.float32).reshape(3, 4, 5) # input data for inference
def run():
logger = trt.Logger(trt.Logger.ERROR) # Logger, available levels: VERBOSE, INFO, WARNING, ERROR, INTERNAL_ERROR
if os.path.isfile(trtFile): # read .plan file if exists
with open(trtFile, "rb") as f:
engineString = f.read()
if engineString is None:
print("Failed getting serialized engine!")
return
print("Succeeded getting serialized engine!")
engine = trt.Runtime(logger).deserialize_cuda_engine(engineString) # deserialize the binary object into a TensorRT engine
if engine is None:
print("Failed building engine!")
return
print("Succeeded building engine!")
else: # no .plan file, build engine from scratch
builder = trt.Builder(logger) # create Builder
builder.max_batch_size = 3
builder.max_workspace_size = 1 << 30 # set workspace for TensorRT
network = builder.create_network()
inputTensor = network.add_input("inputT0", trt.float32, [4, 5]) # set input tensor of the network
identityLayer = network.add_identity(inputTensor) # add a layer of identity operator
network.mark_output(identityLayer.get_output(0)) # set output tensor of the network
engine = builder.build_cuda_engine(network) # create the TensorRT engine from the network
if engine is None:
print("Failed building engine!")
return
print("Succeeded building engine!")
with open(trtFile, "wb") as f: # serialize the TensorRT engine as binaray file
f.write(engine.serialize())
print("Succeeded saving .plan file!")
context = engine.create_execution_context() # create Execution Context (similar to a process on the GPU)
nInput = np.sum([engine.binding_is_input(i) for i in range(engine.num_bindings)]) # get information of the TensorRT engine
nOutput = engine.num_bindings - nInput
for i in range(nInput):
print("Bind[%2d]:i[%2d]->" % (i, i), engine.get_binding_dtype(i), engine.get_binding_shape(i), context.get_binding_shape(i), engine.get_binding_name(i))
for i in range(nInput, nInput + nOutput):
print("Bind[%2d]:o[%2d]->" % (i, i - nInput), engine.get_binding_dtype(i), engine.get_binding_shape(i), context.get_binding_shape(i), engine.get_binding_name(i))
bufferH = []
bufferH.append(np.ascontiguousarray(data))
for i in range(nInput, nInput + nOutput):
bufferH.append(np.empty((3, ) + tuple(context.get_binding_shape(i)), dtype=trt.nptype(engine.get_binding_dtype(i))))
bufferD = []
for i in range(nInput + nOutput):
bufferD.append(cudart.cudaMalloc(bufferH[i].nbytes)[1])
for i in range(nInput): # copy the data from host to device
cudart.cudaMemcpy(bufferD[i], bufferH[i].ctypes.data, bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice)
context.execute(3, bufferD) # do inference computation
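# execute() takes the batch size (3 here) explicitly because this network was built in implicit batch mode; the engine only stores the per-sample shape [4, 5]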
for i in range(nInput, nInput + nOutput): # copy the result from device to host
cudart.cudaMemcpy(bufferH[i].ctypes.data, bufferD[i], bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost)
for i in range(nInput + nOutput):
print(engine.get_binding_name(i))
print(bufferH[i].reshape((3, ) + tuple(context.get_binding_shape(i))))
for b in bufferD: # free the buffer on device
cudart.cudaFree(b)
if __name__ == "__main__":
os.system("rm -rf ./*.plan")
run() # create TensorRT engine and do inference
run() # load TensorRT engine from file and do inference
| trt-samples-for-hackathon-cn-master | cookbook/01-SimpleDemo/TensorRT7/main.py |
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import numpy as np
# cuda-python only supports Python >= 3.7; older versions of Python can only use pycuda
import pycuda.autoinit
import pycuda.driver as cuda
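# importing pycuda.autoinit initializes CUDA and creates/activates a context automatically, so no explicit context management is needed in the single-GPU case (see the commented make_context calls in __main__)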
import tensorrt as trt
# yapf:disable
trtFile = "./model.plan"
def run():
logger = trt.Logger(trt.Logger.ERROR) # Logger, available levels: VERBOSE, INFO, WARNING, ERROR, INTERNAL_ERROR
if os.path.isfile(trtFile): # read .plan file if exists
with open(trtFile, "rb") as f:
engineString = f.read()
if engineString is None:
print("Failed getting serialized engine!")
return
print("Succeeded getting serialized engine!")
engine = trt.Runtime(logger).deserialize_cuda_engine(engineString) # deserialize the binary object into a TensorRT engine
if engine is None:
print("Failed building engine!")
return
print("Succeeded building engine!")
else: # no .plan file, build engine from scratch
builder = trt.Builder(logger) # create Builder
builder.max_batch_size = 3
builder.max_workspace_size = 1 << 30 # set workspace for TensorRT
network = builder.create_network()
inputTensor = network.add_input("inputT0", trt.float32, [4, 5]) # set input tensor of the network
identityLayer = network.add_identity(inputTensor) # add a layer of identity operator
network.mark_output(identityLayer.get_output(0)) # set output tensor of the network
engine = builder.build_cuda_engine(network) # create the TensorRT engine from the network
if engine is None:
print("Failed building engine!")
return
print("Succeeded building engine!")
with open(trtFile, "wb") as f: # serialize the TensorRT engine as binaray file
f.write(engine.serialize())
print("Succeeded saving .plan file!")
context = engine.create_execution_context() # create Execution Context (similar to a process on the GPU)
nInput = np.sum([engine.binding_is_input(i) for i in range(engine.num_bindings)]) # get information of the TensorRT engine
nOutput = engine.num_bindings - nInput
for i in range(nInput):
print("Bind[%2d]:i[%2d]->" % (i, i), engine.get_binding_dtype(i), engine.get_binding_shape(i), context.get_binding_shape(i), engine.get_binding_name(i))
for i in range(nInput, nInput + nOutput):
print("Bind[%2d]:o[%2d]->" % (i, i - nInput), engine.get_binding_dtype(i), engine.get_binding_shape(i), context.get_binding_shape(i), engine.get_binding_name(i))
data = np.arange(3 * 4 * 5, dtype=np.float32).reshape(3, 4, 5) # prepare data and host / device buffer for the inference
bufferH = []
bufferH.append(np.ascontiguousarray(data))
for i in range(nInput, nInput + nOutput):
bufferH.append(np.empty((3, ) + tuple(context.get_binding_shape(i)), dtype=trt.nptype(engine.get_binding_dtype(i))))
bufferD = []
for i in range(nInput + nOutput):
bufferD.append(cuda.mem_alloc(bufferH[i].nbytes))
for i in range(nInput): # copy the data from host to device
cuda.memcpy_htod(bufferD[i], bufferH[i])
context.execute(3, bufferD) # do inference computation
for i in range(nInput, nInput + nOutput): # copy the result from device to host
cuda.memcpy_dtoh(bufferH[i], bufferD[i])
for i in range(nInput + nOutput):
print(engine.get_binding_name(i))
print(bufferH[i].reshape((3, ) + tuple(context.get_binding_shape(i))))
for b in bufferD: # free the buffer on device
b.free()
if __name__ == "__main__":
os.system("rm -rf ./*.plan")
#print( "GPU = %s"%(cuda.Device(0).name()) )
#cuda.Device(conf.iGPU).make_context()
run() # create TensorRT engine and do inference
run() # load TensorRT engine from file and do inference
#cuda.Context.pop()
| trt-samples-for-hackathon-cn-master | cookbook/01-SimpleDemo/TensorRT6/main.py |
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import numpy as np
import tensorrt as trt
from cuda import cudart # using CUDA Runtime API
# yapf:disable
trtFile = "./model.plan"
data = np.arange(3 * 4 * 5, dtype=np.float32).reshape(3, 4, 5) # input data for inference
def run():
logger = trt.Logger(trt.Logger.ERROR) # Logger, available levels: VERBOSE, INFO, WARNING, ERROR, INTERNAL_ERROR
if os.path.isfile(trtFile): # read .plan file if exists
with open(trtFile, "rb") as f:
engineString = f.read()
if engineString is None:
print("Failed getting serialized engine!")
return
print("Succeeded getting serialized engine!")
else: # no .plan file, build engine from scratch
builder = trt.Builder(logger) # create Builder
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
profile = builder.create_optimization_profile()
config = builder.create_builder_config()
config.max_workspace_size = 1 << 30 # set workspace for TensorRT
inputTensor = network.add_input("inputT0", trt.float32, [-1, -1, -1]) # set input tensor of the network
profile.set_shape(inputTensor.name, [1, 1, 1], [3, 4, 5], [6, 8, 10]) # set dynamic shape range of the input tensor
config.add_optimization_profile(profile)
identityLayer = network.add_identity(inputTensor) # add a layer of identity operator
network.mark_output(identityLayer.get_output(0)) # set output tensor of the network
engineString = builder.build_serialized_network(network, config) # create a serialized network from the network definition
if engineString is None:
print("Failed building serialized engine!")
return
print("Succeeded building serialized engine!")
with open(trtFile, "wb") as f: # save the serialized network as binaray file
f.write(engineString)
print("Succeeded saving .plan file!")
engine = trt.Runtime(logger).deserialize_cuda_engine(engineString) # create TensorRT engine using Runtime
if engine is None:
print("Failed building engine!")
return
print("Succeeded building engine!")
context = engine.create_execution_context() # create Execution Context (similar to a process on the GPU)
context.set_binding_shape(0, [3, 4, 5]) # bind actual shape of the input tensor in Dynamic Shape mode
nInput = np.sum([engine.binding_is_input(i) for i in range(engine.num_bindings)]) # get information of the TensorRT engine
nOutput = engine.num_bindings - nInput
for i in range(nInput):
print("Bind[%2d]:i[%2d]->" % (i, i), engine.get_binding_dtype(i), engine.get_binding_shape(i), context.get_binding_shape(i), engine.get_binding_name(i))
for i in range(nInput, nInput + nOutput):
print("Bind[%2d]:o[%2d]->" % (i, i - nInput), engine.get_binding_dtype(i), engine.get_binding_shape(i), context.get_binding_shape(i), engine.get_binding_name(i))
bufferH = []
bufferH.append(np.ascontiguousarray(data))
for i in range(nInput, nInput + nOutput):
bufferH.append(np.empty(context.get_binding_shape(i), dtype=trt.nptype(engine.get_binding_dtype(i))))
bufferD = []
for i in range(nInput + nOutput):
bufferD.append(cudart.cudaMalloc(bufferH[i].nbytes)[1])
for i in range(nInput): # copy the data from host to device
cudart.cudaMemcpy(bufferD[i], bufferH[i].ctypes.data, bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice)
context.execute_v2(bufferD) # do inference computation
for i in range(nInput, nInput + nOutput): # copy the result from device to host
cudart.cudaMemcpy(bufferH[i].ctypes.data, bufferD[i], bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost)
for i in range(nInput + nOutput):
print(engine.get_binding_name(i))
print(bufferH[i])
for b in bufferD: # free the buffer on device
cudart.cudaFree(b)
if __name__ == "__main__":
os.system("rm -rf ./*.plan")
run() # create TensorRT engine and do inference
run() # load TensorRT engine from file and do inference
| trt-samples-for-hackathon-cn-master | cookbook/01-SimpleDemo/TensorRT8.0/main-cudart.py |
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import numpy as np
import tensorrt as trt
from cuda import cuda # using CUDA Driver API
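# unlike the Runtime API, the Driver API must be initialized explicitly (cuInit / cuDeviceGet in __main__ below) before any cuMemAlloc call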
# yapf:disable
trtFile = "./model.plan"
data = np.arange(3 * 4 * 5, dtype=np.float32).reshape(3, 4, 5) # input data for inference
def run():
logger = trt.Logger(trt.Logger.ERROR) # Logger, available levels: VERBOSE, INFO, WARNING, ERROR, INTERNAL_ERROR
if os.path.isfile(trtFile): # read .plan file if exists
with open(trtFile, "rb") as f:
engineString = f.read()
if engineString is None:
print("Failed getting serialized engine!")
return
print("Succeeded getting serialized engine!")
else: # no .plan file, build engine from scratch
builder = trt.Builder(logger) # create Builder
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
profile = builder.create_optimization_profile()
config = builder.create_builder_config()
config.max_workspace_size = 1 << 30 # set workspace for TensorRT
inputTensor = network.add_input("inputT0", trt.float32, [-1, -1, -1]) # set input tensor of the network
profile.set_shape(inputTensor.name, [1, 1, 1], [3, 4, 5], [6, 8, 10]) # set dynamic shape range of the input tensor
config.add_optimization_profile(profile)
identityLayer = network.add_identity(inputTensor) # add a layer of identity operator
network.mark_output(identityLayer.get_output(0)) # set output tensor of the network
engineString = builder.build_serialized_network(network, config) # create a serialized network from the network definition
if engineString is None:
print("Failed building serialized engine!")
return
print("Succeeded building serialized engine!")
with open(trtFile, "wb") as f: # save the serialized network as binaray file
f.write(engineString)
print("Succeeded saving .plan file!")
engine = trt.Runtime(logger).deserialize_cuda_engine(engineString) # create TensorRT engine using Runtime
if engine is None:
print("Failed building engine!")
return
print("Succeeded building engine!")
context = engine.create_execution_context() # create Execution Context (similar to a process on the GPU)
context.set_input_shape(engine.get_tensor_name(0), [3, 4, 5]) # bind actual shape of the input tensor in Dynamic Shape mode
nInput = np.sum([engine.binding_is_input(i) for i in range(engine.num_bindings)]) # get information of the TensorRT engine
nOutput = engine.num_bindings - nInput
for i in range(nInput):
print("Bind[%2d]:i[%2d]->" % (i, i), engine.get_binding_dtype(i), engine.get_binding_shape(i), context.get_binding_shape(i), engine.get_binding_name(i))
for i in range(nInput, nInput + nOutput):
print("Bind[%2d]:o[%2d]->" % (i, i - nInput), engine.get_binding_dtype(i), engine.get_binding_shape(i), context.get_binding_shape(i), engine.get_binding_name(i))
bufferH = []
bufferH.append(np.ascontiguousarray(data))
for i in range(nInput, nInput + nOutput):
bufferH.append(np.empty(context.get_binding_shape(i), dtype=trt.nptype(engine.get_binding_dtype(i))))
bufferD = []
for i in range(nInput + nOutput):
bufferD.append(cuda.cuMemAlloc(bufferH[i].nbytes)[1])
for i in range(nInput): # copy the data from host to device
cuda.cuMemcpyHtoD(bufferD[i], bufferH[i].ctypes.data, bufferH[i].nbytes)
context.execute_v2(bufferD) # do inference computation
for i in range(nInput, nInput + nOutput): # copy the result from device to host
cuda.cuMemcpyDtoH(bufferH[i].ctypes.data, bufferD[i], bufferH[i].nbytes)
for i in range(nInput + nOutput):
print(engine.get_binding_name(i))
print(bufferH[i])
for b in bufferD: # free the buffer on device
cuda.cuMemFree(b)
if __name__ == "__main__":
os.system("rm -rf ./*.plan")
cuda.cuInit(0) # initialize the device manually
cuda.cuDeviceGet(0)
run() # create TensorRT engine and do inference
run() # load TensorRT engine from file and do inference
| trt-samples-for-hackathon-cn-master | cookbook/01-SimpleDemo/TensorRT8.0/main-cuda.py |
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
from time import time_ns
import numpy as np
import tensorrt as trt
from cuda import cudart
trtFile = "./model.plan"
nB, nM, nN = 4, 32, 1024
nLoop = 10
nWarmUp = 10
nTest = 100
np.random.seed(31193)
weightUp = (np.random.rand(nM, nN).astype(np.float32) * 2 - 1)
weightDown = (np.random.rand(nN, nM).astype(np.float32) * 2 - 1)
weightUp = weightUp.reshape(-1)
for i in range(0, weightUp.shape[0], 2):
weightUp[i] = 0
weightUp = weightUp.reshape(nM, nN)
#print(weightUp)
weightDown = weightDown.reshape(-1)
for i in range(0, weightDown.shape[0], 2):
weightDown[i] = 0
weightDown = weightDown.reshape(nN, nM)
#print(weightDown)
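# Zeroing every other element leaves exactly 2 zeros in each group of 4 consecutive values along a row, which satisfies the 2:4 structured sparsity pattern required by sparse Tensor Cores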
def printArrayInformation(x, info="", n=5):
if 0 in x.shape:
print('%s:%s' % (info, str(x.shape)))
print()
return
print( '%s:%s,SumAbs=%.5e,Var=%.5f,Max=%.5f,Min=%.5f,SAD=%.5f'%( \
info,str(x.shape),np.sum(abs(x)),np.var(x),np.max(x),np.min(x),np.sum(np.abs(np.diff(x.reshape(-1)))) ))
print('\t', x.reshape(-1)[:n], x.reshape(-1)[-n:])
def run(bUseSparsity):
logger = trt.Logger(trt.Logger.VERBOSE)
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
profile = builder.create_optimization_profile()
config = builder.create_builder_config()
config.set_flag(trt.BuilderFlag.FP16) # sparsity is supported in FP16 / INT8 mode
if bUseSparsity:
config.set_flag(trt.BuilderFlag.SPARSE_WEIGHTS)
inputTensor = network.add_input("inputT0", trt.float32, [-1, nM])
profile.set_shape(inputTensor.name, [1, nM], [nB, nM], [nB, nM])
config.add_optimization_profile(profile)
constantLayer0 = network.add_constant(weightUp.shape, trt.Weights(np.ascontiguousarray(weightUp)))
constantLayer1 = network.add_constant(weightDown.shape, trt.Weights(np.ascontiguousarray(weightDown)))
tensor = inputTensor
for i in range(nLoop):
layer0 = network.add_matrix_multiply(tensor, trt.MatrixOperation.NONE, constantLayer0.get_output(0), trt.MatrixOperation.NONE)
layer1 = network.add_activation(layer0.get_output(0), trt.ActivationType.RELU)
tensor = layer1.get_output(0)
layer2 = network.add_matrix_multiply(tensor, trt.MatrixOperation.NONE, constantLayer1.get_output(0), trt.MatrixOperation.NONE)
layer3 = network.add_activation(layer2.get_output(0), trt.ActivationType.RELU)
tensor = layer3.get_output(0)
network.mark_output(tensor)
engineString = builder.build_serialized_network(network, config)
engine = trt.Runtime(logger).deserialize_cuda_engine(engineString)
nIO = engine.num_io_tensors
lTensorName = [engine.get_tensor_name(i) for i in range(nIO)]
nInput = [engine.get_tensor_mode(lTensorName[i]) for i in range(nIO)].count(trt.TensorIOMode.INPUT)
context = engine.create_execution_context()
context.set_input_shape(lTensorName[0], [nB, nM])
for i in range(nIO):
print("[%2d]%s->" % (i, "Input " if i < nInput else "Output"), engine.get_tensor_dtype(lTensorName[i]), engine.get_tensor_shape(lTensorName[i]), context.get_tensor_shape(lTensorName[i]), lTensorName[i])
data = np.arange(np.prod([nB, nM]), dtype=np.float32).reshape(nB, nM) * 2 - 1
bufferH = []
bufferH.append(np.ascontiguousarray(data))
for i in range(nInput, nIO):
bufferH.append(np.empty(context.get_tensor_shape(lTensorName[i]), dtype=trt.nptype(engine.get_tensor_dtype(lTensorName[i]))))
bufferD = []
for i in range(nIO):
bufferD.append(cudart.cudaMalloc(bufferH[i].nbytes)[1])
for i in range(nInput):
cudart.cudaMemcpy(bufferD[i], bufferH[i].ctypes.data, bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice)
for i in range(nIO):
context.set_tensor_address(lTensorName[i], int(bufferD[i]))
context.execute_async_v3(0)
for i in range(nInput, nIO):
cudart.cudaMemcpy(bufferH[i].ctypes.data, bufferD[i], bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost)
for i in range(nIO):
print(lTensorName[i])
print(bufferH[i])
for i in range(nWarmUp):
context.execute_async_v3(0)
cudart.cudaDeviceSynchronize()
t0 = time_ns()
for i in range(nTest):
context.execute_async_v3(0)
cudart.cudaDeviceSynchronize()
t1 = time_ns()
print("Time per inference: %f ms" % ((t1 - t0) / 1000000 / nTest))
printArrayInformation(bufferH[-1])
for b in bufferD:
cudart.cudaFree(b)
if __name__ == "__main__":
os.system("rm -rf ./*.plan")
run(False)
run(True)
| trt-samples-for-hackathon-cn-master | cookbook/08-Advance/Sparsity/main.py |
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
from glob import glob
import cv2
import numpy as np
import tensorrt as trt
from cuda import cudart
class MyCalibrator(trt.IInt8EntropyCalibrator2):
def __init__(self, calibrationDataPath, nCalibration, inputShape, cacheFile):
trt.IInt8EntropyCalibrator2.__init__(self)
self.imageList = glob(calibrationDataPath + "*.jpg")[:100]
self.nCalibration = nCalibration
self.shape = inputShape # (N,C,H,W)
self.bufferSize = trt.volume(inputShape) * trt.float32.itemsize
self.cacheFile = cacheFile
_, self.dIn = cudart.cudaMalloc(self.bufferSize)
self.oneBatch = self.batchGenerator()
print(int(self.dIn))
def __del__(self):
cudart.cudaFree(self.dIn)
def batchGenerator(self):
for i in range(self.nCalibration):
print("> calibration %d" % i)
subImageList = np.random.choice(self.imageList, self.shape[0], replace=False)
yield np.ascontiguousarray(self.loadImageList(subImageList))
def loadImageList(self, imageList):
res = np.empty(self.shape, dtype=np.float32)
for i in range(self.shape[0]):
res[i, 0] = cv2.imread(imageList[i], cv2.IMREAD_GRAYSCALE).astype(np.float32)
return res
def get_batch_size(self): # necessary API
return self.shape[0]
def get_batch(self, nameList=None, inputNodeName=None): # necessary API
try:
data = next(self.oneBatch)
cudart.cudaMemcpy(self.dIn, data.ctypes.data, self.bufferSize, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice)
return [int(self.dIn)]
except StopIteration:
return None
def read_calibration_cache(self): # necessary API
if os.path.exists(self.cacheFile):
print("Succeed finding cahce file: %s" % (self.cacheFile))
with open(self.cacheFile, "rb") as f:
cache = f.read()
return cache
else:
print("Failed finding int8 cache!")
return
def write_calibration_cache(self, cache): # necessary API
with open(self.cacheFile, "wb") as f:
f.write(cache)
print("Succeed saving int8 cache!")
return
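# Calibration flow: TensorRT first queries read_calibration_cache and skips calibration if a cache is returned; otherwise it calls get_batch repeatedly until it returns None, then stores the result via write_calibration_cache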
if __name__ == "__main__":
cudart.cudaDeviceSynchronize()
m = MyCalibrator("../../00-MNISTData/test/", 5, (1, 1, 28, 28), "./int8.cache")
m.get_batch("FakeNameList")
m.get_batch("FakeNameList")
m.get_batch("FakeNameList")
m.get_batch("FakeNameList")
m.get_batch("FakeNameList")
| trt-samples-for-hackathon-cn-master | cookbook/08-Advance/Sparsity/pyTorch-ONNX-TensorRT-ASP/calibrator.py |
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
from datetime import datetime as dt
from glob import glob
import calibrator
import cv2
import numpy as np
import tensorrt as trt
import torch as t
import torch.nn.functional as F
from apex.contrib.sparsity import ASP
from cuda import cudart
from torch.autograd import Variable
np.random.seed(31193)
t.manual_seed(97)
t.cuda.manual_seed_all(97)
t.backends.cudnn.deterministic = True
nTrainBatchSize = 128
nHeight = 28
nWidth = 28
onnxFile = "./model.onnx"
trtFile = "./model.plan"
dataPath = os.path.dirname(os.path.realpath(__file__)) + "/../../00-MNISTData/"
trainFileList = sorted(glob(dataPath + "train/*.jpg"))
testFileList = sorted(glob(dataPath + "test/*.jpg"))
inferenceImage = dataPath + "8.png"
# for FP16 mode
bUseFP16Mode = False
# for INT8 model
bUseINT8Mode = False
nCalibration = 1
cacheFile = "./int8.cache"
calibrationDataPath = dataPath + "test/"
os.system("rm -rf ./*.onnx ./*.plan ./*.cache")
np.set_printoptions(precision=3, linewidth=200, suppress=True)
cudart.cudaDeviceSynchronize()
# Create network and train model in pyTorch ------------------------------------
class Net(t.nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = t.nn.Conv2d(1, 32, (5, 5), padding=(2, 2), bias=True)
self.conv2 = t.nn.Conv2d(32, 64, (5, 5), padding=(2, 2), bias=True)
self.fc1 = t.nn.Linear(64 * 7 * 7, 1024, bias=True)
self.fc2 = t.nn.Linear(1024, 10, bias=True)
def forward(self, x):
x = F.max_pool2d(F.relu(self.conv1(x)), (2, 2))
x = F.max_pool2d(F.relu(self.conv2(x)), (2, 2))
x = x.reshape(-1, 64 * 7 * 7)
x = F.relu(self.fc1(x))
y = self.fc2(x)
z = F.softmax(y, dim=1)
z = t.argmax(z, dim=1)
return y, z
class MyData(t.utils.data.Dataset):
def __init__(self, isTrain=True):
if isTrain:
self.data = trainFileList
else:
self.data = testFileList
def __getitem__(self, index):
imageName = self.data[index]
data = cv2.imread(imageName, cv2.IMREAD_GRAYSCALE)
label = np.zeros(10, dtype=np.float32)
index = int(imageName[-7])
label[index] = 1
return t.from_numpy(data.reshape(1, nHeight, nWidth).astype(np.float32)), t.from_numpy(label)
def __len__(self):
return len(self.data)
model = Net().cuda()
ceLoss = t.nn.CrossEntropyLoss()
opt = t.optim.Adam(model.parameters(), lr=0.001)
trainDataset = MyData(True)
testDataset = MyData(False)
trainLoader = t.utils.data.DataLoader(dataset=trainDataset, batch_size=nTrainBatchSize, shuffle=True)
testLoader = t.utils.data.DataLoader(dataset=testDataset, batch_size=nTrainBatchSize, shuffle=True)
ASP.prune_trained_model(model, opt)
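# ASP.prune_trained_model applies a 2:4 structured sparsity mask to the model's weights and hooks the optimizer so the mask is preserved during updates; the training loop below then trains under that mask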
for epoch in range(10):
for xTrain, yTrain in trainLoader:
xTrain = Variable(xTrain).cuda()
yTrain = Variable(yTrain).cuda()
opt.zero_grad()
y_, z = model(xTrain)
loss = ceLoss(y_, yTrain)
loss.backward()
opt.step()
with t.no_grad():
acc = 0
n = 0
for xTest, yTest in testLoader:
xTest = Variable(xTest).cuda()
yTest = Variable(yTest).cuda()
y_, z = model(xTest)
acc += t.sum(z == t.matmul(yTest, t.Tensor([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]).to("cuda:0"))).cpu().numpy()
n += xTest.shape[0]
print("%s, epoch %2d, loss = %f, test acc = %f" % (dt.now(), epoch + 1, loss.data, acc / n))
print("Succeeded building model in pyTorch!")
# Export model as ONNX file ----------------------------------------------------
t.onnx.export(model, t.randn(1, 1, nHeight, nWidth, device="cuda"), onnxFile, input_names=["x"], output_names=["y", "z"], do_constant_folding=True, verbose=True, keep_initializers_as_inputs=True, opset_version=12, dynamic_axes={"x": {0: "nBatchSize"}, "z": {0: "nBatchSize"}})
print("Succeeded converting model into ONNX!")
# Parse network, rebuild network and do inference in TensorRT ------------------
logger = trt.Logger(trt.Logger.VERBOSE)
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
profile = builder.create_optimization_profile()
config = builder.create_builder_config()
if bUseFP16Mode:
config.set_flag(trt.BuilderFlag.FP16)
if bUseINT8Mode:
config.set_flag(trt.BuilderFlag.INT8)
config.int8_calibrator = calibrator.MyCalibrator(calibrationDataPath, nCalibration, (1, 1, nHeight, nWidth), cacheFile)
parser = trt.OnnxParser(network, logger)
if not os.path.exists(onnxFile):
print("Failed finding ONNX file!")
exit()
print("Succeeded finding ONNX file!")
with open(onnxFile, "rb") as model:
if not parser.parse(model.read()):
print("Failed parsing .onnx file!")
for error in range(parser.num_errors):
print(parser.get_error(error))
exit()
print("Succeeded parsing .onnx file!")
inputTensor = network.get_input(0)
profile.set_shape(inputTensor.name, [1, 1, nHeight, nWidth], [4, 1, nHeight, nWidth], [8, 1, nHeight, nWidth])
config.add_optimization_profile(profile)
network.unmark_output(network.get_output(0)) # remove output tensor "y"
engineString = builder.build_serialized_network(network, config)
if engineString is None:
print("Failed building engine!")
exit()
print("Succeeded building engine!")
with open(trtFile, "wb") as f:
f.write(engineString)
engine = trt.Runtime(logger).deserialize_cuda_engine(engineString)
nIO = engine.num_io_tensors
lTensorName = [engine.get_tensor_name(i) for i in range(nIO)]
nInput = [engine.get_tensor_mode(lTensorName[i]) for i in range(nIO)].count(trt.TensorIOMode.INPUT)
context = engine.create_execution_context()
context.set_input_shape(lTensorName[0], [1, 1, nHeight, nWidth])
for i in range(nIO):
print("[%2d]%s->" % (i, "Input " if i < nInput else "Output"), engine.get_tensor_dtype(lTensorName[i]), engine.get_tensor_shape(lTensorName[i]), context.get_tensor_shape(lTensorName[i]), lTensorName[i])
bufferH = []
data = cv2.imread(inferenceImage, cv2.IMREAD_GRAYSCALE).astype(np.float32).reshape(1, 1, nHeight, nWidth)
bufferH.append(np.ascontiguousarray(data))
for i in range(nInput, nIO):
bufferH.append(np.empty(context.get_tensor_shape(lTensorName[i]), dtype=trt.nptype(engine.get_tensor_dtype(lTensorName[i]))))
bufferD = []
for i in range(nIO):
bufferD.append(cudart.cudaMalloc(bufferH[i].nbytes)[1])
for i in range(nInput):
cudart.cudaMemcpy(bufferD[i], bufferH[i].ctypes.data, bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice)
for i in range(nIO):
context.set_tensor_address(lTensorName[i], int(bufferD[i]))
context.execute_async_v3(0)
for i in range(nInput, nIO):
cudart.cudaMemcpy(bufferH[i].ctypes.data, bufferD[i], bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost)
for i in range(nIO):
print(lTensorName[i])
print(bufferH[i])
for b in bufferD:
cudart.cudaFree(b)
print("Succeeded running model in TensorRT!")
| trt-samples-for-hackathon-cn-master | cookbook/08-Advance/Sparsity/pyTorch-ONNX-TensorRT-ASP/main.py |
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
import onnx
import onnx_graphsurgeon as gs
import tensorrt as trt
onnxFile = "./model.onnx"
# Create an ONNX graph with ONNX GraphSurgeon
# The first dimensions of the two input tensors are both named "B", but the computations on the two tensors are independent of each other. In principle, unequal first dimensions of the two input tensors should not affect the computation
tensor0 = gs.Variable("tensor0", np.float32, ["B", 1, 1])
tensor1 = gs.Variable("tensor1", np.float32, ["B", 1])
tensor2 = gs.Variable("tensor2", np.float32, None)
tensor3 = gs.Variable("tensor3", np.float32, None)
node0 = gs.Node("Identity", "myIdentity0", inputs=[tensor0], outputs=[tensor2])
node1 = gs.Node("Identity", "myIdentity1", inputs=[tensor1], outputs=[tensor3])
graph = gs.Graph(nodes=[node0, node1], inputs=[tensor0, tensor1], outputs=[tensor2, tensor3])
onnx.save(gs.export_onnx(graph.cleanup().toposort()), onnxFile)
logger = trt.Logger(trt.Logger.ERROR)
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
profile = builder.create_optimization_profile()
config = builder.create_builder_config()
parser = trt.OnnxParser(network, logger)
with open(onnxFile, "rb") as model:
parser.parse(model.read())
inputT0 = network.get_input(0)
profile.set_shape(inputT0.name, [1, 1, 1], [4, 1, 1], [8, 1, 1])
inputT1 = network.get_input(1)
profile.set_shape(inputT1.name, [1, 1], [4, 1], [8, 1])
config.add_optimization_profile(profile)
engineString = builder.build_serialized_network(network, config)
engine = trt.Runtime(logger).deserialize_cuda_engine(engineString)
context = engine.create_execution_context()
context.set_binding_shape(0, [4, 1, 1])  # two input tensors with the same leading dimension
context.set_binding_shape(1, [4, 1])
print("Binding all? %s" % (["No", "Yes"][int(context.all_binding_shapes_specified)]))
nInput = np.sum([engine.binding_is_input(i) for i in range(engine.num_bindings)])
nOutput = engine.num_bindings - nInput
for i in range(engine.num_bindings):
print("Bind[%2d]:i[%d]->" % (i, i) if engine.binding_is_input(i) else "Bind[%2d]:o[%d]->" % (i, i - nInput), engine.get_binding_dtype(i), engine.get_binding_shape(i), context.get_binding_shape(i), engine.get_binding_name(i))
context.set_binding_shape(0, [4, 1, 1])  # two input tensors with different leading dimensions
context.set_binding_shape(1, [5, 1])
print("Binding all? %s" % (["No", "Yes"][int(context.all_binding_shapes_specified)]))
nInput = np.sum([engine.binding_is_input(i) for i in range(engine.num_bindings)])
nOutput = engine.num_bindings - nInput
for i in range(engine.num_bindings):
print("Bind[%2d]:i[%d]->" % (i, i) if engine.binding_is_input(i) else "Bind[%2d]:o[%d]->" % (i, i - nInput), engine.get_binding_dtype(i), engine.get_binding_shape(i), context.get_binding_shape(i), engine.get_binding_name(i))
| trt-samples-for-hackathon-cn-master | cookbook/08-Advance/LabeledDimension/main.py |
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import tensorrt as trt
import torch
trtFile = "./model.plan"
def run():
logger = trt.Logger(trt.Logger.ERROR)
if os.path.isfile(trtFile):
with open(trtFile, "rb") as f:
engineString = f.read()
if engineString == None:
print("Failed getting serialized engine!")
return
print("Succeeded getting serialized engine!")
else:
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
profile = builder.create_optimization_profile()
config = builder.create_builder_config()
inputTensor = network.add_input("inputT0", trt.float32, [-1, -1, -1])
profile.set_shape(inputTensor.name, [1, 1, 1], [3, 4, 5], [6, 8, 10])
config.add_optimization_profile(profile)
identityLayer = network.add_identity(inputTensor)
network.mark_output(identityLayer.get_output(0))
engineString = builder.build_serialized_network(network, config)
if engineString == None:
print("Failed building serialized engine!")
return
print("Succeeded building serialized engine!")
with open(trtFile, "wb") as f:
f.write(engineString)
print("Succeeded saving .plan file!")
engine = trt.Runtime(logger).deserialize_cuda_engine(engineString)
if engine == None:
print("Failed building engine!")
return
print("Succeeded building engine!")
nIO = engine.num_io_tensors
lTensorName = [engine.get_tensor_name(i) for i in range(nIO)]
nInput = [engine.get_tensor_mode(lTensorName[i]) for i in range(nIO)].count(trt.TensorIOMode.INPUT)
stream = torch.cuda.Stream()
context = engine.create_execution_context()
context.set_input_shape(lTensorName[0], [3, 4, 5])
for i in range(nIO):
print("[%2d]%s->" % (i, "Input " if i < nInput else "Output"), engine.get_tensor_dtype(lTensorName[i]), engine.get_tensor_shape(lTensorName[i]), context.get_tensor_shape(lTensorName[i]), lTensorName[i])
buffer = [] # do not distinguish buffer between host and device
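    # every tensor handed to set_tensor_address() via data_ptr() must reside on the GPU, since
    # TensorRT interprets the address as a device pointer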
    buffer.append(torch.arange(3 * 4 * 5, dtype=torch.float32).reshape(3, 4, 5).cuda())
for i in range(nInput, nIO):
if engine.get_tensor_dtype(lTensorName[i]) == trt.float32:
dataType = torch.float32
else:
print("Not implement")
dataType = torch.float32
buffer.append(torch.empty(tuple(context.get_tensor_shape(lTensorName[i])), dtype=dataType).cuda())
for i in range(nIO):
context.set_tensor_address(lTensorName[i], int(buffer[i].data_ptr()))
context.execute_async_v3(stream.cuda_stream)
torch.cuda.synchronize()
for i in range(nIO):
print(lTensorName[i])
print(buffer[i])
if __name__ == "__main__":
os.system("rm -rf ./*.plan")
run()
run()
| trt-samples-for-hackathon-cn-master | cookbook/08-Advance/TorchOperation/main.py |
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import numpy as np
import tensorrt as trt
from cuda import cudart
trtFile = "./model.plan"
def run():
logger = trt.Logger(trt.Logger.ERROR)
if os.path.isfile(trtFile):
with open(trtFile, "rb") as f:
engineString = f.read()
if engineString == None:
print("Failed getting serialized engine!")
return
print("Succeeded getting serialized engine!")
else:
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
profile = builder.create_optimization_profile()
config = builder.create_builder_config()
inputTensor = network.add_input("inputT0", trt.float32, [-1, -1, -1])
profile.set_shape(inputTensor.name, [1, 1, 1], [3, 4, 5], [6, 8, 10])
config.add_optimization_profile(profile)
identityLayer = network.add_identity(inputTensor)
network.mark_output(identityLayer.get_output(0))
engineString = builder.build_serialized_network(network, config)
if engineString == None:
print("Failed building serialized engine!")
return
print("Succeeded building serialized engine!")
with open(trtFile, "wb") as f:
f.write(engineString)
print("Succeeded saving .plan file!")
engine = trt.Runtime(logger).deserialize_cuda_engine(engineString)
if engine == None:
print("Failed building engine!")
return
print("Succeeded building engine!")
nIO = engine.num_io_tensors
lTensorName = [engine.get_tensor_name(i) for i in range(nIO)]
nInput = [engine.get_tensor_mode(lTensorName[i]) for i in range(nIO)].count(trt.TensorIOMode.INPUT)
#nOutput = [engine.get_tensor_mode(lTensorName[i]) for i in range(nIO)].count(trt.TensorIOMode.OUTPUT)
context = engine.create_execution_context()
# get a CUDA stream for CUDA graph and inference
_, stream = cudart.cudaStreamCreate()
context.set_input_shape(lTensorName[0], [3, 4, 5])
for i in range(nIO):
print("[%2d]%s->" % (i, "Input " if i < nInput else "Output"), engine.get_tensor_dtype(lTensorName[i]), engine.get_tensor_shape(lTensorName[i]), context.get_tensor_shape(lTensorName[i]), lTensorName[i])
data = np.arange(3 * 4 * 5, dtype=np.float32).reshape(3, 4, 5)
bufferH = []
bufferH.append(np.ascontiguousarray(data))
for i in range(nInput, nIO):
bufferH.append(np.empty(context.get_tensor_shape(lTensorName[i]), dtype=trt.nptype(engine.get_tensor_dtype(lTensorName[i]))))
bufferD = []
for i in range(nIO):
bufferD.append(cudart.cudaMalloc(bufferH[i].nbytes)[1])
# do inference before CUDA graph capture
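    # TensorRT finishes lazy initialization (e.g. internal resource allocation) on the first enqueue
    # after a shape change, and such allocations are not allowed inside a stream capture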
for i in range(nInput):
cudart.cudaMemcpyAsync(bufferD[i], bufferH[i].ctypes.data, bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice, stream)
for i in range(nIO):
context.set_tensor_address(lTensorName[i], int(bufferD[i]))
context.execute_async_v3(stream)
for i in range(nInput, nIO):
cudart.cudaMemcpyAsync(bufferH[i].ctypes.data, bufferD[i], bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost, stream)
# CUDA Graph capture
cudart.cudaStreamBeginCapture(stream, cudart.cudaStreamCaptureMode.cudaStreamCaptureModeGlobal)
for i in range(nInput):
cudart.cudaMemcpyAsync(bufferD[i], bufferH[i].ctypes.data, bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice, stream)
#for i in range(nIO): # no need to reset the address if unchanged
# context.set_tensor_address(lTensorName[i], int(bufferD[i]))
context.execute_async_v3(stream)
for i in range(nInput, nIO):
cudart.cudaMemcpyAsync(bufferH[i].ctypes.data, bufferD[i], bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost, stream)
#cudart.cudaStreamSynchronize(stream) # no need to synchronize within the CUDA graph capture
_, graph = cudart.cudaStreamEndCapture(stream)
#_, graphExe, _ = cudart.cudaGraphInstantiate(graph, b"", 0) # for CUDA < 12
_, graphExe = cudart.cudaGraphInstantiate(graph, 0) # for CUDA >= 12
# do inference with CUDA graph
    bufferH[1] *= 0  # zero the output buffer so the printed result really comes from this inference
cudart.cudaGraphLaunch(graphExe, stream)
cudart.cudaStreamSynchronize(stream)
for i in range(nIO):
print(lTensorName[i])
print(bufferH[i])
for b in bufferD:
cudart.cudaFree(b)
    # when the input shape changes, one complete inference is again needed before CUDA Graph capture
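    # a captured graph bakes in tensor shapes and buffer addresses, so the old graphExe cannot be
    # reused after the shape changes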
context.set_input_shape(lTensorName[0], [2, 3, 4])
data = np.arange(2 * 3 * 4, dtype=np.float32).reshape(2, 3, 4)
bufferH = []
bufferH.append(np.ascontiguousarray(data))
for i in range(nInput, nIO):
bufferH.append(np.empty(context.get_tensor_shape(lTensorName[i]), dtype=trt.nptype(engine.get_tensor_dtype(lTensorName[i]))))
bufferD = []
for i in range(nIO):
bufferD.append(cudart.cudaMalloc(bufferH[i].nbytes)[1])
for i in range(nInput):
cudart.cudaMemcpyAsync(bufferD[i], bufferH[i].ctypes.data, bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice, stream)
for i in range(nIO):
context.set_tensor_address(lTensorName[i], int(bufferD[i])) # set address of all input and output data in device buffer
context.execute_async_v3(stream)
for i in range(nInput, nIO):
cudart.cudaMemcpyAsync(bufferH[i].ctypes.data, bufferD[i], bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost, stream)
# CUDA Graph capture again
cudart.cudaStreamBeginCapture(stream, cudart.cudaStreamCaptureMode.cudaStreamCaptureModeGlobal)
for i in range(nInput):
cudart.cudaMemcpyAsync(bufferD[i], bufferH[i].ctypes.data, bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice, stream)
#for i in range(nIO): # no need to reset the address if unchanged
# context.set_tensor_address(lTensorName[i], int(bufferD[i]))
context.execute_async_v3(stream)
for i in range(nInput, nIO):
cudart.cudaMemcpyAsync(bufferH[i].ctypes.data, bufferD[i], bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost, stream)
#cudart.cudaStreamSynchronize(stream) # no need to synchronize within the CUDA graph capture
_, graph = cudart.cudaStreamEndCapture(stream)
#_, graphExe, _ = cudart.cudaGraphInstantiate(graph, b"", 0) # for CUDA < 12
_, graphExe = cudart.cudaGraphInstantiate(graph, 0) # for CUDA >= 12
# do inference with CUDA graph
    bufferH[1] *= 0  # zero the output buffer so the printed result really comes from this inference
cudart.cudaGraphLaunch(graphExe, stream)
cudart.cudaStreamSynchronize(stream)
for i in range(nIO):
print(lTensorName[i])
print(bufferH[i])
for b in bufferD:
cudart.cudaFree(b)
cudart.cudaStreamDestroy(stream)
if __name__ == "__main__":
os.system("rm -rf ./*.plan")
cudart.cudaDeviceSynchronize()
run()
run()
| trt-samples-for-hackathon-cn-master | cookbook/08-Advance/CudaGraph/BasicUsage.py |
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import numpy as np
import tensorrt as trt
from cuda import cudart
trtFile = "./model.plan"
nGEMM = 10
sizeGEMM = 16
nInference = 10
np.random.seed(31193)
def run():
logger = trt.Logger(trt.Logger.ERROR)
if os.path.isfile(trtFile):
with open(trtFile, "rb") as f:
engineString = f.read()
if engineString == None:
print("Failed getting serialized engine!")
return
print("Succeeded getting serialized engine!")
else:
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
profile = builder.create_optimization_profile()
config = builder.create_builder_config()
inputList = []
for i in range(nGEMM + 1):
inputT = network.add_input("inputT" + str(i), trt.float32, [-1, 4, sizeGEMM, sizeGEMM])
profile.set_shape(inputT.name, [1, 4, sizeGEMM, sizeGEMM], [4, 4, sizeGEMM, sizeGEMM], [sizeGEMM, 4, sizeGEMM, sizeGEMM])
inputList.append(inputT)
config.add_optimization_profile(profile)
tempTensor = inputList[0]
for i in range(1, nGEMM + 1):
tempLayer = network.add_matrix_multiply(tempTensor, trt.MatrixOperation.NONE, inputList[i], trt.MatrixOperation.NONE)
tempTensor = tempLayer.get_output(0)
network.mark_output(tempLayer.get_output(0))
engineString = builder.build_serialized_network(network, config)
if engineString == None:
print("Failed building serialized engine!")
return
print("Succeeded building serialized engine!")
with open(trtFile, "wb") as f:
f.write(engineString)
print("Succeeded saving .plan file!")
engine = trt.Runtime(logger).deserialize_cuda_engine(engineString)
if engine == None:
print("Failed building engine!")
return
print("Succeeded building engine!")
nIO = engine.num_io_tensors
lTensorName = [engine.get_tensor_name(i) for i in range(nIO)]
nInput = [engine.get_tensor_mode(lTensorName[i]) for i in range(nIO)].count(trt.TensorIOMode.INPUT)
#nOutput = [engine.get_tensor_mode(lTensorName[i]) for i in range(nIO)].count(trt.TensorIOMode.OUTPUT)
context = engine.create_execution_context()
_, stream = cudart.cudaStreamCreate()
for i in range(nGEMM + 1):
context.set_input_shape(lTensorName[i], [4, 4, sizeGEMM, sizeGEMM])
#for i in range(nIO):
# print("[%2d]%s->" % (i, "Input " if i < nInput else "Output"), engine.get_tensor_dtype(lTensorName[i]), engine.get_tensor_shape(lTensorName[i]), context.get_tensor_shape(lTensorName[i]), lTensorName[i])
bufferH = []
for i in range(nGEMM + 1):
bufferH.append(np.random.rand(4 * 4 * sizeGEMM * sizeGEMM).astype(np.float32).reshape(4, 4, sizeGEMM, sizeGEMM))
for i in range(nInput, nIO):
bufferH.append(np.empty(context.get_tensor_shape(lTensorName[i]), dtype=trt.nptype(engine.get_tensor_dtype(lTensorName[i]))))
bufferD = []
for i in range(nIO):
bufferD.append(cudart.cudaMalloc(bufferH[i].nbytes)[1])
for i in range(nInput):
cudart.cudaMemcpyAsync(bufferD[i], bufferH[i].ctypes.data, bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice, stream)
for i in range(nIO):
context.set_tensor_address(lTensorName[i], int(bufferD[i]))
context.execute_async_v3(stream)
for i in range(nInput, nIO):
cudart.cudaMemcpyAsync(bufferH[i].ctypes.data, bufferD[i], bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost, stream)
# test performance without CUDA graph
cudart.cudaStreamSynchronize(stream)
for n in range(nInference):
for i in range(nInput):
cudart.cudaMemcpyAsync(bufferD[i], bufferH[i].ctypes.data, bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice, stream)
for i in range(nIO):
context.set_tensor_address(lTensorName[i], int(bufferD[i]))
context.execute_async_v3(stream)
for i in range(nInput, nIO):
cudart.cudaMemcpyAsync(bufferH[i].ctypes.data, bufferD[i], bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost, stream)
cudart.cudaStreamSynchronize(stream)
cudart.cudaStreamBeginCapture(stream, cudart.cudaStreamCaptureMode.cudaStreamCaptureModeGlobal)
for i in range(nInput):
cudart.cudaMemcpyAsync(bufferD[i], bufferH[i].ctypes.data, bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice, stream)
#for i in range(nIO): # no need to reset the address if unchanged
# context.set_tensor_address(lTensorName[i], int(bufferD[i]))
context.execute_async_v3(stream)
for i in range(nInput, nIO):
cudart.cudaMemcpyAsync(bufferH[i].ctypes.data, bufferD[i], bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost, stream)
#cudart.cudaStreamSynchronize(stream)
_, graph = cudart.cudaStreamEndCapture(stream)
_, graphExe, _ = cudart.cudaGraphInstantiate(graph, b"", 0)
cudart.cudaGraphLaunch(graphExe, stream)
cudart.cudaStreamSynchronize(stream)
for n in range(nInference):
cudart.cudaGraphLaunch(graphExe, stream)
cudart.cudaStreamSynchronize(stream)
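    # a minimal host-side timing sketch of the two paths above (assuming the nInference iterations
    # are representative); a profiler such as Nsight Systems gives a more detailed comparison
    from time import time
    cudart.cudaStreamSynchronize(stream)
    t0 = time()
    for n in range(nInference):
        for i in range(nInput):
            cudart.cudaMemcpyAsync(bufferD[i], bufferH[i].ctypes.data, bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice, stream)
        context.execute_async_v3(stream)
        for i in range(nInput, nIO):
            cudart.cudaMemcpyAsync(bufferH[i].ctypes.data, bufferD[i], bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost, stream)
    cudart.cudaStreamSynchronize(stream)
    t1 = time()
    for n in range(nInference):
        cudart.cudaGraphLaunch(graphExe, stream)
    cudart.cudaStreamSynchronize(stream)
    t2 = time()
    print("Without CUDA Graph: %.3fms per inference, with CUDA Graph: %.3fms per inference" % ((t1 - t0) / nInference * 1000, (t2 - t1) / nInference * 1000))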
for b in bufferD:
cudart.cudaFree(b)
cudart.cudaStreamDestroy(stream)
if __name__ == "__main__":
os.system("rm -rf ./*.plan")
cudart.cudaDeviceSynchronize()
run()
run()
| trt-samples-for-hackathon-cn-master | cookbook/08-Advance/CudaGraph/Comparison.py |
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
from time import time
import numpy as np
import tensorrt as trt
from cuda import cudart
trtFile = "./model.plan"
np.random.seed(31193)
nWarmUp = 10
nTest = 30
# There are 3 typical inference scenarios (enable one of them)
# 1. HtoD-bound
nB, nC, nH, nW = 8, 64, 256, 256
nCOut, nKernelHeight, nKernelWidth = 1, 3, 3
# 2. Calculation-bound
"""
nB,nC,nH,nW = 8,64,128,128
nCOut,nKernelHeight,nKernelWidth = 64,9,9
"""
# 3. DtoH-bound
"""
nB,nC,nH,nW = 8,64,128,128
nCOut,nKernelHeight,nKernelWidth = 256,3,3
"""
def getEngine():
logger = trt.Logger(trt.Logger.ERROR)
if os.path.isfile(trtFile):
with open(trtFile, "rb") as f:
engineString = f.read()
if engineString == None:
print("Failed getting serialized engine!")
return
print("Succeeded getting serialized engine!")
else:
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
profile = builder.create_optimization_profile()
config = builder.create_builder_config()
inputTensor = network.add_input("inputT0", trt.float32, [-1, nC, nH, nW])
profile.set_shape(inputTensor.name, [1, nC, nH, nW], [nB, nC, nH, nW], [nB * 2, nC, nH, nW])
config.add_optimization_profile(profile)
w = np.ascontiguousarray(np.random.rand(nCOut, nC, nKernelHeight, nKernelWidth).astype(np.float32) * 2 - 1)
b = np.ascontiguousarray(np.random.rand(nCOut).astype(np.float32) * 2 - 1)
_0 = network.add_convolution_nd(inputTensor, nCOut, [nKernelHeight, nKernelWidth], trt.Weights(w), trt.Weights(b))
_0.padding_nd = (nKernelHeight // 2, nKernelWidth // 2)
_1 = network.add_activation(_0.get_output(0), trt.ActivationType.RELU)
network.mark_output(_1.get_output(0))
engineString = builder.build_serialized_network(network, config)
if engineString == None:
print("Failed building serialized engine!")
return
print("Succeeded building serialized engine!")
with open(trtFile, "wb") as f:
f.write(engineString)
print("Succeeded saving .plan file!")
return trt.Runtime(logger).deserialize_cuda_engine(engineString)
def run1(engine):
context = engine.create_execution_context()
context.set_binding_shape(0, [nB, nC, nH, nW])
_, stream = cudart.cudaStreamCreate()
data = np.random.rand(nB * nC * nH * nW).astype(np.float32).reshape(nB, nC, nH, nW)
inputH0 = np.ascontiguousarray(data.reshape(-1))
outputH0 = np.empty(context.get_binding_shape(1), dtype=trt.nptype(engine.get_binding_dtype(1)))
_, inputD0 = cudart.cudaMallocAsync(inputH0.nbytes, stream)
_, outputD0 = cudart.cudaMallocAsync(outputH0.nbytes, stream)
# do a complete inference
cudart.cudaMemcpyAsync(inputD0, inputH0.ctypes.data, inputH0.nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice, stream)
context.execute_async_v2([int(inputD0), int(outputD0)], stream)
cudart.cudaMemcpyAsync(outputH0.ctypes.data, outputD0, outputH0.nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost, stream)
cudart.cudaStreamSynchronize(stream)
# Count time of memory copy from host to device
for i in range(nWarmUp):
cudart.cudaMemcpyAsync(inputD0, inputH0.ctypes.data, inputH0.nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice, stream)
trtTimeStart = time()
for i in range(nTest):
cudart.cudaMemcpyAsync(inputD0, inputH0.ctypes.data, inputH0.nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice, stream)
cudart.cudaStreamSynchronize(stream)
trtTimeEnd = time()
print("%6.3fms - 1 stream, DataCopyHtoD" % ((trtTimeEnd - trtTimeStart) / nTest * 1000))
# Count time of inference
for i in range(nWarmUp):
context.execute_async_v2([int(inputD0), int(outputD0)], stream)
trtTimeStart = time()
for i in range(nTest):
context.execute_async_v2([int(inputD0), int(outputD0)], stream)
cudart.cudaStreamSynchronize(stream)
trtTimeEnd = time()
print("%6.3fms - 1 stream, Inference" % ((trtTimeEnd - trtTimeStart) / nTest * 1000))
# Count time of memory copy from device to host
for i in range(nWarmUp):
cudart.cudaMemcpyAsync(outputH0.ctypes.data, outputD0, outputH0.nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost, stream)
trtTimeStart = time()
for i in range(nTest):
cudart.cudaMemcpyAsync(outputH0.ctypes.data, outputD0, outputH0.nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost, stream)
cudart.cudaStreamSynchronize(stream)
trtTimeEnd = time()
print("%6.3fms - 1 stream, DataCopyDtoH" % ((trtTimeEnd - trtTimeStart) / nTest * 1000))
# Count time of end to end
for i in range(nWarmUp):
context.execute_async_v2([int(inputD0), int(outputD0)], stream)
trtTimeStart = time()
for i in range(nTest):
cudart.cudaMemcpyAsync(inputD0, inputH0.ctypes.data, inputH0.nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice, stream)
context.execute_async_v2([int(inputD0), int(outputD0)], stream)
cudart.cudaMemcpyAsync(outputH0.ctypes.data, outputD0, outputH0.nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost, stream)
cudart.cudaStreamSynchronize(stream)
trtTimeEnd = time()
print("%6.3fms - 1 stream, DataCopy + Inference" % ((trtTimeEnd - trtTimeStart) / nTest * 1000))
cudart.cudaStreamDestroy(stream)
cudart.cudaFree(inputD0)
cudart.cudaFree(outputD0)
def run2(engine):
context = engine.create_execution_context()
context.set_binding_shape(0, [nB, nC, nH, nW])
_, stream0 = cudart.cudaStreamCreate()
_, stream1 = cudart.cudaStreamCreate()
_, event0 = cudart.cudaEventCreate()
_, event1 = cudart.cudaEventCreate()
data = np.random.rand(nB * nC * nH * nW).astype(np.float32).reshape(nB, nC, nH, nW)
inputSize = trt.volume(context.get_binding_shape(0)) * np.array([0], dtype=trt.nptype(engine.get_binding_dtype(0))).nbytes
outputSize = trt.volume(context.get_binding_shape(1)) * np.array([0], dtype=trt.nptype(engine.get_binding_dtype(1))).nbytes
_, inputH0 = cudart.cudaHostAlloc(inputSize, cudart.cudaHostAllocWriteCombined)
_, inputH1 = cudart.cudaHostAlloc(inputSize, cudart.cudaHostAllocWriteCombined)
_, outputH0 = cudart.cudaHostAlloc(outputSize, cudart.cudaHostAllocWriteCombined)
_, outputH1 = cudart.cudaHostAlloc(outputSize, cudart.cudaHostAllocWriteCombined)
_, inputD0 = cudart.cudaMallocAsync(inputSize, stream0)
_, inputD1 = cudart.cudaMallocAsync(inputSize, stream1)
_, outputD0 = cudart.cudaMallocAsync(outputSize, stream0)
_, outputD1 = cudart.cudaMallocAsync(outputSize, stream1)
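    # ping-pong scheme: two sets of pinned host and device buffers alternate between the two streams
    # so that the HtoD copy, inference and DtoH copy of successive iterations can overlap; the events
    # serialize the execute calls, since one IExecutionContext must not run two inferences at once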
# Count time of end to end
for i in range(nWarmUp):
context.execute_async_v2([int(inputD0), int(outputD0)], stream0)
trtTimeStart = time()
cudart.cudaEventRecord(event1, stream1)
for i in range(nTest):
inputH, outputH = [inputH1, outputH1] if i & 1 else [inputH0, outputH0]
inputD, outputD = [inputD1, outputD1] if i & 1 else [inputD0, outputD0]
eventBefore, eventAfter = [event0, event1] if i & 1 else [event1, event0]
stream = stream1 if i & 1 else stream0
cudart.cudaMemcpyAsync(inputD, inputH, inputSize, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice, stream)
cudart.cudaStreamWaitEvent(stream, eventBefore, cudart.cudaEventWaitDefault)
context.execute_async_v2([int(inputD), int(outputD)], stream)
cudart.cudaEventRecord(eventAfter, stream)
cudart.cudaMemcpyAsync(outputH, outputD, outputSize, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost, stream)
"""# split the loop into odd and even iterations
for i in range(nTest//2):
cudart.cudaMemcpyAsync(inputD0, inputH0, inputSize, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice, stream0)
cudart.cudaStreamWaitEvent(stream0,event1,cudart.cudaEventWaitDefault)
context.execute_async_v2([int(inputD0), int(outputD0)], stream0)
cudart.cudaEventRecord(event0,stream0)
cudart.cudaMemcpyAsync(outputH0, outputD0, outputSize, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost, stream0)
cudart.cudaMemcpyAsync(inputD1, inputH1, inputSize, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice, stream1)
cudart.cudaStreamWaitEvent(stream1,event0,cudart.cudaEventWaitDefault)
context.execute_async_v2([int(inputD1), int(outputD1)], stream1)
cudart.cudaEventRecord(event1,stream1)
cudart.cudaMemcpyAsync(outputH1, outputD1, outputSize, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost, stream1)
"""
cudart.cudaEventSynchronize(event1)
trtTimeEnd = time()
print("%6.3fms - 2 stream, DataCopy + Inference" % ((trtTimeEnd - trtTimeStart) / nTest * 1000))
if __name__ == "__main__":
os.system("rm -rf ./*.plan")
cudart.cudaDeviceSynchronize()
engine = getEngine() # build TensorRT engine
run1(engine) # do inference with single stream
run2(engine) # do inference with double stream
| trt-samples-for-hackathon-cn-master | cookbook/08-Advance/MultiStream/main.py |
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# For TensorRT < 8.5 with deprecated binding API
import numpy as np
import tensorrt as trt
from cuda import cudart
shape = [2, 3, 4, 5]
nProfile = 2 # count of OptimizationProfile
np.random.seed(31193)
np.set_printoptions(precision=3, linewidth=200, suppress=True)
cudart.cudaDeviceSynchronize()
logger = trt.Logger(trt.Logger.ERROR)
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
profileList = [builder.create_optimization_profile() for _ in range(nProfile)]
config = builder.create_builder_config()
inputT0 = network.add_input("inputT0", trt.float32, [-1, -1, -1, -1])
inputT1 = network.add_input("inputT1", trt.float32, [-1, -1, -1, -1])
for profile in profileList:
    profile.set_shape(inputT0.name, shape, shape, [k * nProfile for k in shape])  # "* nProfile" is just for this example, not required in real use cases
profile.set_shape(inputT1.name, shape, shape, [k * nProfile for k in shape])
config.add_optimization_profile(profile)
layer = network.add_elementwise(inputT0, inputT1, trt.ElementWiseOperation.SUM)
network.mark_output(layer.get_output(0))
engineString = builder.build_serialized_network(network, config)
engine = trt.Runtime(logger).deserialize_cuda_engine(engineString)
nIO = engine.num_bindings
nInput = np.sum([engine.binding_is_input(i) for i in range(engine.num_bindings)])
nOutput = nIO - nInput
nIO, nInput, nOutput = nIO // nProfile, nInput // nProfile, nOutput // nProfile
streamList = [cudart.cudaStreamCreate()[1] for _ in range(nProfile)]
context = engine.create_execution_context()
bufferH = []  # a list of buffers for all contexts (all OptimizationProfiles)
for index in range(nProfile):
stream = streamList[index]
context.set_optimization_profile_async(index, stream)
    bindingPad = nIO * index  # skip the bindings occupied by the previous OptimizationProfiles
    inputShape = [k * (index + 1) for k in shape]  # we use a different shape for each profile in this example; not required in real use cases
context.set_binding_shape(bindingPad + 0, inputShape)
context.set_binding_shape(bindingPad + 1, inputShape)
print("Context%d binding all? %s" % (index, "Yes" if context.all_binding_shapes_specified else "No"))
for i in range(nIO):
print(i, "Input " if engine.binding_is_input(i) else "Output", engine.get_binding_shape(i), context.get_binding_shape(i))
for i in range(nInput):
bufferH.append(np.arange(np.prod(inputShape)).astype(np.float32).reshape(inputShape))
for i in range(nOutput):
bufferH.append(np.empty(context.get_binding_shape(bindingPad + nInput + i), dtype=trt.nptype(engine.get_binding_dtype(bindingPad + nInput + i))))
bufferD = []
for i in range(len(bufferH)):
bufferD.append(cudart.cudaMalloc(bufferH[i].nbytes)[1])
for index in range(nProfile):
print("Use Profile %d" % index)
stream = streamList[index]
context.set_optimization_profile_async(index, stream)
bindingPad = nIO * index
inputShape = [k * (index + 1) for k in shape]
context.set_binding_shape(bindingPad + 0, inputShape)
context.set_binding_shape(bindingPad + 1, inputShape)
for i in range(nIO * nProfile):
print("[%2d]%s->" % (i, "Input " if i < nInput else "Output"), engine.get_binding_dtype(i), engine.get_binding_shape(i), context.get_binding_shape(i), engine.get_binding_name(i))
for i in range(nInput):
cudart.cudaMemcpyAsync(bufferD[bindingPad + i], bufferH[bindingPad + i].ctypes.data, bufferH[bindingPad + i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice, stream)
    bufferList = [int(0) for b in bufferD[:bindingPad]] + [int(b) for b in bufferD[bindingPad:(bindingPad + nInput + nOutput)]] + [int(0) for b in bufferD[(bindingPad + nInput + nOutput):]]
    # divide the buffers into three parts, filling int(0) for all slots except the buffers this OptimizationProfile uses
context.execute_async_v2(bufferList, stream)
for i in range(nOutput):
cudart.cudaMemcpyAsync(bufferH[bindingPad + nInput + i].ctypes.data, bufferD[bindingPad + nInput + i], bufferH[bindingPad + nInput + i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost, stream)
for stream in streamList:
    cudart.cudaStreamSynchronize(stream)
for index in range(nProfile):
bindingPad = nIO * index
print("check result of OptimizationProfile %d: %s" % (index, np.all(bufferH[bindingPad + 2] == bufferH[bindingPad + 0] + bufferH[bindingPad + 1])))
for stream in streamList:
cudart.cudaStreamDestroy(stream)
for b in bufferD:
cudart.cudaFree(b)
| trt-samples-for-hackathon-cn-master | cookbook/08-Advance/MultiOptimizationProfile/main-BindingAPI.py |
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
import tensorrt as trt
from cuda import cudart
shape = [2, 3, 4, 5]
nProfile = 2 # count of OptimizationProfile
np.random.seed(31193)
np.set_printoptions(precision=3, linewidth=200, suppress=True)
cudart.cudaDeviceSynchronize()
logger = trt.Logger(trt.Logger.ERROR)
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
profileList = [builder.create_optimization_profile() for _ in range(nProfile)]
config = builder.create_builder_config()
inputT0 = network.add_input("inputT0", trt.float32, [-1, -1, -1, -1])
inputT1 = network.add_input("inputT1", trt.float32, [-1, -1, -1, -1])
for profile in profileList:
    profile.set_shape(inputT0.name, shape, shape, [k * nProfile for k in shape])  # "* nProfile" is just for this example, not required in real use cases
profile.set_shape(inputT1.name, shape, shape, [k * nProfile for k in shape])
config.add_optimization_profile(profile)
layer = network.add_elementwise(inputT0, inputT1, trt.ElementWiseOperation.SUM)
network.mark_output(layer.get_output(0))
engineString = builder.build_serialized_network(network, config)
engine = trt.Runtime(logger).deserialize_cuda_engine(engineString)
nIO = engine.num_io_tensors
lTensorName = [engine.get_tensor_name(i) for i in range(nIO)]
nInput = [engine.get_tensor_mode(lTensorName[i]) for i in range(nIO)].count(trt.TensorIOMode.INPUT)
context = engine.create_execution_context()
for index in range(nProfile):
print("Use Profile %d" % index)
context.set_optimization_profile_async(index, 0) # use default stream
    inputShape = [k * (index + 1) for k in shape]  # we use a different shape for each profile in this example; not required in real use cases
context.set_input_shape(lTensorName[0], inputShape)
context.set_input_shape(lTensorName[1], inputShape)
    bufferH = []  # use separate buffers for each OptimizationProfile
for i in range(nInput):
bufferH.append(np.arange(np.prod(inputShape)).astype(np.float32).reshape(inputShape))
for i in range(nInput, nIO):
bufferH.append(np.empty(context.get_tensor_shape(lTensorName[i]), dtype=trt.nptype(engine.get_tensor_dtype(lTensorName[i]))))
bufferD = []
for i in range(len(bufferH)):
bufferD.append(cudart.cudaMalloc(bufferH[i].nbytes)[1])
for i in range(nIO):
print("[%2d]%s->" % (i, "Input " if i < nInput else "Output"), engine.get_tensor_dtype(lTensorName[i]), engine.get_tensor_shape(lTensorName[i]), context.get_tensor_shape(lTensorName[i]), lTensorName[i])
for i in range(nInput):
cudart.cudaMemcpyAsync(bufferD[i], bufferH[i].ctypes.data, bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice, 0)
for i in range(nIO):
context.set_tensor_address(lTensorName[i], int(bufferD[i]))
context.execute_async_v3(0)
for i in range(nInput, nIO):
cudart.cudaMemcpyAsync(bufferH[i].ctypes.data, bufferD[i], bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost, 0)
print("check result of OptimizationProfile %d: %s" % (index, np.all(bufferH[2] == bufferH[0] + bufferH[1])))
for b in bufferD:
cudart.cudaFree(b)
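# unlike the deprecated binding API (see main-BindingAPI.py), the name-based tensor API exposes a
# single set of I/O tensors no matter how many OptimizationProfiles the engine holds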
| trt-samples-for-hackathon-cn-master | cookbook/08-Advance/MultiOptimizationProfile/main.py |
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
import tensorrt as trt
logger = trt.Logger(trt.Logger.VERBOSE)
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
profile = builder.create_optimization_profile()
config = builder.create_builder_config()
config.hardware_compatibility_level = trt.HardwareCompatibilityLevel.AMPERE_PLUS # turn on the switch of hardware compatibility, no other work needed
inputTensor = network.add_input("inputT0", trt.float32, [-1, 1024, 64]) # I write a "complex" network to see the performance differences between GPUs
profile.set_shape(inputTensor.name, [1, 1024, 64], [4, 1024,64], [16, 1024, 64])
config.add_optimization_profile(profile)
_0 = inputTensor
for i in range(64, 256):
w = np.random.rand(1, i, i + 1).astype(np.float32)
b = np.random.rand(1, 1, i + 1).astype(np.float32)
_1 = network.add_constant(w.shape, trt.Weights(np.ascontiguousarray(w)))
_2 = network.add_matrix_multiply(_0, trt.MatrixOperation.NONE, _1.get_output(0), trt.MatrixOperation.NONE)
_3 = network.add_constant(b.shape, trt.Weights(np.ascontiguousarray(b)))
_4 = network.add_elementwise(_2.get_output(0), _3.get_output(0), trt.ElementWiseOperation.SUM)
_5 = network.add_activation(_4.get_output(0), trt.ActivationType.RELU)
_0 = _5.get_output(0)
network.mark_output(_0)
engineString = builder.build_serialized_network(network, config)
with open("model.plan", "wb") as f:
f.write(engineString)
print("Succeeded saving .plan file!")
| trt-samples-for-hackathon-cn-master | cookbook/08-Advance/HardwareCompatibility/main.py |
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
import tensorrt as trt
from cuda import cudart
np.random.seed(31193)
m, k, n = 3, 4, 5
data0 = np.tile(np.arange(1, 1 + k), [m, 1]) * 1 / 10 ** (2 * np.arange(1, 1 + m) - 2)[:, np.newaxis]
data1 = np.tile(np.arange(k), [n, 1]).T * 10 ** np.arange(n)[np.newaxis, :]
def run(useFP16):
logger = trt.Logger(trt.Logger.ERROR)
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
config = builder.create_builder_config()
if useFP16:
config.flags = config.flags | (1 << int(trt.BuilderFlag.STRICT_TYPES)) | (1 << int(trt.BuilderFlag.FP16))
inputT0 = network.add_input("inputT0", trt.float32, (m, k))
constantLayer = network.add_constant([k, n], np.ascontiguousarray(data1.astype(np.float16 if useFP16 else np.float32)))
matrixMultiplyLayer = network.add_matrix_multiply(inputT0, trt.MatrixOperation.NONE, constantLayer.get_output(0), trt.MatrixOperation.NONE)
if useFP16:
matrixMultiplyLayer.precision = trt.float16
matrixMultiplyLayer.get_output(0).dtype = trt.float16
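        # with STRICT_TYPES set above, this per-layer precision is a hard constraint rather than a
        # hint, so the matrix multiplication must run in FP16 and the rounding difference against
        # the FP32 run becomes visible in the printed output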
network.mark_output(matrixMultiplyLayer.get_output(0))
engineString = builder.build_serialized_network(network, config)
engine = trt.Runtime(logger).deserialize_cuda_engine(engineString)
nIO = engine.num_io_tensors
lTensorName = [engine.get_tensor_name(i) for i in range(nIO)]
nInput = [engine.get_tensor_mode(lTensorName[i]) for i in range(nIO)].count(trt.TensorIOMode.INPUT)
context = engine.create_execution_context()
for i in range(nIO):
print("[%2d]%s->" % (i, "Input " if i < nInput else "Output"), engine.get_tensor_dtype(lTensorName[i]), engine.get_tensor_shape(lTensorName[i]), context.get_tensor_shape(lTensorName[i]), lTensorName[i])
bufferH = []
bufferH.append(np.ascontiguousarray(data0))
for i in range(nInput, nIO):
bufferH.append(np.empty(context.get_tensor_shape(lTensorName[i]), dtype=trt.nptype(engine.get_tensor_dtype(lTensorName[i]))))
bufferD = []
for i in range(nIO):
bufferD.append(cudart.cudaMalloc(bufferH[i].nbytes)[1])
for i in range(nInput):
cudart.cudaMemcpy(bufferD[i], bufferH[i].ctypes.data, bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice)
for i in range(nIO):
context.set_tensor_address(lTensorName[i], int(bufferD[i]))
context.execute_async_v3(0)
for i in range(nInput, nIO):
cudart.cudaMemcpy(bufferH[i].ctypes.data, bufferD[i], bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost)
for i in range(nIO):
print(lTensorName[i])
print(bufferH[i])
for b in bufferD:
cudart.cudaFree(b)
if __name__ == "__main__":
np.set_printoptions(precision=3, linewidth=200, suppress=True)
cudart.cudaDeviceSynchronize()
run(False) # using FP32
run(True) # using FP16
| trt-samples-for-hackathon-cn-master | cookbook/08-Advance/StrictType/main.py |
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from time import time_ns
import numpy as np
import tensorrt as trt
from cuda import cudart
np.random.seed(31193)
shape = [4, 1024, 64]
data = np.random.rand(*shape).reshape(shape).astype(np.float32) * 2 - 1
def run(nLevel):
testCase = "<Level=%d>" % (nLevel)
trtFile = "model-Level%d.plan" % (nLevel)
print("Test %s" % testCase)
logger = trt.Logger(trt.Logger.ERROR)
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
profile = builder.create_optimization_profile()
config = builder.create_builder_config()
config.builder_optimization_level = nLevel
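    # builder_optimization_level ranges from 0 to 5 (default 3): higher levels let the builder spend
    # more time searching kernel tactics for a potentially faster engine, lower levels build faster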
inputTensor = network.add_input("inputT0", trt.float32, [-1] + shape[1:]) # I write a "complex" network to see the performance differences
profile.set_shape(inputTensor.name, [1] + shape[1:], shape, [16] + shape[1:])
config.add_optimization_profile(profile)
_0 = inputTensor
for i in range(64, 256):
w = np.random.rand(1, i, i + 1).astype(np.float32)
b = np.random.rand(1, 1, i + 1).astype(np.float32)
_1 = network.add_constant(w.shape, trt.Weights(np.ascontiguousarray(w)))
_2 = network.add_matrix_multiply(_0, trt.MatrixOperation.NONE, _1.get_output(0), trt.MatrixOperation.NONE)
_3 = network.add_constant(b.shape, trt.Weights(np.ascontiguousarray(b)))
_4 = network.add_elementwise(_2.get_output(0), _3.get_output(0), trt.ElementWiseOperation.SUM)
_5 = network.add_activation(_4.get_output(0), trt.ActivationType.RELU)
_0 = _5.get_output(0)
network.mark_output(_0)
t0 = time_ns()
engineString = builder.build_serialized_network(network, config)
t1 = time_ns()
print("Time of building: %fms" % ((t1 - t0) / (10 ** 6)))
engine = trt.Runtime(logger).deserialize_cuda_engine(engineString)
nIO = engine.num_io_tensors
lTensorName = [engine.get_tensor_name(i) for i in range(nIO)]
nInput = [engine.get_tensor_mode(lTensorName[i]) for i in range(nIO)].count(trt.TensorIOMode.INPUT)
context = engine.create_execution_context()
context.set_input_shape(lTensorName[0], shape)
bufferH = []
bufferH.append(np.ascontiguousarray(data))
for i in range(nInput, nIO):
bufferH.append(np.empty(context.get_tensor_shape(lTensorName[i]), dtype=trt.nptype(engine.get_tensor_dtype(lTensorName[i]))))
bufferD = []
for i in range(nIO):
bufferD.append(cudart.cudaMalloc(bufferH[i].nbytes)[1])
for i in range(nInput):
cudart.cudaMemcpy(bufferD[i], bufferH[i].ctypes.data, bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice)
for i in range(nIO):
context.set_tensor_address(lTensorName[i], int(bufferD[i]))
# warming up
context.execute_async_v3(0)
cudart.cudaDeviceSynchronize()
t0 = time_ns()
for _ in range(10):
context.execute_async_v3(0)
cudart.cudaDeviceSynchronize()
t1 = time_ns()
print("Time of inference: %fms" % ((t1 - t0) / (10 ** 6)))
for i in range(nInput, nIO):
cudart.cudaMemcpy(bufferH[i].ctypes.data, bufferD[i], bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost)
for b in bufferD:
cudart.cudaFree(b)
print("Test %s finish!\n" % testCase)
if __name__ == "__main__":
run(0)
run(1)
run(2)
run(3)
run(4)
run(5)
| trt-samples-for-hackathon-cn-master | cookbook/08-Advance/BuilderOptimizationLevel/main.py |
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import numpy as np
import tensorrt as trt
from cuda import cudart
trtFile = "./model.plan"
def run():
logger = trt.Logger(trt.Logger.ERROR)
if os.path.isfile(trtFile):
with open(trtFile, "rb") as f:
engineString = f.read()
if engineString == None:
print("Failed getting serialized engine!")
return
print("Succeeded getting serialized engine!")
else:
builder = trt.Builder(logger)
        network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
config = builder.create_builder_config()
#config.set_flag(trt.BuilderFlag.SAFETY_SCOPE) # use Safety mode
config.engine_capability = trt.EngineCapability.SAFETY
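        # EngineCapability.SAFETY restricts the build to the safety-certified subset of TensorRT
        # features; it is intended for safety platforms such as NVIDIA DRIVE OS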
inputTensor = network.add_input("inputT0", trt.float32, [3, 4, 5]) # only Explicit Batch + Static Shape is supported in safety mode
        # otherwise the following error is reported: [TRT] [E] 2: [helpers.h::volume::113] Error Code 2: Internal Error (Assertion std::all_of(d.d, d.d + d.nbDims, [](int32_t x) { return x >= 0; }) failed. )
identityLayer = network.add_identity(inputTensor)
network.mark_output(identityLayer.get_output(0))
engineString = builder.build_serialized_network(network, config)
if engineString == None:
print("Failed building serialized engine!")
return
print("Succeeded building serialized engine!")
with open(trtFile, "wb") as f:
f.write(engineString)
print("Succeeded saving .plan file!")
engine = trt.Runtime(logger).deserialize_cuda_engine(engineString)
print("Engine Capability:", engine.engine_capability)
if engine == None:
print("Failed building engine!")
return
print("Succeeded building engine!")
context = engine.create_execution_context()
nInput = np.sum([engine.binding_is_input(i) for i in range(engine.num_bindings)])
nOutput = engine.num_bindings - nInput
for i in range(nInput):
print("Bind[%2d]:i[%2d]->" % (i, i), engine.get_binding_dtype(i), engine.get_binding_shape(i), context.get_binding_shape(i), engine.get_binding_name(i))
for i in range(nInput, nInput + nOutput):
print("Bind[%2d]:o[%2d]->" % (i, i - nInput), engine.get_binding_dtype(i), engine.get_binding_shape(i), context.get_binding_shape(i), engine.get_binding_name(i))
data = np.arange(3 * 4 * 5, dtype=np.float32).reshape(3, 4, 5)
bufferH = []
bufferH.append(np.ascontiguousarray(data.reshape(-1)))
for i in range(nInput, nInput + nOutput):
bufferH.append(np.empty(context.get_binding_shape(i), dtype=trt.nptype(engine.get_binding_dtype(i))))
bufferD = []
for i in range(nInput + nOutput):
bufferD.append(cudart.cudaMalloc(bufferH[i].nbytes)[1])
for i in range(nInput):
cudart.cudaMemcpy(bufferD[i], bufferH[i].ctypes.data, bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice)
    context.execute_v2(bufferD)
for i in range(nInput, nInput + nOutput):
cudart.cudaMemcpy(bufferH[i].ctypes.data, bufferD[i], bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost)
for i in range(nInput + nOutput):
print(engine.get_binding_name(i))
print(bufferH[i])
for b in bufferD:
cudart.cudaFree(b)
if __name__ == "__main__":
os.system("rm -rf ./*.plan")
run()
run()
| trt-samples-for-hackathon-cn-master | cookbook/08-Advance/Safety-TODO/main.py |
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import numpy as np
import tensorrt as trt
from cuda import cudart
trtFile = "./model.plan"
data = np.arange(3 * 4 * 5, dtype=np.float32).reshape(3, 4, 5)
logger = trt.Logger(trt.Logger.ERROR)
if os.path.isfile(trtFile):
with open(trtFile, "rb") as f:
engineString = f.read()
if engineString == None:
print("Failed getting serialized engine!")
exit()
print("Succeeded getting serialized engine!")
else:
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
profile = builder.create_optimization_profile()
config = builder.create_builder_config()
config.set_flag(trt.BuilderFlag.VERSION_COMPATIBLE) # turn on the flag of version compatibility, no other work needed during build process
inputTensor = network.add_input("inputT0", trt.float32, [-1, -1, -1])
profile.set_shape(inputTensor.name, [1, 1, 1], [3, 4, 5], [6, 8, 10])
config.add_optimization_profile(profile)
identityLayer = network.add_identity(inputTensor)
network.mark_output(identityLayer.get_output(0))
engineString = builder.build_serialized_network(network, config)
with open(trtFile, "wb") as f:
f.write(engineString)
print("Succeeded saving .plan file!")
runtime = trt.Runtime(logger)
runtime.engine_host_code_allowed = True # turn on the switch of runtime host code allowed, no other work needed during runtime process
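# a version-compatible engine embeds a lean runtime as host code inside the plan, which is why
# deserializing it requires explicitly allowing host code execution as above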
engine = runtime.deserialize_cuda_engine(engineString)
if engine == None:
print("Failed building engine!")
exit()
print("Succeeded building engine!")
nIO = engine.num_io_tensors
lTensorName = [engine.get_tensor_name(i) for i in range(nIO)]
nInput = [engine.get_tensor_mode(lTensorName[i]) for i in range(nIO)].count(trt.TensorIOMode.INPUT)
context = engine.create_execution_context()
context.set_input_shape(lTensorName[0], [3, 4, 5])
for i in range(nIO):
print("[%2d]%s->" % (i, "Input " if i < nInput else "Output"), engine.get_tensor_dtype(lTensorName[i]), engine.get_tensor_shape(lTensorName[i]), context.get_tensor_shape(lTensorName[i]), lTensorName[i])
bufferH = []
bufferH.append(np.ascontiguousarray(data))
for i in range(nInput, nIO):
bufferH.append(np.empty(context.get_tensor_shape(lTensorName[i]), dtype=trt.nptype(engine.get_tensor_dtype(lTensorName[i]))))
bufferD = []
for i in range(nIO):
bufferD.append(cudart.cudaMalloc(bufferH[i].nbytes)[1])
for i in range(nInput):
cudart.cudaMemcpy(bufferD[i], bufferH[i].ctypes.data, bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice)
for i in range(nIO):
context.set_tensor_address(lTensorName[i], int(bufferD[i]))
context.execute_async_v3(0)
for i in range(nInput, nIO):
cudart.cudaMemcpy(bufferH[i].ctypes.data, bufferD[i], bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost)
for i in range(nIO):
print(lTensorName[i])
print(bufferH[i])
for b in bufferD:
cudart.cudaFree(b)
| trt-samples-for-hackathon-cn-master | cookbook/08-Advance/VersionCompatibility/main.py |
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
import tensorrt as trt
from cuda import cudart
nHeight = 28
nWidth = 28
np.random.seed(31193)  # seed before generating data so the input is reproducible
data = np.random.rand(1, 1, nHeight, nWidth).astype(np.float32).reshape(1, 1, nHeight, nWidth) * 2 - 1
trtFile = "./model.plan"
np.set_printoptions(precision=3, linewidth=200, suppress=True)
cudart.cudaDeviceSynchronize()
logger = trt.Logger(trt.Logger.VERBOSE)
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
profile = builder.create_optimization_profile()
config = builder.create_builder_config()
config.set_preview_feature(trt.PreviewFeature.DISABLE_EXTERNAL_TACTIC_SOURCES_FOR_CORE_0805, True) # default value is True since TensorRT 8.6
#config.set_preview_feature(trt.PreviewFeature.DISABLE_EXTERNAL_TACTIC_SOURCES_FOR_CORE_0805, False)  # we can compare the VERBOSE log and performance after turning off this switch
inputTensor = network.add_input("inputT0", trt.float32, [-1, 1, nHeight, nWidth])
profile.set_shape(inputTensor.name, [1, 1, nHeight, nWidth], [4, 1, nHeight, nWidth], [8, 1, nHeight, nWidth])
config.add_optimization_profile(profile)
w = np.ascontiguousarray(np.random.rand(32, 1, 5, 5).astype(np.float32))
b = np.ascontiguousarray(np.random.rand(32, 1, 1).astype(np.float32))
_0 = network.add_convolution_nd(inputTensor, 32, [5, 5], trt.Weights(w), trt.Weights(b))
_0.padding_nd = [2, 2]
_1 = network.add_activation(_0.get_output(0), trt.ActivationType.RELU)
_2 = network.add_pooling_nd(_1.get_output(0), trt.PoolingType.MAX, [2, 2])
_2.stride_nd = [2, 2]
w = np.ascontiguousarray(np.random.rand(64, 32, 5, 5).astype(np.float32))
b = np.ascontiguousarray(np.random.rand(64, 1, 1).astype(np.float32))
_3 = network.add_convolution_nd(_2.get_output(0), 64, [5, 5], trt.Weights(w), trt.Weights(b))
_3.padding_nd = [2, 2]
_4 = network.add_activation(_3.get_output(0), trt.ActivationType.RELU)
_5 = network.add_pooling_nd(_4.get_output(0), trt.PoolingType.MAX, [2, 2])
_5.stride_nd = [2, 2]
_6 = network.add_shuffle(_5.get_output(0))
_6.reshape_dims = (-1, 64 * 7 * 7)
w = np.ascontiguousarray(np.random.rand(64 * 7 * 7, 1024).astype(np.float32))
b = np.ascontiguousarray(np.random.rand(1, 1024).astype(np.float32))
_7 = network.add_constant(w.shape, trt.Weights(w))
_8 = network.add_matrix_multiply(_6.get_output(0), trt.MatrixOperation.NONE, _7.get_output(0), trt.MatrixOperation.NONE)
_9 = network.add_constant(b.shape, trt.Weights(b))
_10 = network.add_elementwise(_8.get_output(0), _9.get_output(0), trt.ElementWiseOperation.SUM)
_11 = network.add_activation(_10.get_output(0), trt.ActivationType.RELU)
w = np.ascontiguousarray(np.random.rand(1024, 10).astype(np.float32))
b = np.ascontiguousarray(np.random.rand(1, 10).astype(np.float32))
_12 = network.add_constant(w.shape, trt.Weights(w))
_13 = network.add_matrix_multiply(_11.get_output(0), trt.MatrixOperation.NONE, _12.get_output(0), trt.MatrixOperation.NONE)
_14 = network.add_constant(b.shape, trt.Weights(b))
_15 = network.add_elementwise(_13.get_output(0), _14.get_output(0), trt.ElementWiseOperation.SUM)
_16 = network.add_softmax(_15.get_output(0))
_16.axes = 1 << 1
_17 = network.add_topk(_16.get_output(0), trt.TopKOperation.MAX, 1, 1 << 1)
network.mark_output(_17.get_output(1))
engineString = builder.build_serialized_network(network, config)
engine = trt.Runtime(logger).deserialize_cuda_engine(engineString)
nIO = engine.num_io_tensors
lTensorName = [engine.get_tensor_name(i) for i in range(nIO)]
nInput = [engine.get_tensor_mode(lTensorName[i]) for i in range(nIO)].count(trt.TensorIOMode.INPUT)
context = engine.create_execution_context()
context.set_input_shape(lTensorName[0], [1, 1, nHeight, nWidth])
for i in range(nIO):
print("[%2d]%s->" % (i, "Input " if i < nInput else "Output"), engine.get_tensor_dtype(lTensorName[i]), engine.get_tensor_shape(lTensorName[i]), context.get_tensor_shape(lTensorName[i]), lTensorName[i])
bufferH = []
bufferH.append(np.ascontiguousarray(data))
for i in range(nInput, nIO):
bufferH.append(np.empty(context.get_tensor_shape(lTensorName[i]), dtype=trt.nptype(engine.get_tensor_dtype(lTensorName[i]))))
bufferD = []
for i in range(nIO):
bufferD.append(cudart.cudaMalloc(bufferH[i].nbytes)[1])
for i in range(nInput):
cudart.cudaMemcpy(bufferD[i], bufferH[i].ctypes.data, bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice)
for i in range(nIO):
context.set_tensor_address(lTensorName[i], int(bufferD[i]))
context.execute_async_v3(0)
for i in range(nInput, nIO):
cudart.cudaMemcpy(bufferH[i].ctypes.data, bufferD[i], bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost)
for i in range(nIO):
print(lTensorName[i])
print(bufferH[i])
for b in bufferD:
cudart.cudaFree(b)
| trt-samples-for-hackathon-cn-master | cookbook/08-Advance/ExternalSource/main.py |
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import ctypes
import numpy as np
from cuda import cudart
data = np.zeros([2, 3], dtype=np.float32)
nElement = np.prod(data.shape)
nByteSize = np.nbytes[np.float32] * nElement
# Allocate page-locked (pinned) host memory
_, pBuffer = cudart.cudaHostAlloc(nByteSize, cudart.cudaHostAllocWriteCombined)
# Map the pinned memory into a numpy array and write new data with numpy methods
pBufferCtype = ctypes.cast(pBuffer, ctypes.POINTER(ctypes.c_float * nElement))
numpyArray = np.ndarray(shape=data.shape, buffer=pBufferCtype[0], dtype=np.float32)
for i in range(nElement):
numpyArray.reshape(-1)[i] = i
# Copy the pinned memory into another numpy array and print it
anotherArray = np.zeros(data.shape, dtype=np.float32)
cudart.cudaMemcpy(anotherArray.ctypes.data, pBuffer, nByteSize, cudart.cudaMemcpyKind.cudaMemcpyHostToHost)  # the pinned buffer resides in host memory, so this is a host-to-host copy
print(anotherArray)
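# Free the pinned buffer; memory from cudaHostAlloc must be released with cudaFreeHost
cudart.cudaFreeHost(pBuffer)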
| trt-samples-for-hackathon-cn-master | cookbook/08-Advance/StreamAndAsync/usePinnedMemory.py |
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import ctypes
import os
import numpy as np
import nvtx
import tensorrt as trt
from cuda import cudart
trtFile = "./model.plan"
nB, nC, nH, nW = 1, 3, 256, 256
nTest = 30
def printArrayInformation(x, info="", n=5):
print( '%s:%s,SumAbs=%.5e,Var=%.5f,Max=%.5f,Min=%.5f,SAD=%.5f'%( \
info,str(x.shape),np.sum(abs(x)),np.var(x),np.max(x),np.min(x),np.sum(np.abs(np.diff(x.reshape(-1)))) ))
print('\t', x.reshape(-1)[:n], x.reshape(-1)[-n:])
def build():
logger = trt.Logger(trt.Logger.ERROR)
if os.path.isfile(trtFile):
with open(trtFile, "rb") as f:
engineString = f.read()
if engineString == None:
print("Failed getting serialized engine!")
return
print("Succeeded getting serialized engine!")
else:
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
profile = builder.create_optimization_profile()
config = builder.create_builder_config()
inputTensor = network.add_input("inputT0", trt.float32, [-1, -1, -1, -1])
profile.set_shape(inputTensor.name, [nB, nC, nH, nW], [nB, nC, nH, nW], [nB * 2, nC * 2, nH * 2, nW * 2])
config.add_optimization_profile(profile)
identityLayer = network.add_unary(inputTensor, trt.UnaryOperation.NEG)
network.mark_output(identityLayer.get_output(0))
engineString = builder.build_serialized_network(network, config)
if engineString == None:
print("Failed building serialized engine!")
return
print("Succeeded building serialized engine!")
with open(trtFile, "wb") as f:
f.write(engineString)
print("Succeeded saving .plan file!")
engine = trt.Runtime(logger).deserialize_cuda_engine(engineString)
nIO = engine.num_io_tensors
lTensorName = [engine.get_tensor_name(i) for i in range(nIO)]
nInput = [engine.get_tensor_mode(lTensorName[i]) for i in range(nIO)].count(trt.TensorIOMode.INPUT)
context = engine.create_execution_context()
context.set_input_shape(lTensorName[0], [nB, nC, nH, nW])
for i in range(nIO):
print("[%2d]%s->" % (i, "Input " if i < nInput else "Output"), engine.get_tensor_dtype(lTensorName[i]), engine.get_tensor_shape(lTensorName[i]), context.get_tensor_shape(lTensorName[i]), lTensorName[i])
return context
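# run() performs nTest inferences with either pinned or pageable host memory; each timing loop is
# wrapped in an nvtx range ("Pagelock" / "Pageable") so the two cases can be compared in Nsight Systems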
def run(context, bUsePinnedMemory):
engine = context.engine
_, stream = cudart.cudaStreamCreate()
nIO = engine.num_io_tensors
lTensorName = [engine.get_tensor_name(i) for i in range(nIO)]
nInput = [engine.get_tensor_mode(lTensorName[i]) for i in range(nIO)].count(trt.TensorIOMode.INPUT)
    if bUsePinnedMemory:  # pinned host memory can be used directly as TensorRT I/O buffers, no explicit memcpy needed
bufferSize = []
bufferH = []
bufferD = []
for i in range(nInput):
bufferSize.append(trt.volume(context.get_tensor_shape(lTensorName[i])) * engine.get_tensor_dtype(lTensorName[i]).itemsize)
bufferD.append(cudart.cudaHostAlloc(bufferSize[i], cudart.cudaHostAllocWriteCombined)[1])
pBufferCtype = ctypes.cast(bufferD[i], ctypes.POINTER(ctypes.c_float * trt.volume(context.get_tensor_shape(lTensorName[i]))))
bufferH.append(np.ndarray(shape=context.get_tensor_shape(lTensorName[i]), buffer=pBufferCtype[0], dtype=np.float32))
buffer = bufferH[-1].reshape(-1)
for j in range(trt.volume(context.get_tensor_shape(lTensorName[i]))):
buffer[j] = j
for i in range(nInput, nIO):
bufferSize.append(trt.volume(context.get_tensor_shape(lTensorName[i])) * engine.get_tensor_dtype(lTensorName[i]).itemsize)
bufferD.append(cudart.cudaHostAlloc(bufferSize[i], cudart.cudaHostAllocWriteCombined)[1])
pBufferCtype = ctypes.cast(bufferD[-1], ctypes.POINTER(ctypes.c_float * trt.volume(context.get_tensor_shape(lTensorName[i]))))
bufferH.append(np.ndarray(shape=context.get_tensor_shape(lTensorName[i]), buffer=pBufferCtype[0], dtype=np.float32))
for i in range(nIO):
            context.set_tensor_address(lTensorName[i], int(bufferD[i]))  # use the pinned memory directly
# warm up
context.execute_async_v3(stream)
cudart.cudaStreamSynchronize(stream)
# test
with nvtx.annotate("Pagelock", color="green"):
for k in range(nTest):
context.execute_async_v3(stream)
cudart.cudaStreamSynchronize(stream)
for i in range(nIO):
printArrayInformation(bufferH[i])
        for b in bufferD:
            cudart.cudaFreeHost(b)  # these buffers came from cudaHostAlloc, so release them with cudaFreeHost
cudart.cudaStreamDestroy(stream)
else: # do not use pin-memory
bufferSize = []
bufferH = []
bufferD = []
for i in range(nInput):
bufferSize.append(trt.volume(context.get_tensor_shape(lTensorName[i])) * engine.get_tensor_dtype(lTensorName[i]).itemsize)
            bufferH.append(np.arange(nB * nC * nH * nW, dtype=np.float32).reshape(nB, nC, nH, nW))
bufferD.append(cudart.cudaMallocAsync(bufferSize[i], stream)[1])
for i in range(nInput, nIO):
bufferSize.append(trt.volume(context.get_tensor_shape(lTensorName[i])) * engine.get_tensor_dtype(lTensorName[i]).itemsize)
bufferH.append(np.empty(context.get_tensor_shape(lTensorName[i]), dtype=trt.nptype(engine.get_tensor_dtype(lTensorName[i]))))
bufferD.append(cudart.cudaMallocAsync(bufferSize[i], stream)[1])
# warm up --------------------------------------------------------------
for i in range(nInput): # numpy array -> GPU memory
cudart.cudaMemcpyAsync(bufferD[i], bufferH[i].ctypes.data, bufferSize[i], cudart.cudaMemcpyKind.cudaMemcpyHostToDevice, stream)
context.execute_async_v2(bufferD, stream) # use GPU memory
for i in range(nInput, nIO): # GPU memory -> numpy array
cudart.cudaMemcpyAsync(bufferH[i].ctypes.data, bufferD[i], bufferSize[i], cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost, stream)
cudart.cudaStreamSynchronize(stream)
# test -----------------------------------------------------------------
with nvtx.annotate("Pageable", color="Red"):
for k in range(nTest):
for i in range(nInput): # numpy array -> GPU memory
cudart.cudaMemcpyAsync(bufferD[i], bufferH[i].ctypes.data, bufferSize[i], cudart.cudaMemcpyKind.cudaMemcpyHostToDevice, stream)
context.execute_async_v2(bufferD, stream) # use GPU memory
for i in range(nInput, nIO): # GPU memory -> numpy array
cudart.cudaMemcpyAsync(bufferH[i].ctypes.data, bufferD[i], bufferSize[i], cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost, stream)
cudart.cudaStreamSynchronize(stream)
for i in range(nIO):
printArrayInformation(bufferH[i])
for b in bufferD:
cudart.cudaFreeAsync(b, stream)
cudart.cudaStreamDestroy(stream)
if __name__ == "__main__":
os.system("rm -rf ./*.plan")
np.set_printoptions(precision=3, linewidth=200, suppress=True)
cudart.cudaDeviceSynchronize()
context = build() # build engine and prepare context
run(context, False) # use pageable memory
run(context, True) # use pagelocked memory
| trt-samples-for-hackathon-cn-master | cookbook/08-Advance/StreamAndAsync/main.py |
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
import tensorrt as trt
from cuda import cudart
#nB, nC, nH, nW = 1, 4, 8, 8 # nC % 4 == 0, no padding needed, all data is preserved
nB, nC, nH, nW = 1, 3, 8, 8 # nC % 4 != 0, channels are padded to a multiple of 4
data = (np.arange(1, 1 + nB * nC * nH * nW, dtype=np.float32) / np.prod(nB * nC * nH * nW) * 128).astype(np.float32).reshape(nB, nC, nH, nW)
np.set_printoptions(precision=3, edgeitems=8, linewidth=300, suppress=True)
cudart.cudaDeviceSynchronize()
logger = trt.Logger(trt.Logger.ERROR)
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
profile = builder.create_optimization_profile()
config = builder.create_builder_config()
config.set_flag(trt.BuilderFlag.INT8)
inputT0 = network.add_input("inputT0", trt.float32, (-1, nC, nH, nW))
profile.set_shape(inputT0.name, [1, nC, nH, nW], [nB, nC, nH, nW], [nB * 2, nC, nH, nW])
config.add_optimization_profile(profile)
layer = network.add_identity(inputT0)
layer.get_output(0).dtype = trt.int8
layer.set_output_type(0, trt.int8)
layer.get_output(0).allowed_formats = 1 << int(trt.TensorFormat.CHW4)
layer.get_output(0).dynamic_range = [-128, 128]
network.mark_output(layer.get_output(0))
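# CHW4 packs channels in groups of 4 on the innermost axis, so with nC == 3 one channel of padding is added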
engineString = builder.build_serialized_network(network, config)
engine = trt.Runtime(logger).deserialize_cuda_engine(engineString)
context = engine.create_execution_context()
nIO = engine.num_io_tensors
#nIO = engine.num_bindings # deprecated since TensorRT 8.5
lTensorName = [engine.get_tensor_name(i) for i in range(nIO)]
nInput = [engine.get_tensor_mode(lTensorName[i]) for i in range(nIO)].count(trt.TensorIOMode.INPUT) # get the count of input tensor
nOutput = [engine.get_tensor_mode(lTensorName[i]) for i in range(nIO)].count(trt.TensorIOMode.OUTPUT) # get the count of output tensor
#nInput = np.sum([engine.binding_is_input(i) for i in range(engine.num_bindings)]) # deprecated since TensorRT 8.5
#nOutput = engine.num_bindings - nInput
print("engine.__len__() = %d" % len(engine))
print("engine.__sizeof__() = %d" % engine.__sizeof__())
print("engine.__str__() = %s" % engine.__str__())
print("engine.name = %s" % engine.name)
print("engine.device_memory_size = %d" % engine.device_memory_size)
print("engine.engine_capability = %d" % engine.engine_capability) # refer to 02-API/BuilderConfig
print("engine.has_implicit_batch_dimension = %s" % engine.has_implicit_batch_dimension)
#print("engine.max_batch_size = %d" % engine.max_batch_size) # used in Implicit Batch mode, deprecated since TensorRT 8.4, use Dyanmic Shape mode instead
print("engine.num_io_tensors = %d" % engine.num_io_tensors)
#print("engine.num_bindings = %d" % engine.num_bindings) # deprecated since TensorRT 8.5
print("engine.num_layers = %d" % engine.num_layers)
print("engine.num_optimization_profiles = %d" % engine.num_optimization_profiles)
print("engine.refittable = %s" % engine.refittable)
print("engine.tactic_sources = %d" % engine.tactic_sources)
print("\n\nMethod related to binding:")
print("Binding: %s 0,%s 1" % (" " * 56, " " * 56))
print("get_binding_name: %58s,%58s" % (engine.get_binding_name(0), engine.get_binding_name(1)))
print("get_binding_shape: %58s,%58s" % (engine.get_binding_shape(0), engine.get_binding_shape(1)))
print("get_binding_dtype: %58s,%58s" % (engine.get_binding_dtype(0), engine.get_binding_dtype(1)))
print("get_binding_format: %58s,%58s" % (engine.get_binding_format(0), engine.get_binding_format(1)))
print("get_binding_format_desc: %58s,%58s" % (engine.get_binding_format_desc(0), engine.get_binding_format_desc(1)))
print("get_binding_bytes_per_component: %58d,%58d" % (engine.get_binding_bytes_per_component(0), engine.get_binding_bytes_per_component(1)))
print("get_binding_components_per_element:%58d,%58d" % (engine.get_binding_components_per_element(0), engine.get_binding_components_per_element(1)))
print("get_binding_vectorized_dim: %58d,%58d" % (engine.get_binding_vectorized_dim(0), engine.get_binding_vectorized_dim(1)))
print("")
print("binding_is_input: %58s,%58s" % (engine.binding_is_input(0), engine.binding_is_input(1)))
print("is_execution_binding: %58s,%58s" % (engine.is_execution_binding(0), engine.is_execution_binding(1)))
print("is_shape_binding: %58s,%58s" % (engine.is_shape_binding(0), engine.is_shape_binding(1)))
print("get_profile_shape: %58s,%58s" % (engine.get_profile_shape(0, 0), "")) # only input tensors own Optimization Profile Shape
#print("get_profile_shape: %58s,%58s" % (engine.get_profile_shape_input(0,0), engine.get_profile_shape_input(0,1))) # We do not use Shape Input Tensor in this example
print("__getitem__(int): %58s,%58s" % (engine[0], engine[1]))
print("__getitem__(str): %58d,%58d" % (engine["inputT0"], engine["(Unnamed Layer* 0) [Identity]_output"]))
print("get_binding_index: %58d,%58d" % (engine.get_binding_index("inputT0"), engine.get_binding_index("(Unnamed Layer* 0) [Identity]_output")))
context.set_binding_shape(0, [nB, nC, nH, nW])
bufferH = []
bufferH.append(data)
for i in range(nOutput):
bufferH.append(np.empty(context.get_binding_shape(nInput + i), dtype=trt.nptype(engine.get_binding_dtype(nInput + i))))
bufferD = []
for i in range(engine.num_bindings):
bufferD.append(cudart.cudaMalloc(bufferH[i].nbytes)[1])
for i in range(nInput):
cudart.cudaMemcpy(bufferD[i], np.ascontiguousarray(bufferH[i].reshape(-1)).ctypes.data, bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice)
context.execute_v2(bufferD)
for i in range(nOutput):
cudart.cudaMemcpy(bufferH[nInput + i].ctypes.data, bufferD[nInput + i], bufferH[nInput + i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost)
for i in range(nInput):
print("Input %d:" % i, bufferH[i].shape, "\n", bufferH[i])
for i in range(nOutput):
print("Output %d:" % i, bufferH[nInput + i].shape, "\n", bufferH[nInput + i])
print("Restore to Linear:")
print(bufferH[-1].reshape(nB * nC * nH * 2, nW // 2).transpose(1, 0).reshape(nB, nC, nH, nW))
for buffer in bufferD:
cudart.cudaFree(buffer)
"""
Member of ICudaEngine:
++++ shown above
==== shown in binding part
---- not shown above
[no prefix] others
----__class__
__del__
__delattr__
__dir__
__doc__
__enter__
__eq__
__exit__
__format__
__ge__
__getattribute__
====__getitem__ same as get_binding_name and get_binding_index
__gt__
__hash__
__init__
__init_subclass__
__le__
++++__len__
__lt__
__module__
__ne__
__new__
__reduce__
__reduce_ex__
__repr__
__setattr__
++++__sizeof__
++++__str__
__subclasshook__
====binding_is_input
----create_engine_inspector refer to 02-API/EngineInspector
++++create_execution_context
----create_execution_context_without_device_memory
++++device_memory_size
++++engine_capability
----error_recorder refer to 09-Advance/ErrorRecorder
====get_binding_bytes_per_component
====get_binding_components_per_element
====get_binding_dtype
====get_binding_format
====get_binding_format_desc
====get_binding_index
====get_binding_name
====get_binding_shape
====get_binding_vectorized_dim
====get_location
====get_profile_shape
====get_profile_shape_input
++++has_implicit_batch_dimension
====is_execution_binding
====is_shape_binding
++++max_batch_size
++++name
++++num_bindings
++++num_layers
++++num_optimization_profiles
----profiling_verbosity refer to 02-API/ProfilingVerbosity
++++refittable
----serialize refer to 01-SimpleDemo/TensorRT8.4
++++tactic_sources
~~~~~~~~ API since TensorRT8.5 ~~~~~~~~
get_tensor_bytes_per_component
get_tensor_components_per_element
get_tensor_dtype
get_tensor_format
get_tensor_format_desc
get_tensor_location
get_tensor_mode
get_tensor_name
get_tensor_profile_shape
get_tensor_shape
get_tensor_vectorized_dim
is_shape_inference_io
num_io_tensors
"""
| trt-samples-for-hackathon-cn-master | cookbook/08-Advance/DataFormat/main-old.py |
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import ctypes
import os
import numpy as np
import tensorrt as trt
from cuda import cudart
os.chdir("/wili/tensorrt-cookbook/08-Advance/DataFormat")
np.set_printoptions(precision=3, linewidth=200, suppress=True)
np.random.seed(31193)
cudart.cudaDeviceSynchronize()
def printArrayInformation(x, info="", n=5):
if 0 in x.shape:
print('%s:%s' % (info, str(x.shape)))
print()
return
print( '%s:%s,SumAbs=%.5e,Var=%.5f,Max=%.5f,Min=%.5f,SAD=%.5f'%( \
info,str(x.shape),np.sum(abs(x)),np.var(x),np.max(x),np.min(x),np.sum(np.abs(np.diff(x.reshape(-1)))) ))
print('\t', x.reshape(-1)[:n], x.reshape(-1)[-n:])
def check(a, b, weak=False, checkEpsilon=1e-5):
if weak:
a = a.astype(np.float32)
b = b.astype(np.float32)
res = np.all(np.abs(a - b) < checkEpsilon)
else:
res = np.all(a == b)
diff0 = np.max(np.abs(a - b))
diff1 = np.max(np.abs(a - b) / (np.abs(b) + checkEpsilon))
print("check:%s, absDiff=%f, relDiff=%f" % (res, diff0, diff1))
def run(shape, dataType, format):
testCase = "<shape=%s,dataType=%s,format=%s>" % (shape, dataType, format)
print("Test %s" % testCase)
logger = trt.Logger(trt.Logger.ERROR)
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
profile = builder.create_optimization_profile()
config = builder.create_builder_config()
if dataType == trt.DataType.HALF:
config.set_flag(trt.BuilderFlag.FP16)
if dataType == trt.DataType.INT8:
config.set_flag(trt.BuilderFlag.INT8)
nDim = 4 # for normal cases, we use input tensor of 4 dimensions
if dataType == trt.DataType.HALF and format in [trt.TensorFormat.CDHW32, trt.TensorFormat.DHWC8]:
nDim = 5
inputT0 = network.add_input("inputT0", dataType, [-1] * nDim)
inputT0.allowed_formats = 1 << int(trt.TensorFormat.LINEAR)
if dataType == trt.DataType.INT8:
inputT0.set_dynamic_range(0, 384)
profile.set_shape(inputT0.name, [1] * nDim, [64] * nDim, [64] * nDim)
config.add_optimization_profile(profile)
identityLayer = network.add_identity(inputT0)
identityLayer.get_output(0).dtype = dataType
identityLayer.get_output(0).allowed_formats = 1 << int(format)
network.mark_output(identityLayer.get_output(0))
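    # the identity layer just forwards the input, so the output buffer directly exposes the requested vectorized format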
engineString = builder.build_serialized_network(network, config)
if engineString == None:
print("Failed building engine!")
return
print("Succeeded building engine!")
engine = trt.Runtime(logger).deserialize_cuda_engine(engineString)
nIO = engine.num_io_tensors
lTensorName = [engine.get_tensor_name(i) for i in range(nIO)]
nInput = [engine.get_tensor_mode(lTensorName[i]) for i in range(nIO)].count(trt.TensorIOMode.INPUT)
context = engine.create_execution_context()
context.set_input_shape(lTensorName[0], shape)
#for i in range(nIO):
# print("[%2d]%s->" % (i, "Input " if i < nInput else "Output"), engine.get_tensor_dtype(lTensorName[i]), engine.get_tensor_shape(lTensorName[i]), context.get_tensor_shape(lTensorName[i]), lTensorName[i])
bufferH = []
bufferH.append(np.arange(np.prod(shape), dtype=trt.nptype(engine.get_tensor_dtype(lTensorName[0]))).reshape(shape))
for i in range(nInput, nIO):
bufferH.append(np.empty(context.get_tensor_shape(lTensorName[i]), dtype=trt.nptype(engine.get_tensor_dtype(lTensorName[i]))))
bufferD = []
for i in range(nIO):
bufferD.append(cudart.cudaMalloc(bufferH[i].nbytes)[1])
for i in range(nInput):
cudart.cudaMemcpy(bufferD[i], bufferH[i].ctypes.data, bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice)
for i in range(nIO):
context.set_tensor_address(lTensorName[i], int(bufferD[i]))
context.execute_async_v3(0)
for i in range(nInput, nIO):
cudart.cudaMemcpy(bufferH[i].ctypes.data, bufferD[i], bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost)
#print("Input: \n", bufferH[0])
#print("Output:\n", bufferH[1])
# check correctness manually
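    # common pattern of the checks below: pad C up to a multiple of the vector width k,
    # then move groups of k channels to the innermost axis (and invert the transform for the reverse check)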
if dataType == trt.DataType.FLOAT and format == trt.TensorFormat.LINEAR:
check(bufferH[1], bufferH[0], weak=True)
check(bufferH[0], bufferH[1], weak=True)
elif dataType == trt.DataType.HALF and format == trt.TensorFormat.CHW2:
if shape[1] % 2 == 0: # no pad
check(bufferH[1], bufferH[0].reshape(shape[0], shape[1] // 2, 2, shape[2], shape[3]).transpose(0, 1, 3, 4, 2).reshape(shape), weak=True)
check(bufferH[0], bufferH[1].reshape(shape[0], shape[1] // 2, shape[2], shape[3], 2).transpose(0, 1, 4, 2, 3).reshape(shape), weak=True)
        else:  # padding needed; this branch also works when shape[1] % 2 == 0, but is more complex
nTile = (shape[1] + 2 - 1) // 2
nPadC = nTile * 2
nPadWidth = nPadC - shape[1]
padBuffer = np.concatenate([bufferH[0], np.zeros([shape[0], nPadWidth, shape[2], shape[3]], dtype=bufferH[0].dtype)], axis=1)
buffer = padBuffer.reshape(shape[0], nTile, 2, shape[2], shape[3]).transpose(0, 1, 3, 4, 2).reshape(shape[0], nPadC, shape[2], shape[3])[:, :shape[1], :, :]
check(bufferH[1], buffer, weak=True)
padBuffer = np.concatenate([bufferH[1], np.zeros([shape[0], nPadWidth, shape[2], shape[3]], dtype=bufferH[1].dtype)], axis=1)
buffer = padBuffer.reshape(shape[0], nTile, shape[2], shape[3], 2).transpose(0, 1, 4, 2, 3).reshape(shape[0], nPadC, shape[2], shape[3])[:, :shape[1], :, :]
check(bufferH[0], buffer, weak=True) # lose the last ((c + 1) // 2 * h * w - c * h * w // 2) element
elif dataType == trt.DataType.HALF and format == trt.TensorFormat.HWC8:
if shape[1] % 8 == 0: # no pad
check(bufferH[1], bufferH[0].reshape(shape[0], shape[1] // 8, 8, shape[2], shape[3]).transpose(0, 1, 3, 4, 2).reshape(shape), weak=True)
check(bufferH[0], bufferH[1].reshape(shape[0], shape[1] // 8, shape[2], shape[3], 8).transpose(0, 1, 4, 2, 3).reshape(shape), weak=True)
        else:  # padding needed; this branch also works when shape[1] % 8 == 0, but is more complex
nTile = (shape[1] + 8 - 1) // 8
nPadC = nTile * 8
nPadWidth = nPadC - shape[1]
padBuffer = np.concatenate([bufferH[0], np.zeros([shape[0], nPadWidth, shape[2], shape[3]], dtype=bufferH[0].dtype)], axis=1)
buffer = padBuffer.transpose(0, 2, 3, 1).reshape(shape[0], nPadC, shape[2], shape[3])[:, :shape[1], :, :]
check(bufferH[1], buffer, weak=True)
padBuffer = np.concatenate([bufferH[1], np.zeros([shape[0], nPadWidth, shape[2], shape[3]], dtype=bufferH[1].dtype)], axis=1)
buffer = padBuffer.reshape(shape[0], nTile, shape[2], shape[3], 8).transpose(0, 1, 4, 2, 3).reshape(shape[0], nPadC, shape[2], shape[3])[:, :shape[1], :, :]
check(bufferH[0], buffer, weak=True) # lose the last ((c + 7) // 8 * 8) * (h * w-1) element
elif dataType == trt.DataType.HALF and format == trt.TensorFormat.CHW4:
if shape[1] % 4 == 0: # no pad
check(bufferH[1], bufferH[0].reshape(shape[0], shape[1] // 4, 4, shape[2], shape[3]).transpose(0, 1, 3, 4, 2).reshape(shape), weak=True)
check(bufferH[0], bufferH[1].reshape(shape[0], shape[1] // 4, shape[2], shape[3], 4).transpose(0, 1, 4, 2, 3).reshape(shape), weak=True)
        else:  # padding needed; this branch also works when shape[1] % 4 == 0, but is more complex
nTile = (shape[1] + 4 - 1) // 4
nPadC = nTile * 4
nPadWidth = nPadC - shape[1]
padBuffer = np.concatenate([bufferH[0], np.zeros([shape[0], nPadWidth, shape[2], shape[3]], dtype=bufferH[0].dtype)], axis=1)
buffer = padBuffer.reshape(shape[0], nTile, 4, shape[2], shape[3]).transpose(0, 1, 3, 4, 2).reshape(shape[0], nPadC, shape[2], shape[3])[:, :shape[1], :, :]
check(bufferH[1], buffer, weak=True)
padBuffer = np.concatenate([bufferH[1], np.zeros([shape[0], nPadWidth, shape[2], shape[3]], dtype=bufferH[1].dtype)], axis=1)
buffer = padBuffer.reshape(shape[0], nTile, shape[2], shape[3], 4).transpose(0, 1, 4, 2, 3).reshape(shape[0], nPadC, shape[2], shape[3])[:, :shape[1], :, :]
check(bufferH[0], buffer, weak=True) # lose the last ((c + 1) // 4 * h * w - c * h * w // 4) element
elif dataType == trt.DataType.HALF and format == trt.TensorFormat.CHW16:
if shape[1] % 16 == 0: # no pad
check(bufferH[1], bufferH[0].reshape(shape[0], shape[1] // 16, 16, shape[2], shape[3]).transpose(0, 1, 3, 4, 2).reshape(shape), weak=True)
check(bufferH[0], bufferH[1].reshape(shape[0], shape[1] // 16, shape[2], shape[3], 16).transpose(0, 1, 4, 2, 3).reshape(shape), weak=True)
        else:  # padding needed; this branch also works when shape[1] % 16 == 0, but is more complex
nTile = (shape[1] + 16 - 1) // 16
nPadC = nTile * 16
nPadWidth = nPadC - shape[1]
padBuffer = np.concatenate([bufferH[0], np.zeros([shape[0], nPadWidth, shape[2], shape[3]], dtype=bufferH[0].dtype)], axis=1)
buffer = padBuffer.reshape(shape[0], nTile, 16, shape[2], shape[3]).transpose(0, 1, 3, 4, 2).reshape(shape[0], nPadC, shape[2], shape[3])[:, :shape[1], :, :]
check(bufferH[1], buffer, weak=True)
padBuffer = np.concatenate([bufferH[1], np.zeros([shape[0], nPadWidth, shape[2], shape[3]], dtype=bufferH[1].dtype)], axis=1)
buffer = padBuffer.reshape(shape[0], nTile, shape[2], shape[3], 16).transpose(0, 1, 4, 2, 3).reshape(shape[0], nPadC, shape[2], shape[3])[:, :shape[1], :, :]
check(bufferH[0], buffer, weak=True) # lose the last ((c + 1) // 16 * h * w - c * h * w // 16) element
elif dataType == trt.DataType.FLOAT and format == trt.TensorFormat.CHW32:
if shape[1] % 32 == 0: # no pad
check(bufferH[1], bufferH[0].reshape(shape[0], shape[1] // 32, 32, shape[2], shape[3]).transpose(0, 1, 3, 4, 2).reshape(shape), weak=True)
check(bufferH[0], bufferH[1].reshape(shape[0], shape[1] // 32, shape[2], shape[3], 32).transpose(0, 1, 4, 2, 3).reshape(shape), weak=True)
        else:  # padding needed; this branch also works when shape[1] % 32 == 0, but is more complex
nTile = (shape[1] + 31) // 32
nPadC = nTile * 32
nPadWidth = nPadC - shape[1]
padBuffer = np.concatenate([bufferH[0], np.zeros([shape[0], nPadWidth, shape[2], shape[3]], dtype=bufferH[0].dtype)], axis=1)
buffer = padBuffer.reshape(shape[0], nTile, 32, shape[2], shape[3]).transpose(0, 1, 3, 4, 2).reshape(shape[0], nPadC, shape[2], shape[3])[:, :shape[1], :, :]
check(bufferH[1], buffer, weak=True)
padBuffer = np.concatenate([bufferH[1], np.zeros([shape[0], nPadWidth, shape[2], shape[3]], dtype=bufferH[1].dtype)], axis=1)
buffer = padBuffer.reshape(shape[0], nTile, shape[2], shape[3], 32).transpose(0, 1, 4, 2, 3).reshape(shape[0], nPadC, shape[2], shape[3])[:, :shape[1], :, :]
check(bufferH[0], buffer, weak=True) # lose the last ((c + 1) // 32 * h * w - c * h * w // 32) element
elif dataType == trt.DataType.HALF and format == trt.TensorFormat.DHWC8:
if shape[1] % 8 == 0: # no pad
check(bufferH[1], bufferH[0].reshape(shape[0], shape[1] // 8, 8, shape[2], shape[3], shape[4]).transpose(0, 1, 3, 4, 5, 2).reshape(shape), weak=True)
check(bufferH[0], bufferH[1].reshape(shape[0], shape[1] // 8, shape[2], shape[3], shape[4], 8).transpose(0, 1, 5, 2, 3, 4).reshape(shape), weak=True)
        else:  # padding needed; this branch also works when shape[1] % 8 == 0, but is more complex
nTile = (shape[1] + 8 - 1) // 8
nPadC = nTile * 8
nPadWidth = nPadC - shape[1]
padBuffer = np.concatenate([bufferH[0], np.zeros([shape[0], nPadWidth, shape[2], shape[3], shape[4]], dtype=bufferH[0].dtype)], axis=1)
buffer = padBuffer.transpose(0, 2, 3, 4, 1).reshape(shape[0], nPadC, shape[2], shape[3], shape[4])[:, :shape[1], :, :]
check(bufferH[1], buffer, weak=True)
padBuffer = np.concatenate([bufferH[1], np.zeros([shape[0], nPadWidth, shape[2], shape[3], shape[4]], dtype=bufferH[1].dtype)], axis=1)
buffer = padBuffer.reshape(shape[0], nTile, shape[2], shape[3], shape[4], 8).transpose(0, 1, 5, 2, 3, 4).reshape(shape[0], nPadC, shape[2], shape[3], shape[4])[:, :shape[1], :, :, :]
check(bufferH[0], buffer, weak=True) # lose the last ((c + 7) // 8 * 8) * (h * w-1) element
elif dataType == trt.DataType.HALF and format == trt.TensorFormat.CDHW32:
if shape[1] % 32 == 0: # no pad
check(bufferH[1], bufferH[0].reshape(shape[0], shape[1] // 32, 32, shape[2], shape[3], shape[4]).transpose(0, 1, 3, 4, 5, 2).reshape(shape), weak=True)
check(bufferH[0], bufferH[1].reshape(shape[0], shape[1] // 32, shape[2], shape[3], shape[4], 32).transpose(0, 1, 5, 2, 3, 4).reshape(shape), weak=True)
        else:  # padding needed; this branch also works when shape[1] % 32 == 0, but is more complex
nTile = (shape[1] + 32 - 1) // 32
nPadC = nTile * 32
nPadWidth = nPadC - shape[1]
padBuffer = np.concatenate([bufferH[0], np.zeros([shape[0], nPadWidth, shape[2], shape[3], shape[4]], dtype=bufferH[0].dtype)], axis=1)
buffer = padBuffer.reshape(shape[0], nTile, 32, shape[2], shape[3], shape[4]).transpose(0, 1, 3, 4, 5, 2).reshape(shape[0], nPadC, shape[2], shape[3], shape[4])[:, :shape[1], :, :, :]
check(bufferH[1], buffer, weak=True)
padBuffer = np.concatenate([bufferH[1], np.zeros([shape[0], nPadWidth, shape[2], shape[3], shape[4]], dtype=bufferH[1].dtype)], axis=1)
buffer = padBuffer.reshape(shape[0], nTile, shape[2], shape[3], shape[4], 32).transpose(0, 1, 5, 2, 3, 4).reshape(shape[0], nPadC, shape[2], shape[3], shape[4])[:, :shape[1], :, :, :]
check(bufferH[0], buffer, weak=True) # lose the last ((c + 1) // 32 * h * w - c * h * w // 32) element
elif dataType == trt.DataType.FLOAT and format == trt.TensorFormat.HWC:
check(bufferH[1], bufferH[0].transpose(0, 2, 3, 1).reshape(shape), weak=True)
check(bufferH[0], bufferH[1].reshape(shape[0], shape[2], shape[3], shape[1]).transpose(0, 3, 1, 2).reshape(shape), weak=True)
elif dataType == trt.DataType.HALF and format == trt.TensorFormat.HWC16:
if shape[1] % 16 == 0: # no pad
check(bufferH[1], bufferH[0].reshape(shape[0], shape[1] // 16, 16, shape[2], shape[3]).transpose(0, 1, 3, 4, 2).reshape(shape), weak=True)
check(bufferH[0], bufferH[1].reshape(shape[0], shape[1] // 16, shape[2], shape[3], 16).transpose(0, 4, 1, 2, 3).reshape(shape), weak=True)
        else:  # padding needed; this branch also works when shape[1] % 16 == 0, but is more complex
nTile = (shape[1] + 16 - 1) // 16
nPadC = nTile * 16
nPadWidth = nPadC - shape[1]
padBuffer = np.concatenate([bufferH[0], np.zeros([shape[0], nPadWidth, shape[2], shape[3]], dtype=bufferH[0].dtype)], axis=1)
buffer = padBuffer.transpose(0, 2, 3, 1).reshape(shape[0], nPadC, shape[2], shape[3])[:, :shape[1], :, :]
check(bufferH[1], buffer, weak=True)
padBuffer = np.concatenate([bufferH[1], np.zeros([shape[0], nPadWidth, shape[2], shape[3]], dtype=bufferH[1].dtype)], axis=1)
buffer = padBuffer.reshape(shape[0], nTile, shape[2], shape[3], 16).transpose(0, 1, 4, 2, 3).reshape(shape[0], nPadC, shape[2], shape[3])[:, :shape[1], :, :]
check(bufferH[0], buffer, weak=True) # lose the last ((c + 7) // 16 * 16) * (h * w-1) element
    elif dataType == trt.DataType.FLOAT and format == trt.TensorFormat.DHWC:  # output observed to match the linear layout here
check(bufferH[1], bufferH[0], weak=True)
check(bufferH[0], bufferH[1], weak=True)
#check(bufferH[1], bufferH[0].transpose(0, 2, 3, 1).reshape(shape), weak=True)
#check(bufferH[0], bufferH[1].reshape(shape[0], shape[2], shape[3], shape[1]).transpose(0, 3, 1, 2).reshape(shape), weak=True)
for b in bufferD:
cudart.cudaFree(b)
print("Test %s finish!\n" % testCase)
if __name__ == "__main__":
os.system("rm -rf ./*.plan")
run([1, 2, 3, 4], trt.DataType.FLOAT, trt.TensorFormat.LINEAR)
run([1, 4, 2, 3], trt.DataType.HALF, trt.TensorFormat.CHW2) # no pad
run([1, 3, 2, 3], trt.DataType.HALF, trt.TensorFormat.CHW2) # pad 1 channel
run([1, 8, 2, 3], trt.DataType.HALF, trt.TensorFormat.HWC8) # no pad
run([1, 7, 2, 3], trt.DataType.HALF, trt.TensorFormat.HWC8) # pad 1 channel
run([1, 4, 2, 3], trt.DataType.HALF, trt.TensorFormat.CHW4) # no pad
run([1, 3, 2, 3], trt.DataType.HALF, trt.TensorFormat.CHW4) # pad 1 channel
run([1, 4, 2, 3], trt.DataType.HALF, trt.TensorFormat.CHW16) # no pad
run([1, 3, 2, 3], trt.DataType.HALF, trt.TensorFormat.CHW16) # pad 1 channel
run([1, 64, 2, 3], trt.DataType.FLOAT, trt.TensorFormat.CHW32) # no pad
run([1, 63, 2, 3], trt.DataType.FLOAT, trt.TensorFormat.CHW32) # pad 1 channel
run([1, 8, 1, 2, 3], trt.DataType.HALF, trt.TensorFormat.DHWC8) # no pad
run([1, 7, 1, 2, 3], trt.DataType.HALF, trt.TensorFormat.DHWC8) # pad 1 channel
run([1, 64, 1, 2, 3], trt.DataType.HALF, trt.TensorFormat.CDHW32) # no pad
run([1, 63, 1, 2, 3], trt.DataType.HALF, trt.TensorFormat.CDHW32) # pad 1 channel
run([1, 2, 3, 4], trt.DataType.FLOAT, trt.TensorFormat.HWC)
#run([1, 2, 3, 4], trt.DataType.FLOAT, trt.TensorFormat.DLA_LINEAR)
#run([1, 4, 2, 3], trt.DataType.HALF, trt.TensorFormat.DLA_HWC4) # no pad
#run([1, 3, 2, 3], trt.DataType.HALF, trt.TensorFormat.DLA_HWC4) # pad 1 channel
run([1, 16, 2, 3], trt.DataType.HALF, trt.TensorFormat.HWC16) # no pad
run([1, 15, 2, 3], trt.DataType.HALF, trt.TensorFormat.HWC16) # pad 1 channel
run([1, 2, 3, 4], trt.DataType.FLOAT, trt.TensorFormat.DHWC)
print("Test all finish!")
| trt-samples-for-hackathon-cn-master | cookbook/08-Advance/DataFormat/main.py |
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import ctypes
import numpy as np
import tensorrt as trt
from cuda import cudart
trtFile = "./model.plan"
nB, nC, nH, nW = 1, 1, 28, 28
np.random.seed(31193)
data = np.random.rand(nB, nC, nH, nW).astype(np.float32) * 2 - 1
np.set_printoptions(precision=3, linewidth=200, suppress=True)
cudart.cudaDeviceSynchronize()
logger = trt.Logger(trt.Logger.ERROR)
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
profile = builder.create_optimization_profile()
config = builder.create_builder_config()
inputTensor = network.add_input("inputT0", trt.float32, [-1, nC, nH, nW])
profile.set_shape(inputTensor.name, [1, nC, nH, nW], [nB, nC, nH, nW], [nB * 2, nC, nH, nW])
config.add_optimization_profile(profile)
w = np.ascontiguousarray(np.random.rand(32, 1, 5, 5).astype(np.float32))
b = np.ascontiguousarray(np.random.rand(32, 1, 1).astype(np.float32))
_0 = network.add_convolution_nd(inputTensor, 32, [5, 5], trt.Weights(w), trt.Weights(b))
_0.padding_nd = [2, 2]
_1 = network.add_activation(_0.get_output(0), trt.ActivationType.RELU)
_2 = network.add_pooling_nd(_1.get_output(0), trt.PoolingType.MAX, [2, 2])
_2.stride_nd = [2, 2]
w = np.ascontiguousarray(np.random.rand(64, 32, 5, 5).astype(np.float32))
b = np.ascontiguousarray(np.random.rand(64, 1, 1).astype(np.float32))
_3 = network.add_convolution_nd(_2.get_output(0), 64, [5, 5], trt.Weights(w), trt.Weights(b))
_3.padding_nd = [2, 2]
_4 = network.add_activation(_3.get_output(0), trt.ActivationType.RELU)
_5 = network.add_pooling_nd(_4.get_output(0), trt.PoolingType.MAX, [2, 2])
_5.stride_nd = [2, 2]
_6 = network.add_shuffle(_5.get_output(0))
_6.reshape_dims = (-1, 64 * 7 * 7)
w = np.ascontiguousarray(np.random.rand(64 * 7 * 7, 1024).astype(np.float32))
b = np.ascontiguousarray(np.random.rand(1, 1024).astype(np.float32))
_7 = network.add_constant(w.shape, trt.Weights(w))
_8 = network.add_matrix_multiply(_6.get_output(0), trt.MatrixOperation.NONE, _7.get_output(0), trt.MatrixOperation.NONE)
_9 = network.add_constant(b.shape, trt.Weights(b))
_10 = network.add_elementwise(_8.get_output(0), _9.get_output(0), trt.ElementWiseOperation.SUM)
_11 = network.add_activation(_10.get_output(0), trt.ActivationType.RELU)
w = np.ascontiguousarray(np.random.rand(1024, 10).astype(np.float32))
b = np.ascontiguousarray(np.random.rand(1, 10).astype(np.float32))
_12 = network.add_constant(w.shape, trt.Weights(w))
_13 = network.add_matrix_multiply(_11.get_output(0), trt.MatrixOperation.NONE, _12.get_output(0), trt.MatrixOperation.NONE)
_14 = network.add_constant(b.shape, trt.Weights(b))
_15 = network.add_elementwise(_13.get_output(0), _14.get_output(0), trt.ElementWiseOperation.SUM)
_16 = network.add_softmax(_15.get_output(0))
_16.axes = 1 << 1
_17 = network.add_topk(_16.get_output(0), trt.TopKOperation.MAX, 1, 1 << 1)
network.mark_output(_17.get_output(1))
engineString = builder.build_serialized_network(network, config)
engine = trt.Runtime(logger).deserialize_cuda_engine(engineString)
nIO = engine.num_io_tensors
lTensorName = [engine.get_tensor_name(i) for i in range(nIO)]
nInput = [engine.get_tensor_mode(lTensorName[i]) for i in range(nIO)].count(trt.TensorIOMode.INPUT)
context = engine.create_execution_context_without_device_memory() # do not alloc GPU memory when creating the context
print("Device memory needed by engine is %d byte" % engine.device_memory_size)
status, address = cudart.cudaMalloc(engine.device_memory_size) # alloc GPU memory by ourselves
context.device_memory = address # assign the address to the context
context.set_input_shape(lTensorName[0], [nB, nC, nH, nW])
for i in range(nIO):
print("[%2d]%s->" % (i, "Input " if i < nInput else "Output"), engine.get_tensor_dtype(lTensorName[i]), engine.get_tensor_shape(lTensorName[i]), context.get_tensor_shape(lTensorName[i]), lTensorName[i])
bufferH = []
bufferH.append(np.ascontiguousarray(data))
for i in range(nInput, nIO):
bufferH.append(np.empty(context.get_tensor_shape(lTensorName[i]), dtype=trt.nptype(engine.get_tensor_dtype(lTensorName[i]))))
bufferD = []
for i in range(nIO):
bufferD.append(cudart.cudaMalloc(bufferH[i].nbytes)[1])
for i in range(nInput):
cudart.cudaMemcpy(bufferD[i], bufferH[i].ctypes.data, bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice)
for i in range(nIO):
context.set_tensor_address(lTensorName[i], int(bufferD[i]))
context.execute_async_v3(0)
for i in range(nInput, nIO):
cudart.cudaMemcpy(bufferH[i].ctypes.data, bufferD[i], bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost)
for i in range(nIO):
print(lTensorName[i])
print(bufferH[i])
for b in bufferD:
cudart.cudaFree(b)
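cudart.cudaFree(address)  # release the device memory we allocated for the context ourselves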
| trt-samples-for-hackathon-cn-master | cookbook/08-Advance/CreateExecutionContextWithoutDeviceMemory/main.py |
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import ctypes
import os
import numpy as np
import tensorrt as trt
from cuda import cudart
soFile = "./ZeroPlugin.so"
np.random.seed(31193)
def printArrayInformation(x, info="", n=5):
if 0 in x.shape:
print('%s:%s' % (info, str(x.shape)))
print()
return
print( '%s:%s,SumAbs=%.5e,Var=%.5f,Max=%.5f,Min=%.5f,SAD=%.5f'%( \
info,str(x.shape),np.sum(abs(x)),np.var(x),np.max(x),np.min(x),np.sum(np.abs(np.diff(x.reshape(-1)))) ))
print('\t', x.reshape(-1)[:n], x.reshape(-1)[-n:])
def check(a, b, weak=False, checkEpsilon=1e-5):
if weak:
a = a.astype(np.float32)
b = b.astype(np.float32)
res = np.all(np.abs(a - b) < checkEpsilon)
else:
res = np.all(a == b)
diff0 = np.max(np.abs(a - b))
diff1 = np.max(np.abs(a - b) / (np.abs(b) + checkEpsilon))
print("check:%s, absDiff=%f, relDiff=%f" % (res, diff0, diff1))
def ZeroCPU(inputH):
return [np.zeros(inputH[0], dtype=np.float32)]
def getZeroPlugin():
for c in trt.get_plugin_registry().plugin_creator_list:
#print(c.name)
if c.name == "Zero":
parameterList = []
return c.create_plugin(c.name, trt.PluginFieldCollection(parameterList))
return None
def run(shape):
testCase = "<shape=%s>" % (shape)
trtFile = "./model-Shape%s.plan" % str(shape)
print("Test %s" % testCase)
logger = trt.Logger(trt.Logger.ERROR)
trt.init_libnvinfer_plugins(logger, '')
ctypes.cdll.LoadLibrary(soFile)
if os.path.isfile(trtFile):
with open(trtFile, "rb") as f:
engine = trt.Runtime(logger).deserialize_cuda_engine(f.read())
if engine == None:
print("Failed loading engine!")
return
print("Succeeded loading engine!")
else:
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
profile = builder.create_optimization_profile()
config = builder.create_builder_config()
inputT0 = network.add_input("inputT0", trt.int32, [2])
profile.set_shape_input(inputT0.name, [1, 1], [3, 4], [6, 8])
config.add_optimization_profile(profile)
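        # the runtime value of the shape input inputT0 (with a trailing 0 appended) becomes the size input
        # of the slice, so the slice output shape is only known at runtime and is passed on to the Zero plugin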
baseLayer = network.add_constant([1, 1, 1], trt.Weights(np.zeros([1, 1, 1], dtype=np.float32)))
zeroLayer = network.add_constant([1], np.array([0], dtype=np.int32))
pqzLayer = network.add_concatenation([inputT0, zeroLayer.get_output(0)])
pqzLayer.axis = 0
sliceLayer = network.add_slice(baseLayer.get_output(0), [0, 0, 0], [0, 0, 0], [0, 0, 0])
sliceLayer.set_input(2, pqzLayer.get_output(0))
pluginLayer = network.add_plugin_v2([sliceLayer.get_output(0)], getZeroPlugin())
network.mark_output(pluginLayer.get_output(0))
engineString = builder.build_serialized_network(network, config)
if engineString == None:
print("Failed building engine!")
return
print("Succeeded building engine!")
with open(trtFile, "wb") as f:
f.write(engineString)
engine = trt.Runtime(logger).deserialize_cuda_engine(engineString)
nIO = engine.num_io_tensors
lTensorName = [engine.get_tensor_name(i) for i in range(nIO)]
nInput = [engine.get_tensor_mode(lTensorName[i]) for i in range(nIO)].count(trt.TensorIOMode.INPUT)
nOutput = nIO - nInput
context = engine.create_execution_context()
#context.set_binding_shape(0, shape)
context.set_shape_input(0, shape)
for i in range(nIO):
print("[%2d]%s->" % (i, "Input " if i < nInput else "Output"), engine.get_tensor_dtype(lTensorName[i]), engine.get_tensor_shape(lTensorName[i]), context.get_tensor_shape(lTensorName[i]), lTensorName[i])
bufferH = []
bufferH.append(np.ones([2], dtype=np.int32))
for i in range(nOutput):
bufferH.append(np.ones(context.get_binding_shape(nInput + i), dtype=trt.nptype(engine.get_binding_dtype(nInput + i))))
bufferD = []
for i in range(engine.num_bindings):
bufferD.append(cudart.cudaMalloc(bufferH[i].nbytes)[1])
for i in range(nInput):
cudart.cudaMemcpy(bufferD[i], np.ascontiguousarray(bufferH[i].reshape(-1)).ctypes.data, bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice)
context.execute_v2(bufferD)
for i in range(nOutput):
cudart.cudaMemcpy(bufferH[nInput + i].ctypes.data, bufferD[nInput + i], bufferH[nInput + i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost)
outputCPU = ZeroCPU([np.array(shape, dtype=np.int32)])
for i in range(nInput):
printArrayInformation(bufferH[i])
for i in range(nInput, nIO):
printArrayInformation(bufferH[i])
for i in range(nInput, nIO):
printArrayInformation(outputCPU[i - nInput])
check(bufferH[nInput:][0], outputCPU[0], True)
for b in bufferD:
cudart.cudaFree(b)
print("Test %s finish!\n" % testCase)
if __name__ == "__main__":
os.system("rm -rf ./*.plan")
np.set_printoptions(precision=3, linewidth=200, suppress=True)
run([3, 4])
run([6, 7])
print("Test all finish!")
| trt-samples-for-hackathon-cn-master | cookbook/08-Advance/DynamicShapeOutput/testZeroPlugin.py |
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
import tensorrt as trt
from cuda import cudart
shape = [2, 3, 4, 5]
nContext = 2 # count of context
np.random.seed(31193)
np.set_printoptions(precision=3, linewidth=200, suppress=True)
cudart.cudaDeviceSynchronize()
logger = trt.Logger(trt.Logger.ERROR)
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
profile = builder.create_optimization_profile()
config = builder.create_builder_config()
config.set_preview_feature(trt.PreviewFeature.PROFILE_SHARING_0806, True) # use this preview feature in TensorRT 8.6
inputT0 = network.add_input("inputT0", trt.float32, [-1, -1, -1, -1])
inputT1 = network.add_input("inputT1", trt.float32, [-1, -1, -1, -1])
layer = network.add_elementwise(inputT0, inputT1, trt.ElementWiseOperation.SUM)
network.mark_output(layer.get_output(0))
profile.set_shape(inputT0.name, shape, shape, [k * nContext for k in shape]) # "* nContext" is just for this example, not required in real use case
profile.set_shape(inputT1.name, shape, shape, [k * nContext for k in shape])
config.add_optimization_profile(profile)
engineString = builder.build_serialized_network(network, config)
engine = trt.Runtime(logger).deserialize_cuda_engine(engineString)
nIO = engine.num_io_tensors
lTensorName = [engine.get_tensor_name(i) for i in range(nIO)]
nInput = [engine.get_tensor_mode(lTensorName[i]) for i in range(nIO)].count(trt.TensorIOMode.INPUT)
nOutput = nIO - nInput
streamList = [cudart.cudaStreamCreate()[1] for _ in range(nContext)]
contextList = [engine.create_execution_context() for _ in range(nContext)]
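# with PROFILE_SHARING_0806 enabled, all contexts can share optimization profile 0, each with its own stream and buffers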
for index in range(nContext):
stream = streamList[index]
context = contextList[index]
    context.set_optimization_profile_async(0, stream)  # there is only one OptimizationProfile
    inputShape = [k * (index + 1) for k in shape]  # use a different shape for each context, just for this example
context.set_input_shape(lTensorName[0], inputShape)
context.set_input_shape(lTensorName[1], inputShape)
for i in range(nIO):
print("[%2d]%s->" % (i, "Input " if i < nInput else "Output"), engine.get_tensor_dtype(lTensorName[i]), engine.get_tensor_shape(lTensorName[i]), context.get_tensor_shape(lTensorName[i]), lTensorName[i])
    bufferH = []  # a list of buffers only for this context (this OptimizationProfile)
for i in range(nInput):
bufferH.append(np.arange(np.prod(inputShape)).astype(np.float32).reshape(inputShape))
for i in range(nOutput):
bufferH.append(np.empty(context.get_tensor_shape(lTensorName[nInput + i]), dtype=trt.nptype(engine.get_tensor_dtype(lTensorName[nInput + i]))))
bufferD = []
for i in range(nIO):
bufferD.append(cudart.cudaMalloc(bufferH[i].nbytes)[1])
for i in range(nInput):
cudart.cudaMemcpyAsync(bufferD[i], bufferH[i].ctypes.data, bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice, stream)
for i in range(nIO):
context.set_tensor_address(lTensorName[i], int(bufferD[i]))
context.execute_async_v3(stream)
for i in range(nOutput):
cudart.cudaMemcpyAsync(bufferH[nInput + i].ctypes.data, bufferD[nInput + i], bufferH[nInput + i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost, stream)
cudart.cudaStreamSynchronize(stream)
print("check result of context %d: %s" % (index, np.all(bufferH[2] == bufferH[0] + bufferH[1])))
for b in bufferD:
cudart.cudaFree(b)
| trt-samples-for-hackathon-cn-master | cookbook/08-Advance/MultiContext/main.py |
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
import tensorrt as trt
from cuda import cudart
shape = [2, 3, 4, 5]
nContext = 2 # count of context
np.random.seed(31193)
np.set_printoptions(precision=3, linewidth=200, suppress=True)
cudart.cudaDeviceSynchronize()
logger = trt.Logger(trt.Logger.ERROR)
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
profileList = [builder.create_optimization_profile() for _ in range(nContext)]
config = builder.create_builder_config()
inputT0 = network.add_input("inputT0", trt.float32, [-1, -1, -1, -1])
inputT1 = network.add_input("inputT1", trt.float32, [-1, -1, -1, -1])
layer = network.add_elementwise(inputT0, inputT1, trt.ElementWiseOperation.SUM)
network.mark_output(layer.get_output(0))
for profile in profileList:
profile.set_shape(inputT0.name, shape, shape, [k * nContext for k in shape]) # "* nContext" is just for this example, not required in real use case
profile.set_shape(inputT1.name, shape, shape, [k * nContext for k in shape])
config.add_optimization_profile(profile)
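# without profile sharing, each context needs its own OptimizationProfile, and the engine exposes one set of bindings per profile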
engineString = builder.build_serialized_network(network, config)
engine = trt.Runtime(logger).deserialize_cuda_engine(engineString)
nIO = engine.num_bindings
nInput = np.sum([engine.binding_is_input(i) for i in range(engine.num_bindings)])
nOutput = nIO - nInput
nIO, nInput, nOutput = nIO // nContext, nInput // nContext, nOutput // nContext
streamList = [cudart.cudaStreamCreate()[1] for _ in range(nContext)]
contextList = [engine.create_execution_context() for index in range(nContext)]
bufferH = []  # a list of buffers for all contexts (all OptimizationProfiles)
for index in range(nContext):
stream = streamList[index]
context = contextList[index]
context.set_optimization_profile_async(index, stream)
    bindingPad = nIO * index  # skip the bindings occupied by previous OptimizationProfiles
    inputShape = [k * (index + 1) for k in shape]  # use a different shape for each context, just for this example
context.set_binding_shape(bindingPad + 0, inputShape)
context.set_binding_shape(bindingPad + 1, inputShape)
print("Context%d binding all? %s" % (index, "Yes" if context.all_binding_shapes_specified else "No"))
for i in range(nIO):
print(i, "Input " if engine.binding_is_input(i) else "Output", engine.get_binding_shape(i), context.get_binding_shape(i))
for i in range(nInput):
bufferH.append(np.arange(np.prod(inputShape)).astype(np.float32).reshape(inputShape))
for i in range(nOutput):
bufferH.append(np.empty(context.get_binding_shape(bindingPad + nInput + i), dtype=trt.nptype(engine.get_binding_dtype(bindingPad + nInput + i))))
bufferD = []
for i in range(len(bufferH)):
bufferD.append(cudart.cudaMalloc(bufferH[i].nbytes)[1])
for index in range(nContext):
print("Use Context %d" % index)
stream = streamList[index]
context = contextList[index]
context.set_optimization_profile_async(index, stream)
bindingPad = nIO * index
inputShape = [k * (index + 1) for k in shape]
context.set_binding_shape(bindingPad + 0, inputShape)
context.set_binding_shape(bindingPad + 1, inputShape)
for i in range(nIO * nContext):
print("[%2d]%s->" % (i, "Input " if i < nInput else "Output"), engine.get_binding_dtype(i), engine.get_binding_shape(i), context.get_binding_shape(i), engine.get_binding_name(i))
for i in range(nInput):
cudart.cudaMemcpyAsync(bufferD[bindingPad + i], bufferH[bindingPad + i].ctypes.data, bufferH[bindingPad + i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice, stream)
    bufferList = [int(0) for b in bufferD[:bindingPad]] + [int(b) for b in bufferD[bindingPad:(bindingPad + nInput + nOutput)]] + [int(0) for b in bufferD[(bindingPad + nInput + nOutput):]]
    # pass real addresses only for the bindings of this OptimizationProfile, and int(0) for the bindings of the others
context.execute_async_v2(bufferList, stream)
for i in range(nOutput):
cudart.cudaMemcpyAsync(bufferH[bindingPad + nInput + i].ctypes.data, bufferD[bindingPad + nInput + i], bufferH[bindingPad + nInput + i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost, stream)
for stream in streamList:
    cudart.cudaStreamSynchronize(stream)
for index in range(nContext):
bindingPad = nIO * index
print("check result of context %d: %s" % (index, np.all(bufferH[bindingPad + 2] == bufferH[bindingPad + 0] + bufferH[bindingPad + 1])))
for stream in streamList:
cudart.cudaStreamDestroy(stream)
for b in bufferD:
cudart.cudaFree(b)
| trt-samples-for-hackathon-cn-master | cookbook/08-Advance/MultiContext/main-MultiOptimizationProfile.py |
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import ctypes
import os
import numpy as np
import tensorrt as trt
from cuda import cudart
soFile = "./IntMulBoolPlugin.so"
np.set_printoptions(precision=3, linewidth=200, suppress=True)
np.random.seed(31193)
cudart.cudaDeviceSynchronize()
def printArrayInformation(x, info="", n=5):
if 0 in x.shape:
print('%s:%s' % (info, str(x.shape)))
print()
return
print( '%s:%s,SumAbs=%.5e,Var=%.5f,Max=%.5f,Min=%.5f,SAD=%.5f'%( \
info,str(x.shape),np.sum(abs(x)),np.var(x),np.max(x),np.min(x),np.sum(np.abs(np.diff(x.reshape(-1)))) ))
print('\t', x.reshape(-1)[:n], x.reshape(-1)[-n:])
def check(a, b, weak=False, checkEpsilon=1e-5):
if weak:
a = a.astype(np.float32)
b = b.astype(np.float32)
res = np.all(np.abs(a - b) < checkEpsilon)
else:
res = np.all(a == b)
diff0 = np.max(np.abs(a - b))
diff1 = np.max(np.abs(a - b) / (np.abs(b) + checkEpsilon))
print("check:%s, absDiff=%f, relDiff=%f" % (res, diff0, diff1))
def IntMulBoolCPU(inputList):
return [inputList[0] * inputList[1].astype(np.int32)]
def getIntMulBoolPlugin():
for c in trt.get_plugin_registry().plugin_creator_list:
#print(c.name)
if c.name == "IntMulBool":
parameterList = []
return c.create_plugin(c.name, trt.PluginFieldCollection(parameterList))
return None
def run(shapeA, shapeB):
testCase = "<shapeA=%s,shapeB=%s>" % (shapeA, shapeB)
trtFile = "./model.plan"
print("Test %s" % testCase)
logger = trt.Logger(trt.Logger.ERROR)
trt.init_libnvinfer_plugins(logger, '')
ctypes.cdll.LoadLibrary(soFile)
if os.path.isfile(trtFile):
with open(trtFile, "rb") as f:
engine = trt.Runtime(logger).deserialize_cuda_engine(f.read())
if engine == None:
print("Failed loading engine!")
return
print("Succeeded loading engine!")
else:
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
profile = builder.create_optimization_profile()
config = builder.create_builder_config()
config.set_flag(trt.BuilderFlag.FP16)
inputT0 = network.add_input("inputT0", trt.int32, [-1, -1])
profile.set_shape(inputT0.name, [1, 1], [4, 256], [16, 1024])
inputT1 = network.add_input("inputT1", trt.bool, [-1, -1])
profile.set_shape(inputT1.name, [1, 1], [4, 256], [16, 1024])
config.add_optimization_profile(profile)
pluginLayer = network.add_plugin_v2([inputT0, inputT1], getIntMulBoolPlugin())
network.mark_output(pluginLayer.get_output(0))
engineString = builder.build_serialized_network(network, config)
if engineString == None:
print("Failed building engine!")
return
print("Succeeded building engine!")
with open(trtFile, "wb") as f:
f.write(engineString)
engine = trt.Runtime(logger).deserialize_cuda_engine(engineString)
nIO = engine.num_io_tensors
lTensorName = [engine.get_tensor_name(i) for i in range(nIO)]
nInput = [engine.get_tensor_mode(lTensorName[i]) for i in range(nIO)].count(trt.TensorIOMode.INPUT)
context = engine.create_execution_context()
context.set_input_shape(lTensorName[0], shapeA)
context.set_input_shape(lTensorName[1], shapeB)
#for i in range(nIO):
# print("[%2d]%s->" % (i, "Input " if i < nInput else "Output"), engine.get_tensor_dtype(lTensorName[i]), engine.get_tensor_shape(lTensorName[i]), context.get_tensor_shape(lTensorName[i]), lTensorName[i])
bufferH = []
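    # host inputs: an int32 ramp tensor and a random bool mask (a numpy bool array, matching the trt.bool input)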
bufferH.append(np.arange(np.prod(shapeA), dtype=np.int32).reshape(shapeA))
bufferH.append((np.random.rand(np.prod(shapeB)) > 0.5).reshape(shapeB))
for i in range(nInput, nIO):
bufferH.append(np.empty(context.get_tensor_shape(lTensorName[i]), dtype=trt.nptype(engine.get_tensor_dtype(lTensorName[i]))))
bufferD = []
for i in range(nIO):
bufferD.append(cudart.cudaMalloc(bufferH[i].nbytes)[1])
for i in range(nInput):
cudart.cudaMemcpy(bufferD[i], bufferH[i].ctypes.data, bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice)
for i in range(nIO):
context.set_tensor_address(lTensorName[i], int(bufferD[i]))
context.execute_async_v3(0)
for i in range(nInput, nIO):
cudart.cudaMemcpy(bufferH[i].ctypes.data, bufferD[i], bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost)
outputCPU = IntMulBoolCPU(bufferH[:nInput])
"""
for i in range(nInput):
printArrayInformation(bufferH[i], "Input")
for i in range(nInput, nIO):
printArrayInformation(bufferH[i], "GPU")
for i in range(nInput, nIO):
printArrayInformation(outputCPU[i - nInput], "CPU")
"""
for i in range(nIO - nInput):
check(bufferH[nInput:][i], outputCPU[i], True, checkEpsilon=1e-3)
for b in bufferD:
cudart.cudaFree(b)
print("Test %s finish!\n" % testCase)
if __name__ == "__main__":
os.system("rm -rf ./*.plan")
run([1, 8], [1, 8])
run([4, 256], [4, 256])
run([16, 500], [16, 500])
print("Test all finish!")
| trt-samples-for-hackathon-cn-master | cookbook/05-Plugin/IntAndBoolDataType/testAddSubMulPlugin.py |
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import ctypes
import os
import numpy as np
import tensorrt as trt
from calibrator import MyCalibrator
from cuda import cudart
soFile = "./AddScalarPlugin.so"
cacheFile = "./int8.cache"
np.set_printoptions(precision=3, linewidth=200, suppress=True)
np.random.seed(31193)
cudart.cudaDeviceSynchronize()
def printArrayInformation(x, info="", n=5):
if 0 in x.shape:
print('%s:%s' % (info, str(x.shape)))
print()
return
print( '%s:%s,SumAbs=%.5e,Var=%.5f,Max=%.5f,Min=%.5f,SAD=%.5f'%( \
info,str(x.shape),np.sum(abs(x)),np.var(x),np.max(x),np.min(x),np.sum(np.abs(np.diff(x.reshape(-1)))) ))
print('\t', x.reshape(-1)[:n], x.reshape(-1)[-n:])
def check(a, b, weak=False, checkEpsilon=1e-5):
if weak:
a = a.astype(np.float32)
b = b.astype(np.float32)
res = np.all(np.abs(a - b) < checkEpsilon)
else:
res = np.all(a == b)
diff0 = np.max(np.abs(a - b))
diff1 = np.max(np.abs(a - b) / (np.abs(b) + checkEpsilon))
print("check:%s, absDiff=%f, relDiff=%f" % (res, diff0, diff1))
def addScalarCPU(inputH, scalar):
return [inputH[0] + scalar]
def getAddScalarPlugin(scalar):
for c in trt.get_plugin_registry().plugin_creator_list:
#print(c.name)
if c.name == "AddScalar":
parameterList = []
parameterList.append(trt.PluginField("scalar", np.float32(scalar), trt.PluginFieldType.FLOAT32))
return c.create_plugin(c.name, trt.PluginFieldCollection(parameterList))
return None
def run(shape, scalar):
testCase = "<shape=%s,scalar=%f>" % (shape, scalar)
trtFile = "./model-Dim%s.plan" % str(len(shape))
print("Test %s" % testCase)
logger = trt.Logger(trt.Logger.ERROR)
trt.init_libnvinfer_plugins(logger, '')
ctypes.cdll.LoadLibrary(soFile)
if os.path.isfile(trtFile):
with open(trtFile, "rb") as f:
engine = trt.Runtime(logger).deserialize_cuda_engine(f.read())
if engine == None:
print("Failed loading engine!")
return
print("Succeeded loading engine!")
else:
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
profile = builder.create_optimization_profile()
config = builder.create_builder_config()
config.set_flag(trt.BuilderFlag.INT8)
#config.int8_calibrator = MyCalibrator(1, shape, cacheFile)
inputT0 = network.add_input("inputT0", trt.float32, [-1 for i in shape])
profile.set_shape(inputT0.name, [1 for i in shape], [8 for i in shape], [32 for i in shape])
config.add_optimization_profile(profile)
#inputT0.dynamic_range = [-100,100] # set dynamic range if calibrator is not used
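        # quantize scale: realValue ~= int8Value * scale, so scale = 100/128 maps int8 [-128, 127] to roughly [-100, +100]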
q0Value = 100 / 128
q0Tensor = network.add_constant([], np.array([q0Value], dtype=np.float32)).get_output(0)
quantizeLayer = network.add_quantize(inputT0, q0Tensor)
quantizeLayer.axis = 0
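        # pin the plugin layer and its output to INT8 so it executes between the Q and DQ nodes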
pluginLayer = network.add_plugin_v2([quantizeLayer.get_output(0)], getAddScalarPlugin(scalar))
pluginLayer.precision = trt.int8
pluginLayer.set_output_type(0, trt.int8)
pluginLayer.get_output(0).dtype = trt.int8
#pluginLayer.get_output(0).dynamic_range = [-120,120]
q1Value = 100 / 128
q1Tensor = network.add_constant([], np.array([q1Value], dtype=np.float32)).get_output(0)
dequantizeLayer = network.add_dequantize(pluginLayer.get_output(0), q1Tensor)
dequantizeLayer.axis = 0
network.mark_output(dequantizeLayer.get_output(0))
engineString = builder.build_serialized_network(network, config)
if engineString == None:
print("Failed building engine!")
return
print("Succeeded building engine!")
with open(trtFile, "wb") as f:
f.write(engineString)
engine = trt.Runtime(logger).deserialize_cuda_engine(engineString)
nIO = engine.num_io_tensors
lTensorName = [engine.get_tensor_name(i) for i in range(nIO)]
nInput = [engine.get_tensor_mode(lTensorName[i]) for i in range(nIO)].count(trt.TensorIOMode.INPUT)
context = engine.create_execution_context()
context.set_input_shape(lTensorName[0], shape)
#for i in range(nIO):
# print("[%2d]%s->" % (i, "Input " if i < nInput else "Output"), engine.get_tensor_dtype(lTensorName[i]), engine.get_tensor_shape(lTensorName[i]), context.get_tensor_shape(lTensorName[i]), lTensorName[i])
bufferH = []
bufferH.append(np.arange(np.prod(shape), dtype=np.float32).reshape(shape))
for i in range(nInput, nIO):
bufferH.append(np.empty(context.get_tensor_shape(lTensorName[i]), dtype=trt.nptype(engine.get_tensor_dtype(lTensorName[i]))))
bufferD = []
for i in range(nIO):
bufferD.append(cudart.cudaMalloc(bufferH[i].nbytes)[1])
for i in range(nInput):
cudart.cudaMemcpy(bufferD[i], bufferH[i].ctypes.data, bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice)
for i in range(nIO):
context.set_tensor_address(lTensorName[i], int(bufferD[i]))
context.execute_async_v3(0)
for i in range(nInput, nIO):
cudart.cudaMemcpy(bufferH[i].ctypes.data, bufferD[i], bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost)
outputCPU = addScalarCPU(bufferH[:nInput], scalar)
"""
for i in range(nInput):
printArrayInformation(bufferH[i])
for i in range(nInput, nIO):
printArrayInformation(bufferH[i])
for i in range(nInput, nIO):
printArrayInformation(outputCPU[i - nInput])
"""
check(bufferH[nInput:][0], outputCPU[0], True)
for b in bufferD:
cudart.cudaFree(b)
print("Test %s finish!\n" % testCase)
if __name__ == "__main__":
os.system("rm -rf ./*.plan ./*.cache")
run([32], 0.1)
os.system("rm -rf ./*.plan ./*.cache") # cache files can not be shared among engines because input data ranges are different
run([32, 32], 0.1)
os.system("rm -rf ./*.plan ./*.cache")
    run([16, 16, 16], 0.1) # CHW4 format needs an input tensor with at least 4 dimensions
os.system("rm -rf ./*.plan ./*.cache")
run([8, 8, 8, 8], 0.1)
print("Test all finish!")
| trt-samples-for-hackathon-cn-master | cookbook/05-Plugin/UseINT8-QDQ-TODO/testAddScalarPlugin.py |
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import numpy as np
import tensorrt as trt
from cuda import cudart
class MyCalibrator(trt.IInt8EntropyCalibrator2):
def __init__(self, nCalibration, inputShape, cacheFile):
trt.IInt8EntropyCalibrator2.__init__(self)
self.nCalibration = nCalibration
self.shape = inputShape
        self.bufferSize = trt.volume(inputShape) * trt.float32.itemsize
self.cacheFile = cacheFile
        _, self.dIn = cudart.cudaMalloc(self.bufferSize)
self.count = 0
def __del__(self):
cudart.cudaFree(self.dIn)
def get_batch_size(self): # necessary API
return self.shape[0]
def get_batch(self, nameList=None, inputNodeName=None): # necessary API
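        # TensorRT calls get_batch repeatedly during calibration; return a list of
        # device pointers holding the next batch, or None when the data is exhausted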
if self.count < self.nCalibration:
self.count += 1
data = np.random.rand(np.prod(self.shape)).astype(np.float32).reshape(*self.shape)
data = data * np.prod(self.shape) * 2 - np.prod(self.shape)
data = np.ascontiguousarray(data)
            cudart.cudaMemcpy(self.dIn, data.ctypes.data, self.bufferSize, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice)
return [int(self.dIn)]
else:
return None
def read_calibration_cache(self): # necessary API
if os.path.exists(self.cacheFile):
print("Succeed finding cahce file: %s" % (self.cacheFile))
with open(self.cacheFile, "rb") as f:
cache = f.read()
return cache
else:
print("Failed finding int8 cache!")
return
def write_calibration_cache(self, cache): # necessary API
with open(self.cacheFile, "wb") as f:
f.write(cache)
print("Succeed saving int8 cache!")
return
if __name__ == "__main__":
cudart.cudaDeviceSynchronize()
m = MyCalibrator(5, (1, 1, 28, 28), "./int8.cache")
m.get_batch("FakeNameList")
m.get_batch("FakeNameList")
m.get_batch("FakeNameList")
m.get_batch("FakeNameList")
m.get_batch("FakeNameList")
| trt-samples-for-hackathon-cn-master | cookbook/05-Plugin/UseINT8-QDQ-TODO/calibrator.py |
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import ctypes
import os
import sys
import numpy as np
import pycuda.autoinit
#import cupy.cuda as CD
import pycuda.driver as cuda
import tensorrt as trt
from scipy.special import expit as sigmoid
np.random.seed(31193)
npToTrt = {np.int8: trt.int8, np.float16: trt.float16, np.int32: trt.int32, np.float32: trt.float32}
nBatchSize = 2
maxSL = 40
nDimInput = 128
nDimHidden = 128
epsilonFP32 = 1.0e-5
epsilonFP16 = 1.0e-2
soFile = "./GruPlugin.so"
globalWeightFC = np.linspace(-0.5, 0.5, nDimInput * nDimHidden * 3, dtype=np.float32).reshape(nDimInput, nDimHidden * 3)
globalWeightGRU = np.linspace(-0.5, 0.5, nDimHidden * nDimHidden * 3, dtype=np.float32).reshape(nDimHidden, nDimHidden * 3)
globalBias = np.zeros((nDimHidden, 3), dtype=np.float32)
def check(a, b, weak=False):
if weak:
epsilon = [epsilonFP16, epsilonFP32][int(a.dtype == np.float32)]
return np.all(np.abs(a - b) < epsilon)
else:
return np.all(a == b)
def gruCPU(inputH0, inputH1):
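    # CPU reference of a standard GRU cell (bias is zero here), matching the plugin:
    #   u_t = sigmoid(x_t @ Wxu + h_{t-1} @ Whu)        update gate
    #   r_t = sigmoid(x_t @ Wxr + h_{t-1} @ Whr)        reset gate
    #   g_t = tanh(x_t @ Wxg + (r_t * h_{t-1}) @ Whg)   candidate state
    #   h_t = (1 - u_t) * h_{t-1} + u_t * g_t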
weightFC = np.split(globalWeightFC, 3, axis=1)
weightGRU = np.split(globalWeightGRU, 3, axis=1)
hAllState = np.zeros([nBatchSize, maxSL, nDimHidden], dtype=np.float32)
hLastState = np.zeros((nBatchSize, nDimHidden)).astype(np.float32)
for k in range(nBatchSize):
h_t = np.zeros([1, nDimHidden], dtype=np.float32)
inp = inputH0[k]
for i in range(inputH1[k]):
x_t = inputH0[k, i]
u_t = sigmoid(np.dot(x_t, weightFC[0]) + np.dot(h_t, weightGRU[0]))
r_t = sigmoid(np.dot(x_t, weightFC[1]) + np.dot(h_t, weightGRU[1]))
g_t = np.tanh(np.dot(x_t, weightFC[2]) + np.dot((r_t * h_t), weightGRU[2]))
h_t = ((np.ones([1, nDimHidden], dtype=np.float32) - u_t) * h_t + u_t * g_t)
hAllState[k, i] = h_t
hLastState[k] = hAllState[k, inputH1[k] - 1]
return hAllState, hLastState
def cleanTrash(inputH0, inputH1):
for i in range(inputH0.shape[0]):
inputH0[i, inputH1[i]:, :] = 0
return inputH0
def getGruPlugin(nDimInput: int, nDimHidden: int, weightX: np.array, weightH: np.array, bias: np.array):
for c in trt.get_plugin_registry().plugin_creator_list:
if c.name == "GruPlugin":
p0 = trt.PluginField("nDimInput", np.array([nDimInput], dtype=np.int32), trt.PluginFieldType.INT32)
p1 = trt.PluginField("nDimHidden", np.array([nDimHidden], dtype=np.int32), trt.PluginFieldType.INT32)
p2 = trt.PluginField("WeightX", weightX, trt.PluginFieldType.FLOAT32)
p3 = trt.PluginField("WeightH", weightH, trt.PluginFieldType.FLOAT32)
p4 = trt.PluginField("Bias", bias, trt.PluginFieldType.FLOAT32)
return c.create_plugin(c.name, trt.PluginFieldCollection([p0, p1, p2, p3, p4]))
return None
def buildEngine(logger, dataType):
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
profile = builder.create_optimization_profile()
config = builder.create_builder_config()
config.flags = int(dataType == np.float16)
inputT0 = network.add_input("data", npToTrt[dataType], shape=[nBatchSize, maxSL, nDimInput])
profile.set_shape(inputT0.name, [nBatchSize, maxSL, nDimInput], [nBatchSize, maxSL, nDimInput], [nBatchSize, maxSL, nDimInput])
inputT1 = network.add_input("sequenceLength", trt.int32, shape=[nBatchSize])
profile.set_shape(inputT1.name, [nBatchSize], [nBatchSize], [nBatchSize])
config.add_optimization_profile(profile)
weightGRU = np.split(globalWeightGRU, 3, axis=1)
weightGRU = np.concatenate([weightGRU[0], weightGRU[1], weightGRU[2]], axis=0)
gruPlugin = getGruPlugin(nDimInput, nDimHidden, globalWeightFC, weightGRU, globalBias)
gru = network.add_plugin_v2([inputT0, inputT1], gruPlugin)
gru.name = "GRU"
if dataType == np.float32:
gru.precision = trt.float32
gru.set_output_type(0, trt.float32)
gru.set_output_type(1, trt.float32)
elif dataType == np.float16:
gru.precision = trt.float16
gru.set_output_type(0, trt.float16)
gru.set_output_type(1, trt.float16)
config.set_flag(trt.BuilderFlag.FP16)
config.set_flag(trt.BuilderFlag.STRICT_TYPES)
else:
print("datatype not support!")
network.mark_output(gru.get_output(0))
network.mark_output(gru.get_output(1))
return builder.build_engine(network, config)
def run(time, dataType):
print("test", dataType, "%d time" % time)
logger = trt.Logger(trt.Logger.ERROR)
trt.init_libnvinfer_plugins(logger, '')
ctypes.cdll.LoadLibrary(soFile)
trtFile = "./model-fp" + ["32", "16"][int(dataType == np.float16)] + ".plan"
if os.path.isfile(trtFile):
with open(trtFile, "rb") as f:
engine = trt.Runtime(logger).deserialize_cuda_engine(f.read())
if engine == None:
print("Failed loading engine!")
return None
print("Succeeded loading engine!")
else:
engine = buildEngine(logger, dataType)
if engine == None:
print("Failed building engine!")
return None
print("Succeeded building engine!")
engineStr = engine.serialize()
with open(trtFile, "wb") as f:
f.write(engineStr)
context = engine.create_execution_context()
context.set_binding_shape(0, [nBatchSize, maxSL, nDimInput])
context.set_binding_shape(1, [nBatchSize])
print("Bind0->", engine.get_binding_shape(0), context.get_binding_shape(0))
print("Bind1->", engine.get_binding_shape(1), context.get_binding_shape(1))
print("Bind2->", engine.get_binding_shape(2), context.get_binding_shape(2))
print("Bind3->", engine.get_binding_shape(3), context.get_binding_shape(3))
stream = cuda.Stream()
data0 = np.random.rand(nBatchSize, maxSL, nDimInput)
data1 = np.random.randint(low=1, high=maxSL + 1, size=[nBatchSize])
inputH0 = data0.astype(trt.nptype(engine.get_binding_dtype(0)))
inputD0 = cuda.mem_alloc(inputH0.nbytes)
inputH1 = data1.astype(trt.nptype(engine.get_binding_dtype(1)))
inputD1 = cuda.mem_alloc(inputH1.nbytes)
outputH0 = np.empty(context.get_binding_shape(2), dtype=trt.nptype(engine.get_binding_dtype(2)))
outputD0 = cuda.mem_alloc(outputH0.nbytes)
outputH1 = np.empty(context.get_binding_shape(3), dtype=trt.nptype(engine.get_binding_dtype(3)))
    outputD1 = cuda.mem_alloc(outputH1.nbytes)
cuda.memcpy_htod_async(inputD0, np.ascontiguousarray(inputH0), stream)
cuda.memcpy_htod_async(inputD1, np.ascontiguousarray(inputH1), stream)
#CD.nvtx.RangePush("gru")
context.execute_async_v2([int(inputD0), int(inputD1), int(outputD0), int(outputD1)], stream.handle)
#CD.nvtx.RangePop()
cuda.memcpy_dtoh_async(outputH0, outputD0, stream)
cuda.memcpy_dtoh_async(outputH1, outputD1, stream)
stream.synchronize()
print("InputH0->", inputH0.shape, engine.get_binding_dtype(0))
#print(inputH0)
print("InputH1->", inputH1.shape, engine.get_binding_dtype(1))
#print(inputH1)
print("OutputH0->", outputH0.shape, engine.get_binding_dtype(2))
#print(cleanTrash(outputH0,inputH1))
print("OutputH1->", outputH1.shape, engine.get_binding_dtype(3))
#print(outputH1)
outputH0CPU, outputH1CPU = gruCPU(inputH0, inputH1)
print(check(cleanTrash(outputH0, inputH1), cleanTrash(outputH0CPU, inputH1), True))
print(check(outputH1, outputH1CPU, True))
print("test", dataType, "%d time finish" % time)
if __name__ == "__main__":
os.system("rm -rf ./engine*.plan")
np.set_printoptions(precision=3, linewidth=200, suppress=True)
#cuda.Device(0).make_context()
run(0, np.float32)
#CD.profiler.start()
run(1, np.float32)
#CD.profiler.stop()
run(0, np.float16)
#CD.profiler.start()
run(1, np.float16)
#CD.profiler.stop()
#cuda.Context.pop()
print("test finish!")
| trt-samples-for-hackathon-cn-master | cookbook/05-Plugin/PluginReposity/GruPlugin/testGruPlugin.py |
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import ctypes
import numpy as np
import pycuda.autoinit
import pycuda.driver as cuda
import tensorrt as trt
soFilePath = "./CCLPlugin.so"
height = 384
width = 640
np.random.seed(31193)
def getCCLPlugin():
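    # plugin fields (from the creator's field names): pixel/link score thresholds,
    # minimum component area, and the maximum number of connected components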
for c in trt.get_plugin_registry().plugin_creator_list:
if c.name == "CCLPlugin":
p0 = trt.PluginField("minPixelScore", np.array([0.7], dtype=np.float32), trt.PluginFieldType.FLOAT32)
p1 = trt.PluginField("minLinkScore", np.array([0.7], dtype=np.float32), trt.PluginFieldType.FLOAT32)
p2 = trt.PluginField("minArea", np.array([10], dtype=np.int32), trt.PluginFieldType.INT32)
p3 = trt.PluginField("maxcomponentCount", np.array([65536], dtype=np.int32), trt.PluginFieldType.INT32)
return c.create_plugin(c.name, trt.PluginFieldCollection([p0, p1, p2, p3]))
return None
def buildEngine(logger):
builder = trt.Builder(logger)
builder.max_batch_size = 1
    builder.max_workspace_size = 3 << 30
builder.fp16_mode = False
network = builder.create_network()
inputT0 = network.add_input("pixelScore", trt.float32, (height, width))
inputT1 = network.add_input("linkScore", trt.float32, (8, height, width))
cclLayer = network.add_plugin_v2([inputT0, inputT1], getCCLPlugin())
network.mark_output(cclLayer.get_output(0))
network.mark_output(cclLayer.get_output(1))
return builder.build_cuda_engine(network)
def run():
logger = trt.Logger(trt.Logger.ERROR)
trt.init_libnvinfer_plugins(logger, '')
ctypes.cdll.LoadLibrary(soFilePath)
engine = buildEngine(logger)
if engine == None:
print("Failed building engine!")
return None
print("Succeeded building engine!")
context = engine.create_execution_context()
stream = cuda.Stream()
inputH0 = np.ascontiguousarray(np.random.rand(height, width).reshape(-1))
inputH1 = np.ascontiguousarray(np.random.rand(8, height, width).reshape(-1))
inputD0 = cuda.mem_alloc(inputH0.nbytes)
inputD1 = cuda.mem_alloc(inputH1.nbytes)
outputH0 = np.empty(context.get_binding_shape(2), dtype=trt.nptype(engine.get_binding_dtype(2)))
outputH1 = np.empty(context.get_binding_shape(3), dtype=trt.nptype(engine.get_binding_dtype(3)))
outputD0 = cuda.mem_alloc(outputH0.nbytes)
outputD1 = cuda.mem_alloc(outputH1.nbytes)
cuda.memcpy_htod_async(inputD0, inputH0, stream)
cuda.memcpy_htod_async(inputD1, inputH1, stream)
stream.synchronize()
context.execute_async(1, [int(inputD0), int(inputD1), int(outputD0), int(outputD1)], stream.handle)
stream.synchronize()
cuda.memcpy_dtoh_async(outputH0, outputD0, stream)
cuda.memcpy_dtoh_async(outputH1, outputD1, stream)
stream.synchronize()
print(np.shape(outputH0), np.shape(outputH1))
#print(outputH0)
#print(outputH1)
if __name__ == "__main__":
run()
print("test finish!")
| trt-samples-for-hackathon-cn-master | cookbook/05-Plugin/PluginReposity/CCLPlugin-TRT6-StaticShape/testCCLPlugin.py |
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import ctypes
import numpy as np
import pycuda.autoinit
import pycuda.driver as cuda
import tensorrt as trt
npToNumber = {np.float32: 0, np.float16: 1, np.int8: 2, np.int32: 3}
soFilePath = "./TopKAveragePlugin.so"
def topKAverageCPU(inputH0, inputH1, inputH2, inputH3):
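    # CPU reference: for each sample, sort the valid [:inputH1[i], :inputH2[i]] region
    # along the last axis, average the largest inputH3[k] values of each row, and
    # interleave the results for the different K along the last output axis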
sh = inputH0.shape
nTopK = len(inputH3)
outputH0CPU = np.zeros([sh[0], sh[2], sh[1] * len(inputH3)], dtype=np.float32)
for i in range(sh[0]):
data = np.sort(inputH0[i, :, :inputH1[i], :inputH2[i]])
for k in range(nTopK):
outputH0CPU[i, :inputH1[i], k::nTopK] = np.sum(data[:, :, -inputH3[k]:], axis=2).transpose() / inputH3[k]
return outputH0CPU
def cleanTrash(outputH0, inputH1): # zero out garbage values beyond each sample's valid length in the GPU output
for i in range(outputH0.shape[0]):
outputH0[i, inputH1[i]:, :] = 0
return outputH0
def getTopKAveragePlugin(nTopK, maxTopK):
for c in trt.get_plugin_registry().plugin_creator_list:
if c.name == "TopKAveragePlugin":
p0 = trt.PluginField("nTopK", np.array([nTopK], dtype=np.int32), trt.PluginFieldType.INT32)
p1 = trt.PluginField("maxTopK", np.array([maxTopK], dtype=np.int32), trt.PluginFieldType.INT32)
return c.create_plugin("TopKAveragePlugin", trt.PluginFieldCollection([p0, p1]))
return None
def buildEngine(logger, outDatatype, nTopK, maxTopK):
builder = trt.Builder(logger)
network = builder.create_network(1)
profile = builder.create_optimization_profile()
config = builder.create_builder_config()
config.flags = int(outDatatype == np.float16)
inputT0 = network.add_input("inputT0", trt.float32, [-1, -1, -1, -1])
profile.set_shape(inputT0.name, [1, 1, 1, 1], [36, 10, 5, 30], [72, 20, 10, 70])
inputT1 = network.add_input("inputT1", trt.int32, [-1])
profile.set_shape(inputT1.name, [1], [36], [72])
inputT2 = network.add_input("inputT2", trt.int32, [-1])
profile.set_shape(inputT2.name, [1], [36], [72])
inputT3 = network.add_input("inputT3", trt.int32, [-1])
profile.set_shape(inputT3.name, [1], [2], [4])
config.add_optimization_profile(profile)
pluginLayer = network.add_plugin_v2([inputT0, inputT1, inputT2, inputT3], getTopKAveragePlugin(nTopK, maxTopK))
network.mark_output(pluginLayer.get_output(0))
return builder.build_engine(network, config)
def run(inDim, outDatatype, topKList):
print("test", inDim, outDatatype, topKList)
logger = trt.Logger(trt.Logger.ERROR)
trt.init_libnvinfer_plugins(logger, '')
ctypes.cdll.LoadLibrary(soFilePath)
engine = buildEngine(logger, outDatatype, len(topKList), max(topKList))
if engine == None:
print("Failed building engine!")
return None
print("Succeeded building engine!")
context = engine.create_execution_context()
context.set_binding_shape(0, inDim)
context.set_binding_shape(1, inDim[:1])
context.set_binding_shape(2, inDim[:1])
context.set_binding_shape(3, [len(topKList)])
#print("Bind0->", engine.get_binding_shape(0), context.get_binding_shape(0))
#print("Bind1->", engine.get_binding_shape(1), context.get_binding_shape(1))
#print("Bind2->", engine.get_binding_shape(2), context.get_binding_shape(2))
#print("Bind3->", engine.get_binding_shape(3), context.get_binding_shape(3))
#print("Bind4->", engine.get_binding_shape(4), context.get_binding_shape(4))
print("All bind:", context.all_binding_shapes_specified)
stream = cuda.Stream()
data0 = np.tile(np.arange(1, 1 + np.prod(inDim[-2:]), dtype=np.float32).reshape(inDim[-2:]), [*inDim[:2], 1, 1])
data1 = np.arange(inDim[0], dtype=np.int32) % inDim[2] + 1
data2 = np.arange(inDim[0], dtype=np.int32) % inDim[3] + 1
data3 = np.array(topKList, dtype=np.int32)
inputH0 = np.ascontiguousarray(data0)
inputD0 = cuda.mem_alloc(inputH0.nbytes)
inputH1 = np.ascontiguousarray(data1)
inputD1 = cuda.mem_alloc(inputH1.nbytes)
inputH2 = np.ascontiguousarray(data2)
inputD2 = cuda.mem_alloc(inputH2.nbytes)
inputH3 = np.ascontiguousarray(data3)
inputD3 = cuda.mem_alloc(inputH3.nbytes)
outputH0 = np.empty(context.get_binding_shape(4), dtype=trt.nptype(engine.get_binding_dtype(4)))
outputD0 = cuda.mem_alloc(outputH0.nbytes)
cuda.memcpy_htod_async(inputD0, inputH0, stream)
cuda.memcpy_htod_async(inputD1, inputH1, stream)
cuda.memcpy_htod_async(inputD2, inputH2, stream)
cuda.memcpy_htod_async(inputD3, inputH3, stream)
context.execute_async_v2([int(inputD0), int(inputD1), int(inputD2), int(inputD3), int(outputD0)], stream.handle)
cuda.memcpy_dtoh_async(outputH0, outputD0, stream)
stream.synchronize()
outputH0CPU = topKAverageCPU(inputH0, inputH1, inputH2, inputH3)
#print("Input0:",inputH0.shape,engine.get_binding_dtype(0))
#print(inputH0)
#print("Input1:",inputH1.shape,engine.get_binding_dtype(1))
#print(inputH1)
#print("Input2:",inputH2.shape,engine.get_binding_dtype(2))
#print(inputH2)
#print("Input3:",inputH3.shape,engine.get_binding_dtype(3))
#print(inputH3)
#print("Output:",outputH0.shape, engine.get_binding_dtype(4))
#print(outputH0)
print("Check result:", ["True" if np.all(cleanTrash(outputH0, inputH1) == outputH0CPU) else "False"][0])
if __name__ == "__main__":
np.set_printoptions(precision=3, linewidth=200, suppress=True)
np.set_printoptions(threshold=1e6)
cuda.Device(0).make_context()
run((36, 10, 5, 30), np.float32, [2, 3, 4])
run((36, 8, 5, 65), np.float32, [1, 2, 5, 12])
run((36, 18, 5, 70), np.float32, [1, 2, 5, 12])
cuda.Context.pop()
print("test finish!")
| trt-samples-for-hackathon-cn-master | cookbook/05-Plugin/PluginReposity/TopKAveragePlugin/testTopKAveragePlugin.py |
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import ctypes
import os
import numpy as np
import pycuda.autoinit
import pycuda.driver as cuda
import tensorrt as trt
npToNumber = {np.float32: 0, np.float16: 1, np.int8: 2, np.int32: 3}
soFilePath = "./TopKAveragePlugin.so"
npzFile = "./testTopKAveragePlugin.npz"
def topKAverageCPU(inputH0, inputH1, inputH2, inputH3):
sh = inputH0.shape
nTopK = len(inputH3)
outputH0CPU = np.zeros([sh[0], sh[2], sh[1] * len(inputH3)], dtype=np.float32)
for i in range(sh[0]):
data = np.sort(inputH0[i, :, :inputH1[i], :inputH2[i]])
for k in range(nTopK):
outputH0CPU[i, :inputH1[i], k::nTopK] = np.sum(data[:, :, -inputH3[k]:], axis=2).transpose() / inputH3[k]
return outputH0CPU
def cleanTrash(outputH0, inputH1): # zero out garbage values beyond each sample's valid length in the GPU output
for i in range(outputH0.shape[0]):
outputH0[i, inputH1[i]:, :] = 0
return outputH0
def getTopKAveragePlugin(nTopK, maxTopK):
for c in trt.get_plugin_registry().plugin_creator_list:
if c.name == "TopKAveragePlugin":
p0 = trt.PluginField("nTopK", np.array([nTopK], dtype=np.int32), trt.PluginFieldType.INT32)
p1 = trt.PluginField("maxTopK", np.array([maxTopK], dtype=np.int32), trt.PluginFieldType.INT32)
return c.create_plugin("TopKAveragePlugin", trt.PluginFieldCollection([p0, p1]))
return None
def buildEngine(logger, outDatatype, nTopK, maxTopK):
builder = trt.Builder(logger)
network = builder.create_network(1)
profile = builder.create_optimization_profile()
config = builder.create_builder_config()
config.flags = int(outDatatype == np.float16)
inputT0 = network.add_input("inputT0", trt.float32, [-1, -1, -1, -1])
profile.set_shape(inputT0.name, [1, 1, 1, 1], [36, 10, 5, 30], [72, 20, 10, 70])
inputT1 = network.add_input("inputT1", trt.int32, [-1])
profile.set_shape(inputT1.name, [1], [36], [72])
inputT2 = network.add_input("inputT2", trt.int32, [-1])
profile.set_shape(inputT2.name, [1], [36], [72])
inputT3 = network.add_input("inputT3", trt.int32, [-1])
profile.set_shape(inputT3.name, [1], [2], [4])
config.add_optimization_profile(profile)
pluginLayer = network.add_plugin_v2([inputT0, inputT1, inputT2, inputT3], getTopKAveragePlugin(nTopK, maxTopK))
network.mark_output(pluginLayer.get_output(0))
return builder.build_engine(network, config)
def run():
validHeight = 5
validWidth = 30
topKList = [1, 2, 4]
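    # feature map ("10") and LoD valid-length arrays ("lod0", "lod2") presumably dumped from a real model into the .npz file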
data0 = np.load(npzFile)["10"][0, :, :, :validHeight, :validWidth]
inDim = data0.shape
outDatatype = np.float32
data1 = np.load(npzFile)["lod0"].astype(np.int32).reshape(inDim[:1])
data2 = np.load(npzFile)["lod2"].astype(np.int32).reshape(inDim[:1])
data3 = np.array(topKList, dtype=np.int32)
print("test", inDim, outDatatype, topKList)
logger = trt.Logger(trt.Logger.ERROR)
trt.init_libnvinfer_plugins(logger, '')
ctypes.cdll.LoadLibrary(soFilePath)
engine = buildEngine(logger, outDatatype, len(topKList), max(topKList))
if engine == None:
print("Failed building engine!")
return None
print("Succeeded building engine!")
context = engine.create_execution_context()
context.set_binding_shape(0, inDim)
context.set_binding_shape(1, inDim[:1])
context.set_binding_shape(2, inDim[:1])
context.set_binding_shape(3, [len(topKList)])
#print("Bind0->", engine.get_binding_shape(0), context.get_binding_shape(0))
#print("Bind1->", engine.get_binding_shape(1), context.get_binding_shape(1))
#print("Bind2->", engine.get_binding_shape(2), context.get_binding_shape(2))
#print("Bind3->", engine.get_binding_shape(3), context.get_binding_shape(3))
#print("Bind4->", engine.get_binding_shape(4), context.get_binding_shape(4))
print("All bind:", context.all_binding_shapes_specified)
stream = cuda.Stream()
inputH0 = np.ascontiguousarray(data0)
inputD0 = cuda.mem_alloc(inputH0.nbytes)
inputH1 = np.ascontiguousarray(data1)
inputD1 = cuda.mem_alloc(inputH1.nbytes)
inputH2 = np.ascontiguousarray(data2)
inputD2 = cuda.mem_alloc(inputH2.nbytes)
inputH3 = np.ascontiguousarray(data3)
inputD3 = cuda.mem_alloc(inputH3.nbytes)
outputH0 = np.empty(context.get_binding_shape(4), dtype=trt.nptype(engine.get_binding_dtype(4)))
outputD0 = cuda.mem_alloc(outputH0.nbytes)
cuda.memcpy_htod_async(inputD0, inputH0, stream)
cuda.memcpy_htod_async(inputD1, inputH1, stream)
cuda.memcpy_htod_async(inputD2, inputH2, stream)
cuda.memcpy_htod_async(inputD3, inputH3, stream)
context.execute_async_v2([int(inputD0), int(inputD1), int(inputD2), int(inputD3), int(outputD0)], stream.handle)
cuda.memcpy_dtoh_async(outputH0, outputD0, stream)
stream.synchronize()
outputH0CPU = topKAverageCPU(inputH0, inputH1, inputH2, inputH3)
#print("Input0:",inputH0.shape,engine.get_binding_dtype(0))
#print(inputH0)
#print("Input1:",inputH1.shape,engine.get_binding_dtype(1))
#print(inputH1)
#print("Input2:",inputH2.shape,engine.get_binding_dtype(2))
#print(inputH2)
#print("Input3:",inputH3.shape,engine.get_binding_dtype(3))
#print(inputH3)
#print("Output:",outputH0.shape, engine.get_binding_dtype(4))
#print(outputH0)
print("Check result:", np.sum(np.abs(cleanTrash(outputH0, inputH1) - outputH0CPU)))
if __name__ == "__main__":
np.set_printoptions(precision=3, linewidth=200, suppress=True)
np.set_printoptions(threshold=1e6)
cuda.Device(0).make_context()
run()
cuda.Context.pop()
print("test finish!")
| trt-samples-for-hackathon-cn-master | cookbook/05-Plugin/PluginReposity/TopKAveragePlugin/testTopKAveragePlugin-useDataFromModel.py |
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import ctypes
import os
import numpy as np
import pycuda.autoinit
import pycuda.driver as cuda
import tensorrt as trt
npToTrt = {np.int8: trt.int8, np.float16: trt.float16, np.int32: trt.int32, np.float32: trt.float32}
soFilePath = "./MaxPlugin.so"
def maxCPU(inputH0, inputH1):
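    # CPU reference: per-sample maximum over the first inputH1[i] rows, i.e. the valid sequence length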
outputH0CPU = np.zeros_like(inputH0[:, 0, :], dtype=np.float32)
for i in range(inputH0.shape[0]):
maxLine = np.full(inputH0.shape[-1], -600000, dtype=np.float32)
for j in range(inputH1[i]):
maxLine = np.maximum(maxLine, inputH0[i, j])
outputH0CPU[i, :] = maxLine
return outputH0CPU
def getMaxPlugin():
for c in trt.get_plugin_registry().plugin_creator_list:
if c.name == "MaxPlugin":
return c.create_plugin(c.name, trt.PluginFieldCollection([]))
return None
def buildEngine(logger, inDatatype):
builder = trt.Builder(logger)
network = builder.create_network(1)
profile = builder.create_optimization_profile()
config = builder.create_builder_config()
config.flags = int(inDatatype == np.float16)
inputT0 = network.add_input("inputT0", npToTrt[inDatatype], [-1, -1, -1])
profile.set_shape(inputT0.name, [1, 1, 1], [4, 3, 5], [9, 12, 6])
inputT1 = network.add_input("inputT1", trt.int32, [-1])
profile.set_shape(inputT1.name, [1], [4], [9])
config.add_optimization_profile(profile)
pluginLayer = network.add_plugin_v2([inputT0, inputT1], getMaxPlugin())
network.mark_output(pluginLayer.get_output(0))
return builder.build_engine(network, config)
def run(inDim, inDatatype):
print("test", inDim, inDatatype)
logger = trt.Logger(trt.Logger.ERROR)
trt.init_libnvinfer_plugins(logger, '')
ctypes.cdll.LoadLibrary(soFilePath)
engine = buildEngine(logger, inDatatype)
if engine == None:
print("Failed building engine!")
return None
print("Succeeded building engine!")
context = engine.create_execution_context()
context.set_binding_shape(0, inDim)
context.set_binding_shape(1, inDim[:1])
#print("Bind0->",engine.get_binding_shape(0),context.get_binding_shape(0))
#print("Bind1->",engine.get_binding_shape(1),context.get_binding_shape(1))
#print("Bind2->",engine.get_binding_shape(2),context.get_binding_shape(2))
print("All bind:", context.all_binding_shapes_specified)
stream = cuda.Stream()
data0 = np.arange(np.prod(inDim), dtype=inDatatype).reshape(inDim)
data1 = np.arange(1, inDim[0] + 1, dtype=np.int32)
data1[data1 > inDim[1]] = inDim[1]
inputH0 = np.ascontiguousarray(data0)
inputD0 = cuda.mem_alloc(inputH0.nbytes)
inputH1 = np.ascontiguousarray(data1)
inputD1 = cuda.mem_alloc(inputH1.nbytes)
outputH0 = np.empty(context.get_binding_shape(2), dtype=trt.nptype(engine.get_binding_dtype(2)))
outputD0 = cuda.mem_alloc(outputH0.nbytes)
cuda.memcpy_htod_async(inputD0, inputH0, stream)
cuda.memcpy_htod_async(inputD1, inputH1, stream)
context.execute_async_v2([int(inputD0), int(inputD1), int(outputD0)], stream.handle)
cuda.memcpy_dtoh_async(outputH0, outputD0, stream)
stream.synchronize()
outputH0CPU = maxCPU(inputH0, inputH1)
#print("InputH0->",inputH0.shape, engine.get_binding_dtype(0))
#print(inputH0)
#print("InputH1->",inputH1.shape, engine.get_binding_dtype(1))
#print(inputH1)
#print("OutputH0->",outputH0.shape, engine.get_binding_dtype(2))
#print(outputH0)
#print("OutputH0CPU->",outputH0CPU.shape)
#print(outputH0CPU)
print("Check result:", ["True" if np.all(outputH0 == outputH0CPU) else "False"][0])
if __name__ == "__main__":
np.set_printoptions(precision=3, linewidth=200, suppress=True)
cuda.Device(0).make_context()
run([4, 3, 5], np.float32)
run([9, 12, 6], np.float32)
run([4, 3, 5], np.float16)
run([9, 12, 6], np.float16)
cuda.Context.pop()
print("test finish!")
| trt-samples-for-hackathon-cn-master | cookbook/05-Plugin/PluginReposity/MaxPlugin/testMaxPlugin.py |
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import ctypes
import os
import numpy as np
import tensorrt as trt
from cuda import cudart
from scipy import interpolate
soFile = "./Resize2DPlugin.so"
np.random.seed(31193)
def printArrayInformation(x, info="", n=5):
if 0 in x.shape:
print('%s:%s' % (info, str(x.shape)))
print()
return
print( '%s:%s,SumAbs=%.5e,Var=%.5f,Max=%.5f,Min=%.5f,SAD=%.5f'%( \
info,str(x.shape),np.sum(abs(x)),np.var(x),np.max(x),np.min(x),np.sum(np.abs(np.diff(x.reshape(-1)))) ))
print('\t', x.reshape(-1)[:n], x.reshape(-1)[-n:])
def check(a, b, weak=False, checkEpsilon=1e-5):
if weak:
a = a.astype(np.float32)
b = b.astype(np.float32)
res = np.all(np.abs(a - b) < checkEpsilon)
else:
res = np.all(a == b)
diff0 = np.max(np.abs(a - b))
diff1 = np.max(np.abs(a - b) / (np.abs(b) + checkEpsilon))
print("check:%s, absDiff=%f, relDiff=%f" % (res, diff0, diff1))
def addResizeCPU(inputH, nMode, nScale, nH1, nW1):
nB, nC, nH0, nW0 = inputH[0].shape
if nScale > 0 and nH1 == 0 and nW1 == 0:
nH1, nW1 = nH0 * nScale, nW0 * nScale
res = np.zeros([nB, nC, nH1, nW1], dtype=np.float32)
if nMode == 0: # nearest interpolation
indexH = ((np.arange(nH1) + 0.5) * nH0 / nH1).astype(np.int32)
indexW = ((np.arange(nW1) + 0.5) * nW0 / nW1).astype(np.int32)
for b in range(nB):
for c in range(nC):
for h in range(nH1):
for w in range(nW1):
res[b, c, h, w] = inputH[0][b, c, indexH[h], indexW[w]]
elif nMode == 1: # bilinear interpolation
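        # half_pixel coordinate transform: sample centers sit at (i + 0.5) / n, and the
        # first/last sample positions are clamped to the input border centers so edge
        # values are reproduced exactly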
h0 = (1 / 2 + np.arange(nH0)) / nH0 # Half_pixel, align_corner
w0 = (1 / 2 + np.arange(nW0)) / nW0
h1 = (1 / 2 + np.arange(nH1)) / nH1
w1 = (1 / 2 + np.arange(nW1)) / nW1
h1[0], w1[0] = h0[0], w0[0]
h1[-1], w1[-1] = h0[-1], w0[-1]
for b in range(nB):
for c in range(nC):
res[b, c] = interpolate.interp2d(w0, h0, inputH[0][b, c], kind="linear")(w1, h1)
else:
print("[addResizeCPU]Error interpolation mode!")
res = inputH[0]
return [res]
def getResizePlugin(nMode, nScale, nH1, nW1):
for c in trt.get_plugin_registry().plugin_creator_list:
if c.name == "Resize2D" and c.plugin_version == "1":
parameterList = []
parameterList.append(trt.PluginField("Mode", np.int32(nMode), trt.PluginFieldType.INT32))
parameterList.append(trt.PluginField("Scale", np.int32(nScale), trt.PluginFieldType.INT32))
parameterList.append(trt.PluginField("OutputHeight", np.int32(nH1), trt.PluginFieldType.INT32))
parameterList.append(trt.PluginField("OutputWidth", np.int32(nW1), trt.PluginFieldType.INT32))
return c.create_plugin(c.name, trt.PluginFieldCollection(parameterList))
return None
def run(shape, nMode, nScale, nH1, nW1):
testCase = "<shape=%s,nMode=%d,nScale=%f,nH1=%d,nW1=%d>" % (shape, nMode, nScale, nH1, nW1)
trtFile = "./model-%d-%f-%d-%d.plan" % (nMode, nScale, nH1, nW1)
print("Test %s" % testCase)
logger = trt.Logger(trt.Logger.ERROR)
trt.init_libnvinfer_plugins(logger, '')
ctypes.cdll.LoadLibrary(soFile)
if os.path.isfile(trtFile):
with open(trtFile, "rb") as f:
engine = trt.Runtime(logger).deserialize_cuda_engine(f.read())
if engine == None:
print("Failed loading engine!")
return
print("Succeeded loading engine!")
else:
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
profile = builder.create_optimization_profile()
config = builder.create_builder_config()
config.set_flag(trt.BuilderFlag.FP16)
inputT0 = network.add_input("inputT0", trt.float32, [-1, -1, -1, -1])
profile.set_shape(inputT0.name, [1 for i in shape], shape, shape)
config.add_optimization_profile(profile)
resizeLayer = network.add_plugin_v2([inputT0], getResizePlugin(nMode, nScale, nH1, nW1))
network.mark_output(resizeLayer.get_output(0))
engineString = builder.build_serialized_network(network, config)
if engineString == None:
print("Failed building engine!")
return
print("Succeeded building engine!")
with open(trtFile, "wb") as f:
f.write(engineString)
engine = trt.Runtime(logger).deserialize_cuda_engine(engineString)
context = engine.create_execution_context()
context.set_binding_shape(0, shape)
#print("Binding all? %s"%(["No","Yes"][int(context.all_binding_shapes_specified)]))
nInput = np.sum([engine.binding_is_input(i) for i in range(engine.num_bindings)])
nOutput = engine.num_bindings - nInput
#for i in range(nInput):
# print("Bind[%2d]:i[%2d]->" % (i, i), engine.get_binding_dtype(i), engine.get_binding_shape(i), context.get_binding_shape(i), engine.get_binding_name(i))
#for i in range(nInput, nInput + nOutput):
# print("Bind[%2d]:o[%2d]->" % (i, i - nInput), engine.get_binding_dtype(i), engine.get_binding_shape(i), context.get_binding_shape(i), engine.get_binding_name(i))
# print("Bind[%2d]:o[%2d]->" % (i, i - nInput), engine.get_binding_dtype(i), engine.get_binding_shape(i), context.get_binding_shape(i), engine.get_binding_name(i))
data = np.tile(np.arange(shape[-1]).astype(np.float32).reshape(1, 1, 1, shape[-1]), [shape[0], shape[1], shape[2], 1])
bufferH = []
bufferH.append(data)
for i in range(nOutput):
bufferH.append(np.empty(context.get_binding_shape(nInput + i), dtype=trt.nptype(engine.get_binding_dtype(nInput + i))))
bufferD = []
for i in range(engine.num_bindings):
bufferD.append(cudart.cudaMalloc(bufferH[i].nbytes)[1])
for i in range(nInput):
cudart.cudaMemcpy(bufferD[i], np.ascontiguousarray(bufferH[i].reshape(-1)).ctypes.data, bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice)
context.execute_v2(bufferD)
for i in range(nOutput):
cudart.cudaMemcpy(bufferH[nInput + i].ctypes.data, bufferD[nInput + i], bufferH[nInput + i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost)
outputCPU = addResizeCPU(bufferH[:nInput], nMode, nScale, nH1, nW1)
'''
for i in range(nInput):
printArrayInformation(bufferH[i])
print(bufferH[i])
for i in range(nOutput):
printArrayInformation(bufferH[nInput + i])
print(bufferH[nInput + i])
for i in range(nOutput):
printArrayInformation(outputCPU[i])
print(outputCPU)
'''
check(bufferH[nInput:][0], outputCPU[0], True)
for buffer in bufferD:
cudart.cudaFree(buffer)
print("Test %s finish!\n" % testCase)
if __name__ == "__main__":
np.set_printoptions(precision=3, linewidth=200, suppress=True)
# nearest interpolation
os.system("rm -rf ./*.plan")
run([2, 8, 256, 256], 0, 2, 0, 0)
os.system("rm -rf ./*.plan")
run([2, 8, 256, 256], 0, 0, 512, 510)
# bilinear interpolation
os.system("rm -rf ./*.plan")
run([2, 8, 256, 256], 1, 2, 0, 0)
os.system("rm -rf ./*.plan")
run([2, 8, 256, 256], 1, 0, 510, 510)
print("Test all finish!")
| trt-samples-for-hackathon-cn-master | cookbook/05-Plugin/PluginReposity/Resize2DPlugin-TRT8/testResize2DPluginV1.py |
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import ctypes
import os
import numpy as np
import tensorrt as trt
from cuda import cudart
from scipy import interpolate
soFile = "./Resize2DPlugin.so"
np.random.seed(31193)
def printArrayInformation(x, info="", n=5):
if 0 in x.shape:
print('%s:%s' % (info, str(x.shape)))
print()
return
print( '%s:%s,SumAbs=%.5e,Var=%.5f,Max=%.5f,Min=%.5f,SAD=%.5f'%( \
info,str(x.shape),np.sum(abs(x)),np.var(x),np.max(x),np.min(x),np.sum(np.abs(np.diff(x.reshape(-1)))) ))
print('\t', x.reshape(-1)[:n], x.reshape(-1)[-n:])
def check(a, b, weak=False, checkEpsilon=1e-5):
if weak:
a = a.astype(np.float32)
b = b.astype(np.float32)
res = np.all(np.abs(a - b) < checkEpsilon)
else:
res = np.all(a == b)
diff0 = np.max(np.abs(a - b))
diff1 = np.max(np.abs(a - b) / (np.abs(b) + checkEpsilon))
print("check:%s, absDiff=%f, relDiff=%f" % (res, diff0, diff1))
def addResizeCPU(inputH, nMode, nScale, nH1, nW1):
nB, nC, nH0, nW0 = inputH[0].shape
if nScale > 0 and nH1 == 0 and nW1 == 0:
nH1, nW1 = nH0 * nScale, nW0 * nScale
res = np.zeros([nB, nC, nH1, nW1], dtype=np.float32)
if nMode == 0: # nearest interpolation
indexH = ((np.arange(nH1) + 0.5) * nH0 / nH1).astype(np.int32)
indexW = ((np.arange(nW1) + 0.5) * nW0 / nW1).astype(np.int32)
for b in range(nB):
for c in range(nC):
for h in range(nH1):
for w in range(nW1):
res[b, c, h, w] = inputH[0][b, c, indexH[h], indexW[w]]
elif nMode == 1: # bilinear interpolation
h0 = (1 / 2 + np.arange(nH0)) / nH0 # Half_pixel, align_corner
w0 = (1 / 2 + np.arange(nW0)) / nW0
h1 = (1 / 2 + np.arange(nH1)) / nH1
w1 = (1 / 2 + np.arange(nW1)) / nW1
h1[0], w1[0] = h0[0], w0[0]
h1[-1], w1[-1] = h0[-1], w0[-1]
for b in range(nB):
for c in range(nC):
res[b, c] = interpolate.interp2d(w0, h0, inputH[0][b, c], kind="linear")(w1, h1)
else:
print("[addResizeCPU]Error interpolation mode!")
res = inputH[0]
return [res]
def getResizePlugin(nMode, nScale, nH1, nW1):
for c in trt.get_plugin_registry().plugin_creator_list:
if c.name == "Resize2D" and c.plugin_version == "2":
parameterList = []
parameterList.append(trt.PluginField("Mode", np.int32(nMode), trt.PluginFieldType.INT32))
parameterList.append(trt.PluginField("Scale", np.int32(nScale), trt.PluginFieldType.INT32))
parameterList.append(trt.PluginField("OutputHeight", np.int32(nH1), trt.PluginFieldType.INT32))
parameterList.append(trt.PluginField("OutputWidth", np.int32(nW1), trt.PluginFieldType.INT32))
return c.create_plugin(c.name, trt.PluginFieldCollection(parameterList))
return None
def run(shape, nMode, nScale, nH1, nW1):
testCase = "<shape=%s,nMode=%d,nScale=%f,nH1=%d,nW1=%d>" % (shape, nMode, nScale, nH1, nW1)
trtFile = "./model-%d-%f-%d-%d.plan" % (nMode, nScale, nH1, nW1)
print("Test %s" % testCase)
logger = trt.Logger(trt.Logger.ERROR)
trt.init_libnvinfer_plugins(logger, '')
ctypes.cdll.LoadLibrary(soFile)
if os.path.isfile(trtFile):
with open(trtFile, "rb") as f:
engine = trt.Runtime(logger).deserialize_cuda_engine(f.read())
if engine == None:
print("Failed loading engine!")
return
print("Succeeded loading engine!")
else:
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
profile = builder.create_optimization_profile()
config = builder.create_builder_config()
config.set_flag(trt.BuilderFlag.FP16)
inputT0 = network.add_input("inputT0", trt.float32, [-1, -1, -1, -1])
profile.set_shape(inputT0.name, [1 for i in shape], shape, shape)
config.add_optimization_profile(profile)
resizeLayer = network.add_plugin_v2([inputT0], getResizePlugin(nMode, nScale, nH1, nW1))
network.mark_output(resizeLayer.get_output(0))
engineString = builder.build_serialized_network(network, config)
if engineString == None:
print("Failed building engine!")
return
print("Succeeded building engine!")
with open(trtFile, "wb") as f:
f.write(engineString)
engine = trt.Runtime(logger).deserialize_cuda_engine(engineString)
context = engine.create_execution_context()
context.set_binding_shape(0, shape)
#print("Binding all? %s"%(["No","Yes"][int(context.all_binding_shapes_specified)]))
nInput = np.sum([engine.binding_is_input(i) for i in range(engine.num_bindings)])
nOutput = engine.num_bindings - nInput
#for i in range(nInput):
# print("Bind[%2d]:i[%2d]->" % (i, i), engine.get_binding_dtype(i), engine.get_binding_shape(i), context.get_binding_shape(i), engine.get_binding_name(i))
#for i in range(nInput, nInput + nOutput):
# print("Bind[%2d]:o[%2d]->" % (i, i - nInput), engine.get_binding_dtype(i), engine.get_binding_shape(i), context.get_binding_shape(i), engine.get_binding_name(i))
# print("Bind[%2d]:o[%2d]->" % (i, i - nInput), engine.get_binding_dtype(i), engine.get_binding_shape(i), context.get_binding_shape(i), engine.get_binding_name(i))
data = np.arange(np.prod(shape)).reshape(shape).astype(np.float32) / 256 / 256
bufferH = []
bufferH.append(data)
for i in range(nOutput):
bufferH.append(np.empty(context.get_binding_shape(nInput + i), dtype=trt.nptype(engine.get_binding_dtype(nInput + i))))
bufferD = []
for i in range(engine.num_bindings):
bufferD.append(cudart.cudaMalloc(bufferH[i].nbytes)[1])
for i in range(nInput):
cudart.cudaMemcpy(bufferD[i], np.ascontiguousarray(bufferH[i].reshape(-1)).ctypes.data, bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice)
context.execute_v2(bufferD)
for i in range(nOutput):
cudart.cudaMemcpy(bufferH[nInput + i].ctypes.data, bufferD[nInput + i], bufferH[nInput + i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost)
outputCPU = addResizeCPU(bufferH[:nInput], nMode, nScale, nH1, nW1)
'''
for i in range(nInput):
printArrayInformation(bufferH[i])
print(bufferH[i])
for i in range(nOutput):
printArrayInformation(bufferH[nInput + i])
print(bufferH[nInput + i])
for i in range(nOutput):
printArrayInformation(outputCPU[i])
print(outputCPU)
'''
check(bufferH[nInput:][0], outputCPU[0], True)
for buffer in bufferD:
cudart.cudaFree(buffer)
print("Test %s finish!\n" % testCase)
if __name__ == "__main__":
np.set_printoptions(precision=3, linewidth=200, suppress=True)
# nearest interpolation
os.system("rm -rf ./*.plan")
run([2, 8, 256, 256], 0, 2, 0, 0)
os.system("rm -rf ./*.plan")
run([2, 8, 256, 256], 0, 0, 512, 510)
# bilinear interpolation
os.system("rm -rf ./*.plan")
run([2, 8, 256, 256], 1, 2, 0, 0)
os.system("rm -rf ./*.plan")
run([2, 8, 256, 256], 1, 0, 510, 510)
print("Test all finish!")
| trt-samples-for-hackathon-cn-master | cookbook/05-Plugin/PluginReposity/Resize2DPlugin-TRT8/testResize2DPluginV2.py |
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
import torch as t
import torch.nn.functional as F
np.set_printoptions(precision=3, suppress=True)
h2 = 5
w2 = 9
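# compare PyTorch's two bilinear coordinate conventions, align_corners=False
# (half_pixel sampling) and align_corners=True, as a reference for the plugin's behavior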
inputData = t.Tensor(np.array([7, 5, 6, 4, 4, 2, 5, 3, 3, 9, 9, 7]).reshape(1, 1, 3, 4).astype(np.float32))
print("input data:")
print(inputData)
print("bilinear interpolate with align_corners=False:")
print(F.interpolate(inputData, size=((h2, w2)), mode="bilinear", align_corners=False).data.numpy())
print("bilinear interpolate with align_corners=True:")
print(F.interpolate(inputData, size=((h2, w2)), mode="bilinear", align_corners=True).data.numpy())
| trt-samples-for-hackathon-cn-master | cookbook/05-Plugin/PluginReposity/Resize2DPlugin-TRT8/pyTorchExample.py |
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import ctypes
import os
import numpy as np
import tensorrt as trt
from cuda import cudart
soFile = "./MultinomialDistributionPlugin.so"
np.random.seed(31193)
def printArrayInformation(x, info="", n=5):
if 0 in x.shape:
print('%s:%s' % (info, str(x.shape)))
print()
return
print( '%s:%s,SumAbs=%.5e,Var=%.5f,Max=%.5f,Min=%.5f,SAD=%.5f'%( \
info,str(x.shape),np.sum(abs(x)),np.var(x),np.max(x),np.min(x),np.sum(np.abs(np.diff(x.reshape(-1)))) ))
print('\t', x.reshape(-1)[:n], x.reshape(-1)[-n:])
def check(a, b, weak=False, checkEpsilon=1e-5):
if weak:
a = a.astype(np.float32)
b = b.astype(np.float32)
res = np.all(np.abs(a - b) < checkEpsilon)
else:
res = np.all(a == b)
diff0 = np.max(np.abs(a - b))
diff1 = np.max(np.abs(a - b) / (np.abs(b) + checkEpsilon))
print("check:%s, absDiff=%f, relDiff=%f" % (res, diff0, diff1))
def getMultinomialDistributionPlugin(nCol, seed):
for c in trt.get_plugin_registry().plugin_creator_list:
if c.name == "MultinomialDistribution":
parameterList = []
parameterList.append(trt.PluginField("seed", np.int32(seed), trt.PluginFieldType.INT32))
return c.create_plugin(c.name, trt.PluginFieldCollection(parameterList))
return None
def run(nBatchSize, nCol, seed):
testCase = "<nRow=%d,nCol=%s,seed=%d>" % (nBatchSize, nCol, seed)
trtFile = "./model-nCol%d-seed-%d.plan" % (nCol, seed)
print("Test %s" % testCase)
logger = trt.Logger(trt.Logger.ERROR)
trt.init_libnvinfer_plugins(logger, '')
ctypes.cdll.LoadLibrary(soFile)
if os.path.isfile(trtFile):
with open(trtFile, "rb") as f:
engineStr = f.read()
engine = trt.Runtime(logger).deserialize_cuda_engine(engineStr)
if engine == None:
print("Failed loading engine!")
exit()
print("Succeeded loading engine!")
else:
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
profile = builder.create_optimization_profile()
config = builder.create_builder_config()
inputT0 = network.add_input("inputT0", trt.float32, [-1, nCol])
profile.set_shape(inputT0.name, [1, nCol], [32, nCol], [1024, nCol])
config.add_optimization_profile(profile)
pluginLayer = network.add_plugin_v2([inputT0], getMultinomialDistributionPlugin(nCol, seed))
network.mark_output(pluginLayer.get_output(0))
network.mark_output(pluginLayer.get_output(1))
engineString = builder.build_serialized_network(network, config)
if engineString == None:
print("Failed building engine!")
return
print("Succeeded building engine!")
with open(trtFile, "wb") as f:
f.write(engineString)
engine = trt.Runtime(logger).deserialize_cuda_engine(engineString)
context = engine.create_execution_context()
data = np.full([nBatchSize, nCol], 1, dtype=np.float32) # uniform distribution
#data = np.tile(np.arange(0,nCol,1,dtype=np.float32),[nBatchSize,1]) # non-uniform distribution
context.set_binding_shape(0, [nBatchSize, nCol])
#print("Binding all? %s"%(["No","Yes"][int(context.all_binding_shapes_specified)]))
nInput = np.sum([engine.binding_is_input(i) for i in range(engine.num_bindings)])
nOutput = engine.num_bindings - nInput
#for i in range(nInput):
# print("Bind[%2d]:i[%2d]->" % (i, i), engine.get_binding_dtype(i), engine.get_binding_shape(i), context.get_binding_shape(i), engine.get_binding_name(i))
#for i in range(nInput, nInput + nOutput):
# print("Bind[%2d]:o[%2d]->" % (i, i - nInput), engine.get_binding_dtype(i), engine.get_binding_shape(i), context.get_binding_shape(i), engine.get_binding_name(i))
bufferH = []
bufferH.append(data.astype(np.float32).reshape(nBatchSize, nCol))
for i in range(nOutput):
bufferH.append(np.empty(context.get_binding_shape(nInput + i), dtype=trt.nptype(engine.get_binding_dtype(nInput + i))))
bufferD = []
for i in range(engine.num_bindings):
bufferD.append(cudart.cudaMalloc(bufferH[i].nbytes)[1])
for i in range(nInput):
cudart.cudaMemcpy(bufferD[i], np.ascontiguousarray(bufferH[i].reshape(-1)).ctypes.data, bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice)
context.execute_v2(bufferD)
for i in range(nOutput):
cudart.cudaMemcpy(bufferH[nInput + i].ctypes.data, bufferD[nInput + i], bufferH[nInput + i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost)
"""
for i in range(nInput):
printArrayInformation(bufferH[i])
for i in range(nOutput):
printArrayInformation(bufferH[nInput + i])
"""
count, _ = np.histogram(bufferH[nInput], np.arange(nCol + 1))
for i in range(nCol):
print("[%3d]:%4d ---- %.3f %%" % (i, count[i], count[i] / nBatchSize * 100))
for buffer in bufferD:
cudart.cudaFree(buffer)
print("Test %s finish!\n" % testCase)
if __name__ == "__main__":
os.system("rm -rf ./*.plan")
np.set_printoptions(precision=3, linewidth=200, suppress=True)
run(1024, 4, 97)
run(1024, 32, 97)
run(1024, 128, 97)
run(1024, 4, 89)
run(1024, 32, 89)
run(1024, 128, 89)
print("Test all finish!")
| trt-samples-for-hackathon-cn-master | cookbook/05-Plugin/PluginReposity/MultinomialDistributionPlugin-cuRAND-TRT8/testMultinomialDistributionPlugin.py |
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import ctypes
import os
import numpy as np
import tensorrt as trt
from cuda import cudart
soFile = "./OneHotPlugin.so"
np.random.seed(31193)
def printArrayInformation(x, info="", n=5):
if 0 in x.shape:
print('%s:%s' % (info, str(x.shape)))
print()
return
print( '%s:%s,SumAbs=%.5e,Var=%.5f,Max=%.5f,Min=%.5f,SAD=%.5f'%( \
info,str(x.shape),np.sum(abs(x)),np.var(x),np.max(x),np.min(x),np.sum(np.abs(np.diff(x.reshape(-1)))) ))
print('\t', x.reshape(-1)[:n], x.reshape(-1)[-n:])
def check(a, b, weak=False, checkEpsilon=1e-5):
if weak:
a = a.astype(np.float32)
b = b.astype(np.float32)
res = np.all(np.abs(a - b) < checkEpsilon)
else:
res = np.all(a == b)
diff0 = np.max(np.abs(a - b))
diff1 = np.max(np.abs(a - b) / (np.abs(b) + checkEpsilon))
print("check:%s, absDiff=%f, relDiff=%f" % (res, diff0, diff1))
def oneHotCPU(inputH, nEmbedding):
output = np.zeros([np.prod(inputH[0].shape), nEmbedding], dtype=np.float32)
for i, x in enumerate(inputH[0].reshape(-1)):
output[i, x] = 1
return [output.reshape(inputH[0].shape + (nEmbedding, ))]
def getOneHotPlugin(nEmbedding):
for c in trt.get_plugin_registry().plugin_creator_list:
#print(c.name)
if c.name == "OneHot":
parameterList = []
parameterList.append(trt.PluginField("nEmbedding", np.array([nEmbedding], dtype=np.int32), trt.PluginFieldType.INT32))
return c.create_plugin(c.name, trt.PluginFieldCollection(parameterList))
return None
def run(shape, nEmbedding, bFp16):
testCase = "<shape=%s,nEmbedding=%d,bFp16=%s>" % (shape, nEmbedding, bFp16)
trtFile = "./model-Dim%s.plan" % str(len(shape))
print("Test %s" % testCase)
logger = trt.Logger(trt.Logger.ERROR)
trt.init_libnvinfer_plugins(logger, '')
ctypes.cdll.LoadLibrary(soFile)
if os.path.isfile(trtFile):
with open(trtFile, "rb") as f:
engine = trt.Runtime(logger).deserialize_cuda_engine(f.read())
if engine == None:
print("Failed loading engine!")
return
print("Succeeded loading engine!")
else:
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
profile = builder.create_optimization_profile()
config = builder.create_builder_config()
if bFp16:
config.set_flag(trt.BuilderFlag.FP16)
inputT0 = network.add_input("inputT0", trt.int32, [-1 for i in shape])
profile.set_shape(inputT0.name, [1 for i in shape], [4 for i in shape], [8 for i in shape])
config.add_optimization_profile(profile)
pluginLayer = network.add_plugin_v2([inputT0], getOneHotPlugin(nEmbedding))
network.mark_output(pluginLayer.get_output(0))
engineString = builder.build_serialized_network(network, config)
if engineString == None:
print("Failed building engine!")
return
print("Succeeded building engine!")
with open(trtFile, "wb") as f:
f.write(engineString)
engine = trt.Runtime(logger).deserialize_cuda_engine(engineString)
context = engine.create_execution_context()
context.set_binding_shape(0, shape)
#print("Binding all? %s"%(["No","Yes"][int(context.all_binding_shapes_specified)]))
nInput = np.sum([engine.binding_is_input(i) for i in range(engine.num_bindings)])
nOutput = engine.num_bindings - nInput
#for i in range(nInput):
# print("Bind[%2d]:i[%2d]->" % (i, i), engine.get_binding_dtype(i), engine.get_binding_shape(i), context.get_binding_shape(i), engine.get_binding_name(i))
#for i in range(nInput, nInput + nOutput):
# print("Bind[%2d]:o[%2d]->" % (i, i - nInput), engine.get_binding_dtype(i), engine.get_binding_shape(i), context.get_binding_shape(i), engine.get_binding_name(i))
# print("Bind[%2d]:o[%2d]->" % (i, i - nInput), engine.get_binding_dtype(i), engine.get_binding_shape(i), context.get_binding_shape(i), engine.get_binding_name(i))
bufferH = []
bufferH.append(np.random.randint(0, nEmbedding, shape).astype(np.int32))
for i in range(nOutput):
bufferH.append(np.empty(context.get_binding_shape(nInput + i), dtype=trt.nptype(engine.get_binding_dtype(nInput + i))))
bufferD = []
for i in range(engine.num_bindings):
bufferD.append(cudart.cudaMalloc(bufferH[i].nbytes)[1])
for i in range(nInput):
cudart.cudaMemcpy(bufferD[i], np.ascontiguousarray(bufferH[i].reshape(-1)).ctypes.data, bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice)
context.execute_v2(bufferD)
for i in range(nOutput):
cudart.cudaMemcpy(bufferH[nInput + i].ctypes.data, bufferD[nInput + i], bufferH[nInput + i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost)
outputCPU = oneHotCPU(bufferH[:nInput], nEmbedding)
"""
for i in range(nInput):
printArrayInformation(bufferH[i])
    for i in range(nInput, nInput + nOutput):
        printArrayInformation(bufferH[i])
    for i in range(nInput, nInput + nOutput):
        printArrayInformation(outputCPU[i - nInput])
"""
check(bufferH[nInput:][0], outputCPU[0], True)
for buffer in bufferD:
cudart.cudaFree(buffer)
print("Test %s finish!\n" % testCase)
if __name__ == "__main__":
os.system("rm -rf ./*.plan")
np.set_printoptions(precision=3, linewidth=200, suppress=True)
run([1], 8, False)
run([2, 2], 16, False)
run([4, 4, 4], 32, False)
run([8, 8, 8, 8], 1024, False)
os.system("rm -rf ./*.plan")
    run([4, 4, 4], 2048, False) # FP32, large embedding table (nEmbedding=2048)
os.system("rm -rf ./*.plan")
run([4, 4, 4], 1600, False)
os.system("rm -rf ./*.plan")
run([1], 8, True)
run([2, 2], 16, True)
run([4, 4, 4], 32, True)
run([8, 8, 8, 8], 1024, True)
os.system("rm -rf ./*.plan")
    run([4, 4, 4], 2048, True) # FP16, large embedding table (nEmbedding=2048)
os.system("rm -rf ./*.plan")
run([4, 4, 4], 1600, True)
os.system("rm -rf ./*.plan")
print("Test all finish!")
| trt-samples-for-hackathon-cn-master | cookbook/05-Plugin/PluginReposity/OneHotPlugin-TRT8/testOneHotPlugin.py |
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import ctypes
import numpy as np
import pycuda.autoinit
import pycuda.driver as cuda
import tensorrt as trt
soFilePath = "./SortPlugin.so"
np.random.seed(31193)
epsilon = 1e-6
nElement = 1024
nWidth = 1
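# CPU reference: sort the (key, value) pairs ascending by key; np.lexsort treats its
# last argument as the primary sort key.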
def sortCPU(inputH0, inputH1):
index = np.lexsort((inputH1, inputH0))
    output = np.array([[inputH0[index[i]], inputH1[index[i]]] for i in range(nElement)])
return output
def getSortPlugin():
for c in trt.get_plugin_registry().plugin_creator_list:
if c.name == "SortPlugin":
p0 = trt.PluginField("descending", np.array([0], dtype=np.int32), trt.PluginFieldType.INT32)
return c.create_plugin(c.name, trt.PluginFieldCollection([p0]))
return None
def buildEngine(logger):
builder = trt.Builder(logger)
config = builder.create_builder_config()
network = builder.create_network()
tensor1 = network.add_input("dataKey", trt.float32, (nElement, 1))
tensor2 = network.add_input("dataValue", trt.float32, (nElement, nWidth))
sortLayer = network.add_plugin_v2([tensor1, tensor2], getSortPlugin())
network.mark_output(sortLayer.get_output(0))
network.mark_output(sortLayer.get_output(1))
return builder.build_engine(network, config)
def run():
logger = trt.Logger(trt.Logger.ERROR)
trt.init_libnvinfer_plugins(logger, '')
ctypes.cdll.LoadLibrary(soFilePath)
engine = buildEngine(logger)
if engine == None:
print("Failed building engine!")
return None
print("Succeeded building engine!")
context = engine.create_execution_context()
stream = cuda.Stream()
inputH0 = np.ascontiguousarray(np.random.rand(nElement).astype(np.float32).reshape(-1))
inputD0 = cuda.mem_alloc(inputH0.nbytes)
inputH1 = np.ascontiguousarray(np.random.rand(nElement, nWidth).astype(np.float32).reshape(-1))
inputD1 = cuda.mem_alloc(inputH1.nbytes)
outputH0 = np.empty(engine.get_binding_shape(2), dtype=np.float32)
outputD0 = cuda.mem_alloc(outputH0.nbytes)
outputH1 = np.empty(engine.get_binding_shape(3), dtype=np.float32)
outputD1 = cuda.mem_alloc(outputH1.nbytes)
cuda.memcpy_htod_async(inputD0, inputH0, stream)
cuda.memcpy_htod_async(inputD1, inputH1, stream)
context.execute_async(1, [int(inputD0), int(inputD1), int(outputD0), int(outputD1)], stream.handle)
cuda.memcpy_dtoh_async(outputH0, outputD0, stream)
cuda.memcpy_dtoh_async(outputH1, outputD1, stream)
stream.synchronize()
outputCPU = sortCPU(inputH0, inputH1)
print(np.shape(outputH0), np.shape(outputH1))
print("Check result Key:", "True" if np.mean(np.abs(outputH0.reshape(-1) - outputCPU[:, 0].reshape(-1))) < epsilon else "False")
print("Check result Value:", "True" if np.mean(np.abs(outputH1.reshape(-1) - outputCPU[:, 1].reshape(-1))) < epsilon else "False")
"""
for i in range(1000):
print("%4d"%i,(inputH0[i],inputH1[i]),outputCPU[i],outputH0[i],outputH1[i])
"""
if __name__ == "__main__":
np.set_printoptions(precision=3, linewidth=200, suppress=True)
run()
print("test finish!")
| trt-samples-for-hackathon-cn-master | cookbook/05-Plugin/PluginReposity/SortPlugin-V1.0-float/testSortPlugin.py |
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import ctypes
import numpy as np
import pycuda.autoinit
import pycuda.driver as cuda
import tensorrt as trt
npToNumber = {np.float32: 0, np.float16: 1, np.int8: 2, np.int32: 3}
soFilePath = "./Mask2DPlugin.so"
globalMask2DTrueValue = 5
globalMask2DFalseValue = -5
np.random.seed(31193)
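# CPU reference: for each batch entry j, fill the top-left inputH1[j] x inputH2[j]
# rectangle with trueValue and everything else with falseValue.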
def mask2DCPU(inputH0, inputH1, inputH2, mask2DTrueValue, mask2DFalseValue):
outputH0CPU = np.full([inputH0.shape[0], 1, *(inputH0.shape[2:])], mask2DFalseValue, dtype=np.float32)
for j in range(inputH2.shape[0]):
outputH0CPU[j, 0, :inputH1[j], :inputH2[j]] = mask2DTrueValue
return outputH0CPU
def getMask2DPlugin(datatype, mask2DTrueValue, mask2DFalseValue):
for c in trt.get_plugin_registry().plugin_creator_list:
if c.name == "Mask2DPlugin":
p0 = trt.PluginField("datatype", np.array([npToNumber[datatype]], dtype=np.int32), trt.PluginFieldType.INT32)
p1 = trt.PluginField("mask2DTrueValue", np.array([mask2DTrueValue], dtype=np.float32), trt.PluginFieldType.FLOAT32)
p2 = trt.PluginField("mask2DFalseValue", np.array([mask2DFalseValue], dtype=np.float32), trt.PluginFieldType.FLOAT32)
return c.create_plugin(c.name, trt.PluginFieldCollection([p0, p1, p2]))
return None
def buildEngine(logger, outDatatype):
builder = trt.Builder(logger)
network = builder.create_network(1)
profile = builder.create_optimization_profile()
config = builder.create_builder_config()
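    # config.flags is a bit mask of trt.BuilderFlag values; bit 0 is BuilderFlag.FP16,
    # so the next line enables FP16 mode exactly when the requested output datatype is float16.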
config.flags = int(outDatatype == np.float16)
inputT0 = network.add_input("inputT0", trt.float32, [-1, -1, -1, -1])
profile.set_shape(inputT0.name, [1, 1, 1, 1], [4, 3, 30, 40], [9, 12, 30, 40])
inputT1 = network.add_input("inputT1", trt.int32, [-1])
profile.set_shape(inputT1.name, [1], [4], [9])
inputT2 = network.add_input("inputT2", trt.int32, [-1])
profile.set_shape(inputT2.name, [1], [4], [9])
config.add_optimization_profile(profile)
pluginLayer = network.add_plugin_v2([inputT0, inputT1, inputT2], getMask2DPlugin(outDatatype, globalMask2DTrueValue, globalMask2DFalseValue))
network.mark_output(pluginLayer.get_output(0))
return builder.build_engine(network, config)
def run(inDim, outDatatype):
print("test", inDim, outDatatype)
logger = trt.Logger(trt.Logger.ERROR)
trt.init_libnvinfer_plugins(logger, '')
ctypes.cdll.LoadLibrary(soFilePath)
engine = buildEngine(logger, outDatatype)
if engine == None:
print("Failed building engine!")
return None
print("Succeeded building engine!")
context = engine.create_execution_context()
context.set_binding_shape(0, inDim)
context.set_binding_shape(1, inDim[:1])
context.set_binding_shape(2, inDim[:1])
#print("Bind0->",engine.get_binding_shape(0),context.get_binding_shape(0));
#print("Bind1->",engine.get_binding_shape(1),context.get_binding_shape(1));
#print("Bind2->",engine.get_binding_shape(2),context.get_binding_shape(2));
print("All bind:", context.all_binding_shapes_specified)
stream = cuda.Stream()
data0 = np.full(inDim, 1, dtype=np.float32)
data1 = np.random.randint(1, inDim[2], inDim[:1], dtype=np.int32)
data2 = np.random.randint(1, inDim[3], inDim[:1], dtype=np.int32)
inputH0 = np.ascontiguousarray(data0)
inputD0 = cuda.mem_alloc(inputH0.nbytes)
inputH1 = np.ascontiguousarray(data1)
inputD1 = cuda.mem_alloc(inputH1.nbytes)
inputH2 = np.ascontiguousarray(data2)
inputD2 = cuda.mem_alloc(inputH2.nbytes)
outputH0 = np.empty(context.get_binding_shape(3), dtype=trt.nptype(engine.get_binding_dtype(3)))
outputD0 = cuda.mem_alloc(outputH0.nbytes)
cuda.memcpy_htod_async(inputD0, inputH0, stream)
cuda.memcpy_htod_async(inputD1, inputH1, stream)
cuda.memcpy_htod_async(inputD2, inputH2, stream)
context.execute_async_v2([int(inputD0), int(inputD1), int(inputD2), int(outputD0)], stream.handle)
cuda.memcpy_dtoh_async(outputH0, outputD0, stream)
stream.synchronize()
outputH0CPU = mask2DCPU(inputH0, inputH1, inputH2, globalMask2DTrueValue, globalMask2DFalseValue)
#print("InputH0->",inputH0.shape, engine.get_binding_dtype(0))
#print(inputH0)
#print("InputH1->",inputH1.shape, engine.get_binding_dtype(1))
#print(inputH1)
#print("InputH2->",inputH2.shape, engine.get_binding_dtype(2))
#print(inputH2)
#print("OutputH0->",outputH0.shape, engine.get_binding_dtype(3))
#print(outputH0)
#print("OutputH0CPU->",outputH0CPU.shape)
#print(outputH0CPU)
print("Check result:", ["True" if np.all(outputH0 == outputH0CPU) else "False"][0])
if __name__ == "__main__":
np.set_printoptions(precision=3, linewidth=200, suppress=True)
cuda.Device(0).make_context()
run([4, 3, 30, 40], np.float32)
run([4, 3, 30, 40], np.float16)
cuda.Context.pop()
print("test finish!")
| trt-samples-for-hackathon-cn-master | cookbook/05-Plugin/PluginReposity/Mask2DPlugin/testMask2DPlugin.py |
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import ctypes
import numpy as np
import pycuda.autoinit
import pycuda.driver as cuda
import tensorrt as trt
soFilePath = "./ReducePlugin.so"
np.random.seed(31193)
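# CPU reference: reduce (sum or max) over the second-to-last axis of the input.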
def reduceCPU(inputH0, isSum):
if isSum:
return np.sum(inputH0, -2)
else:
return np.max(inputH0, -2)
def getReducePlugin(isSum):
for c in trt.get_plugin_registry().plugin_creator_list:
if c.name == "ReducePlugin":
p0 = trt.PluginField("isSum", np.array([int(isSum)], dtype=np.int32), trt.PluginFieldType.INT32)
return c.create_plugin(c.name, trt.PluginFieldCollection([p0]))
return None
def buildEngine(logger, shape, isSum):
builder = trt.Builder(logger)
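    # Implicit-batch network (TensorRT 7 style): the batch size is supplied at
    # execution time, so the input shape below omits the batch dimension.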
builder.max_batch_size = 4
builder.max_workspace_size = 3 << 30
network = builder.create_network()
inputTensor = network.add_input("inputT0", trt.float32, shape)
reduceLayer = network.add_plugin_v2([inputTensor], getReducePlugin(isSum))
network.mark_output(reduceLayer.get_output(0))
return builder.build_cuda_engine(network)
def run(nBatchSize, shape, isSum):
print("test", nBatchSize, shape, isSum)
logger = trt.Logger(trt.Logger.ERROR)
trt.init_libnvinfer_plugins(logger, '')
ctypes.cdll.LoadLibrary(soFilePath)
engine = buildEngine(logger, shape, isSum)
if engine == None:
print("Failed building engine!")
return None
print("Succeeded building engine!")
context = engine.create_execution_context()
stream = cuda.Stream()
data = np.random.rand(*[nBatchSize, *shape]).astype(np.float32)
inputH0 = np.ascontiguousarray(data.reshape(-1))
inputD0 = cuda.mem_alloc(inputH0.nbytes)
outputH0 = np.empty((nBatchSize, ) + tuple(context.get_binding_shape(1)), dtype=trt.nptype(engine.get_binding_dtype(1)))
    outputD0 = cuda.mem_alloc(outputH0.nbytes)
    cuda.memcpy_htod_async(inputD0, inputH0, stream)
    context.execute_async(nBatchSize, [int(inputD0), int(outputD0)], stream.handle)
    cuda.memcpy_dtoh_async(outputH0, outputD0, stream)
stream.synchronize()
outputH0CPU = reduceCPU(data, isSum)
print("Check result:", ["True" if np.all(outputH0 == outputH0CPU) else "False"][0])
"""
temp = outputH0
print(temp.shape, temp.dtype, np.mean(temp), np.var(temp), np.max(temp), np.min(temp))
print(temp)
temp = outputH0CPU
print(temp.shape, temp.dtype, np.mean(temp), np.var(temp), np.max(temp), np.min(temp))
print(temp)
"""
if __name__ == "__main__":
np.set_printoptions(precision=3, linewidth=200, suppress=True)
run(4, [8, 2, 128], False)
run(4, [8, 5, 128], False)
run(4, [8, 6, 128], False)
run(4, [8, 10, 128], False)
run(4, [8, 15, 128], False)
run(4, [8, 16, 128], False)
run(4, [8, 30, 128], False)
run(4, [8, 82, 128], False)
run(4, [8, 30, 128], True)
print("test finish!")
| trt-samples-for-hackathon-cn-master | cookbook/05-Plugin/PluginReposity/ReducePlugin/testReducePlugin.py |
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import ctypes
import numpy as np
import pycuda.autoinit
import pycuda.driver as cuda
import tensorrt as trt
soFilePath = "./SortPlugin.so"
np.random.seed(31193)
epsilon = 1e-6
nElement = 128
nWidth = 4
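# CPU reference: sort rows ascending by key; ties are broken by the first value
# column so that the reference ordering is deterministic.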
def sortCPU(inputH0, inputH1):
index = np.lexsort((inputH1[:, 0].reshape(-1), inputH0))
return inputH0[index], inputH1[index]
def getSortPlugin():
for c in trt.get_plugin_registry().plugin_creator_list:
if c.name == "SortPlugin":
p0 = trt.PluginField("descending", np.array([0], dtype=np.int32), trt.PluginFieldType.INT32)
return c.create_plugin(c.name, trt.PluginFieldCollection([p0]))
return None
def buildEngine(logger):
builder = trt.Builder(logger)
config = builder.create_builder_config()
network = builder.create_network()
tensor1 = network.add_input("dataKey", trt.float32, (nElement, 1))
tensor2 = network.add_input("dataValue", trt.float32, (nElement, nWidth))
sortLayer = network.add_plugin_v2([tensor1, tensor2], getSortPlugin())
network.mark_output(sortLayer.get_output(0))
network.mark_output(sortLayer.get_output(1))
return builder.build_engine(network, config)
def run():
logger = trt.Logger(trt.Logger.ERROR)
trt.init_libnvinfer_plugins(logger, '')
ctypes.cdll.LoadLibrary(soFilePath)
engine = buildEngine(logger)
if engine == None:
print("Failed building engine!")
return None
print("Succeeded building engine!")
context = engine.create_execution_context()
stream = cuda.Stream()
inputH0 = np.ascontiguousarray(np.random.rand(nElement).astype(np.float32).reshape(-1))
inputD0 = cuda.mem_alloc(inputH0.nbytes)
inputH1 = np.ascontiguousarray(np.random.rand(nElement, nWidth).astype(np.float32).reshape(-1))
inputD1 = cuda.mem_alloc(inputH1.nbytes)
outputH0 = np.empty(engine.get_binding_shape(2), dtype=np.float32)
outputD0 = cuda.mem_alloc(outputH0.nbytes)
outputH1 = np.empty(engine.get_binding_shape(3), dtype=np.float32)
outputD1 = cuda.mem_alloc(outputH1.nbytes)
cuda.memcpy_htod_async(inputD0, inputH0, stream)
cuda.memcpy_htod_async(inputD1, inputH1, stream)
context.execute_async(1, [int(inputD0), int(inputD1), int(outputD0), int(outputD1)], stream.handle)
cuda.memcpy_dtoh_async(outputH0, outputD0, stream)
cuda.memcpy_dtoh_async(outputH1, outputD1, stream)
stream.synchronize()
outputCPUH0, outputCPUH1 = sortCPU(inputH0, inputH1.reshape(nElement, nWidth))
print(np.shape(outputH0), np.shape(outputH1))
print("Check result Key:", "True" if np.mean(np.abs(outputH0.reshape(-1) - outputCPUH0.reshape(-1))) < epsilon else "False")
print("Check result Value:", "True" if np.mean(np.abs(outputH1.reshape(-1) - outputCPUH1.reshape(-1))) < epsilon else "False")
"""
for i in range(nElement):
print("%4d"%i,(inputH0[i],inputH1[i]),(outputCPUH0[i],outputCPUH1[i]),(outputH0[i],outputH1[i]))
"""
if __name__ == "__main__":
np.set_printoptions(precision=3, linewidth=200, suppress=True)
run()
print("test finish!")
| trt-samples-for-hackathon-cn-master | cookbook/05-Plugin/PluginReposity/SortPlugin-V2.0-float4/testSortPlugin.py |
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import ctypes
import numpy as np
import pycuda.autoinit
import pycuda.driver as cuda
import tensorrt as trt
#import matplotlib.pyplot as plt
soFilePath = "./CCLPlugin.so"
np.random.seed(31193)
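# CCL = Connected-Component Labelling: the plugin groups pixels whose pixel score and
# 8-neighbour link scores pass the thresholds below (presumably PixelLink-style
# text-detection post-processing), discarding components smaller than minArea.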
def getCCLPlugin():
for c in trt.get_plugin_registry().plugin_creator_list:
if c.name == "CCLPlugin":
p0 = trt.PluginField("minPixelScore", np.array([0.7], dtype=np.float32), trt.PluginFieldType.FLOAT32)
p1 = trt.PluginField("minLinkScore", np.array([0.7], dtype=np.float32), trt.PluginFieldType.FLOAT32)
p2 = trt.PluginField("minArea", np.array([10], dtype=np.int32), trt.PluginFieldType.INT32)
p3 = trt.PluginField("maxcomponentCount", np.array([65536], dtype=np.int32), trt.PluginFieldType.INT32)
return c.create_plugin(c.name, trt.PluginFieldCollection([p0, p1, p2, p3]))
return None
def buildEngine(logger):
builder = trt.Builder(logger)
network = builder.create_network(1)
config = builder.create_builder_config()
profile = builder.create_optimization_profile()
inputT0 = network.add_input("pixelScore", trt.float32, (-1, -1, -1))
profile.set_shape(inputT0.name, [1, 1, 1], [2, 384, 640], [4, 768, 1280])
inputT1 = network.add_input("linkScore", trt.float32, (-1, 8, -1, -1))
profile.set_shape(inputT1.name, [1, 8, 1, 1], [4, 8, 384, 640], [8, 8, 768, 1280])
config.add_optimization_profile(profile)
cclLayer = network.add_plugin_v2([inputT0, inputT1], getCCLPlugin())
network.mark_output(cclLayer.get_output(0))
network.mark_output(cclLayer.get_output(1))
return builder.build_engine(network, config)
def run(inDim):
print("test", inDim)
logger = trt.Logger(trt.Logger.ERROR)
trt.init_libnvinfer_plugins(logger, '')
ctypes.cdll.LoadLibrary(soFilePath)
engine = buildEngine(logger)
if engine == None:
print("Failed building engine!")
return None
print("Succeeded building engine!")
context = engine.create_execution_context()
context.set_binding_shape(0, inDim)
context.set_binding_shape(1, inDim[:1] + [8] + inDim[1:])
stream = cuda.Stream()
    data0 = np.random.rand(np.prod(inDim)).astype(np.float32).reshape(-1) # bindings are float32, so cast the host data before copying
    data1 = np.random.rand(np.prod(inDim) * 8).astype(np.float32).reshape(-1)
inputH0 = np.ascontiguousarray(data0)
inputD0 = cuda.mem_alloc(inputH0.nbytes)
inputH1 = np.ascontiguousarray(data1)
inputD1 = cuda.mem_alloc(inputH1.nbytes)
outputH0 = np.empty(context.get_binding_shape(2), dtype=trt.nptype(engine.get_binding_dtype(2)))
outputH1 = np.empty(context.get_binding_shape(3), dtype=trt.nptype(engine.get_binding_dtype(3)))
outputD0 = cuda.mem_alloc(outputH0.nbytes)
outputD1 = cuda.mem_alloc(outputH1.nbytes)
cuda.memcpy_htod_async(inputD0, inputH0, stream)
cuda.memcpy_htod_async(inputD1, inputH1, stream)
stream.synchronize()
context.execute_async_v2([int(inputD0), int(inputD1), int(outputD0), int(outputD1)], stream.handle)
stream.synchronize()
cuda.memcpy_dtoh_async(outputH0, outputD0, stream)
cuda.memcpy_dtoh_async(outputH1, outputD1, stream)
stream.synchronize()
print(np.shape(outputH0), np.shape(outputH1))
#print(outputH0)
#print(outputH1)
#plt.imshow(outputH0/np.max(outputH0))
#plt.show()
if __name__ == "__main__":
run([1, 1, 1])
run([2, 384, 640])
run([4, 768, 1280])
print("test finish!")
| trt-samples-for-hackathon-cn-master | cookbook/05-Plugin/PluginReposity/CCLPlugin-TRT7-DynamicShape/testCCLPlugin.py |
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import ctypes
import os
import numpy as np
import tensorrt as trt
from cuda import cudart
soFile = "./LayerNormPluginCUB.so"
epsilon = 1e-6
np.random.seed(31193)
def printArrayInformation(x, info="", n=5):
if 0 in x.shape:
print('%s:%s' % (info, str(x.shape)))
print()
return
print( '%s:%s,SumAbs=%.5e,Var=%.5f,Max=%.5f,Min=%.5f,SAD=%.5f'%( \
info,str(x.shape),np.sum(abs(x)),np.var(x),np.max(x),np.min(x),np.sum(np.abs(np.diff(x.reshape(-1)))) ))
print('\t', x.reshape(-1)[:n], x.reshape(-1)[-n:])
def check(a, b, weak=False, checkEpsilon=1e-5):
if weak:
a = a.astype(np.float32)
b = b.astype(np.float32)
res = np.all(np.abs(a - b) < checkEpsilon)
else:
res = np.all(a == b)
diff0 = np.max(np.abs(a - b))
diff1 = np.max(np.abs(a - b) / (np.abs(b) + checkEpsilon))
print("check:%s, absDiff=%f, relDiff=%f" % (res, diff0, diff1))
def layerNormCPU(bufferH, epsilon):
_x, gamma, beta = bufferH
nHiddenSize = bufferH[0].shape[2]
_0 = np.mean(_x, 2)[:, :, np.newaxis]
_1 = _x - _0
_2 = _1 * _1
_3 = np.mean(_2, 2)[:, :, np.newaxis]
_4 = np.array(epsilon, dtype=np.float32)
_5 = _4.reshape(1, 1, 1)
_6 = _3 + _5
_7 = np.sqrt(_6)
_8 = 1 / _7 # 1/sqrt(...)
_9 = gamma
_10 = _9.reshape(1, 1, nHiddenSize)
_11 = _8 * _10 # gamma/sqrt(...)
_12 = _0 * _11 # bμ/sqrt(...)
_13 = beta
_14 = _13.reshape(1, 1, nHiddenSize)
_15 = _14 - _12 # beta-bμ/sqrt(...)
_16 = _x * _11 # bx/sqrt(...)
_17 = _15 + _16 # gamma(x-μ)/sqrt(...)+beta
_18 = _17.reshape(bufferH[0].shape[0], bufferH[0].shape[1], bufferH[0].shape[2])
return [_18]
def getLayerNormPlugin(epsilon):
for c in trt.get_plugin_registry().plugin_creator_list:
#print(c.name)
if c.name == "LayerNorm" and c.plugin_version == "1":
print("Find %s V%s" % (c.name, c.plugin_version))
parameterList = []
parameterList.append(trt.PluginField("epsilon", np.float32(epsilon), trt.PluginFieldType.FLOAT32))
return c.create_plugin(c.name, trt.PluginFieldCollection(parameterList))
return None
def run(shape, bFp16):
testCase = "<shape=%s,dataType=%s>" % (shape, "FP16" if bFp16 else "FP32")
trtFile = "./model-%d-%s.plan" % (shape[2], "FP16" if bFp16 else "FP32")
print("Test %s" % testCase)
logger = trt.Logger(trt.Logger.ERROR)
trt.init_libnvinfer_plugins(logger, '')
ctypes.cdll.LoadLibrary(soFile)
if os.path.isfile(trtFile):
with open(trtFile, "rb") as f:
engineStr = f.read()
engine = trt.Runtime(logger).deserialize_cuda_engine(engineStr)
if engine == None:
print("Failed loading engine!")
exit()
print("Succeeded loading engine!")
else:
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
profile = builder.create_optimization_profile()
config = builder.create_builder_config()
if bFp16:
config.set_flag(trt.BuilderFlag.FP16)
inputT0 = network.add_input("inputT0", trt.float16 if bFp16 else trt.float32, [-1 for i in shape])
profile.set_shape(inputT0.name, [1, 1, shape[2]], shape, shape)
inputT1 = network.add_input("inputGamma", trt.float16 if bFp16 else trt.float32, [256])
inputT2 = network.add_input("inputBeta", trt.float16 if bFp16 else trt.float32, [256])
config.add_optimization_profile(profile)
pluginLayer = network.add_plugin_v2([inputT0, inputT1, inputT2], getLayerNormPlugin(epsilon))
network.mark_output(pluginLayer.get_output(0))
engineString = builder.build_serialized_network(network, config)
if engineString == None:
print("Failed building engine!")
return
print("Succeeded building engine!")
with open(trtFile, "wb") as f:
f.write(engineString)
engine = trt.Runtime(logger).deserialize_cuda_engine(engineString)
context = engine.create_execution_context()
context.set_binding_shape(0, shape)
#print("Binding all? %s"%(["No","Yes"][int(context.all_binding_shapes_specified)]))
nInput = np.sum([engine.binding_is_input(i) for i in range(engine.num_bindings)])
nOutput = engine.num_bindings - nInput
#for i in range(nInput):
# print("Bind[%2d]:i[%2d]->" % (i, i), engine.get_binding_dtype(i), engine.get_binding_shape(i), context.get_binding_shape(i), engine.get_binding_name(i))
#for i in range(nInput, nInput + nOutput):
# print("Bind[%2d]:o[%2d]->" % (i, i - nInput), engine.get_binding_dtype(i), engine.get_binding_shape(i), context.get_binding_shape(i), engine.get_binding_name(i))
bufferH = []
bufferH.append(np.random.rand(np.prod(shape)).astype(np.float16 if bFp16 else np.float32).reshape(shape) * 2 - 1)
#bufferH.append(np.arange(np.prod(shape)).astype(np.float16 if bFp16 else np.float32).reshape(shape))
bufferH.append(np.ones(shape[2]).astype(np.float16 if bFp16 else np.float32))
bufferH.append(np.zeros(shape[2]).astype(np.float16 if bFp16 else np.float32))
for i in range(nOutput):
bufferH.append(np.empty(context.get_binding_shape(nInput + i), dtype=trt.nptype(engine.get_binding_dtype(nInput + i))))
bufferD = []
for i in range(engine.num_bindings):
bufferD.append(cudart.cudaMalloc(bufferH[i].nbytes)[1])
for i in range(nInput):
cudart.cudaMemcpy(bufferD[i], np.ascontiguousarray(bufferH[i].reshape(-1)).ctypes.data, bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice)
context.execute_v2(bufferD)
for i in range(nOutput):
cudart.cudaMemcpy(bufferH[nInput + i].ctypes.data, bufferD[nInput + i], bufferH[nInput + i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost)
outputCPU = layerNormCPU(bufferH[:nInput], epsilon)
"""
for i in range(nInput):
printArrayInformation(bufferH[i])
    for i in range(nInput, nInput + nOutput):
        printArrayInformation(bufferH[i])
    for i in range(nInput, nInput + nOutput):
        printArrayInformation(outputCPU[i - nInput])
"""
check(bufferH[nInput:][0], outputCPU[0], True)
for buffer in bufferD:
cudart.cudaFree(buffer)
print("Test %s finish!\n" % testCase)
if __name__ == "__main__":
np.set_printoptions(precision=3, linewidth=200, suppress=True)
os.system("rm -rf ./*.plan")
run([16, 64, 32], False)
os.system("rm -rf ./*.plan")
run([16, 64, 32], True)
os.system("rm -rf ./*.plan")
run([16, 64, 256], False)
os.system("rm -rf ./*.plan")
run([16, 64, 256], True)
os.system("rm -rf ./*.plan")
run([16, 64, 1024], False)
os.system("rm -rf ./*.plan")
run([16, 64, 1024], True)
os.system("rm -rf ./*.plan")
run([16, 64, 1600], False)
os.system("rm -rf ./*.plan")
run([16, 64, 1600], True)
print("Test all finish!")
| trt-samples-for-hackathon-cn-master | cookbook/05-Plugin/PluginReposity/LayerNormPlugin-TRT8/testLayerNormPluginCUBV4.py |
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import ctypes
import os
import numpy as np
import tensorrt as trt
from cuda import cudart
soFile = "./LayerNormPluginOneFlow.so"
epsilon = 1e-6
np.random.seed(31193)
def printArrayInformation(x, info="", n=5):
if 0 in x.shape:
print('%s:%s' % (info, str(x.shape)))
print()
return
print( '%s:%s,SumAbs=%.5e,Var=%.5f,Max=%.5f,Min=%.5f,SAD=%.5f'%( \
info,str(x.shape),np.sum(abs(x)),np.var(x),np.max(x),np.min(x),np.sum(np.abs(np.diff(x.reshape(-1)))) ))
print('\t', x.reshape(-1)[:n], x.reshape(-1)[-n:])
def check(a, b, weak=False, checkEpsilon=1e-5):
if weak:
a = a.astype(np.float32)
b = b.astype(np.float32)
res = np.all(np.abs(a - b) < checkEpsilon)
else:
res = np.all(a == b)
diff0 = np.max(np.abs(a - b))
diff1 = np.max(np.abs(a - b) / (np.abs(b) + checkEpsilon))
print("check:%s, absDiff=%f, relDiff=%f" % (res, diff0, diff1))
def layerNormCPU(bufferH, epsilon):
_x = bufferH[0]
_0 = np.mean(_x, 2)[:, :, np.newaxis]
_1 = _x - _0
_2 = _1 * _1
_3 = np.mean(_2, 2)[:, :, np.newaxis]
_4 = np.array(epsilon, dtype=np.float32)
_5 = _4.reshape(1, 1, 1)
_6 = _3 + _5
_7 = np.sqrt(_6)
_8 = 1 / _7 # 1/sqrt(...)
_9 = _1 * _8
return [_9]
def getLayerNormPlugin(epsilon):
for c in trt.get_plugin_registry().plugin_creator_list:
#print(c.name)
if c.name == "LayerNorm" and c.plugin_version == "5":
print("Find %s V%s" % (c.name, c.plugin_version))
parameterList = []
parameterList.append(trt.PluginField("epsilon", np.float32(epsilon), trt.PluginFieldType.FLOAT32))
return c.create_plugin(c.name, trt.PluginFieldCollection(parameterList))
return None
def run(shape, bFp16):
testCase = "<shape=%s,dataType=%s>" % (shape, "FP16" if bFp16 else "FP32")
trtFile = "./model-%d-%s.plan" % (shape[2], "FP16" if bFp16 else "FP32")
print("Test %s" % testCase)
logger = trt.Logger(trt.Logger.ERROR)
trt.init_libnvinfer_plugins(logger, '')
ctypes.cdll.LoadLibrary(soFile)
if os.path.isfile(trtFile):
with open(trtFile, "rb") as f:
engineStr = f.read()
engine = trt.Runtime(logger).deserialize_cuda_engine(engineStr)
if engine == None:
print("Failed loading engine!")
exit()
print("Succeeded loading engine!")
else:
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
profile = builder.create_optimization_profile()
config = builder.create_builder_config()
if bFp16:
config.set_flag(trt.BuilderFlag.FP16)
inputT0 = network.add_input("inputT0", trt.float16 if bFp16 else trt.float32, [-1 for i in shape])
profile.set_shape(inputT0.name, [1, 1, shape[2]], shape, shape)
config.add_optimization_profile(profile)
pluginLayer = network.add_plugin_v2([inputT0], getLayerNormPlugin(epsilon))
network.mark_output(pluginLayer.get_output(0))
engineString = builder.build_serialized_network(network, config)
if engineString == None:
print("Failed building engine!")
return
print("Succeeded building engine!")
with open(trtFile, "wb") as f:
f.write(engineString)
engine = trt.Runtime(logger).deserialize_cuda_engine(engineString)
context = engine.create_execution_context()
context.set_binding_shape(0, shape)
#print("Binding all? %s"%(["No","Yes"][int(context.all_binding_shapes_specified)]))
nInput = np.sum([engine.binding_is_input(i) for i in range(engine.num_bindings)])
nOutput = engine.num_bindings - nInput
#for i in range(nInput):
# print("Bind[%2d]:i[%2d]->" % (i, i), engine.get_binding_dtype(i), engine.get_binding_shape(i), context.get_binding_shape(i), engine.get_binding_name(i))
#for i in range(nInput, nInput + nOutput):
# print("Bind[%2d]:o[%2d]->" % (i, i - nInput), engine.get_binding_dtype(i), engine.get_binding_shape(i), context.get_binding_shape(i), engine.get_binding_name(i))
# print("Bind[%2d]:o[%2d]->" % (i, i - nInput), engine.get_binding_dtype(i), engine.get_binding_shape(i), context.get_binding_shape(i), engine.get_binding_name(i))
bufferH = []
    bufferH.append(np.random.rand(np.prod(shape)).astype(np.float16 if bFp16 else np.float32).reshape(shape))
for i in range(nOutput):
bufferH.append(np.empty(context.get_binding_shape(nInput + i), dtype=trt.nptype(engine.get_binding_dtype(nInput + i))))
bufferD = []
for i in range(engine.num_bindings):
bufferD.append(cudart.cudaMalloc(bufferH[i].nbytes)[1])
for i in range(nInput):
cudart.cudaMemcpy(bufferD[i], np.ascontiguousarray(bufferH[i].reshape(-1)).ctypes.data, bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice)
context.execute_v2(bufferD)
for i in range(nOutput):
cudart.cudaMemcpy(bufferH[nInput + i].ctypes.data, bufferD[nInput + i], bufferH[nInput + i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost)
outputCPU = layerNormCPU(bufferH[:nInput], epsilon)
"""
for i in range(nInput):
printArrayInformation(bufferH[i])
for i in range(nOutput):
printArrayInformation(bufferH[nInput + i])
for i in range(nOutput):
printArrayInformation(outputCPU[i])
"""
check(bufferH[nInput:][0], outputCPU[0], True)
for buffer in bufferD:
cudart.cudaFree(buffer)
print("Test %s finish!\n" % testCase)
if __name__ == "__main__":
np.set_printoptions(precision=3, linewidth=200, suppress=True)
os.system("rm -rf ./*.plan")
run([1, 1, 256], False)
os.system("rm -rf ./*.plan")
run([16, 64, 256], False)
os.system("rm -rf ./*.plan")
run([1, 1, 256], True)
os.system("rm -rf ./*.plan")
run([16, 64, 256], True)
print("Test all finish!")
| trt-samples-for-hackathon-cn-master | cookbook/05-Plugin/PluginReposity/LayerNormPlugin-TRT8/testLayerNormPluginOneFlow.py |
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import ctypes
import os
import numpy as np
import tensorrt as trt
from cuda import cudart
soFile = "./LayerNormPluginCUB.so"
epsilon = 1e-6
np.random.seed(31193)
def printArrayInformation(x, info="", n=5):
if 0 in x.shape:
print('%s:%s' % (info, str(x.shape)))
print()
return
print( '%s:%s,SumAbs=%.5e,Var=%.5f,Max=%.5f,Min=%.5f,SAD=%.5f'%( \
info,str(x.shape),np.sum(abs(x)),np.var(x),np.max(x),np.min(x),np.sum(np.abs(np.diff(x.reshape(-1)))) ))
print('\t', x.reshape(-1)[:n], x.reshape(-1)[-n:])
def check(a, b, weak=False, checkEpsilon=1e-5):
if weak:
a = a.astype(np.float32)
b = b.astype(np.float32)
res = np.all(np.abs(a - b) < checkEpsilon)
else:
res = np.all(a == b)
diff0 = np.max(np.abs(a - b))
diff1 = np.max(np.abs(a - b) / (np.abs(b) + checkEpsilon))
print("check:%s, absDiff=%f, relDiff=%f" % (res, diff0, diff1))
def layerNormCPU(bufferH, epsilon):
_x = bufferH[0]
_0 = np.mean(_x, 2)[:, :, np.newaxis]
_1 = _x - _0
_2 = _1 * _1
_3 = np.mean(_2, 2)[:, :, np.newaxis]
_4 = np.array(epsilon, dtype=np.float32)
_5 = _4.reshape(1, 1, 1)
_6 = _3 + _5
_7 = np.sqrt(_6)
_8 = 1 / _7 # 1/sqrt(...)
_9 = _1 * _8
return [_9]
def getLayerNormPlugin(epsilon):
for c in trt.get_plugin_registry().plugin_creator_list:
#print(c.name)
if c.name == "LayerNorm" and c.plugin_version == "1":
print("Find %s V%s" % (c.name, c.plugin_version))
parameterList = []
parameterList.append(trt.PluginField("epsilon", np.float32(epsilon), trt.PluginFieldType.FLOAT32))
return c.create_plugin(c.name, trt.PluginFieldCollection(parameterList))
return None
def run(shape):
testCase = "<shape=%s>" % (shape)
trtFile = "./model-%d.plan" % (shape[2])
print("Test %s" % testCase)
logger = trt.Logger(trt.Logger.ERROR)
trt.init_libnvinfer_plugins(logger, '')
ctypes.cdll.LoadLibrary(soFile)
if os.path.isfile(trtFile):
with open(trtFile, "rb") as f:
engineStr = f.read()
engine = trt.Runtime(logger).deserialize_cuda_engine(engineStr)
if engine == None:
print("Failed loading engine!")
exit()
print("Succeeded loading engine!")
else:
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
profile = builder.create_optimization_profile()
config = builder.create_builder_config()
inputT0 = network.add_input("inputT0", trt.float32, [-1 for i in shape])
profile.set_shape(inputT0.name, [1, 1, shape[2]], shape, shape)
config.add_optimization_profile(profile)
pluginLayer = network.add_plugin_v2([inputT0], getLayerNormPlugin(epsilon))
network.mark_output(pluginLayer.get_output(0))
engineString = builder.build_serialized_network(network, config)
if engineString == None:
print("Failed building engine!")
return
print("Succeeded building engine!")
with open(trtFile, "wb") as f:
f.write(engineString)
engine = trt.Runtime(logger).deserialize_cuda_engine(engineString)
context = engine.create_execution_context()
context.set_binding_shape(0, shape)
#print("Binding all? %s"%(["No","Yes"][int(context.all_binding_shapes_specified)]))
nInput = np.sum([engine.binding_is_input(i) for i in range(engine.num_bindings)])
nOutput = engine.num_bindings - nInput
#for i in range(nInput):
# print("Bind[%2d]:i[%2d]->" % (i, i), engine.get_binding_dtype(i), engine.get_binding_shape(i), context.get_binding_shape(i), engine.get_binding_name(i))
#for i in range(nInput, nInput + nOutput):
# print("Bind[%2d]:o[%2d]->" % (i, i - nInput), engine.get_binding_dtype(i), engine.get_binding_shape(i), context.get_binding_shape(i), engine.get_binding_name(i))
bufferH = []
bufferH.append(np.random.rand(np.prod(shape)).astype(np.float32).reshape(shape))
for i in range(nOutput):
bufferH.append(np.empty(context.get_binding_shape(nInput + i), dtype=trt.nptype(engine.get_binding_dtype(nInput + i))))
bufferD = []
for i in range(engine.num_bindings):
bufferD.append(cudart.cudaMalloc(bufferH[i].nbytes)[1])
for i in range(nInput):
cudart.cudaMemcpy(bufferD[i], np.ascontiguousarray(bufferH[i].reshape(-1)).ctypes.data, bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice)
context.execute_v2(bufferD)
for i in range(nOutput):
cudart.cudaMemcpy(bufferH[nInput + i].ctypes.data, bufferD[nInput + i], bufferH[nInput + i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost)
outputCPU = layerNormCPU(bufferH[:nInput], epsilon)
"""
for i in range(nInput):
printArrayInformation(bufferH[i])
    for i in range(nInput, nInput + nOutput):
        printArrayInformation(bufferH[i])
    for i in range(nInput, nInput + nOutput):
        printArrayInformation(outputCPU[i - nInput])
"""
check(bufferH[nInput:][0], outputCPU[0], True)
for buffer in bufferD:
cudart.cudaFree(buffer)
print("Test %s finish!\n" % testCase)
if __name__ == "__main__":
np.set_printoptions(precision=3, linewidth=200, suppress=True)
os.system("rm -rf ./*.plan")
run([1, 1, 256])
os.system("rm -rf ./*.plan")
run([16, 64, 256])
os.system("rm -rf ./*.plan")
print("Test all finish!")
| trt-samples-for-hackathon-cn-master | cookbook/05-Plugin/PluginReposity/LayerNormPlugin-TRT8/testLayerNormPluginCUBV1.py |
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import ctypes
import os
import numpy as np
import tensorrt as trt
from cuda import cudart
soFile = "./LayerNormPluginCUB.so"
epsilon = 1e-6
np.random.seed(31193)
def printArrayInformation(x, info="", n=5):
if 0 in x.shape:
print('%s:%s' % (info, str(x.shape)))
print()
return
print( '%s:%s,SumAbs=%.5e,Var=%.5f,Max=%.5f,Min=%.5f,SAD=%.5f'%( \
info,str(x.shape),np.sum(abs(x)),np.var(x),np.max(x),np.min(x),np.sum(np.abs(np.diff(x.reshape(-1)))) ))
print('\t', x.reshape(-1)[:n], x.reshape(-1)[-n:])
def check(a, b, weak=False, checkEpsilon=1e-5):
if weak:
a = a.astype(np.float32)
b = b.astype(np.float32)
res = np.all(np.abs(a - b) < checkEpsilon)
else:
res = np.all(a == b)
diff0 = np.max(np.abs(a - b))
diff1 = np.max(np.abs(a - b) / (np.abs(b) + checkEpsilon))
print("check:%s, absDiff=%f, relDiff=%f" % (res, diff0, diff1))
def layerNormCPU(bufferH, epsilon):
_x = bufferH[0]
_0 = np.mean(_x, 2)[:, :, np.newaxis]
_1 = _x - _0
_2 = _1 * _1
_3 = np.mean(_2, 2)[:, :, np.newaxis]
_4 = np.array(epsilon, dtype=np.float32)
_5 = _4.reshape(1, 1, 1)
_6 = _3 + _5
_7 = np.sqrt(_6)
_8 = 1 / _7 # 1/sqrt(...)
_9 = _1 * _8
return [_9]
def getLayerNormPlugin(epsilon):
for c in trt.get_plugin_registry().plugin_creator_list:
#print(c.name)
if c.name == "LayerNorm" and c.plugin_version == "2":
print("Find %s V%s" % (c.name, c.plugin_version))
parameterList = []
parameterList.append(trt.PluginField("epsilon", np.float32(epsilon), trt.PluginFieldType.FLOAT32))
return c.create_plugin(c.name, trt.PluginFieldCollection(parameterList))
return None
def run(shape, bFp16):
testCase = "<shape=%s,dataType=%s>" % (shape, "FP16" if bFp16 else "FP32")
trtFile = "./model-%d-%s.plan" % (shape[2], "FP16" if bFp16 else "FP32")
print("Test %s" % testCase)
logger = trt.Logger(trt.Logger.ERROR)
trt.init_libnvinfer_plugins(logger, '')
ctypes.cdll.LoadLibrary(soFile)
if os.path.isfile(trtFile):
with open(trtFile, "rb") as f:
engineStr = f.read()
engine = trt.Runtime(logger).deserialize_cuda_engine(engineStr)
if engine == None:
print("Failed loading engine!")
exit()
print("Succeeded loading engine!")
else:
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
profile = builder.create_optimization_profile()
config = builder.create_builder_config()
if bFp16:
config.set_flag(trt.BuilderFlag.FP16)
inputT0 = network.add_input("inputT0", trt.float16 if bFp16 else trt.float32, [-1 for i in shape])
profile.set_shape(inputT0.name, [1, 1, shape[2]], shape, shape)
config.add_optimization_profile(profile)
pluginLayer = network.add_plugin_v2([inputT0], getLayerNormPlugin(epsilon))
network.mark_output(pluginLayer.get_output(0))
engineString = builder.build_serialized_network(network, config)
if engineString == None:
print("Failed building engine!")
return
print("Succeeded building engine!")
with open(trtFile, "wb") as f:
f.write(engineString)
engine = trt.Runtime(logger).deserialize_cuda_engine(engineString)
context = engine.create_execution_context()
context.set_binding_shape(0, shape)
#print("Binding all? %s"%(["No","Yes"][int(context.all_binding_shapes_specified)]))
nInput = np.sum([engine.binding_is_input(i) for i in range(engine.num_bindings)])
nOutput = engine.num_bindings - nInput
#for i in range(nInput):
# print("Bind[%2d]:i[%2d]->" % (i, i), engine.get_binding_dtype(i), engine.get_binding_shape(i), context.get_binding_shape(i), engine.get_binding_name(i))
#for i in range(nInput, nInput + nOutput):
# print("Bind[%2d]:o[%2d]->" % (i, i - nInput), engine.get_binding_dtype(i), engine.get_binding_shape(i), context.get_binding_shape(i), engine.get_binding_name(i))
bufferH = []
bufferH.append(np.random.rand(np.prod(shape)).astype(np.float16 if bFp16 else np.float32).reshape(shape))
for i in range(nOutput):
bufferH.append(np.empty(context.get_binding_shape(nInput + i), dtype=trt.nptype(engine.get_binding_dtype(nInput + i))))
bufferD = []
for i in range(engine.num_bindings):
bufferD.append(cudart.cudaMalloc(bufferH[i].nbytes)[1])
for i in range(nInput):
cudart.cudaMemcpy(bufferD[i], np.ascontiguousarray(bufferH[i].reshape(-1)).ctypes.data, bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice)
context.execute_v2(bufferD)
for i in range(nOutput):
cudart.cudaMemcpy(bufferH[nInput + i].ctypes.data, bufferD[nInput + i], bufferH[nInput + i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost)
outputCPU = layerNormCPU(bufferH[:nInput], epsilon)
"""
for i in range(nInput):
printArrayInformation(bufferH[i])
    for i in range(nInput, nInput + nOutput):
        printArrayInformation(bufferH[i])
    for i in range(nInput, nInput + nOutput):
        printArrayInformation(outputCPU[i - nInput])
"""
check(bufferH[nInput:][0], outputCPU[0], True)
for buffer in bufferD:
cudart.cudaFree(buffer)
print("Test %s finish!\n" % testCase)
if __name__ == "__main__":
os.system("rm -rf ./*.plan")
np.set_printoptions(precision=3, linewidth=200, suppress=True)
os.system("rm -rf ./*.plan")
run([1, 1, 256], False)
os.system("rm -rf ./*.plan")
run([16, 64, 256], False)
os.system("rm -rf ./*.plan")
run([1, 1, 256], True)
os.system("rm -rf ./*.plan")
run([16, 64, 256], True)
os.system("rm -rf ./*.plan")
print("Test all finish!")
| trt-samples-for-hackathon-cn-master | cookbook/05-Plugin/PluginReposity/LayerNormPlugin-TRT8/testLayerNormPluginCUBV2.py |
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import ctypes
import os
import numpy as np
import tensorrt as trt
from cuda import cudart
soFile = "./LayerNormPluginCUB.so"
epsilon = 1e-6
np.random.seed(31193)
def printArrayInformation(x, info="", n=5):
if 0 in x.shape:
print('%s:%s' % (info, str(x.shape)))
print()
return
print( '%s:%s,SumAbs=%.5e,Var=%.5f,Max=%.5f,Min=%.5f,SAD=%.5f'%( \
info,str(x.shape),np.sum(abs(x)),np.var(x),np.max(x),np.min(x),np.sum(np.abs(np.diff(x.reshape(-1)))) ))
print('\t', x.reshape(-1)[:n], x.reshape(-1)[-n:])
def check(a, b, weak=False, checkEpsilon=1e-5):
if weak:
a = a.astype(np.float32)
b = b.astype(np.float32)
res = np.all(np.abs(a - b) < checkEpsilon)
else:
res = np.all(a == b)
diff0 = np.max(np.abs(a - b))
diff1 = np.max(np.abs(a - b) / (np.abs(b) + checkEpsilon))
print("check:%s, absDiff=%f, relDiff=%f" % (res, diff0, diff1))
def layerNormCPU(bufferH, epsilon):
_x, gamma, beta = bufferH
nHiddenSize = bufferH[0].shape[2]
_0 = np.mean(_x, 2)[:, :, np.newaxis]
_1 = _x - _0
_2 = _1 * _1
_3 = np.mean(_2, 2)[:, :, np.newaxis]
_4 = np.array(epsilon, dtype=np.float32)
_5 = _4.reshape(1, 1, 1)
_6 = _3 + _5
_7 = np.sqrt(_6)
_8 = 1 / _7 # 1/sqrt(...)
_9 = gamma
_10 = _9.reshape(1, 1, nHiddenSize)
_11 = _8 * _10 # gamma/sqrt(...)
_12 = _0 * _11 # bμ/sqrt(...)
_13 = beta
_14 = _13.reshape(1, 1, nHiddenSize)
_15 = _14 - _12 # beta-bμ/sqrt(...)
_16 = _x * _11 # bx/sqrt(...)
_17 = _15 + _16 # gamma(x-μ)/sqrt(...)+beta
_18 = _17.reshape(bufferH[0].shape[0], bufferH[0].shape[1], bufferH[0].shape[2])
return [_18]
def getLayerNormPlugin(epsilon):
for c in trt.get_plugin_registry().plugin_creator_list:
#print(c.name)
if c.name == "LayerNorm" and c.plugin_version == "3":
print("Find %s V%s" % (c.name, c.plugin_version))
parameterList = []
parameterList.append(trt.PluginField("epsilon", np.float32(epsilon), trt.PluginFieldType.FLOAT32))
return c.create_plugin(c.name, trt.PluginFieldCollection(parameterList))
return None
def run(shape, bFp16):
testCase = "<shape=%s,dataType=%s>" % (shape, "FP16" if bFp16 else "FP32")
trtFile = "./model-%d-%s.plan" % (shape[2], "FP16" if bFp16 else "FP32")
print("Test %s" % testCase)
logger = trt.Logger(trt.Logger.ERROR)
trt.init_libnvinfer_plugins(logger, '')
ctypes.cdll.LoadLibrary(soFile)
if os.path.isfile(trtFile):
with open(trtFile, "rb") as f:
engineStr = f.read()
engine = trt.Runtime(logger).deserialize_cuda_engine(engineStr)
if engine == None:
print("Failed loading engine!")
exit()
print("Succeeded loading engine!")
else:
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
profile = builder.create_optimization_profile()
config = builder.create_builder_config()
if bFp16:
config.set_flag(trt.BuilderFlag.FP16)
inputT0 = network.add_input("inputT0", trt.float16 if bFp16 else trt.float32, [-1 for i in shape])
profile.set_shape(inputT0.name, [1, 1, shape[2]], shape, shape)
inputT1 = network.add_input("inputGamma", trt.float16 if bFp16 else trt.float32, [256])
inputT2 = network.add_input("inputBeta", trt.float16 if bFp16 else trt.float32, [256])
config.add_optimization_profile(profile)
pluginLayer = network.add_plugin_v2([inputT0, inputT1, inputT2], getLayerNormPlugin(epsilon))
network.mark_output(pluginLayer.get_output(0))
engineString = builder.build_serialized_network(network, config)
if engineString == None:
print("Failed building engine!")
return
print("Succeeded building engine!")
with open(trtFile, "wb") as f:
f.write(engineString)
engine = trt.Runtime(logger).deserialize_cuda_engine(engineString)
context = engine.create_execution_context()
context.set_binding_shape(0, shape)
#print("Binding all? %s"%(["No","Yes"][int(context.all_binding_shapes_specified)]))
nInput = np.sum([engine.binding_is_input(i) for i in range(engine.num_bindings)])
nOutput = engine.num_bindings - nInput
#for i in range(nInput):
# print("Bind[%2d]:i[%2d]->" % (i, i), engine.get_binding_dtype(i), engine.get_binding_shape(i), context.get_binding_shape(i), engine.get_binding_name(i))
#for i in range(nInput, nInput + nOutput):
# print("Bind[%2d]:o[%2d]->" % (i, i - nInput), engine.get_binding_dtype(i), engine.get_binding_shape(i), context.get_binding_shape(i), engine.get_binding_name(i))
bufferH = []
bufferH.append(np.random.rand(np.prod(shape)).astype(np.float16 if bFp16 else np.float32).reshape(shape) * 2 - 1)
bufferH.append(np.ones(shape[2]).astype(np.float16 if bFp16 else np.float32))
bufferH.append(np.zeros(shape[2]).astype(np.float16 if bFp16 else np.float32))
for i in range(nOutput):
bufferH.append(np.empty(context.get_binding_shape(nInput + i), dtype=trt.nptype(engine.get_binding_dtype(nInput + i))))
bufferD = []
for i in range(engine.num_bindings):
bufferD.append(cudart.cudaMalloc(bufferH[i].nbytes)[1])
for i in range(nInput):
cudart.cudaMemcpy(bufferD[i], np.ascontiguousarray(bufferH[i].reshape(-1)).ctypes.data, bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice)
context.execute_v2(bufferD)
for i in range(nOutput):
cudart.cudaMemcpy(bufferH[nInput + i].ctypes.data, bufferD[nInput + i], bufferH[nInput + i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost)
outputCPU = layerNormCPU(bufferH[:nInput], epsilon)
"""
for i in range(nInput):
printArrayInformation(bufferH[i])
for i in range(nInput, nIO):
printArrayInformation(bufferH[i])
for i in range(nInput, nIO):
printArrayInformation(outputCPU[i - nInput])
"""
check(bufferH[nInput:][0], outputCPU[0], True)
for buffer in bufferD:
cudart.cudaFree(buffer)
print("Test %s finish!\n" % testCase)
if __name__ == "__main__":
os.system("rm -rf ./*.plan")
np.set_printoptions(precision=3, linewidth=200, suppress=True)
os.system("rm -rf ./*.plan")
run([1, 1, 256], False)
os.system("rm -rf ./*.plan")
run([16, 64, 256], False)
os.system("rm -rf ./*.plan")
run([1, 1, 256], True)
os.system("rm -rf ./*.plan")
run([16, 64, 256], True)
os.system("rm -rf ./*.plan")
print("Test all finish!")
| trt-samples-for-hackathon-cn-master | cookbook/05-Plugin/PluginReposity/LayerNormPlugin-TRT8/testLayerNormPluginCUBV3.py |
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import ctypes
import os
import numpy as np
import pycuda.autoinit
import pycuda.driver as cuda
#from time import time_ns
import tensorrt as trt
soFilePath = "./MaskPlugin.so"
np.random.seed(31193)
npToTRT = {np.int8: trt.int8, np.float16: trt.float16, np.int32: trt.int32, np.float32: trt.float32}
def check(a, b, weak=False, checkEpsilon=1e-5):
if weak:
res = np.all(np.abs(a - b) < checkEpsilon)
else:
res = np.all(a == b)
diff0 = np.max(np.abs(a - b))
diff1 = np.max(np.abs(a - b) / (np.abs(b) + checkEpsilon))
print("check:%s, absDiff=%f, relDiff=%f" % (res, diff0, diff1))
def maskCPU(bufferH):
input0, input1 = bufferH
bs, sl, _ = input0.shape
negValue = [-3.0e38, -6.0e4][int(input0.dtype == np.float16)]
output0 = np.zeros([bs, 4, sl, sl], dtype=input0.dtype) + 0
output1 = np.zeros([bs, 4, sl, sl], dtype=input0.dtype) + negValue
output2 = np.zeros([bs, sl, 320], dtype=input0.dtype) + 0
for i in range(bs):
validWidth = input1[i]
output0[i, :, :validWidth, :validWidth] = 1
output1[i, :, :validWidth, :validWidth] = 0
output2[i, :validWidth, :] = 1
return output0, output1, output2
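# A vectorized equivalent of maskCPU (a sketch using broadcasting: a position
# index is compared against each sample's valid width to build the masks):
def maskCPUVectorized(bufferH):
    input0, input1 = bufferH
    bs, sl, _ = input0.shape
    negValue = [-3.0e38, -6.0e4][int(input0.dtype == np.float16)]
    valid = np.arange(sl)[None, :] < input1[:, None]  # [bs, sl], True inside the valid width
    valid2D = valid[:, :, None] & valid[:, None, :]   # [bs, sl, sl]
    output0 = np.broadcast_to(valid2D[:, None], (bs, 4, sl, sl)).astype(input0.dtype)
    output1 = (1 - output0) * negValue                # negValue outside the valid square, 0 inside
    output2 = np.broadcast_to(valid[:, :, None], (bs, sl, 320)).astype(input0.dtype)
    return output0, output1, output2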
def getMaskPlugin():
for c in trt.get_plugin_registry().plugin_creator_list:
if c.name == "MaskPlugin":
return c.create_plugin(c.name, trt.PluginFieldCollection([]))
return None
def buildEngine(logger, datatype):
builder = trt.Builder(logger)
network = builder.create_network(1 << 0)
config = builder.create_builder_config()
config.flags = [0, 1 << int(trt.BuilderFlag.FP16)][int(datatype == np.float16)]
inputT0 = network.add_input("inputT0", npToTRT[datatype], [-1, -1, 560])
inputT1 = network.add_input("inputT1", npToTRT[np.int32], [-1])
profile = builder.create_optimization_profile()
profile.set_shape(inputT0.name, [1, 1, 560], [2, 4, 560], [4, 8, 560])
profile.set_shape(inputT1.name, [1], [2], [4])
config.add_optimization_profile(profile)
pluginLayer = network.add_plugin_v2([inputT0, inputT1], getMaskPlugin())
pluginLayer.get_output(0).dtype = npToTRT[datatype]
pluginLayer.get_output(1).dtype = npToTRT[datatype]
pluginLayer.get_output(2).dtype = npToTRT[datatype]
network.mark_output(pluginLayer.get_output(0))
network.mark_output(pluginLayer.get_output(1))
network.mark_output(pluginLayer.get_output(2))
return builder.build_engine(network, config)
def run(datatype, nBS, nSL):
testCase = "test<fp%s,bs=%d,sl=%d>" % (["32", "16"][int(datatype == np.float16)], nBS, nSL)
logger = trt.Logger(trt.Logger.ERROR)
trt.init_libnvinfer_plugins(logger, '')
ctypes.cdll.LoadLibrary(soFilePath)
trtFile = "./model-fp" + ["32", "16"][int(datatype == np.float16)] + ".plan"
if os.path.isfile(trtFile):
with open(trtFile, "rb") as f:
engineStr = f.read()
engine = trt.Runtime(logger).deserialize_cuda_engine(engineStr)
if engine == None:
print("Failed loading engine!")
return
print("Succeeded loading engine!")
else:
engine = buildEngine(logger, datatype)
if engine == None:
print("Failed building engine!")
return
print("Succeeded building engine!")
with open(trtFile, "wb") as f:
f.write(engine.serialize())
context = engine.create_execution_context()
context.set_binding_shape(0, [nBS, nSL, 560])
context.set_binding_shape(1, [nBS])
print("Binding all? %s" % (["No", "Yes"][int(context.all_binding_shapes_specified)]))
stream = cuda.Stream()
nInput = np.sum([engine.binding_is_input(i) for i in range(engine.num_bindings)])
nOutput = engine.num_bindings - nInput
for i in range(engine.num_bindings):
print("input ->" if engine.binding_is_input(i) else "output->", engine.get_binding_dtype(i), engine.get_binding_shape(i), context.get_binding_shape(i))
bufferH = []
bufferH.append(np.random.rand(nBS * nSL * 560).reshape(nBS, nSL, 560).astype(datatype))
bufferH.append(1 + np.arange(nBS).reshape(nBS).astype(np.int32))
bufferH.append(np.empty(context.get_binding_shape(2), dtype=trt.nptype(engine.get_binding_dtype(2))))
bufferH.append(np.empty(context.get_binding_shape(3), dtype=trt.nptype(engine.get_binding_dtype(3))))
bufferH.append(np.empty(context.get_binding_shape(4), dtype=trt.nptype(engine.get_binding_dtype(4))))
bufferD = []
for i in range(engine.num_bindings):
bufferD.append(cuda.mem_alloc(bufferH[i].nbytes))
for i in range(nInput):
cuda.memcpy_htod_async(bufferD[i], np.ascontiguousarray(bufferH[i].reshape(-1)), stream)
context.execute_async_v2(bufferD, stream.handle)
for i in range(nOutput):
cuda.memcpy_dtoh_async(bufferH[nInput + i], bufferD[nInput + i], stream)
stream.synchronize()
for i in range(nInput):
temp = bufferH[i]
print( 'input%d: %s,SumAbs=%.5e,Var=%.5f,Max=%.5f,Min=%.5f,SAD=%.5f'%( \
i,str(temp.shape),np.sum(abs(temp)),np.var(temp),np.max(temp),np.min(temp),np.sum(np.abs(np.diff(temp.reshape(-1)))) ))
print("\t", temp.reshape(-1)[:10])
for i in range(nOutput):
temp = bufferH[nInput + i]
print( 'output%d: %s,SumAbs=%.5e,Var=%.5f,Max=%.5f,Min=%.5f,SAD=%.5f'%( \
i,str(temp.shape),np.sum(abs(temp)),np.var(temp),np.max(temp),np.min(temp),np.sum(np.abs(np.diff(temp.reshape(-1)))) ))
print("\t", temp.reshape(-1)[:10])
cpu = maskCPU(bufferH[:2])
for i in range(nOutput):
temp = bufferH[nInput + i] - cpu[i]
print( 'diff%d: %s,SumAbs=%.5e,Var=%.5f,Max=%.5f,Min=%.5f,SAD=%.5f'%( \
i,str(temp.shape),np.sum(abs(temp)),np.var(temp),np.max(temp),np.min(temp),np.sum(np.abs(np.diff(temp.reshape(-1)))) ))
print("\t", temp.reshape(-1)[:10])
print("Test", testCase, "finish!")
if __name__ == "__main__":
os.system("rm -f ./*.plan")
np.set_printoptions(precision=3, linewidth=200, suppress=True)
#cuda.Device(0).make_context()
#testEncoderCPU()
run(np.float32, 4, 8)
run(np.float16, 4, 8)
#cuda.Context.pop()
#print("test all finish!")
| trt-samples-for-hackathon-cn-master | cookbook/05-Plugin/PluginReposity/MaskPugin/testMaskPlugin.py |
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import ctypes
import numpy as np
import pycuda.autoinit
import pycuda.driver as cuda
import tensorrt as trt
npToTrt = {np.int8: trt.int8, np.float16: trt.float16, np.int32: trt.int32, np.float32: trt.float32}
soFilePath = "./ReversePlugin.so"
def reverseCPU(inputH0, inputH1):
outputH0CPU = np.zeros_like(inputH0)
for i in range(inputH0.shape[0]):
validWidth = inputH1[i]
for k in range(validWidth):
outputH0CPU[i, validWidth - 1 - k, :] = inputH0[i, k, :]
return outputH0CPU
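# An equivalent using slice reversal instead of the explicit element loop (a
# sketch; the region beyond validWidth stays zero just like in reverseCPU):
def reverseCPUSlice(inputH0, inputH1):
    outputH0 = np.zeros_like(inputH0)
    for i, validWidth in enumerate(inputH1):
        outputH0[i, :validWidth] = inputH0[i, :validWidth][::-1]
    return outputH0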
def cleanTrash(outputH0, inputH1): # clean the trash data in the output of GPU
sh = outputH0.shape
for i in range(sh[0]):
outputH0[i, inputH1[i]:, :] = 0
return outputH0
def getReversePlugin():
for c in trt.get_plugin_registry().plugin_creator_list:
if c.name == "ReversePlugin":
return c.create_plugin(c.name, trt.PluginFieldCollection([]))
return None
def buildEngine(logger, inDatatype, nDimIn):
builder = trt.Builder(logger)
network = builder.create_network(1)
profile = builder.create_optimization_profile()
config = builder.create_builder_config()
config.flags = int(inDatatype == np.float16)
inputT0 = network.add_input("inputT0", npToTrt[inDatatype], [-1, -1, -1])
profile.set_shape(inputT0.name, [1, 1, 1], [2, 4, 3], [4, 9, 12])
inputT1 = network.add_input("inputT1", trt.int32, [-1])
profile.set_shape(inputT1.name, [1], [4], [9])
config.add_optimization_profile(profile)
pluginLayer = network.add_plugin_v2([inputT0, inputT1], getReversePlugin())
network.mark_output(pluginLayer.get_output(0))
return builder.build_engine(network, config)
def run(inDim, inDatatype):
print("test", inDim, inDatatype)
logger = trt.Logger(trt.Logger.ERROR)
trt.init_libnvinfer_plugins(logger, '')
ctypes.cdll.LoadLibrary(soFilePath)
engine = buildEngine(logger, inDatatype, len(inDim))
if engine == None:
print("Failed building engine!")
return None
print("Succeeded building engine!")
context = engine.create_execution_context()
context.set_binding_shape(0, inDim)
context.set_binding_shape(1, inDim[:1])
#print("Bind0->",engine.get_binding_shape(0),context.get_binding_shape(0))
#print("Bind1->",engine.get_binding_shape(1),context.get_binding_shape(1))
#print("Bind2->",engine.get_binding_shape(2),context.get_binding_shape(2))
#print("All bind:",context.all_binding_shapes_specified)
stream = cuda.Stream()
data0 = np.arange(np.prod(inDim), dtype=inDatatype).reshape(inDim)
data1 = np.arange(1, inDim[0] + 1, dtype=np.int32)
data1[data1 > inDim[1]] = inDim[1]
inputH0 = np.ascontiguousarray(data0)
inputD0 = cuda.mem_alloc(inputH0.nbytes)
inputH1 = np.ascontiguousarray(data1)
inputD1 = cuda.mem_alloc(inputH1.nbytes)
outputH0 = np.empty(context.get_binding_shape(2), dtype=trt.nptype(engine.get_binding_dtype(2)))
outputD0 = cuda.mem_alloc(outputH0.nbytes)
cuda.memcpy_htod_async(inputD0, inputH0, stream)
cuda.memcpy_htod_async(inputD1, inputH1, stream)
context.execute_async_v2([int(inputD0), int(inputD1), int(outputD0)], stream.handle)
cuda.memcpy_dtoh_async(outputH0, outputD0, stream)
stream.synchronize()
outputH0CPU = reverseCPU(inputH0, inputH1)
#print("InputH0->",inputH0.shape, engine.get_binding_dtype(0))
#print(inputH0)
#print("InputH1->",inputH1.shape, engine.get_binding_dtype(1))
#print(inputH1)
#print("OutputH0->",outputH0.shape, engine.get_binding_dtype(2))
#print(cleanTrash(outputH0,inputH1))
#print("OutputH0CPU->",outputH0CPU.shape)
#print(outputH0CPU)
print("Check result:", ["True" if np.all(cleanTrash(outputH0, inputH1) == outputH0CPU) else "False"][0])
if __name__ == "__main__":
np.set_printoptions(precision=3, linewidth=200, suppress=True)
cuda.Device(0).make_context()
run([2, 4, 3], np.int32)
run([4, 9, 12], np.int32)
run([2, 4, 3], np.float32)
run([4, 9, 3], np.float32)
run([2, 4, 3], np.float16)
run([4, 9, 12], np.float16)
cuda.Context.pop()
print("test finish!")
| trt-samples-for-hackathon-cn-master | cookbook/05-Plugin/PluginReposity/ReversePlugin/testReversePlugin.py |
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import ctypes
import numpy as np
import pycuda.autoinit
import pycuda.driver as cuda
import tensorrt as trt
npToTrt = {np.float32: trt.float32, np.float16: trt.float16}
soFilePath = "./MMTPlugin.so"
def MMTCPU(inputH0, inputH1, weight):
sh0 = inputH0.shape
sh1 = inputH1.shape
h, dim_t, _ = weight.shape
outputCPU = np.zeros([sh0[0], dim_t, sh0[1], sh1[1]], dtype=np.float32)
for i in range(sh0[0]):
        outputCPU[i] = np.matmul(np.matmul(inputH0[i], weight.transpose(0, 2, 1)).transpose(2, 1, 0), inputH1[i].transpose())
return outputCPU
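# The same contraction written as a single einsum (a sketch under the reading
# that out[b, t, m, n] = sum over h and k of x[b, m, h] * w[k, t, h] * y[b, n, k],
# which is what the matmul/transpose chain above computes):
def MMTCPUEinsum(inputH0, inputH1, weight):
    return np.einsum("bmh,kth,bnk->btmn", inputH0, weight, inputH1).astype(np.float32)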
def getMMTPlugin(h, dim_t, weight):
for c in trt.get_plugin_registry().plugin_creator_list:
if c.name == "MMTPlugin":
p0 = trt.PluginField("w", np.array([weight], dtype=np.float32), trt.PluginFieldType.FLOAT32)
p1 = trt.PluginField("h", np.array([h], dtype=np.int32), trt.PluginFieldType.INT32)
p2 = trt.PluginField("dim_t", np.array([dim_t], dtype=np.int32), trt.PluginFieldType.INT32)
return c.create_plugin(c.name, trt.PluginFieldCollection([p0, p1, p2]))
return None
def buildEngine(logger, shape, dim_t, weight, datatype):
builder = trt.Builder(logger)
network = builder.create_network(1)
profile = builder.create_optimization_profile()
config = builder.create_builder_config()
config.flags = int(datatype == np.float16)
inputT0 = network.add_input("x", npToTrt[datatype], (-1, -1, -1))
profile.set_shape(inputT0.name, (1, 1, 1), shape, [i * 2 for i in shape])
inputT1 = network.add_input("y", npToTrt[datatype], (-1, -1, -1))
profile.set_shape(inputT1.name, (1, 1, 1), shape, [i * 2 for i in shape])
config.add_optimization_profile(profile)
pluginLayer = network.add_plugin_v2([inputT0, inputT1], getMMTPlugin(shape[-1], dim_t, weight))
network.mark_output(pluginLayer.get_output(0))
return builder.build_engine(network, config)
def run(nGroup, xWidth, yWidth, h, dim_t, datatype):
print("test [%d,%d/%d,%d],dim_t=%d" % (nGroup, xWidth, yWidth, h, dim_t), datatype)
logger = trt.Logger(trt.Logger.ERROR)
trt.init_libnvinfer_plugins(logger, '')
ctypes.cdll.LoadLibrary(soFilePath)
weight = np.full([h, dim_t, h], 0.1, dtype=np.float32)
engine = buildEngine(logger, [nGroup, max(xWidth, yWidth), h], dim_t, weight, datatype)
if engine == None:
print("Failed building engine!")
return None
print("Succeed building engine!")
context = engine.create_execution_context()
context.set_binding_shape(0, [nGroup, xWidth, h])
context.set_binding_shape(1, [nGroup, yWidth, h])
#print("Binding0->",engine.get_binding_shape(0),context.get_binding_shape(0))
#print("Binding1->",engine.get_binding_shape(1),context.get_binding_shape(1))
#print("Binding2->",engine.get_binding_shape(2),context.get_binding_shape(2))
#print("All bind:",context.all_binding_shapes_specified)
stream = cuda.Stream()
data0 = np.ones([nGroup, xWidth, h], dtype=datatype)
data1 = np.ones([nGroup, yWidth, h], dtype=datatype)
inputH0 = np.ascontiguousarray(data0)
inputD0 = cuda.mem_alloc(inputH0.nbytes)
inputH1 = np.ascontiguousarray(data1)
inputD1 = cuda.mem_alloc(inputH1.nbytes)
outputH0 = np.empty(context.get_binding_shape(2), dtype=trt.nptype(engine.get_binding_dtype(2)))
outputD0 = cuda.mem_alloc(outputH0.nbytes)
cuda.memcpy_htod_async(inputD0, inputH0, stream)
cuda.memcpy_htod_async(inputD1, inputH1, stream)
context.execute_async_v2([int(inputD0), int(inputD1), int(outputD0)], stream.handle)
cuda.memcpy_dtoh_async(outputH0, outputD0, stream)
stream.synchronize()
outputH0CPU = MMTCPU(inputH0, inputH1, weight)
#print("InputH0->",inputH0.shape, engine.get_binding_dtype(0))
#print(inputH0)
#print("InputH1->",inputH1.shape, engine.get_binding_dtype(1))
#print(inputH1)
#print("OutputH0->",outputH0.shape, engine.get_binding_dtype(2))
#print(outputH0)
print("Check result:", ["True" if np.all(outputH0 == outputH0CPU) else "False"][0])
if __name__ == "__main__":
np.set_printoptions(precision=3, linewidth=200, suppress=True)
cuda.Device(0).make_context()
run(4, 5, 6, 2, 3, np.float32)
run(4, 5, 6, 2, 3, np.float16)
cuda.Context.pop()
print("test finish!")
| trt-samples-for-hackathon-cn-master | cookbook/05-Plugin/PluginReposity/MMTPlugin/testMMTPlugin.py |
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import ctypes
import os
from time import time
import numpy as np
import pycuda.autoinit
import pycuda.driver as cuda
import tensorrt as trt
soFilePath = "./SignPlugin.so"
np.random.seed(31193)
def getSignPlugin():
for c in trt.get_plugin_registry().plugin_creator_list:
if c.name == "SignPlugin":
return c.create_plugin(c.name, trt.PluginFieldCollection([]))
return None
def buildEngine(logger, shape):
builder = trt.Builder(logger)
builder.max_batch_size = 4
builder.max_workspace_size = 3 << 30
network = builder.create_network()
inputT0 = network.add_input("inputT0", trt.float32, shape)
oneHotLayer = network.add_plugin_v2([inputT0], getSignPlugin())
network.mark_output(oneHotLayer.get_output(0))
return builder.build_cuda_engine(network)
def run(batchSize, shape):
print("test", batchSize, *shape)
logger = trt.Logger(trt.Logger.INFO)
trt.init_libnvinfer_plugins(logger, '')
ctypes.cdll.LoadLibrary(soFilePath)
engine = buildEngine(logger, shape)
if engine == None:
print("Failed building engine!")
return None
print("Succeeded building engine!")
context = engine.create_execution_context()
stream = cuda.Stream()
data = np.array(np.random.rand(batchSize, *shape) * 2 - 1, dtype=np.float32)
inputH0 = np.ascontiguousarray(data.reshape(-1))
inputD0 = cuda.mem_alloc(inputH0.nbytes)
outputH0 = np.empty((batchSize, ) + tuple(context.get_binding_shape(1)), dtype=trt.nptype(engine.get_binding_dtype(1)))
outputD0 = cuda.mem_alloc(outputH0.nbytes)
cuda.memcpy_htod_async(inputD0, inputH0, stream)
context.execute_async(batchSize, [int(inputD0), int(outputD0)], stream.handle)
cuda.memcpy_dtoh_async(outputH0, outputD0, stream)
stream.synchronize()
#print("data:", np.shape(data), data.dtype, np.mean(data), np.var(data), np.max(data), np.min(data))
#print(data)
#print("hOut:", np.shape(outputH0), outputH0.dtype, np.mean(outputH0), np.var(outputH0), np.max(outputH0), np.min(outputH0))
#print(outputH0)
print("check result:", np.all(np.sign(data) == outputH0), "\n")
if __name__ == "__main__":
np.set_printoptions(precision=3, linewidth=200, suppress=True)
run(4, [16])
run(4, [18])
run(4, [600])
print("test finish!")
| trt-samples-for-hackathon-cn-master | cookbook/05-Plugin/PluginReposity/SignPlugin/testSignPlugin.py |
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import ctypes
import os
import numpy as np
import pycuda.autoinit
import pycuda.driver as cuda
import tensorrt as trt
soFilePath = "./WherePlugin.so"
usingFp16 = False
def whereCPU(condition, inputX, inputY):
return inputX * condition + inputY * (1 - condition)
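# whereCPU is equivalent to np.where when condition holds 0/1 values (a quick
# reference sketch):
def whereCPUAlternative(condition, inputX, inputY):
    return np.where(condition.astype(bool), inputX, inputY)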
def getWherePlugin():
for c in trt.get_plugin_registry().plugin_creator_list:
if c.name == "WherePlugin":
return c.create_plugin(c.name, trt.PluginFieldCollection([]))
return None
def buildEngine(logger, nRow, nCol):
builder = trt.Builder(logger)
builder.max_batch_size = 4
builder.max_workspace_size = 3 << 30
builder.fp16_mode = usingFp16
network = builder.create_network()
tensor1 = network.add_input("condition", trt.int32, (nRow, nCol))
tensor2 = network.add_input("inputX", trt.float32, (nRow, nCol))
tensor3 = network.add_input("inputY", trt.float32, (nRow, nCol))
whereLayer = network.add_plugin_v2([tensor1, tensor2, tensor3], getWherePlugin())
network.mark_output(whereLayer.get_output(0))
return builder.build_cuda_engine(network)
def run(batchSize, nRow, nCol):
print("test", batchSize, nRow, nCol)
logger = trt.Logger(trt.Logger.ERROR)
trt.init_libnvinfer_plugins(logger, '')
ctypes.cdll.LoadLibrary(soFilePath)
engine = buildEngine(logger, nRow, nCol)
if engine == None:
print("Failed building engine!")
return None
print("Succeeded building engine!")
context = engine.create_execution_context()
stream = cuda.Stream()
condition = np.array(np.random.randint(0, 2, [batchSize, nRow, nCol]), dtype=np.int32)
inputX = np.full([batchSize, nRow, nCol], 1, dtype=np.float32)
inputY = np.full([batchSize, nRow, nCol], -1, dtype=np.float32)
inputH0 = np.ascontiguousarray(condition.reshape(-1))
inputH1 = np.ascontiguousarray(inputX.reshape(-1))
inputH2 = np.ascontiguousarray(inputY.reshape(-1))
inputD0 = cuda.mem_alloc(inputH0.nbytes)
inputD1 = cuda.mem_alloc(inputH1.nbytes)
inputD2 = cuda.mem_alloc(inputH2.nbytes)
outputH0 = np.empty((batchSize, ) + tuple(engine.get_binding_shape(3)), dtype=trt.nptype(engine.get_binding_dtype(3)))
outputD0 = cuda.mem_alloc(outputH0.nbytes)
cuda.memcpy_htod_async(inputD0, inputH0, stream)
cuda.memcpy_htod_async(inputD1, inputH1, stream)
cuda.memcpy_htod_async(inputD2, inputH2, stream)
context.execute_async(batchSize, [int(inputD0), int(inputD1), int(inputD2), int(outputD0)], stream.handle)
cuda.memcpy_dtoh_async(outputH0, outputD0, stream)
stream.synchronize()
outputH0CPU = whereCPU(condition, inputX, inputY)
print("Check result:", ["True" if np.all(outputH0 == outputH0CPU) else "False"][0])
if __name__ == "__main__":
np.set_printoptions(precision=3, linewidth=200, suppress=True)
run(4, 5, 4)
run(4, 20, 9)
run(4, 200, 10)
print("test finish!")
| trt-samples-for-hackathon-cn-master | cookbook/05-Plugin/PluginReposity/WherePlugin/testWherePlugin.py |
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import ctypes
import os
import numpy as np
import tensorrt as trt
from cuda import cudart
soFile = "./MultinomialDistributionPlugin.so"
np.random.seed(31193)
def printArrayInformation(x, info="", n=5):
if 0 in x.shape:
print('%s:%s' % (info, str(x.shape)))
print()
return
print( '%s:%s,SumAbs=%.5e,Var=%.5f,Max=%.5f,Min=%.5f,SAD=%.5f'%( \
info,str(x.shape),np.sum(abs(x)),np.var(x),np.max(x),np.min(x),np.sum(np.abs(np.diff(x.reshape(-1)))) ))
print('\t', x.reshape(-1)[:n], x.reshape(-1)[-n:])
def check(a, b, weak=False, checkEpsilon=1e-5):
if weak:
a = a.astype(np.float32)
b = b.astype(np.float32)
res = np.all(np.abs(a - b) < checkEpsilon)
else:
res = np.all(a == b)
diff0 = np.max(np.abs(a - b))
diff1 = np.max(np.abs(a - b) / (np.abs(b) + checkEpsilon))
print("check:%s, absDiff=%f, relDiff=%f" % (res, diff0, diff1))
def getMultinomialDistributionPlugin(nCol, seed):
for c in trt.get_plugin_registry().plugin_creator_list:
if c.name == "MultinomialDistribution":
parameterList = []
parameterList.append(trt.PluginField("seed", np.int32(seed), trt.PluginFieldType.INT32))
return c.create_plugin(c.name, trt.PluginFieldCollection(parameterList))
return None
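# A numpy reference sampler (a sketch; the plugin uses its own RNG, so only the
# resulting histogram, not the exact sample sequence, is comparable):
def multinomialCPU(weight, seed):
    rng = np.random.default_rng(seed)
    p = weight.astype(np.float64)
    p /= p.sum(axis=1, keepdims=True)  # normalize each row to a probability vector
    return np.array([rng.choice(len(row), p=row) for row in p], dtype=np.int32)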
def run(nBatchSize, nCol, seed):
testCase = "<nRow=%d,nCol=%s,seed=%d>" % (nBatchSize, nCol, seed)
trtFile = "./model-nCol%d-seed-%d.plan" % (nCol, seed)
print("Test %s" % testCase)
logger = trt.Logger(trt.Logger.ERROR)
trt.init_libnvinfer_plugins(logger, '')
ctypes.cdll.LoadLibrary(soFile)
if os.path.isfile(trtFile):
with open(trtFile, "rb") as f:
engineStr = f.read()
engine = trt.Runtime(logger).deserialize_cuda_engine(engineStr)
if engine == None:
print("Failed loading engine!")
exit()
print("Succeeded loading engine!")
else:
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
profile = builder.create_optimization_profile()
config = builder.create_builder_config()
inputT0 = network.add_input("inputT0", trt.float32, [-1, nCol])
profile.set_shape(inputT0.name, [1, nCol], [32, nCol], [1024, nCol])
config.add_optimization_profile(profile)
pluginLayer = network.add_plugin_v2([inputT0], getMultinomialDistributionPlugin(nCol, seed))
network.mark_output(pluginLayer.get_output(0))
network.mark_output(pluginLayer.get_output(1))
engineString = builder.build_serialized_network(network, config)
if engineString == None:
print("Failed building engine!")
return
print("Succeeded building engine!")
with open(trtFile, "wb") as f:
f.write(engineString)
engine = trt.Runtime(logger).deserialize_cuda_engine(engineString)
context = engine.create_execution_context()
data = np.full([nBatchSize, nCol], 1, dtype=np.float32) # uniform distribution
#data = np.tile(np.arange(0,nCol,1,dtype=np.float32),[nBatchSize,1]) # non-uniform distribution
context.set_binding_shape(0, [nBatchSize, nCol])
#print("Binding all? %s"%(["No","Yes"][int(context.all_binding_shapes_specified)]))
nInput = np.sum([engine.binding_is_input(i) for i in range(engine.num_bindings)])
nOutput = engine.num_bindings - nInput
#for i in range(nInput):
# print("Bind[%2d]:i[%2d]->" % (i, i), engine.get_binding_dtype(i), engine.get_binding_shape(i), context.get_binding_shape(i), engine.get_binding_name(i))
#for i in range(nInput, nInput + nOutput):
# print("Bind[%2d]:o[%2d]->" % (i, i - nInput), engine.get_binding_dtype(i), engine.get_binding_shape(i), context.get_binding_shape(i), engine.get_binding_name(i))
bufferH = []
bufferH.append(data.astype(np.float32).reshape(nBatchSize, nCol))
for i in range(nOutput):
bufferH.append(np.empty(context.get_binding_shape(nInput + i), dtype=trt.nptype(engine.get_binding_dtype(nInput + i))))
bufferD = []
for i in range(engine.num_bindings):
bufferD.append(cudart.cudaMalloc(bufferH[i].nbytes)[1])
for i in range(nInput):
cudart.cudaMemcpy(bufferD[i], np.ascontiguousarray(bufferH[i].reshape(-1)).ctypes.data, bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice)
context.execute_v2(bufferD)
for i in range(nOutput):
cudart.cudaMemcpy(bufferH[nInput + i].ctypes.data, bufferD[nInput + i], bufferH[nInput + i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost)
"""
for i in range(nInput):
printArrayInformation(bufferH[i])
for i in range(nOutput):
printArrayInformation(bufferH[nInput + i])
"""
count, _ = np.histogram(bufferH[nInput], np.arange(nCol + 1))
for i in range(nCol):
print("[%3d]:%4d ---- %.3f %%" % (i, count[i], count[i] / nBatchSize * 100))
for buffer in bufferD:
cudart.cudaFree(buffer)
print("Test %s finish!\n" % testCase)
if __name__ == "__main__":
os.system("rm -rf ./*.plan")
np.set_printoptions(precision=3, linewidth=200, suppress=True)
run(1024, 4, 97)
run(1024, 32, 97)
run(1024, 128, 97)
run(1024, 4, 89)
run(1024, 32, 89)
run(1024, 128, 89)
print("Test all finish!")
| trt-samples-for-hackathon-cn-master | cookbook/05-Plugin/PluginReposity/MultinomialDistributionPlugin-thrust-TRT8/testMultinomialDistributionPlugin.py |
import ctypes
import os
from time import time_ns
import numpy as np
import pycuda.autoinit
import pycuda.driver as cuda
import tensorrt as trt
soFilePath = "multinomial/RandomPlugin.so"
useFile = False
inputDataFile = "random_data.npz"
category_number = 192
npToTRT = {np.int8: trt.int8, np.float16: trt.float16, np.int32: trt.int32, np.float32: trt.float32}
npToPFT = {np.int8: trt.PluginFieldType.INT8, np.float16: trt.PluginFieldType.FLOAT16, np.int32: trt.PluginFieldType.INT32, np.float32: trt.PluginFieldType.FLOAT32}
def getRandomPlugin():
for c in trt.get_plugin_registry().plugin_creator_list:
#print(c.name)
if c.name == "RandomPlugin":
return c.create_plugin(c.name, trt.PluginFieldCollection([trt.PluginField("seed", np.int32(0), trt.PluginFieldType.INT32)]))
return None
def buildEngine(logger, datatype):
builder = trt.Builder(logger)
network = builder.create_network(1 << 0)
config = builder.create_builder_config()
config.flags = [0, 1 << int(trt.BuilderFlag.FP16)][int(datatype == np.float16)]
inputTensorList = []
inputTensorList.append(network.add_input("inputT", npToTRT[datatype], [-1, -1]))
profile = builder.create_optimization_profile()
profile.set_shape("inputT", [1, category_number], [16, category_number], [64, category_number])
config.add_optimization_profile(profile)
pluginLayer = network.add_plugin_v2(inputTensorList, getRandomPlugin())
pluginLayer.get_output(0).dtype = trt.int32
network.mark_output(pluginLayer.get_output(0))
return builder.build_engine(network, config)
def run(datatype, nBatchSize):
testCase = "test<bs=%d,fp%s>" % (nBatchSize, ["32", "16"][int(datatype == np.float16)])
logger = trt.Logger(trt.Logger.ERROR)
trt.init_libnvinfer_plugins(logger, '')
ctypes.cdll.LoadLibrary(soFilePath)
trtFile = "engine-fp" + ["32", "16"][int(datatype == np.float16)] + ".plan"
if os.path.isfile(trtFile):
with open(trtFile, "rb") as f:
engineStr = f.read()
engine = trt.Runtime(logger).deserialize_cuda_engine(engineStr)
if engine == None:
print("Failed loading engine!")
return
print("Succeeded loading engine!")
else:
engine = buildEngine(logger, datatype)
if engine == None:
print("Failed building engine!")
return
print("Succeeded building engine!")
with open(trtFile, "wb") as f:
f.write(engine.serialize())
context = engine.create_execution_context()
context.set_binding_shape(0, [nBatchSize, category_number])
print("Binding all? %s" % (["No", "Yes"][int(context.all_binding_shapes_specified)]))
stream = cuda.Stream()
nInput = np.sum([engine.binding_is_input(i) for i in range(engine.num_bindings)])
nOutput = engine.num_bindings - nInput
for i in range(engine.num_bindings):
print("input ->" if engine.binding_is_input(i) else "output->", engine.get_binding_dtype(i), engine.get_binding_shape(i), context.get_binding_shape(i))
bufferH = []
if useFile:
        io = np.load(inputDataFile)
bufferH.append(io["input"][:nBatchSize])
else:
        # every category gets a low score (-1) except categories 2, 6 and 9,
        # which get a high score (3), so sampling should concentrate on them
        temp = np.full((nBatchSize, category_number), -1, dtype=np.float32)
        temp[:, [2, 6, 9]] = 3
        bufferH.append(temp)
bufferH.append(np.empty(context.get_binding_shape(1), dtype=trt.nptype(engine.get_binding_dtype(1))))
bufferD = []
for i in range(engine.num_bindings):
bufferD.append(cuda.mem_alloc(bufferH[i].nbytes))
for i in range(nInput):
cuda.memcpy_htod_async(bufferD[i], np.ascontiguousarray(bufferH[i].reshape(-1)), stream)
context.execute_async_v2(bufferD, stream.handle)
stream.synchronize()
for i in range(nOutput):
cuda.memcpy_dtoh_async(bufferH[nInput + i], bufferD[nInput + i], stream)
stream.synchronize()
for i in range(nInput):
temp = bufferH[i]
print("inputH%d" % i, temp.shape, np.sum(abs(temp)), np.var(temp), np.max(temp), np.min(temp), np.sum(np.abs(np.diff(temp.reshape(-1)))))
print("check result:")
temp1 = bufferH[-1]
# temp2 = io["output"]
# max = np.max(np.abs(np.abs(temp1 - temp2)))
print("max is:", max)
if __name__ == "__main__":
os.system("rm -f ./*.plan")
np.set_printoptions(precision=3, linewidth=200, suppress=True)
run(np.float32, 20)
| trt-samples-for-hackathon-cn-master | cookbook/05-Plugin/PluginReposity/MultinomialDistributionPlugin-thrust-TRT8/random_trt.py |
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import ctypes
import os
import numpy as np
import tensorrt as trt
from cuda import cudart
soFile = "./CumSumPlugin.so"
dataTypeNpToTrt = {np.float32: trt.float32, np.float16: trt.float16, np.int32: trt.int32}
def printArrayInformation(x, info="", n=5):
if 0 in x.shape:
print('%s:%s' % (info, str(x.shape)))
print()
return
print( '%s:%s,SumAbs=%.5e,Var=%.5f,Max=%.5f,Min=%.5f,SAD=%.5f'%( \
info,str(x.shape),np.sum(abs(x)),np.var(x),np.max(x),np.min(x),np.sum(np.abs(np.diff(x.reshape(-1)))) ))
print('\t', x.reshape(-1)[:n], x.reshape(-1)[-n:])
def check(a, b, weak=False, checkEpsilon=1e-5):
if weak:
a = a.astype(np.float32)
b = b.astype(np.float32)
res = np.all(np.abs(a - b) < checkEpsilon)
else:
res = np.all(a == b)
diff0 = np.max(np.abs(a - b))
diff1 = np.max(np.abs(a - b) / (np.abs(b) + checkEpsilon))
print("check:%s, absDiff=%f, relDiff=%f" % (res, diff0, diff1))
def cumSumCPU(inputH, axis):
return [np.cumsum(inputH[0], axis)]
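# The same cumulative sum written as an explicit recurrence along the last axis
# (a sketch equivalent to np.cumsum with axis=-1):
#   y[..., 0] = x[..., 0];  y[..., i] = y[..., i-1] + x[..., i]
def cumSumLastAxisCPU(x):
    y = x.copy()
    for i in range(1, x.shape[-1]):
        y[..., i] += y[..., i - 1]
    return y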
def getCumSumPlugin(axis):
for c in trt.get_plugin_registry().plugin_creator_list:
#print(c.name)
if c.name == "CumSum":
parameterList = []
parameterList.append(trt.PluginField("axis", np.int32(axis), trt.PluginFieldType.INT32))
return c.create_plugin(c.name, trt.PluginFieldCollection(parameterList))
return None
def run(shape, dataType, axis):
if dataType == np.float32:
dataTypeStr = "FP32"
elif dataType == np.float16:
dataTypeStr = "FP16"
elif dataType == np.int32:
dataTypeStr = "INT32"
else:
dataTypeStr = "Other"
testCase = "<shape=%s,dataType=%s,axis=%d>" % (shape, dataTypeStr, axis)
trtFile = "./model-%s-%s-%d.plan" % (shape, dataTypeStr, axis)
print("Test %s" % testCase)
logger = trt.Logger(trt.Logger.ERROR)
trt.init_libnvinfer_plugins(logger, '')
ctypes.cdll.LoadLibrary(soFile)
if os.path.isfile(trtFile):
with open(trtFile, "rb") as f:
engine = trt.Runtime(logger).deserialize_cuda_engine(f.read())
if engine == None:
print("Failed loading engine!")
return
print("Succeeded loading engine!")
else:
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
profile = builder.create_optimization_profile()
config = builder.create_builder_config()
if dataType == np.float16:
config.set_flag(trt.BuilderFlag.FP16)
inputT0 = network.add_input("inputT0", dataTypeNpToTrt[dataType], [-1 for i in shape])
profile.set_shape(inputT0.name, [1 for i in shape], [8 for i in shape], [32 for i in shape[:-1]] + [256])
config.add_optimization_profile(profile)
pluginLayer = network.add_plugin_v2([inputT0], getCumSumPlugin(axis))
network.mark_output(pluginLayer.get_output(0))
engineString = builder.build_serialized_network(network, config)
if engineString == None:
print("Failed building engine!")
return
print("Succeeded building engine!")
with open(trtFile, "wb") as f:
f.write(engineString)
engine = trt.Runtime(logger).deserialize_cuda_engine(engineString)
context = engine.create_execution_context()
context.set_binding_shape(0, shape)
#print("Binding all? %s"%(["No","Yes"][int(context.all_binding_shapes_specified)]))
nInput = np.sum([engine.binding_is_input(i) for i in range(engine.num_bindings)])
nOutput = engine.num_bindings - nInput
#for i in range(nInput):
# print("Bind[%2d]:i[%2d]->" % (i, i), engine.get_binding_dtype(i), engine.get_binding_shape(i), context.get_binding_shape(i), engine.get_binding_name(i))
#for i in range(nInput, nInput + nOutput):
# print("Bind[%2d]:o[%2d]->" % (i, i - nInput), engine.get_binding_dtype(i), engine.get_binding_shape(i), context.get_binding_shape(i), engine.get_binding_name(i))
bufferH = []
if dataType == np.int32:
bufferH.append(np.random.randint(-10, 10, shape).astype(np.int32).reshape(shape))
#bufferH.append(np.arange(np.prod(shape)).astype(np.int32).reshape(shape))
else:
bufferH.append(np.random.rand(np.prod(shape)).astype(dataType).reshape(shape) * 2 - 1)
#bufferH.append(np.arange(np.prod(shape)).astype(dataType).reshape(shape))
for i in range(nOutput):
bufferH.append(np.empty(context.get_binding_shape(nInput + i), dtype=trt.nptype(engine.get_binding_dtype(nInput + i))))
bufferD = []
for i in range(engine.num_bindings):
bufferD.append(cudart.cudaMalloc(bufferH[i].nbytes)[1])
for i in range(nInput):
cudart.cudaMemcpy(bufferD[i], np.ascontiguousarray(bufferH[i].reshape(-1)).ctypes.data, bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice)
context.execute_v2(bufferD)
for i in range(nOutput):
cudart.cudaMemcpy(bufferH[nInput + i].ctypes.data, bufferD[nInput + i], bufferH[nInput + i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost)
outputCPU = cumSumCPU(bufferH[:nInput], axis)
"""
for i in range(nInput):
printArrayInformation(bufferH[i])
print(bufferH[i])
for i in range(nOutput):
printArrayInformation(bufferH[nInput+i])
for i in range(nOutput):
printArrayInformation(outputCPU[i])
print(bufferH[nInput+i])
"""
check(bufferH[nInput:][0], outputCPU[0], True)
for buffer in bufferD:
cudart.cudaFree(buffer)
print("Test %s finish!\n" % testCase)
if __name__ == "__main__":
os.system("rm -rf ./*.plan")
np.set_printoptions(precision=3, linewidth=200, suppress=True)
# dimension w
run([16], np.float32, 0)
run([16], np.float16, 0)
run([16], np.int32, 0)
run([2, 16], np.float32, 1)
run([2, 16], np.float16, 1)
run([2, 16], np.int32, 1)
run([2, 3, 16], np.float32, 2)
run([2, 3, 16], np.float16, 2)
run([2, 3, 16], np.int32, 2)
run([2, 3, 4, 16], np.float32, 3)
run([2, 3, 4, 16], np.float16, 3)
run([2, 3, 4, 16], np.int32, 3)
run([256], np.float32, 0)
run([256], np.float16, 0)
run([256], np.int32, 0)
run([2, 256], np.float32, 1)
run([2, 256], np.float16, 1)
run([2, 256], np.int32, 1)
run([2, 3, 256], np.float32, 2)
run([2, 3, 256], np.float16, 2)
run([2, 3, 256], np.int32, 2)
run([2, 3, 4, 256], np.float32, 3)
run([2, 3, 4, 256], np.float16, 3)
run([2, 3, 4, 256], np.int32, 3)
# dimension h
run([2, 16], np.float32, 0)
run([2, 16], np.float16, 0)
run([2, 16], np.int32, 0)
run([2, 3, 16], np.float32, 1)
run([2, 3, 16], np.float16, 1)
run([2, 3, 16], np.int32, 1)
run([2, 3, 4, 16], np.float32, 2)
run([2, 3, 4, 16], np.float16, 2)
run([2, 3, 4, 16], np.int32, 2)
run([2, 256], np.float32, 0)
run([2, 256], np.float16, 0)
run([2, 256], np.int32, 0)
run([2, 3, 256], np.float32, 1)
run([2, 3, 256], np.float16, 1)
run([2, 3, 256], np.int32, 1)
run([2, 3, 4, 256], np.float32, 2)
run([2, 3, 4, 256], np.float16, 2)
run([2, 3, 4, 256], np.int32, 2)
# dimension c
run([2, 3, 16], np.float32, 0)
run([2, 3, 16], np.float16, 0)
run([2, 3, 16], np.int32, 0)
run([2, 3, 4, 16], np.float32, 1)
run([2, 3, 4, 16], np.float16, 1)
run([2, 3, 4, 16], np.int32, 1)
run([2, 3, 256], np.float32, 0)
run([2, 3, 256], np.float16, 0)
run([2, 3, 256], np.int32, 0)
run([2, 3, 4, 256], np.float32, 1)
run([2, 3, 4, 256], np.float16, 1)
run([2, 3, 4, 256], np.int32, 1)
# dimension n
run([2, 3, 4, 16], np.float32, 0)
run([2, 3, 4, 16], np.float16, 0)
run([2, 3, 4, 16], np.int32, 0)
run([2, 3, 4, 256], np.float32, 0)
run([2, 3, 4, 256], np.float16, 0)
run([2, 3, 4, 256], np.int32, 0)
print("Test all finish!")
| trt-samples-for-hackathon-cn-master | cookbook/05-Plugin/PluginReposity/CumSumPlugin-V2.1-TRT8/testCumSumPlugin.py |
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import ctypes
import os
import numpy as np
import tensorrt as trt
from cuda import cudart
soFile = "./AddScalarPlugin.so"
np.random.seed(31193)
def printArrayInformation(x, info="", n=5):
if 0 in x.shape:
print('%s:%s' % (info, str(x.shape)))
print()
return
print( '%s:%s,SumAbs=%.5e,Var=%.5f,Max=%.5f,Min=%.5f,SAD=%.5f'%( \
info,str(x.shape),np.sum(abs(x)),np.var(x),np.max(x),np.min(x),np.sum(np.abs(np.diff(x.reshape(-1)))) ))
print('\t', x.reshape(-1)[:n], x.reshape(-1)[-n:])
def check(a, b, weak=False, checkEpsilon=1e-5):
if weak:
a = a.astype(np.float32)
b = b.astype(np.float32)
res = np.all(np.abs(a - b) < checkEpsilon)
else:
res = np.all(a == b)
diff0 = np.max(np.abs(a - b))
diff1 = np.max(np.abs(a - b) / (np.abs(b) + checkEpsilon))
print("check:%s, absDiff=%f, relDiff=%f" % (res, diff0, diff1))
def addScalarCPU(inputH, scalar):
return [inputH[0] + scalar]
def getAddScalarPlugin(scalar):
for c in trt.get_plugin_registry().plugin_creator_list:
#print(c.name)
if c.name == "AddScalar":
parameterList = []
parameterList.append(trt.PluginField("scalar", np.float32(scalar), trt.PluginFieldType.FLOAT32))
return c.create_plugin(c.name, trt.PluginFieldCollection(parameterList))
return None
def run(shape, scalar):
testCase = "<shape=%s,scalar=%f>" % (shape, scalar)
trtFile = "./model-Dim%s.plan" % str(len(shape))
print("Test %s" % testCase)
logger = trt.Logger(trt.Logger.ERROR)
trt.init_libnvinfer_plugins(logger, '')
ctypes.cdll.LoadLibrary(soFile)
if os.path.isfile(trtFile):
with open(trtFile, "rb") as f:
engineStr = f.read()
engine = trt.Runtime(logger).deserialize_cuda_engine(engineStr)
if engine == None:
print("Failed loading engine!")
exit()
print("Succeeded loading engine!")
else:
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
profile = builder.create_optimization_profile()
config = builder.create_builder_config()
inputT0 = network.add_input("inputT0", trt.float32, [-1 for i in shape])
profile.set_shape(inputT0.name, [1 for i in shape], [8 for i in shape], [32 for i in shape])
config.add_optimization_profile(profile)
pluginLayer = network.add_plugin_v2([inputT0], getAddScalarPlugin(scalar))
network.mark_output(pluginLayer.get_output(0))
engineString = builder.build_serialized_network(network, config)
if engineString == None:
print("Failed building engine!")
return
print("Succeeded building engine!")
with open(trtFile, "wb") as f:
f.write(engineString)
engine = trt.Runtime(logger).deserialize_cuda_engine(engineString)
context = engine.create_execution_context()
context.set_binding_shape(0, shape)
#print("Binding all? %s"%(["No","Yes"][int(context.all_binding_shapes_specified)]))
nInput = np.sum([engine.binding_is_input(i) for i in range(engine.num_bindings)])
nOutput = engine.num_bindings - nInput
#for i in range(nInput):
# print("Bind[%2d]:i[%2d]->" % (i, i), engine.get_binding_dtype(i), engine.get_binding_shape(i), context.get_binding_shape(i), engine.get_binding_name(i))
#for i in range(nInput, nInput + nOutput):
# print("Bind[%2d]:o[%2d]->" % (i, i - nInput), engine.get_binding_dtype(i), engine.get_binding_shape(i), context.get_binding_shape(i), engine.get_binding_name(i))
bufferH = []
bufferH.append(np.arange(np.prod(shape), dtype=np.float32).reshape(shape))
for i in range(nOutput):
bufferH.append(np.empty(context.get_binding_shape(nInput + i), dtype=trt.nptype(engine.get_binding_dtype(nInput + i))))
bufferD = []
for i in range(engine.num_bindings):
bufferD.append(cudart.cudaMalloc(bufferH[i].nbytes)[1])
for i in range(nInput):
cudart.cudaMemcpy(bufferD[i], np.ascontiguousarray(bufferH[i].reshape(-1)).ctypes.data, bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice)
context.execute_v2(bufferD)
for i in range(nOutput):
cudart.cudaMemcpy(bufferH[nInput + i].ctypes.data, bufferD[nInput + i], bufferH[nInput + i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost)
outputCPU = addScalarCPU(bufferH[:nInput], scalar)
"""
for i in range(nInput):
printArrayInformation(bufferH[i])
for i in range(nInput, nIO):
printArrayInformation(bufferH[i])
for i in range(nInput, nIO):
printArrayInformation(outputCPU[i - nInput])
"""
check(bufferH[nInput:][0], outputCPU[0], True)
for buffer in bufferD:
cudart.cudaFree(buffer)
print("Test %s finish!\n" % testCase)
if __name__ == "__main__":
os.system("rm -rf ./*.plan")
np.set_printoptions(precision=3, linewidth=200, suppress=True)
run([32], 1)
run([32, 32], 1)
run([16, 16, 16], 1)
run([8, 8, 8, 8], 1)
print("Test all finish!")
| trt-samples-for-hackathon-cn-master | cookbook/05-Plugin/PluginReposity/AddScalarPlugin-TRT8/testAddScalarPlugin.py |
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import numpy as np
import tensorrt as trt
from cuda import cudart
nDataSize = 3840
nRetainSize = 2000
nImageHeight = 960
nImageWidth = 1024
dataFile = "data.npz"
np.random.seed(31193)
def printArrayInformation(x, info="", n=5):
if 0 in x.shape:
print('%s:%s' % (info, str(x.shape)))
print()
return
print( '%s:%s,SumAbs=%.5e,Var=%.5f,Max=%.5f,Min=%.5f,SAD=%.5f'%( \
info,str(x.shape),np.sum(abs(x)),np.var(x),np.max(x),np.min(x),np.sum(np.abs(np.diff(x.reshape(-1)))) ))
print('\t', x.reshape(-1)[:n], x.reshape(-1)[-n:])
def getBatchedNMSPlugin():
for c in trt.get_plugin_registry().plugin_creator_list:
#print(c.name)
if c.name == "BatchedNMS_TRT":
parameterList = []
parameterList.append(trt.PluginField("shareLocation", np.array([1], dtype=np.int32), trt.PluginFieldType.INT32))
parameterList.append(trt.PluginField("backgroundLabelId", np.array([-1], dtype=np.int32), trt.PluginFieldType.INT32))
parameterList.append(trt.PluginField("numClasses", np.array([1], dtype=np.int32), trt.PluginFieldType.INT32))
parameterList.append(trt.PluginField("topK", np.array([nDataSize], dtype=np.int32), trt.PluginFieldType.INT32))
parameterList.append(trt.PluginField("keepTopK", np.array([nRetainSize], dtype=np.int32), trt.PluginFieldType.INT32))
parameterList.append(trt.PluginField("scoreThreshold", np.array([0.7], dtype=np.float32), trt.PluginFieldType.FLOAT32))
parameterList.append(trt.PluginField("iouThreshold", np.array([0.7], dtype=np.float32), trt.PluginFieldType.FLOAT32))
parameterList.append(trt.PluginField("isNormalized", np.array([1], dtype=np.int32), trt.PluginFieldType.INT32))
return c.create_plugin(c.name, trt.PluginFieldCollection(parameterList))
return None
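# The overlap criterion the plugin thresholds at iouThreshold (a reference
# sketch, assuming the usual [x1, y1, x2, y2] corner layout of the boxes):
def iou(boxA, boxB):
    x1, y1 = max(boxA[0], boxB[0]), max(boxA[1], boxB[1])
    x2, y2 = min(boxA[2], boxB[2]), min(boxA[3], boxB[3])
    inter = max(0.0, x2 - x1) * max(0.0, y2 - y1)
    areaA = (boxA[2] - boxA[0]) * (boxA[3] - boxA[1])
    areaB = (boxB[2] - boxB[0]) * (boxB[3] - boxB[1])
    return inter / (areaA + areaB - inter)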
def run():
trtFile = "./model.plan"
logger = trt.Logger(trt.Logger.ERROR)
trt.init_libnvinfer_plugins(logger, '')
if os.path.isfile(trtFile):
with open(trtFile, "rb") as f:
engine = trt.Runtime(logger).deserialize_cuda_engine(f.read())
if engine == None:
print("Failed loading engine!")
return
print("Succeeded loading engine!")
else:
builder = trt.Builder(logger)
builder.max_batch_size = 1
network = builder.create_network()
config = builder.create_builder_config()
tensor1 = network.add_input("data1", trt.float32, (nDataSize, 1, 4))
tensor2 = network.add_input("data2", trt.float32, (nDataSize, 1))
scaleLayer = network.add_scale(tensor1, trt.ScaleMode.UNIFORM, np.array([0.0], dtype=np.float32), np.array([1 / max(nImageHeight, nImageWidth)], dtype=np.float32), np.array([1.0], dtype=np.float32))
nmsLayer = network.add_plugin_v2([scaleLayer.get_output(0), tensor2], getBatchedNMSPlugin())
network.mark_output(nmsLayer.get_output(0))
network.mark_output(nmsLayer.get_output(1))
network.mark_output(nmsLayer.get_output(2))
network.mark_output(nmsLayer.get_output(3))
engineString = builder.build_serialized_network(network, config)
if engineString == None:
print("Failed building engine!")
return
print("Succeeded building engine!")
with open(trtFile, "wb") as f:
f.write(engineString)
engine = trt.Runtime(logger).deserialize_cuda_engine(engineString)
context = engine.create_execution_context()
#print("Binding all? %s"%(["No","Yes"][int(context.all_binding_shapes_specified)]))
nInput = np.sum([engine.binding_is_input(i) for i in range(engine.num_bindings)])
nOutput = engine.num_bindings - nInput
#for i in range(nInput):
# print("Bind[%2d]:i[%2d]->" % (i, i), engine.get_binding_dtype(i), engine.get_binding_shape(i), context.get_binding_shape(i), engine.get_binding_name(i))
#for i in range(nInput, nInput + nOutput):
# print("Bind[%2d]:o[%2d]->" % (i, i - nInput), engine.get_binding_dtype(i), engine.get_binding_shape(i), context.get_binding_shape(i), engine.get_binding_name(i))
data = np.load(dataFile)["prop"][:nDataSize]
norm = max(nImageHeight, nImageWidth)
data[:, :4] /= norm
bufferH = []
bufferH.append(np.ascontiguousarray(data[:, :4].reshape(nDataSize, 1, 4)))
bufferH.append(np.ascontiguousarray(data[:, 4].reshape(nDataSize, 1)))
for i in range(nOutput):
bufferH.append(np.empty(context.get_binding_shape(nInput + i), dtype=trt.nptype(engine.get_binding_dtype(nInput + i))))
bufferD = []
for i in range(engine.num_bindings):
bufferD.append(cudart.cudaMalloc(bufferH[i].nbytes)[1])
for i in range(nInput):
cudart.cudaMemcpy(bufferD[i], np.ascontiguousarray(bufferH[i].reshape(-1)).ctypes.data, bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice)
context.execute(1, bufferD)
for i in range(nOutput):
cudart.cudaMemcpy(bufferH[nInput + i].ctypes.data, bufferD[nInput + i], bufferH[nInput + i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost)
for i in range(nInput):
printArrayInformation(bufferH[i], "Input %d" % i)
for i in range(nOutput):
printArrayInformation(bufferH[nInput + i] if i != 1 else bufferH[nInput + i] * norm, "Output%d" % i)
if __name__ == "__main__":
np.set_printoptions(precision=3, linewidth=200, suppress=True)
run()
| trt-samples-for-hackathon-cn-master | cookbook/05-Plugin/PluginReposity/BatchedNMS_TRTPlugin-TRT8/testNMSPlugin.py |
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import ctypes
import os
import numpy as np
import tensorrt as trt
from cuda import cudart
dataFile = "./data.npz"
dataName = "data"
dataShape = [4, 4, 4, 4]
soFile = "./LoadNpzPlugin.so"
np.set_printoptions(precision=3, linewidth=200, suppress=True)
np.random.seed(31193)
cudart.cudaDeviceSynchronize()
def printArrayInformation(x, info="", n=5):
if 0 in x.shape:
print('%s:%s' % (info, str(x.shape)))
print()
return
print( '%s:%s,SumAbs=%.5e,Var=%.5f,Max=%.5f,Min=%.5f,SAD=%.5f'%( \
info,str(x.shape),np.sum(abs(x)),np.var(x),np.max(x),np.min(x),np.sum(np.abs(np.diff(x.reshape(-1)))) ))
print('\t', x.reshape(-1)[:n], x.reshape(-1)[-n:])
def check(a, b, weak=False, checkEpsilon=1e-5):
if weak:
a = a.astype(np.float32)
b = b.astype(np.float32)
res = np.all(np.abs(a - b) < checkEpsilon)
else:
res = np.all(a == b)
diff0 = np.max(np.abs(a - b))
diff1 = np.max(np.abs(a - b) / (np.abs(b) + checkEpsilon))
print("check:%s, absDiff=%f, relDiff=%f" % (res, diff0, diff1))
def createData():
dataDict = {}
dataDict[dataName] = np.ones(dataShape, dtype=np.float32)
np.savez(dataFile, **dataDict)
print("Succeeded saving data as .npz file!")
return
def LoadNpzCPU(dummyInputTensor):
    return [np.load(dataFile)[dataName]]  # return a list, one entry per output tensor
def getLoadNpzPlugin():
for c in trt.get_plugin_registry().plugin_creator_list:
#print(c.name)
if c.name == "LoadNpzPlugin":
return c.create_plugin(c.name, trt.PluginFieldCollection([]))
return None
def run():
trtFile = "./model.plan"
print("Test")
logger = trt.Logger(trt.Logger.ERROR)
trt.init_libnvinfer_plugins(logger, '')
ctypes.cdll.LoadLibrary(soFile)
if os.path.isfile(trtFile):
with open(trtFile, "rb") as f:
engine = trt.Runtime(logger).deserialize_cuda_engine(f.read())
if engine == None:
print("Failed loading engine!")
return
print("Succeeded loading engine!")
else:
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
config = builder.create_builder_config()
#inputT0 = network.add_input("inputT0", trt.float32, [1]) # dummy input
# Plugin Layer must have a input tensor, or we will get the error:
# [TRT] [E] 2: [stdArchiveReader.h::readManyHelper::333] Error Code 2: Internal Error (Assertion prefix.count failed. Enums must always have at least one entry.)
#pluginLayer = network.add_plugin_v2([inputT0], getLoadNpzPlugin())
pluginLayer = network.add_plugin_v2([], getLoadNpzPlugin())
network.mark_output(pluginLayer.get_output(0))
engineString = builder.build_serialized_network(network, config)
if engineString == None:
print("Failed building engine!")
return
print("Succeeded building engine!")
with open(trtFile, "wb") as f:
f.write(engineString)
engine = trt.Runtime(logger).deserialize_cuda_engine(engineString)
nIO = engine.num_io_tensors
lTensorName = [engine.get_tensor_name(i) for i in range(nIO)]
nInput = [engine.get_tensor_mode(lTensorName[i]) for i in range(nIO)].count(trt.TensorIOMode.INPUT)
context = engine.create_execution_context()
for i in range(nIO):
print("[%2d]%s->" % (i, "Input " if i < nInput else "Output"), engine.get_tensor_dtype(lTensorName[i]), engine.get_tensor_shape(lTensorName[i]), context.get_tensor_shape(lTensorName[i]), lTensorName[i])
bufferH = []
    #bufferH.append(np.array([0], dtype=np.float32))  # only needed when the dummy input tensor above is enabled; the network here has no input, so bufferH must hold the outputs only
for i in range(nInput, nIO):
bufferH.append(np.empty(context.get_tensor_shape(lTensorName[i]), dtype=trt.nptype(engine.get_tensor_dtype(lTensorName[i]))))
bufferD = []
for i in range(nIO):
bufferD.append(cudart.cudaMalloc(bufferH[i].nbytes)[1])
for i in range(nInput):
cudart.cudaMemcpy(bufferD[i], bufferH[i].ctypes.data, bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice)
for i in range(nIO):
context.set_tensor_address(lTensorName[i], int(bufferD[i]))
context.execute_async_v3(0)
for i in range(nInput, nIO):
cudart.cudaMemcpy(bufferH[i].ctypes.data, bufferD[i], bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost)
outputCPU = LoadNpzCPU(bufferH[:nInput])
for i in range(nInput):
printArrayInformation(bufferH[i])
for i in range(nInput, nIO):
printArrayInformation(bufferH[i])
for i in range(nInput, nIO):
printArrayInformation(outputCPU[i - nInput])
    check(bufferH[nInput:][0], outputCPU[0], True)
for b in bufferD:
cudart.cudaFree(b)
print("Test finish!\n")
if __name__ == "__main__":
os.system("rm -rf ./*.plan ./*.npz")
createData()
run()
print("Test all finish!")
| trt-samples-for-hackathon-cn-master | cookbook/05-Plugin/LoadDataFromNpz/testLoadNpzPlugin.py |
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
import numpy as np
import onnx
import onnx_graphsurgeon as gs
tensor0 = gs.Variable("tensor0", np.float32, ["B", 1, 64, 64])
tensor1 = gs.Variable("tensor1", np.float32, ["B", 1, 64, 64])
node0 = gs.Node("AddScalar", "myAddScalar", inputs=[tensor0], outputs=[tensor1], attrs=OrderedDict([("scalar", np.array([10], dtype=np.float32))]))
graph = gs.Graph(nodes=[node0], inputs=[tensor0], outputs=[tensor1])
graph.cleanup().toposort()
onnx.save(gs.export_onnx(graph), "./model.onnx")
np.random.seed(31193)
dd = {}
dd["inferenceData"] = np.random.rand(4 * 1 * 64 * 64).astype(np.float32).reshape([4, 1, 64, 64])
np.savez("data.npz", **dd)
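# A short sanity check (not in the original script): reload the artifacts written
# above and print what the accompanying C++ sample is expected to consume.
print("ONNX node ops:", [node.op_type for node in onnx.load("./model.onnx").graph.node])
print("inferenceData shape:", np.load("data.npz")["inferenceData"].shape)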
| trt-samples-for-hackathon-cn-master | cookbook/05-Plugin/C++-PluginInsideEngine/getOnnxModelAndData.py |
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import ctypes
import os
import numpy as np
import onnx
import onnx_graphsurgeon as gs
import tensorrt as trt
import torch as t
from cuda import cudart
onnxFile = "./model.onnx"
onnxSurgeonFile = "./model-surgeon.onnx"
soFile = "./AddScalarPlugin.so"
trtFile = "./model.plan"
shape = [2, 3, 4, 5]
inputX = np.random.rand(*shape).astype(np.float32).reshape(shape)
np.set_printoptions(precision=3, linewidth=200, suppress=True)
cudart.cudaDeviceSynchronize()
def printArrayInformation(x, info="", n=5):
if 0 in x.shape:
print('%s:%s' % (info, str(x.shape)))
print()
return
print( '%s:%s,SumAbs=%.5e,Var=%.5f,Max=%.5f,Min=%.5f,SAD=%.5f'%( \
info,str(x.shape),np.sum(abs(x)),np.var(x),np.max(x),np.min(x),np.sum(np.abs(np.diff(x.reshape(-1)))) ))
print('\t', x.reshape(-1)[:n], x.reshape(-1)[-n:])
def check(a, b, weak=False, checkEpsilon=1e-5):
if weak:
a = a.astype(np.float32)
b = b.astype(np.float32)
res = np.all(np.abs(a - b) < checkEpsilon)
else:
res = np.all(a == b)
diff0 = np.max(np.abs(a - b))
diff1 = np.max(np.abs(a - b) / (np.abs(b) + checkEpsilon))
print("check:%s, absDiff=%f, relDiff=%f" % (res, diff0, diff1))
# Create network in pyTorch and export as ONNX ----------------------------------
class Net(t.nn.Module):
def __init__(self):
super(Net, self).__init__()
    def forward(self, x):
        x = t.mul(x, 1.0)  # the two Mul(1.0) wrappers keep the Add node away from the
        x = t.add(x, 1.0)  # network boundary, so node.inputs[0] and node.o() used in
        y = t.mul(x, 1.0)  # the replacement loop below always refer to internal tensors
        return y
net = Net().cuda()
outputPyTorch = net(t.from_numpy(inputX).cuda()).detach().cpu().numpy()
t.onnx.export(net, t.from_numpy(inputX).cuda(), onnxFile, input_names=["x"], output_names=["y"], do_constant_folding=True, verbose=True, opset_version=14, dynamic_axes={"x": {
0: "nBS",
}})
print("Succeeded converting model into ONNX!")
# Replace the Add node with an AddScalar plugin node ----------------------------
graph = gs.import_onnx(onnx.load(onnxFile))
graph.inputs[0].shape = ["nBS"] + shape[1:]
graph.outputs[0].shape = ["nBS"] + shape[1:]
nPlugin = 0
for node in graph.nodes:
if node.op == "Add":
scalar = float(node.i(1).attrs["value"].values)
pluginV = gs.Variable("MyAddPluginVariable-%d" % nPlugin, np.dtype(np.float32), None)
pluginN = gs.Node("AddScalar", "MyAddPluginNode-%d" % nPlugin, inputs=[node.inputs[0]], outputs=[pluginV], attrs={"scalar": float(scalar)})
graph.nodes.append(pluginN)
node.o().inputs[0] = pluginV
node.outputs.clear()
nPlugin += 1
graph.cleanup()
onnx.save(gs.export_onnx(graph), onnxSurgeonFile)
print("Succeeded replacing AddScalar plugin!")
# build TensorRT engine with ONNX file and plugin.so ---------------------------
logger = trt.Logger(trt.Logger.INFO)
trt.init_libnvinfer_plugins(logger, '')
ctypes.cdll.LoadLibrary(soFile)
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
profile = builder.create_optimization_profile()
config = builder.create_builder_config()
parser = trt.OnnxParser(network, logger)
if not os.path.exists(onnxSurgeonFile):
print("Failed finding ONNX file!")
exit()
print("Succeeded finding ONNX file!")
with open(onnxSurgeonFile, "rb") as model:
if not parser.parse(model.read()):
print("Failed parsing .onnx file!")
for error in range(parser.num_errors):
print(parser.get_error(error))
exit()
print("Succeeded parsing .onnx file!")
inputTensor = network.get_input(0)
inputTensor.shape = [-1] + shape[1:]
profile.set_shape(inputTensor.name, [1] + shape[1:], shape, shape)
config.add_optimization_profile(profile)
engineString = builder.build_serialized_network(network, config)
if engineString == None:
print("Failed building engine!")
exit()
print("Succeeded building engine!")
with open(trtFile, "wb") as f:
f.write(engineString)
engine = trt.Runtime(logger).deserialize_cuda_engine(engineString)
nIO = engine.num_io_tensors
lTensorName = [engine.get_tensor_name(i) for i in range(nIO)]
nInput = [engine.get_tensor_mode(lTensorName[i]) for i in range(nIO)].count(trt.TensorIOMode.INPUT)
context = engine.create_execution_context()
context.set_input_shape(lTensorName[0], shape)
#for i in range(nIO):
# print("[%2d]%s->" % (i, "Input " if i < nInput else "Output"), engine.get_tensor_dtype(lTensorName[i]), engine.get_tensor_shape(lTensorName[i]), context.get_tensor_shape(lTensorName[i]), lTensorName[i])
bufferH = []
bufferH.append(np.ascontiguousarray(inputX))
for i in range(nInput, nIO):
bufferH.append(np.empty(context.get_tensor_shape(lTensorName[i]), dtype=trt.nptype(engine.get_tensor_dtype(lTensorName[i]))))
bufferD = []
for i in range(nIO):
bufferD.append(cudart.cudaMalloc(bufferH[i].nbytes)[1])
for i in range(nInput):
cudart.cudaMemcpy(bufferD[i], bufferH[i].ctypes.data, bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice)
for i in range(nIO):
context.set_tensor_address(lTensorName[i], int(bufferD[i]))
context.execute_async_v3(0)
for i in range(nInput, nIO):
cudart.cudaMemcpy(bufferH[i].ctypes.data, bufferD[i], bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost)
check(bufferH[nInput:][0], outputPyTorch, True)
for b in bufferD:
cudart.cudaFree(b)
print("Succeeded running model in TensorRT!")
| trt-samples-for-hackathon-cn-master | cookbook/05-Plugin/UseONNXParserAndPlugin-pyTorch/main.py |
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import ctypes
import os
import numpy as np
import tensorrt as trt
from cuda import cudart
soFile = "./CuBLASGemmPlugin.so"
b, m, k, n = 5, 2, 3, 4
np.set_printoptions(precision=3, linewidth=200, suppress=True)
np.random.seed(31193)  # seed before generating the test data so results are reproducible
globalData = np.random.rand(b * m * k).astype(np.float32).reshape(b, m, k) * 2 - 1
globalWeight = np.random.rand(k * n).astype(np.float32).reshape(k, n) * 2 - 1
cudart.cudaDeviceSynchronize()
def printArrayInformation(x, info="", n=5):
if 0 in x.shape:
print('%s:%s' % (info, str(x.shape)))
print()
return
print( '%s:%s,SumAbs=%.5e,Var=%.5f,Max=%.5f,Min=%.5f,SAD=%.5f'%( \
info,str(x.shape),np.sum(abs(x)),np.var(x),np.max(x),np.min(x),np.sum(np.abs(np.diff(x.reshape(-1)))) ))
print('\t', x.reshape(-1)[:n], x.reshape(-1)[-n:])
def check(a, b, weak=False, checkEpsilon=1e-5):
if weak:
a = a.astype(np.float32)
b = b.astype(np.float32)
res = np.all(np.abs(a - b) < checkEpsilon)
else:
res = np.all(a == b)
diff0 = np.max(np.abs(a - b))
diff1 = np.max(np.abs(a - b) / (np.abs(b) + checkEpsilon))
print("check:%s, absDiff=%f, relDiff=%f" % (res, diff0, diff1))
def CuBLASGemmCPU(inputH, weight):
return [np.matmul(inputH[0], weight)]
def getCuBLASGemmPlugin(weight):
for c in trt.get_plugin_registry().plugin_creator_list:
#print(c.name)
if c.name == "CuBLASGemm":
parameterList = []
parameterList.append(trt.PluginField("weight", np.float32(weight), trt.PluginFieldType.FLOAT32))
parameterList.append(trt.PluginField("k", np.int32(weight.shape[0]), trt.PluginFieldType.INT32))
parameterList.append(trt.PluginField("n", np.int32(weight.shape[1]), trt.PluginFieldType.INT32))
return c.create_plugin(c.name, trt.PluginFieldCollection(parameterList))
return None
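def describeCuBLASGemmCreator():
    # Hedged introspection helper (not in the original test, assumes the .so above is
    # already loaded): print the fields the CuBLASGemm creator advertises, so that
    # mismatched PluginField names surface early instead of failing at create_plugin.
    for c in trt.get_plugin_registry().plugin_creator_list:
        if c.name == "CuBLASGemm":
            for f in c.field_names:
                print(f.name, f.type, f.size)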
def run():
trtFile = "./model.plan"
logger = trt.Logger(trt.Logger.ERROR)
trt.init_libnvinfer_plugins(logger, '')
ctypes.cdll.LoadLibrary(soFile)
if os.path.isfile(trtFile):
with open(trtFile, "rb") as f:
engine = trt.Runtime(logger).deserialize_cuda_engine(f.read())
if engine == None:
print("Failed loading engine!")
return
print("Succeeded loading engine!")
else:
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
profile = builder.create_optimization_profile()
config = builder.create_builder_config()
inputT0 = network.add_input("inputT0", trt.float32, [-1, -1, k])
profile.set_shape(inputT0.name, [1, 1, k], [b, m, k], [b * 2, m * 2, k])
config.add_optimization_profile(profile)
pluginLayer = network.add_plugin_v2([inputT0], getCuBLASGemmPlugin(globalWeight))
pluginLayer.get_output(0).name = "GEMM-Plugin-Output"
network.mark_output(pluginLayer.get_output(0))
engineString = builder.build_serialized_network(network, config)
if engineString == None:
print("Failed building engine!")
return
print("Succeeded building engine!")
with open(trtFile, "wb") as f:
f.write(engineString)
engine = trt.Runtime(logger).deserialize_cuda_engine(engineString)
nIO = engine.num_io_tensors
lTensorName = [engine.get_tensor_name(i) for i in range(nIO)]
nInput = [engine.get_tensor_mode(lTensorName[i]) for i in range(nIO)].count(trt.TensorIOMode.INPUT)
context = engine.create_execution_context()
context.set_input_shape(lTensorName[0], [b, m, k])
#for i in range(nIO):
# print("[%2d]%s->" % (i, "Input " if i < nInput else "Output"), engine.get_tensor_dtype(lTensorName[i]), engine.get_tensor_shape(lTensorName[i]), context.get_tensor_shape(lTensorName[i]), lTensorName[i])
bufferH = []
bufferH.append(globalData)
for i in range(nInput, nIO):
bufferH.append(np.empty(context.get_tensor_shape(lTensorName[i]), dtype=trt.nptype(engine.get_tensor_dtype(lTensorName[i]))))
bufferD = []
for i in range(nIO):
bufferD.append(cudart.cudaMalloc(bufferH[i].nbytes)[1])
for i in range(nInput):
cudart.cudaMemcpy(bufferD[i], bufferH[i].ctypes.data, bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice)
for i in range(nIO):
context.set_tensor_address(lTensorName[i], int(bufferD[i]))
context.execute_async_v3(0)
for i in range(nInput, nIO):
cudart.cudaMemcpy(bufferH[i].ctypes.data, bufferD[i], bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost)
outputCPU = CuBLASGemmCPU(bufferH[:nInput], globalWeight)
"""
for i in range(nInput):
printArrayInformation(bufferH[i])
for i in range(nInput, nIO):
printArrayInformation(bufferH[i])
for i in range(nInput, nIO):
printArrayInformation(outputCPU[i - nInput])
"""
check(bufferH[nInput:][0], outputCPU[0], True)
    for d in bufferD:  # do not name the loop variable "b": that would shadow the global batch size used earlier in this function
        cudart.cudaFree(d)
if __name__ == "__main__":
os.system("rm -rf ./*.plan")
run() # build TensorRT engine and do inference
run() # load TensorRT engine and do inference
print("Test all finish!")
| trt-samples-for-hackathon-cn-master | cookbook/05-Plugin/UseCuBLAS/testCuBLASGemmPlugin.py |
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import ctypes
import os
import numpy as np
import tensorrt as trt
from cuda import cudart
soFile = "./AddScalarPlugin.so"
np.set_printoptions(precision=3, linewidth=200, suppress=True)
np.random.seed(31193)
cudart.cudaDeviceSynchronize()
def printArrayInformation(x, info="", n=5):
if 0 in x.shape:
print('%s:%s' % (info, str(x.shape)))
print()
return
print( '%s:%s,SumAbs=%.5e,Var=%.5f,Max=%.5f,Min=%.5f,SAD=%.5f'%( \
info,str(x.shape),np.sum(abs(x)),np.var(x),np.max(x),np.min(x),np.sum(np.abs(np.diff(x.reshape(-1)))) ))
print('\t', x.reshape(-1)[:n], x.reshape(-1)[-n:])
def check(a, b, weak=False, checkEpsilon=1e-5):
if weak:
a = a.astype(np.float32)
b = b.astype(np.float32)
res = np.all(np.abs(a - b) < checkEpsilon)
else:
res = np.all(a == b)
diff0 = np.max(np.abs(a - b))
diff1 = np.max(np.abs(a - b) / (np.abs(b) + checkEpsilon))
print("check:%s, absDiff=%f, relDiff=%f" % (res, diff0, diff1))
def addScalarCPU(inputH, scalar, version):
    # version "1" of the plugin adds scalar, version "2" adds scalar + 1 (hence the int(version) - 1 term)
    return [inputH[0] + scalar + int(version) - 1]
def getAddScalarPlugin(scalar, version):
for c in trt.get_plugin_registry().plugin_creator_list:
#print(c.name)
if c.name == "AddScalar" and c.plugin_version == version:
print("Find %s, %s" % (c.name, c.plugin_version))
parameterList = []
parameterList.append(trt.PluginField("scalar", np.float32(scalar), trt.PluginFieldType.FLOAT32))
return c.create_plugin(c.name, trt.PluginFieldCollection(parameterList))
return None
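def listAddScalarVersions():
    # Hedged helper (not in the original test, assumes the .so is already loaded):
    # list every registered version of the AddScalar creator, which is useful when
    # several plugin versions coexist as they do in this sample.
    return [c.plugin_version for c in trt.get_plugin_registry().plugin_creator_list if c.name == "AddScalar"]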
def run(shape, scalar, version):
testCase = "<shape=%s,scalar=%f, version=%s>" % (shape, scalar, version)
trtFile = "./model-Dim%s-v%s.plan" % (str(len(shape)), version)
print("Test %s" % testCase)
logger = trt.Logger(trt.Logger.ERROR)
trt.init_libnvinfer_plugins(logger, '')
ctypes.cdll.LoadLibrary(soFile)
if os.path.isfile(trtFile):
with open(trtFile, "rb") as f:
engine = trt.Runtime(logger).deserialize_cuda_engine(f.read())
if engine == None:
print("Failed loading engine!")
return
print("Succeeded loading engine!")
else:
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
profile = builder.create_optimization_profile()
config = builder.create_builder_config()
inputT0 = network.add_input("inputT0", trt.float32, [-1 for i in shape])
profile.set_shape(inputT0.name, [1 for i in shape], [8 for i in shape], [32 for i in shape])
config.add_optimization_profile(profile)
pluginLayer = network.add_plugin_v2([inputT0], getAddScalarPlugin(scalar, version))
network.mark_output(pluginLayer.get_output(0))
engineString = builder.build_serialized_network(network, config)
if engineString == None:
print("Failed building engine!")
return
print("Succeeded building engine!")
with open(trtFile, "wb") as f:
f.write(engineString)
engine = trt.Runtime(logger).deserialize_cuda_engine(engineString)
nIO = engine.num_io_tensors
lTensorName = [engine.get_tensor_name(i) for i in range(nIO)]
nInput = [engine.get_tensor_mode(lTensorName[i]) for i in range(nIO)].count(trt.TensorIOMode.INPUT)
context = engine.create_execution_context()
context.set_input_shape(lTensorName[0], shape)
#for i in range(nIO):
# print("[%2d]%s->" % (i, "Input " if i < nInput else "Output"), engine.get_tensor_dtype(lTensorName[i]), engine.get_tensor_shape(lTensorName[i]), context.get_tensor_shape(lTensorName[i]), lTensorName[i])
bufferH = []
bufferH.append(np.arange(np.prod(shape), dtype=np.float32).reshape(shape))
for i in range(nInput, nIO):
bufferH.append(np.empty(context.get_tensor_shape(lTensorName[i]), dtype=trt.nptype(engine.get_tensor_dtype(lTensorName[i]))))
bufferD = []
for i in range(nIO):
bufferD.append(cudart.cudaMalloc(bufferH[i].nbytes)[1])
bufferH[0] = np.arange(np.prod(shape), dtype=np.float32).reshape(shape)
for i in range(nInput):
cudart.cudaMemcpy(bufferD[i], bufferH[i].ctypes.data, bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice)
for i in range(nIO):
context.set_tensor_address(lTensorName[i], int(bufferD[i]))
context.execute_async_v3(0)
for i in range(nInput, nIO):
cudart.cudaMemcpy(bufferH[i].ctypes.data, bufferD[i], bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost)
outputCPU = addScalarCPU(bufferH[:nInput], scalar, version)
"""
for i in range(nInput):
printArrayInformation(bufferH[i])
for i in range(nInput, nIO):
printArrayInformation(bufferH[i])
for i in range(nInput, nIO):
printArrayInformation(outputCPU[i - nInput])
"""
check(bufferH[nInput:][0], outputCPU[0], True)
for b in bufferD:
cudart.cudaFree(b)
print("Test %s finish!\n" % testCase)
if __name__ == "__main__":
os.system("rm -rf ./*.plan")
run([32], 1, "1")
run([32, 32], 1, "1")
run([16, 16, 16], 1, "1")
run([8, 8, 8, 8], 1, "1")
run([32], 1, "2")
run([32, 32], 1, "2")
run([16, 16, 16], 1, "2")
run([8, 8, 8, 8], 1, "2")
print("Test all finish!")
| trt-samples-for-hackathon-cn-master | cookbook/05-Plugin/MultipleVersion/testAddScalarPlugin.py |
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import ctypes
import os
import numpy as np
import tensorrt as trt
from cuda import cudart
soFile = "./AddSubMulPlugin.so"
np.set_printoptions(precision=3, linewidth=200, suppress=True)
np.random.seed(31193)
cudart.cudaDeviceSynchronize()
def printArrayInformation(x, info="", n=5):
if 0 in x.shape:
print('%s:%s' % (info, str(x.shape)))
print()
return
print( '%s:%s,SumAbs=%.5e,Var=%.5f,Max=%.5f,Min=%.5f,SAD=%.5f'%( \
info,str(x.shape),np.sum(abs(x)),np.var(x),np.max(x),np.min(x),np.sum(np.abs(np.diff(x.reshape(-1)))) ))
print('\t', x.reshape(-1)[:n], x.reshape(-1)[-n:])
def check(a, b, weak=False, checkEpsilon=1e-5):
if weak:
a = a.astype(np.float32)
b = b.astype(np.float32)
res = np.all(np.abs(a - b) < checkEpsilon)
else:
res = np.all(a == b)
diff0 = np.max(np.abs(a - b))
diff1 = np.max(np.abs(a - b) / (np.abs(b) + checkEpsilon))
print("check:%s, absDiff=%f, relDiff=%f" % (res, diff0, diff1))
def AddSubMulCPU(inputList):
a = inputList[0]
b = inputList[1]
nBatch = a.shape[0]
nLengthA = a.shape[1]
nLengthB = b.shape[1]
nLength = min(nLengthA, nLengthB)
res0 = np.zeros([nBatch, nLengthA, nLengthB], dtype=np.float32)
for i in range(nBatch):
res0[i] = np.matmul(a[i].reshape(-1, 1), b[i].reshape(1, -1))
res1 = a[:, np.newaxis, :nLength] + b[:, np.newaxis, :nLength]
return [res0, res1]
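def demoAddSubMulShapes():
    # Hedged shape demo (not in the original test): for the unequal-length case
    # run([2, 8], [2, 24]) below, the CPU reference yields a [2, 8, 24] batched
    # outer product and a [2, 1, 8] elementwise sum over the common prefix.
    res0, res1 = AddSubMulCPU([np.ones([2, 8], dtype=np.float32), np.ones([2, 24], dtype=np.float32)])
    print(res0.shape, res1.shape)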
def getAddSubMulPlugin():
for c in trt.get_plugin_registry().plugin_creator_list:
#print(c.name)
if c.name == "AddSubMul":
parameterList = []
return c.create_plugin(c.name, trt.PluginFieldCollection(parameterList))
return None
def run(shapeA, shapeB):
testCase = "<shapeA=%s,shapeB=%s>" % (shapeA, shapeB)
trtFile = "./model.plan"
print("Test %s" % testCase)
logger = trt.Logger(trt.Logger.ERROR)
trt.init_libnvinfer_plugins(logger, '')
ctypes.cdll.LoadLibrary(soFile)
if os.path.isfile(trtFile):
with open(trtFile, "rb") as f:
engine = trt.Runtime(logger).deserialize_cuda_engine(f.read())
if engine == None:
print("Failed loading engine!")
return
print("Succeeded loading engine!")
else:
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
profile = builder.create_optimization_profile()
config = builder.create_builder_config()
config.set_flag(trt.BuilderFlag.FP16)
inputT0 = network.add_input("inputT0", trt.float32, [-1, -1])
profile.set_shape(inputT0.name, [1, 1], [4, 256], [16, 1024])
inputT1 = network.add_input("inputT1", trt.float32, [-1, -1])
profile.set_shape(inputT1.name, [1, 1], [4, 256], [16, 1024])
config.add_optimization_profile(profile)
pluginLayer = network.add_plugin_v2([inputT0, inputT1], getAddSubMulPlugin())
network.mark_output(pluginLayer.get_output(0))
network.mark_output(pluginLayer.get_output(1))
engineString = builder.build_serialized_network(network, config)
if engineString == None:
print("Failed building engine!")
return
print("Succeeded building engine!")
with open(trtFile, "wb") as f:
f.write(engineString)
engine = trt.Runtime(logger).deserialize_cuda_engine(engineString)
nIO = engine.num_io_tensors
lTensorName = [engine.get_tensor_name(i) for i in range(nIO)]
nInput = [engine.get_tensor_mode(lTensorName[i]) for i in range(nIO)].count(trt.TensorIOMode.INPUT)
context = engine.create_execution_context()
context.set_input_shape(lTensorName[0], shapeA)
context.set_input_shape(lTensorName[1], shapeB)
for i in range(nIO):
print("[%2d]%s->" % (i, "Input " if i < nInput else "Output"), engine.get_tensor_dtype(lTensorName[i]), engine.get_tensor_shape(lTensorName[i]), context.get_tensor_shape(lTensorName[i]), lTensorName[i])
bufferH = []
bufferH.append(np.arange(np.prod(shapeA), dtype=np.float32).reshape(shapeA) / 10000)
bufferH.append(np.arange(np.prod(shapeB), dtype=np.float32).reshape(shapeB) / 10000)
#bufferH.append(np.random.rand(np.prod(shapeA)).astype(np.float32).reshape(shapeA))
#bufferH.append(np.random.rand(np.prod(shapeB)).astype(np.float32).reshape(shapeB))
for i in range(nInput, nIO):
bufferH.append(np.empty(context.get_tensor_shape(lTensorName[i]), dtype=trt.nptype(engine.get_tensor_dtype(lTensorName[i]))))
bufferD = []
for i in range(nIO):
bufferD.append(cudart.cudaMalloc(bufferH[i].nbytes)[1])
for i in range(nInput):
cudart.cudaMemcpy(bufferD[i], bufferH[i].ctypes.data, bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice)
for i in range(nIO):
context.set_tensor_address(lTensorName[i], int(bufferD[i]))
context.execute_async_v3(0)
for i in range(nInput, nIO):
cudart.cudaMemcpy(bufferH[i].ctypes.data, bufferD[i], bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost)
outputCPU = AddSubMulCPU(bufferH[:nInput])
"""
for i in range(nInput):
printArrayInformation(bufferH[i], "Input")
for i in range(nInput, nIO):
printArrayInformation(bufferH[i], "GPU")
for i in range(nInput, nIO):
printArrayInformation(outputCPU[i - nInput], "CPU")
"""
for i in range(nIO - nInput):
check(bufferH[nInput:][i], outputCPU[i], True, checkEpsilon=1e-3)
for b in bufferD:
cudart.cudaFree(b)
print("Test %s finish!\n" % testCase)
if __name__ == "__main__":
os.system("rm -rf ./*.plan")
run([1, 8], [1, 8]) # small, equal
run([1, 256], [1, 256]) # medium, equal
    run([1, 500], [1, 500]) # large, equal, not a multiple of 256
run([2, 8], [2, 24]) # small, not equal
run([3, 256], [3, 300]) # medium, not equal
    run([4, 500], [4, 1000]) # large, not equal, not a multiple of 256
print("Test all finish!")
| trt-samples-for-hackathon-cn-master | cookbook/05-Plugin/MultiInputOutputAndWorkspace/testAddSubMulPlugin.py |
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import ctypes
import os
import numpy as np
import tensorrt as trt
from calibrator import MyCalibrator
from cuda import cudart
soFile = "./AddScalarPlugin.so"
cacheFile = "./int8.cache"
np.set_printoptions(precision=3, linewidth=200, suppress=True)
np.random.seed(31193)
cudart.cudaDeviceSynchronize()
def printArrayInformation(x, info="", n=5):
if 0 in x.shape:
print('%s:%s' % (info, str(x.shape)))
print()
return
print( '%s:%s,SumAbs=%.5e,Var=%.5f,Max=%.5f,Min=%.5f,SAD=%.5f'%( \
info,str(x.shape),np.sum(abs(x)),np.var(x),np.max(x),np.min(x),np.sum(np.abs(np.diff(x.reshape(-1)))) ))
print('\t', x.reshape(-1)[:n], x.reshape(-1)[-n:])
def check(a, b, weak=False, checkEpsilon=1e-5):
if weak:
a = a.astype(np.float32)
b = b.astype(np.float32)
res = np.all(np.abs(a - b) < checkEpsilon)
else:
res = np.all(a == b)
diff0 = np.max(np.abs(a - b))
diff1 = np.max(np.abs(a - b) / (np.abs(b) + checkEpsilon))
print("check:%s, absDiff=%f, relDiff=%f" % (res, diff0, diff1))
def addScalarCPU(inputH, scalar):
return [inputH[0] + scalar]
def getAddScalarPlugin(scalar):
for c in trt.get_plugin_registry().plugin_creator_list:
#print(c.name)
if c.name == "AddScalar":
parameterList = []
parameterList.append(trt.PluginField("scalar", np.float32(scalar), trt.PluginFieldType.FLOAT32))
return c.create_plugin(c.name, trt.PluginFieldCollection(parameterList))
return None
def run(shape, scalar):
testCase = "<shape=%s,scalar=%f>" % (shape, scalar)
trtFile = "./model-Dim%s.plan" % str(len(shape))
print("Test %s" % testCase)
logger = trt.Logger(trt.Logger.ERROR)
trt.init_libnvinfer_plugins(logger, '')
ctypes.cdll.LoadLibrary(soFile)
if os.path.isfile(trtFile):
with open(trtFile, "rb") as f:
engine = trt.Runtime(logger).deserialize_cuda_engine(f.read())
if engine == None:
print("Failed loading engine!")
return
print("Succeeded loading engine!")
else:
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
profile = builder.create_optimization_profile()
config = builder.create_builder_config()
config.set_flag(trt.BuilderFlag.INT8)
config.int8_calibrator = MyCalibrator(1, shape, cacheFile)
inputT0 = network.add_input("inputT0", trt.float32, [-1 for i in shape])
profile.set_shape(inputT0.name, [1 for i in shape], [8 for i in shape], [32 for i in shape])
config.add_optimization_profile(profile)
#inputT0.dynamic_range = [-100,100] # set dynamic range if calibrator is not used
pluginLayer = network.add_plugin_v2([inputT0], getAddScalarPlugin(scalar))
pluginLayer.precision = trt.int8
pluginLayer.set_output_type(0, trt.int8)
pluginLayer.get_output(0).dtype = trt.int8
#pluginLayer.get_output(0).dynamic_range = [-120,120]
        identityLayer = network.add_identity(pluginLayer.get_output(0))  # convert the output back to float32, otherwise the network output stays INT8
identityLayer.get_output(0).dtype = trt.float32
network.mark_output(identityLayer.get_output(0))
engineString = builder.build_serialized_network(network, config)
if engineString == None:
print("Failed building engine!")
return
print("Succeeded building engine!")
with open(trtFile, "wb") as f:
f.write(engineString)
engine = trt.Runtime(logger).deserialize_cuda_engine(engineString)
nIO = engine.num_io_tensors
lTensorName = [engine.get_tensor_name(i) for i in range(nIO)]
nInput = [engine.get_tensor_mode(lTensorName[i]) for i in range(nIO)].count(trt.TensorIOMode.INPUT)
context = engine.create_execution_context()
context.set_input_shape(lTensorName[0], shape)
#for i in range(nIO):
# print("[%2d]%s->" % (i, "Input " if i < nInput else "Output"), engine.get_tensor_dtype(lTensorName[i]), engine.get_tensor_shape(lTensorName[i]), context.get_tensor_shape(lTensorName[i]), lTensorName[i])
bufferH = []
bufferH.append(np.arange(np.prod(shape), dtype=np.float32).reshape(shape))
for i in range(nInput, nIO):
bufferH.append(np.empty(context.get_tensor_shape(lTensorName[i]), dtype=trt.nptype(engine.get_tensor_dtype(lTensorName[i]))))
bufferD = []
for i in range(nIO):
bufferD.append(cudart.cudaMalloc(bufferH[i].nbytes)[1])
for i in range(nInput):
cudart.cudaMemcpy(bufferD[i], bufferH[i].ctypes.data, bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice)
for i in range(nIO):
context.set_tensor_address(lTensorName[i], int(bufferD[i]))
context.execute_async_v3(0)
for i in range(nInput, nIO):
cudart.cudaMemcpy(bufferH[i].ctypes.data, bufferD[i], bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost)
outputCPU = addScalarCPU(bufferH[:nInput], scalar)
"""
for i in range(nInput):
printArrayInformation(bufferH[i])
for i in range(nInput, nIO):
printArrayInformation(bufferH[i])
for i in range(nInput, nIO):
printArrayInformation(outputCPU[i - nInput])
"""
check(bufferH[nInput:][0], outputCPU[0], True)
for b in bufferD:
cudart.cudaFree(b)
print("Test %s finish!\n" % testCase)
if __name__ == "__main__":
os.system("rm -rf ./*.plan ./*.cache")
run([32], 0.1)
    os.system("rm -rf ./*.plan ./*.cache") # cache files cannot be shared among these engines because their input data ranges differ
run([32, 32], 0.1)
os.system("rm -rf ./*.plan ./*.cache")
    run([16, 16, 16], 0.1) # the CHW4 format requires an input tensor with at least 4 dimensions
os.system("rm -rf ./*.plan ./*.cache")
run([8, 8, 8, 8], 0.1)
print("Test all finish!")
| trt-samples-for-hackathon-cn-master | cookbook/05-Plugin/UseINT8-PTQ/testAddScalarPlugin.py |
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import numpy as np
import tensorrt as trt
from cuda import cudart
class MyCalibrator(trt.IInt8EntropyCalibrator2):
def __init__(self, nCalibration, inputShape, cacheFile):
trt.IInt8EntropyCalibrator2.__init__(self)
self.nCalibration = nCalibration
self.shape = inputShape
        self.bufferSize = trt.volume(inputShape) * trt.float32.itemsize
        self.cacheFile = cacheFile
        _, self.dIn = cudart.cudaMalloc(self.bufferSize)
self.count = 0
def __del__(self):
cudart.cudaFree(self.dIn)
def get_batch_size(self): # necessary API
return self.shape[0]
def get_batch(self, nameList=None, inputNodeName=None): # necessary API
if self.count < self.nCalibration:
self.count += 1
data = np.random.rand(np.prod(self.shape)).astype(np.float32).reshape(*self.shape)
data = data * np.prod(self.shape) * 2 - np.prod(self.shape)
data = np.ascontiguousarray(data)
            cudart.cudaMemcpy(self.dIn, data.ctypes.data, self.bufferSize, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice)
return [int(self.dIn)]
else:
return None
def read_calibration_cache(self): # necessary API
if os.path.exists(self.cacheFile):
print("Succeed finding cahce file: %s" % (self.cacheFile))
with open(self.cacheFile, "rb") as f:
cache = f.read()
return cache
else:
print("Failed finding int8 cache!")
return
def write_calibration_cache(self, cache): # necessary API
with open(self.cacheFile, "wb") as f:
f.write(cache)
print("Succeed saving int8 cache!")
return
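def attachInt8Calibrator(config, nCalibration, inputShape, cacheFile="./int8.cache"):
    # Hedged usage sketch (not part of the original file): enable INT8 on a builder
    # config and attach this calibrator, mirroring how testAddScalarPlugin.py uses it.
    config.set_flag(trt.BuilderFlag.INT8)
    config.int8_calibrator = MyCalibrator(nCalibration, inputShape, cacheFile)
    return config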
if __name__ == "__main__":
cudart.cudaDeviceSynchronize()
m = MyCalibrator(5, (1, 1, 28, 28), "./int8.cache")
m.get_batch("FakeNameList")
m.get_batch("FakeNameList")
m.get_batch("FakeNameList")
m.get_batch("FakeNameList")
m.get_batch("FakeNameList")
| trt-samples-for-hackathon-cn-master | cookbook/05-Plugin/UseINT8-PTQ/calibrator.py |
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
import numpy as np
import onnx
import onnx_graphsurgeon as gs
tensor0 = gs.Variable("tensor0", np.float32, ["B", 1, 64, 64])
tensor1 = gs.Variable("tensor1", np.float32, ["B", 1, 64, 64])
node0 = gs.Node("AddScalar", "myAddScalar", inputs=[tensor0], outputs=[tensor1], attrs=OrderedDict([("scalar", np.array([10], dtype=np.float32))]))
graph = gs.Graph(nodes=[node0], inputs=[tensor0], outputs=[tensor1])
graph.cleanup().toposort()
onnx.save(gs.export_onnx(graph), "./model.onnx")
np.random.seed(31193)
dd = {}
dd["inferenceData"] = np.random.rand(4 * 1 * 64 * 64).astype(np.float32).reshape([4, 1, 64, 64])
np.savez("data.npz", **dd)
| trt-samples-for-hackathon-cn-master | cookbook/05-Plugin/C++-PluginOutsideEngine/getOnnxModelAndData.py |
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import ctypes
import os
from glob import glob
import numpy as np
import tensorrt as trt
from cuda import cudart
soFile = "./AddScalarPlugin.so"
np.set_printoptions(precision=3, linewidth=200, suppress=True)
np.random.seed(31193)
cudart.cudaDeviceSynchronize()
def getAddScalarPlugin(scalar):
for c in trt.get_plugin_registry().plugin_creator_list:
#print(c.name)
if c.name == "AddScalar":
parameterList = []
parameterList.append(trt.PluginField("scalar", np.float32(scalar), trt.PluginFieldType.FLOAT32))
return c.create_plugin(c.name, trt.PluginFieldCollection(parameterList))
return None
os.chdir("/w/gitlab/tensorrt-cookbook/05-Plugin/API/")  # absolute path from the original development environment; point it at your local copy of this directory
# Load default plugin creators
logger = trt.Logger(trt.Logger.ERROR)
trt.init_libnvinfer_plugins(logger, '')
pluginRegistry = trt.get_plugin_registry()
print("Count of default plugin creators = %d" % len(pluginRegistry.plugin_creator_list))
# Attributions of Plugin Registry
print("pluginRegistry.error_recorder =", pluginRegistry.error_recorder) # ErrorRecorder can be set into EngineInspector, usage of ErrorRecorder refer to 02-API/ErrorRecorder
pluginRegistry.parent_search_enabled = True # whether to also search the parent plugin registry for creators, default value is True
# Load local plugin creators
for soFile in glob("./*.so"):
if True: # common method
ctypes.cdll.LoadLibrary(soFile)
    else: # use the TensorRT API instead; it still has some issues, so avoid it for now
handle = pluginRegistry.load_library(soFile)
        #pluginRegistry.deregister_library(handle) # deregister the library
print("Count of total plugin creators = %d" % len(pluginRegistry.plugin_creator_list)) # one more plugin creator "AddScalar" added
#pluginRegistry.deregister_library(?) # deregister the library
# print information of all plugin creators
print("TensorRTVersion Namespace PluginVersion Name")
for creator in pluginRegistry.plugin_creator_list:
print("%4s %s %s %s" % (creator.tensorrt_version, ("\"\"" if creator.plugin_namespace == "" else creator.plugin_namespace), creator.plugin_version, creator.name))
for creator in pluginRegistry.plugin_creator_list:
    if creator.name == "AddScalar" and creator.plugin_version == "1": # check both name and version when selecting a plugin
# print the necessary parameters for creating the plugin
for i, pluginField in enumerate(creator.field_names):
print("%2d->%s, %s, %s, %s" % (i, pluginField.name, pluginField.type, pluginField.size, pluginField.data))
        # We can register and deregister a plugin creator in the plugin registry, but it is not required
        #pluginRegistry.deregister_creator(creator) # deregister the plugin creator
        #pluginRegistry.register_creator(creator) # register the plugin creator again
# feed the PluginCreator with parameters
pluginFieldCollection = trt.PluginFieldCollection()
pluginField = trt.PluginField("scalar", np.float32(1.0), trt.PluginFieldType.FLOAT32)
# tensorrt.PluginFieldType: FLOAT16, FLOAT32, FLOAT64, INT8, INT16, INT32, CHAR, DIMS, UNKNOWN
print(pluginField.name, pluginField.type, pluginField.size, pluginField.data)
pluginFieldCollection.append(pluginField) # use like a list
#pluginFieldCollection.insert(1,pluginField)
#pluginFieldCollection.extend([pluginField])
#pluginFieldCollection.clear()
#pluginFieldCollection.pop(1)
plugin = creator.create_plugin(creator.name, pluginFieldCollection) # create a plugin by parameters
        plugin.__class__ = trt.IPluginV2Ext # cast the plugin from IPluginV2 to IPluginV2Ext; there is still no IPluginV2Dynamic class in the Python API
        # methods that do not work in the Python API
# plugin.supports_format(trt.float32, None) # nvinfer1::TensorFormat::kLINEAR
#plugin.attach_to_context(None, None)
#plugin.detach_from_context()
#plugin.configure_with_format([[2]], [[2]], trt.float32, None, 1) # nvinfer1::TensorFormat::kLINEAR
#plugin.configure_plugin([[2]],[[2]],[trt.float32],[trt.float32],[False],[False], None, 1) # nvinfer1::TensorFormat::kLINEAR
#plugin.execute_async(1, [None], [None], None, 0) # address of input / output / workspace memory
#plugin.initialize()
#plugin.terminate()
#plugin.destroy()
        # methods that work (but are of little use) in the Python API
print("plugin.plugin_type =", plugin.plugin_type)
print("plugin.plugin_namespace =", plugin.plugin_namespace)
print("plugin.plugin_version =", plugin.plugin_version)
print("plugin.num_outputs =", plugin.num_outputs)
print("plugin.serialization_size =", plugin.serialization_size)
print("plugin.tensorrt_version =", plugin.tensorrt_version)
print("plugin.clone() =", plugin.clone())
print("plugin.get_output_data_type(0, [trt.float32]) =", plugin.get_output_data_type(0, [trt.float32]))
print("plugin.get_output_shape(0, [trt.Dims([2])])) =", plugin.get_output_shape(0, [trt.Dims([2])])) # output is always ((0))?
print("plugin.get_workspace_size(1) =", plugin.get_workspace_size(1)) # output is always 0?
pluginString = plugin.serialize()
        plugin = creator.deserialize_plugin(creator.name, pluginString) # recreate the plugin from the serialized buffer
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
profile = builder.create_optimization_profile()
config = builder.create_builder_config()
inputT0 = network.add_input("inputT0", trt.float32, [-1])
profile.set_shape(inputT0.name, [1], [2], [4])
config.add_optimization_profile(profile)
pluginLayer = network.add_plugin_v2([inputT0], plugin)
print(pluginLayer.plugin) # other members and methods refer to 02-API/Layer
print("Finish")
# methods that do not work
#trt.get_builder_plugin_registry(None) # nvinfer1::EngineCapability
| trt-samples-for-hackathon-cn-master | cookbook/05-Plugin/API/main.py |
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import ctypes
import os
import numpy as np
import tensorrt as trt
from cuda import cudart
soFile = "./AddScalarPlugin.so"
np.set_printoptions(precision=3, linewidth=200, suppress=True)
np.random.seed(31193)
cudart.cudaDeviceSynchronize()
def printArrayInformation(x, info="", n=5):
if 0 in x.shape:
print('%s:%s' % (info, str(x.shape)))
print()
return
print( '%s:%s,SumAbs=%.5e,Var=%.5f,Max=%.5f,Min=%.5f,SAD=%.5f'%( \
info,str(x.shape),np.sum(abs(x)),np.var(x),np.max(x),np.min(x),np.sum(np.abs(np.diff(x.reshape(-1)))) ))
print('\t', x.reshape(-1)[:n], x.reshape(-1)[-n:])
def check(a, b, weak=False, checkEpsilon=1e-5):
if weak:
a = a.astype(np.float32)
b = b.astype(np.float32)
res = np.all(np.abs(a - b) < checkEpsilon)
else:
res = np.all(a == b)
diff0 = np.max(np.abs(a - b))
diff1 = np.max(np.abs(a - b) / (np.abs(b) + checkEpsilon))
print("check:%s, absDiff=%f, relDiff=%f" % (res, diff0, diff1))
def addScalarCPU(inputH, scalar):
return [inputH[0] + scalar]
def getAddScalarPlugin(scalar):
for c in trt.get_plugin_registry().plugin_creator_list:
#print(c.name)
if c.name == "AddScalar":
parameterList = []
parameterList.append(trt.PluginField("scalar", np.float32(scalar), trt.PluginFieldType.FLOAT32))
return c.create_plugin(c.name, trt.PluginFieldCollection(parameterList))
return None
def run(shape, scalar):
testCase = "<shape=%s,scalar=%f>" % (shape, scalar)
trtFile = "./model-Dim%s.plan" % str(len(shape))
print("Test %s" % testCase)
logger = trt.Logger(trt.Logger.ERROR)
trt.init_libnvinfer_plugins(logger, '')
ctypes.cdll.LoadLibrary(soFile)
if os.path.isfile(trtFile):
with open(trtFile, "rb") as f:
engine = trt.Runtime(logger).deserialize_cuda_engine(f.read())
if engine == None:
print("Failed loading engine!")
return
print("Succeeded loading engine!")
else:
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
profile = builder.create_optimization_profile()
config = builder.create_builder_config()
config.set_flag(trt.BuilderFlag.FP16)
inputT0 = network.add_input("inputT0", trt.float32, [-1 for i in shape])
profile.set_shape(inputT0.name, [1 for i in shape], [8 for i in shape], [32 for i in shape])
config.add_optimization_profile(profile)
pluginLayer = network.add_plugin_v2([inputT0], getAddScalarPlugin(scalar))
network.mark_output(pluginLayer.get_output(0))
engineString = builder.build_serialized_network(network, config)
if engineString == None:
print("Failed building engine!")
return
print("Succeeded building engine!")
with open(trtFile, "wb") as f:
f.write(engineString)
engine = trt.Runtime(logger).deserialize_cuda_engine(engineString)
nIO = engine.num_io_tensors
lTensorName = [engine.get_tensor_name(i) for i in range(nIO)]
nInput = [engine.get_tensor_mode(lTensorName[i]) for i in range(nIO)].count(trt.TensorIOMode.INPUT)
context = engine.create_execution_context()
context.set_input_shape(lTensorName[0], shape)
#for i in range(nIO):
# print("[%2d]%s->" % (i, "Input " if i < nInput else "Output"), engine.get_tensor_dtype(lTensorName[i]), engine.get_tensor_shape(lTensorName[i]), context.get_tensor_shape(lTensorName[i]), lTensorName[i])
bufferH = []
bufferH.append(np.arange(np.prod(shape), dtype=np.float32).reshape(shape))
for i in range(nInput, nIO):
bufferH.append(np.empty(context.get_tensor_shape(lTensorName[i]), dtype=trt.nptype(engine.get_tensor_dtype(lTensorName[i]))))
bufferD = []
for i in range(nIO):
bufferD.append(cudart.cudaMalloc(bufferH[i].nbytes)[1])
for i in range(nInput):
cudart.cudaMemcpy(bufferD[i], bufferH[i].ctypes.data, bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice)
for i in range(nIO):
context.set_tensor_address(lTensorName[i], int(bufferD[i]))
context.execute_async_v3(0)
for i in range(nInput, nIO):
cudart.cudaMemcpy(bufferH[i].ctypes.data, bufferD[i], bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost)
outputCPU = addScalarCPU(bufferH[:nInput], scalar)
"""
for i in range(nInput):
printArrayInformation(bufferH[i])
for i in range(nInput, nIO):
printArrayInformation(bufferH[i])
for i in range(nInput, nIO):
printArrayInformation(outputCPU[i - nInput])
"""
check(bufferH[nInput:][0], outputCPU[0], True)
for b in bufferD:
cudart.cudaFree(b)
print("Test %s finish!\n" % testCase)
if __name__ == "__main__":
os.system("rm -rf ./*.plan")
    run([32], 1)  # the first four runs build the TensorRT engines and do inference
    run([32, 32], 1)
    run([16, 16, 16], 1)
    run([8, 8, 8, 8], 1)
    run([32], 1)  # the second four runs load the engines from the .plan files and do inference
    run([32, 32], 1)
    run([16, 16, 16], 1)
    run([8, 8, 8, 8], 1)
print("Test all finish!")
| trt-samples-for-hackathon-cn-master | cookbook/05-Plugin/UseFP16/testAddScalarPlugin.py |
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import ctypes
import os
import numpy as np
import tensorrt as trt
from cuda import cudart
soFile = "./AddScalarPlugin.so"
np.set_printoptions(precision=3, linewidth=200, suppress=True)
np.random.seed(31193)
cudart.cudaDeviceSynchronize()
def printArrayInformation(x, info="", n=5):
if 0 in x.shape:
print('%s:%s' % (info, str(x.shape)))
return
x = x.astype(np.float32)
print( '%s:%s,SumAbs=%.5e,Var=%.5f,Max=%.5f,Min=%.5f,SAD=%.5f'%( \
info,str(x.shape),np.sum(abs(x)),np.var(x),np.max(x),np.min(x),np.sum(np.abs(np.diff(x.reshape(-1)))) ))
print('\t', x.reshape(-1)[:n], x.reshape(-1)[-n:])
return
def check(a, b, weak=False, checkEpsilon=1e-5, info=""):
if a.shape != b.shape:
print("Error shape: A%s : B%s" % (str(a.shape), str(b.shape)))
return
if weak:
a = a.astype(np.float32)
b = b.astype(np.float32)
res = np.all(np.abs(a - b) < checkEpsilon)
else:
res = np.all(a == b)
maxAbsDiff = np.max(np.abs(a - b))
meanAbsDiff = np.mean(np.abs(a - b))
maxRelDiff = np.max(np.abs(a - b) / (np.abs(b) + checkEpsilon))
meanRelDiff = np.mean(np.abs(a - b) / (np.abs(b) + checkEpsilon))
res = "%s:%s,MaxAbsDiff=%.2e,MeanAbsDiff=%.2e,MaxRelDiff=%.2e,MeanRelDiff=%.2e," % (info, res, maxAbsDiff, meanAbsDiff, maxRelDiff, meanRelDiff)
index = np.argmax(np.abs(a - b))
    valueA, valueB = a.flatten()[index], b.flatten()[index]
shape = a.shape
indexD = []
for i in range(len(shape) - 1, -1, -1):
x = index % shape[i]
indexD = [x] + indexD
index = index // shape[i]
    res += "WorstPair=(%f:%f)at%s" % (valueA, valueB, str(indexD))
print(res)
return
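def demoUnravel():
    # Hedged mini-demo (not in the original test): the manual unravel loop in check()
    # above matches numpy's built-in np.unravel_index for a flat argmax position.
    print(np.unravel_index(17, (2, 3, 4)))  # prints (1, 1, 1), same result as the loop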
def addScalarCPU(inputH, scalar):
return [inputH[0] + scalar]
def getAddScalarPlugin(scalar):
for c in trt.get_plugin_registry().plugin_creator_list:
#print(c.name)
if c.name == "AddScalar":
parameterList = []
parameterList.append(trt.PluginField("scalar", np.float32(scalar), trt.PluginFieldType.FLOAT32))
return c.create_plugin(c.name, trt.PluginFieldCollection(parameterList))
return None
def run(shape, scalar):
testCase = "<shape=%s,scalar=%f>" % (shape, scalar)
trtFile = "./model-Dim%s.plan" % str(len(shape))
print("Test %s" % testCase)
logger = trt.Logger(trt.Logger.ERROR)
trt.init_libnvinfer_plugins(logger, '')
ctypes.cdll.LoadLibrary(soFile)
if os.path.isfile(trtFile):
with open(trtFile, "rb") as f:
engine = trt.Runtime(logger).deserialize_cuda_engine(f.read())
if engine == None:
print("Failed loading engine!")
return
print("Succeeded loading engine!")
else:
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
profile = builder.create_optimization_profile()
config = builder.create_builder_config()
inputT0 = network.add_input("inputT0", trt.float32, [-1 for i in shape])
profile.set_shape(inputT0.name, [1 for i in shape], [8 for i in shape], [32 for i in shape])
config.add_optimization_profile(profile)
pluginLayer = network.add_plugin_v2([inputT0], getAddScalarPlugin(scalar))
network.mark_output(pluginLayer.get_output(0))
engineString = builder.build_serialized_network(network, config)
if engineString == None:
print("Failed building engine!")
return
print("Succeeded building engine!")
with open(trtFile, "wb") as f:
f.write(engineString)
engine = trt.Runtime(logger).deserialize_cuda_engine(engineString)
nIO = engine.num_io_tensors
lTensorName = [engine.get_tensor_name(i) for i in range(nIO)]
nInput = [engine.get_tensor_mode(lTensorName[i]) for i in range(nIO)].count(trt.TensorIOMode.INPUT)
context = engine.create_execution_context()
context.set_input_shape(lTensorName[0], shape)
#for i in range(nIO):
# print("[%2d]%s->" % (i, "Input " if i < nInput else "Output"), engine.get_tensor_dtype(lTensorName[i]), engine.get_tensor_shape(lTensorName[i]), context.get_tensor_shape(lTensorName[i]), lTensorName[i])
bufferH = []
bufferH.append(np.arange(np.prod(shape), dtype=np.float32).reshape(shape))
for i in range(nInput, nIO):
bufferH.append(np.empty(context.get_tensor_shape(lTensorName[i]), dtype=trt.nptype(engine.get_tensor_dtype(lTensorName[i]))))
bufferD = []
for i in range(nIO):
bufferD.append(cudart.cudaMalloc(bufferH[i].nbytes)[1])
for i in range(nInput):
cudart.cudaMemcpy(bufferD[i], bufferH[i].ctypes.data, bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice)
for i in range(nIO):
context.set_tensor_address(lTensorName[i], int(bufferD[i]))
context.execute_async_v3(0)
for i in range(nInput, nIO):
cudart.cudaMemcpy(bufferH[i].ctypes.data, bufferD[i], bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost)
outputCPU = addScalarCPU(bufferH[:nInput], scalar)
"""
for i in range(nInput):
printArrayInformation(bufferH[i])
for i in range(nInput, nIO):
printArrayInformation(bufferH[i])
for i in range(nInput, nIO):
printArrayInformation(outputCPU[i - nInput])
"""
check(bufferH[nInput:][0], outputCPU[0], True)
for b in bufferD:
cudart.cudaFree(b)
print("Test %s finish!\n" % testCase)
if __name__ == "__main__":
os.system("rm -rf ./*.plan")
    run([32], 1)  # the first four runs build the TensorRT engines and do inference
    run([32, 32], 1)
    run([16, 16, 16], 1)
    run([8, 8, 8, 8], 1)
    run([32], 1)  # the second four runs load the engines from the .plan files and do inference
    run([32, 32], 1)
    run([16, 16, 16], 1)
    run([8, 8, 8, 8], 1)
print("Test all finish!")
| trt-samples-for-hackathon-cn-master | cookbook/05-Plugin/BasicExample/testAddScalarPlugin.py |
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# For TensorRT < 8.5, using the deprecated Binding API
import ctypes
import os
import numpy as np
import tensorrt as trt
from cuda import cudart
soFile = "./AddScalarPlugin.so"
nProfile = 2
np.random.seed(31193)
np.set_printoptions(precision=3, linewidth=200, suppress=True)
cudart.cudaDeviceSynchronize()
def printArrayInformation(x, info="", n=5):
if 0 in x.shape:
print('%s:%s' % (info, str(x.shape)))
print()
return
print( '%s:%s,SumAbs=%.5e,Var=%.5f,Max=%.5f,Min=%.5f,SAD=%.5f'%( \
info,str(x.shape),np.sum(abs(x)),np.var(x),np.max(x),np.min(x),np.sum(np.abs(np.diff(x.reshape(-1)))) ))
print('\t', x.reshape(-1)[:n], x.reshape(-1)[-n:])
def check(a, b, weak=False, checkEpsilon=1e-5):
if weak:
a = a.astype(np.float32)
b = b.astype(np.float32)
res = np.all(np.abs(a - b) < checkEpsilon)
else:
res = np.all(a == b)
diff0 = np.max(np.abs(a - b))
diff1 = np.max(np.abs(a - b) / (np.abs(b) + checkEpsilon))
print("check:%s, absDiff=%f, relDiff=%f" % (res, diff0, diff1))
def addScalarCPU(inputH, scalar):
return [inputH[0] + scalar]
def getAddScalarPlugin(scalar):
for c in trt.get_plugin_registry().plugin_creator_list:
#print(c.name)
if c.name == "AddScalar":
parameterList = []
parameterList.append(trt.PluginField("scalar", np.float32(scalar), trt.PluginFieldType.FLOAT32))
return c.create_plugin(c.name, trt.PluginFieldCollection(parameterList))
return None
def run(bFP16):
shapeSmall = [2, 4, 4, 4]
scalar = 1
testCase = "<FP16=%s>" % bFP16
trtFile = "./model-FP%s.plan" % ("16" if bFP16 else "32")
print("Test %s" % testCase)
logger = trt.Logger(trt.Logger.ERROR)
trt.init_libnvinfer_plugins(logger, '')
ctypes.cdll.LoadLibrary(soFile)
if os.path.isfile(trtFile):
with open(trtFile, "rb") as f:
engine = trt.Runtime(logger).deserialize_cuda_engine(f.read())
if engine == None:
print("Failed loading engine!")
return
print("Succeeded loading engine!")
else:
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
profileList = [builder.create_optimization_profile() for index in range(nProfile)]
config = builder.create_builder_config()
if bFP16:
config.set_flag(trt.BuilderFlag.FP16)
inputT0 = network.add_input("inputT0", trt.float32, [-1, -1, -1, -1])
for profile in profileList:
profile.set_shape(inputT0.name, shapeSmall, shapeSmall, (np.array(shapeSmall) * 2).tolist())
config.add_optimization_profile(profile)
pluginLayer = network.add_plugin_v2([inputT0], getAddScalarPlugin(scalar))
network.mark_output(pluginLayer.get_output(0))
engineString = builder.build_serialized_network(network, config)
if engineString == None:
print("Failed building engine!")
return
print("Succeeded building engine!")
with open(trtFile, "wb") as f:
f.write(engineString)
engine = trt.Runtime(logger).deserialize_cuda_engine(engineString)
nIO = engine.num_bindings
nInput = np.sum([engine.binding_is_input(i) for i in range(engine.num_bindings)])
nOutput = nIO - nInput
nIO = nIO // nProfile
nInput = nInput // nProfile
nOutput = nOutput // nProfile
cudaStreamList = [int(cudart.cudaStreamCreate()[1]) for i in range(nProfile)]
context = engine.create_execution_context()
    bufferH = [] # use separate buffers for each optimization profile
for index in range(nProfile):
context.set_optimization_profile_async(index, cudaStreamList[index])
        bindingPad = nIO * index # skip the bindings occupied by previous optimization profiles
shape = (np.array(shapeSmall) * (index + 1)).tolist() # use different shapes
context.set_binding_shape(bindingPad + 0, shape)
for i in range(nInput):
bufferH.append(np.arange(np.prod(shape)).astype(np.float32).reshape(shape))
for i in range(nInput, nIO):
bufferH.append(np.empty(context.get_binding_shape(bindingPad + i), dtype=trt.nptype(engine.get_binding_dtype(bindingPad + i))))
bufferD = []
for i in range(len(bufferH)):
bufferD.append(cudart.cudaMalloc(bufferH[i].nbytes)[1])
for index in range(nProfile):
print("Use Profile %d" % index)
context.set_optimization_profile_async(index, cudaStreamList[index]) # set shape again after changing the optimization profile
bindingPad = nIO * index
shape = (np.array(shapeSmall) * (index + 1)).tolist()
context.set_binding_shape(bindingPad + 0, shape)
for i in range(nIO * nProfile):
print("[%2d]%s->" % (i, "Input " if i < nInput else "Output"), engine.get_binding_dtype(i), engine.get_binding_shape(i), context.get_binding_shape(i), engine.get_binding_name(i))
for i in range(nInput):
cudart.cudaMemcpyAsync(bufferD[bindingPad + i], bufferH[bindingPad + i].ctypes.data, bufferH[bindingPad + i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice, cudaStreamList[index])
context.execute_async_v2(bufferD, cudaStreamList[index])
for i in range(nInput, nIO):
cudart.cudaMemcpyAsync(bufferH[bindingPad + i].ctypes.data, bufferD[bindingPad + i], bufferH[bindingPad + i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost, cudaStreamList[index])
cudart.cudaStreamSynchronize(cudaStreamList[index])
for index in range(nProfile):
bindingPad = nIO * index
print("check OptimizationProfile %d:" % index)
check(bufferH[bindingPad + 1], bufferH[bindingPad + 0] + 1, True)
for b in bufferD:
cudart.cudaFree(b)
if __name__ == "__main__":
os.system("rm -rf ./*.plan")
run(False)
run(False)
run(True)
run(True)
print("Test all finish!")
| trt-samples-for-hackathon-cn-master | cookbook/05-Plugin/PluginProcess/testAddScalarPlugin-BindingAPI.py |
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import ctypes
import os
import numpy as np
import tensorrt as trt
from cuda import cudart
soFile = "./AddScalarPlugin.so"
nProfile = 1
np.random.seed(31193)
np.set_printoptions(precision=3, linewidth=200, suppress=True)
cudart.cudaDeviceSynchronize()
def printArrayInformation(x, info="", n=5):
if 0 in x.shape:
print('%s:%s' % (info, str(x.shape)))
print()
return
print( '%s:%s,SumAbs=%.5e,Var=%.5f,Max=%.5f,Min=%.5f,SAD=%.5f'%( \
info,str(x.shape),np.sum(abs(x)),np.var(x),np.max(x),np.min(x),np.sum(np.abs(np.diff(x.reshape(-1)))) ))
print('\t', x.reshape(-1)[:n], x.reshape(-1)[-n:])
def check(a, b, weak=False, checkEpsilon=1e-5):
if weak:
a = a.astype(np.float32)
b = b.astype(np.float32)
res = np.all(np.abs(a - b) < checkEpsilon)
else:
res = np.all(a == b)
diff0 = np.max(np.abs(a - b))
diff1 = np.max(np.abs(a - b) / (np.abs(b) + checkEpsilon))
print("check:%s, absDiff=%f, relDiff=%f" % (res, diff0, diff1))
def addScalarCPU(inputH, scalar):
return [inputH[0] + scalar]
def getAddScalarPlugin(scalar):
for c in trt.get_plugin_registry().plugin_creator_list:
#print(c.name)
if c.name == "AddScalar":
parameterList = []
parameterList.append(trt.PluginField("scalar", np.float32(scalar), trt.PluginFieldType.FLOAT32))
return c.create_plugin(c.name, trt.PluginFieldCollection(parameterList))
return None
def run(bFP16):
shapeSmall = [2, 4, 4, 4]
scalar = 1
testCase = "<FP16=%s>" % bFP16
trtFile = "./model-FP%s.plan" % ("16" if bFP16 else "32")
print("Test %s" % testCase)
logger = trt.Logger(trt.Logger.ERROR)
trt.init_libnvinfer_plugins(logger, '')
ctypes.cdll.LoadLibrary(soFile)
if os.path.isfile(trtFile):
with open(trtFile, "rb") as f:
engine = trt.Runtime(logger).deserialize_cuda_engine(f.read())
        if engine is None:
print("Failed loading engine!")
return
print("Succeeded loading engine!")
else:
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
profileList = [builder.create_optimization_profile() for index in range(nProfile)]
config = builder.create_builder_config()
if bFP16:
config.set_flag(trt.BuilderFlag.FP16)
inputT0 = network.add_input("inputT0", trt.float32, [-1, -1, -1, -1])
for profile in profileList:
profile.set_shape(inputT0.name, shapeSmall, shapeSmall, (np.array(shapeSmall) * 2).tolist())
config.add_optimization_profile(profile)
pluginLayer = network.add_plugin_v2([inputT0], getAddScalarPlugin(scalar))
network.mark_output(pluginLayer.get_output(0))
engineString = builder.build_serialized_network(network, config)
        if engineString is None:
print("Failed building engine!")
return
print("Succeeded building engine!")
with open(trtFile, "wb") as f:
f.write(engineString)
engine = trt.Runtime(logger).deserialize_cuda_engine(engineString)
nIO = engine.num_io_tensors
lTensorName = [engine.get_tensor_name(i) for i in range(nIO)]
nInput = [engine.get_tensor_mode(lTensorName[i]) for i in range(nIO)].count(trt.TensorIOMode.INPUT)
cudaStreamList = [int(cudart.cudaStreamCreate()[1]) for i in range(nProfile)]
context = engine.create_execution_context()
    bufferH = [] # use separate buffers for each Optimization Profile
for index in range(nProfile):
context.set_optimization_profile_async(index, cudaStreamList[index])
shape = (np.array(shapeSmall) * (index + 1)).tolist() # use different shapes
context.set_input_shape(lTensorName[0], shape)
for i in range(nInput):
bufferH.append(np.arange(np.prod(shape)).astype(np.float32).reshape(shape))
for i in range(nInput, nIO):
bufferH.append(np.empty(context.get_tensor_shape(lTensorName[i]), dtype=trt.nptype(engine.get_tensor_dtype(lTensorName[i]))))
bufferD = []
for i in range(len(bufferH)):
bufferD.append(cudart.cudaMalloc(bufferH[i].nbytes)[1])
for index in range(nProfile):
print("Use Profile %d" % index)
context.set_optimization_profile_async(index, cudaStreamList[index]) # set shape again after changing the optimization profile
bindingPad = nIO * index
shape = (np.array(shapeSmall) * (index + 1)).tolist()
context.set_input_shape(lTensorName[0], shape)
for i in range(nIO):
print("[%2d]%s->" % (i, "Input " if i < nInput else "Output"), engine.get_tensor_dtype(lTensorName[i]), engine.get_tensor_shape(lTensorName[i]), context.get_tensor_shape(lTensorName[i]), lTensorName[i])
bindingPad = nIO * index
for i in range(nInput):
cudart.cudaMemcpyAsync(bufferD[bindingPad + i], bufferH[bindingPad + i].ctypes.data, bufferH[bindingPad + i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice, cudaStreamList[index])
for i in range(nIO):
context.set_tensor_address(lTensorName[i], int(bufferD[bindingPad + i]))
context.execute_async_v3(cudaStreamList[index])
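        # note: execute_async_v3 is launched with the tensor addresses set above; no bindings list is passed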
for i in range(nInput, nIO):
cudart.cudaMemcpyAsync(bufferH[bindingPad + i].ctypes.data, bufferD[bindingPad + i], bufferH[bindingPad + i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost, cudaStreamList[index])
cudart.cudaStreamSynchronize(cudaStreamList[index])
for index in range(nProfile):
bindingPad = nIO * index
print("check OptimizationProfile %d:" % index)
check(bufferH[bindingPad + 1], bufferH[bindingPad + 0] + 1, True)
for b in bufferD:
cudart.cudaFree(b)
if __name__ == "__main__":
os.system("rm -rf ./*.plan")
    run(False)
    run(False)
    run(True)
    run(True)
print("Test all finish!")
| trt-samples-for-hackathon-cn-master | cookbook/05-Plugin/PluginProcess/testAddScalarPlugin.py |
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import ctypes
import os
import numpy as np
import tensorrt as trt
from cuda import cudart
soFile = "./AddScalarPlugin.so"
np.set_printoptions(precision=3, linewidth=200, suppress=True)
np.random.seed(31193)
cudart.cudaDeviceSynchronize()
def printArrayInformation(x, info="", n=5):
if 0 in x.shape:
print('%s:%s' % (info, str(x.shape)))
print()
return
print( '%s:%s,SumAbs=%.5e,Var=%.5f,Max=%.5f,Min=%.5f,SAD=%.5f'%( \
info,str(x.shape),np.sum(abs(x)),np.var(x),np.max(x),np.min(x),np.sum(np.abs(np.diff(x.reshape(-1)))) ))
print('\t', x.reshape(-1)[:n], x.reshape(-1)[-n:])
def check(a, b, weak=False, checkEpsilon=1e-5):
if weak:
a = a.astype(np.float32)
b = b.astype(np.float32)
res = np.all(np.abs(a - b) < checkEpsilon)
else:
res = np.all(a == b)
diff0 = np.max(np.abs(a - b))
diff1 = np.max(np.abs(a - b) / (np.abs(b) + checkEpsilon))
print("check:%s, absDiff=%f, relDiff=%f" % (res, diff0, diff1))
def addScalarCPU(inputH, scalar):
return [inputH[0] + scalar]
def getAddScalarPlugin(scalar):
for c in trt.get_plugin_registry().plugin_creator_list:
#print(c.name)
if c.name == "AddScalar":
parameterList = []
parameterList.append(trt.PluginField("scalar", np.float32(scalar), trt.PluginFieldType.FLOAT32))
return c.create_plugin(c.name, trt.PluginFieldCollection(parameterList))
return None
def run(bPutPluginInTrtFile):
shape = [2, 3, 4]
scalar = 1
testCase = "<bPutPluginInTrtFile=%s>" % (bPutPluginInTrtFile)
trtFile = "./model-Plugin%s.plan" % ("Inside" if bPutPluginInTrtFile else "Outside")
print("Test %s" % testCase)
logger = trt.Logger(trt.Logger.ERROR)
trt.init_libnvinfer_plugins(logger, '')
if os.path.isfile(trtFile):
with open(trtFile, "rb") as f:
runtime = trt.Runtime(logger)
if not bPutPluginInTrtFile:
                runtime.get_plugin_registry().load_library(soFile) # load the .so file explicitly if the plugin is shipped outside the plan file
engine = runtime.deserialize_cuda_engine(f.read())
        if engine is None:
print("Failed loading engine!")
return
print("Succeeded loading engine!")
else:
        ctypes.cdll.LoadLibrary(soFile) # load the .so file explicitly when building the engine; it is not needed again once a plan file exists
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
profile = builder.create_optimization_profile()
config = builder.create_builder_config()
if bPutPluginInTrtFile:
            config.plugins_to_serialize = [soFile] # list of .so files to be embedded in the plan file
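            # note: plan-embedded plugins and Runtime.get_plugin_registry().load_library require a recent TensorRT (8.6 or later, to the best of our knowledge)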
inputT0 = network.add_input("inputT0", trt.float32, [-1 for i in shape])
profile.set_shape(inputT0.name, [1 for i in shape], [8 for i in shape], [32 for i in shape])
config.add_optimization_profile(profile)
pluginLayer = network.add_plugin_v2([inputT0], getAddScalarPlugin(scalar))
network.mark_output(pluginLayer.get_output(0))
engineString = builder.build_serialized_network(network, config)
        if engineString is None:
print("Failed building engine!")
return
print("Succeeded building engine!")
with open(trtFile, "wb") as f:
f.write(engineString)
#os.system("sha256sum %s" % trtFile)
engine = trt.Runtime(logger).deserialize_cuda_engine(engineString)
nIO = engine.num_io_tensors
lTensorName = [engine.get_tensor_name(i) for i in range(nIO)]
nInput = [engine.get_tensor_mode(lTensorName[i]) for i in range(nIO)].count(trt.TensorIOMode.INPUT)
context = engine.create_execution_context()
context.set_input_shape(lTensorName[0], shape)
#for i in range(nIO):
# print("[%2d]%s->" % (i, "Input " if i < nInput else "Output"), engine.get_tensor_dtype(lTensorName[i]), engine.get_tensor_shape(lTensorName[i]), context.get_tensor_shape(lTensorName[i]), lTensorName[i])
bufferH = []
bufferH.append(np.arange(np.prod(shape), dtype=np.float32).reshape(shape))
for i in range(nInput, nIO):
bufferH.append(np.empty(context.get_tensor_shape(lTensorName[i]), dtype=trt.nptype(engine.get_tensor_dtype(lTensorName[i]))))
bufferD = []
for i in range(nIO):
bufferD.append(cudart.cudaMalloc(bufferH[i].nbytes)[1])
for i in range(nInput):
cudart.cudaMemcpy(bufferD[i], bufferH[i].ctypes.data, bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice)
for i in range(nIO):
context.set_tensor_address(lTensorName[i], int(bufferD[i]))
context.execute_async_v3(0)
for i in range(nInput, nIO):
cudart.cudaMemcpy(bufferH[i].ctypes.data, bufferD[i], bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost)
outputCPU = addScalarCPU(bufferH[:nInput], scalar)
"""
for i in range(nInput):
printArrayInformation(bufferH[i])
for i in range(nInput, nIO):
printArrayInformation(bufferH[i])
for i in range(nInput, nIO):
printArrayInformation(outputCPU[i - nInput])
"""
check(bufferH[nInput:][0], outputCPU[0], True)
for b in bufferD:
cudart.cudaFree(b)
print("Test %s finish!\n" % testCase)
if __name__ == "__main__":
os.system("rm -rf ./*.plan")
run(True)
run(True)
run(False)
run(False)
print("Test all finish!")
| trt-samples-for-hackathon-cn-master | cookbook/05-Plugin/PluginSerialize/main.py |
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
from datetime import datetime as dt
from glob import glob
import cv2
import numpy as np
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
import tensorflow as tf1
from tensorflow.python.compiler.tensorrt import trt_convert as tftrt
np.random.seed(31193)
tf1.compat.v1.set_random_seed(97)
nTrainBatchSize = 128
nHeight = 28
nWidth = 28
TFModelPath = "./TFModel/"
TRTModelPath = "./TRTModel/"
dataPath = os.path.dirname(os.path.realpath(__file__)) + "/../../00-MNISTData/"
trainFileList = sorted(glob(dataPath + "train/*.jpg"))
testFileList = sorted(glob(dataPath + "test/*.jpg"))
inferenceImage = dataPath + "8.png"
os.system("rm -rf %s/* %s/*" % (TFModelPath, TRTModelPath))
np.set_printoptions(precision=3, linewidth=200, suppress=True)
tf1.compat.v1.disable_eager_execution()
def getBatch(fileList, nSize=1, isTrain=True):
if isTrain:
indexList = np.random.choice(len(fileList), nSize)
else:
nSize = len(fileList)
indexList = np.arange(nSize)
xData = np.zeros([nSize, nHeight, nWidth, 1], dtype=np.float32)
yData = np.zeros([nSize, 10], dtype=np.float32)
for i, index in enumerate(indexList):
imageName = fileList[index]
data = cv2.imread(imageName, cv2.IMREAD_GRAYSCALE)
label = np.zeros(10, dtype=np.float32)
label[int(imageName[-7])] = 1
xData[i] = data.reshape(nHeight, nWidth, 1).astype(np.float32) / 255
yData[i] = label
return xData, yData
# Create network in TensorFlow1 and save it as a .pb file ----------------------
x = tf1.compat.v1.placeholder(tf1.float32, [None, nHeight, nWidth, 1], name="x")
y_ = tf1.compat.v1.placeholder(tf1.float32, [None, 10], name="y_")
w1 = tf1.compat.v1.get_variable("w1", shape=[5, 5, 1, 32], initializer=tf1.truncated_normal_initializer(mean=0, stddev=0.1))
b1 = tf1.compat.v1.get_variable("b1", shape=[32], initializer=tf1.constant_initializer(value=0.1))
h1 = tf1.nn.conv2d(x, w1, strides=[1, 1, 1, 1], padding="SAME")
h2 = h1 + b1
h3 = tf1.nn.relu(h2)
h4 = tf1.nn.max_pool2d(h3, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="SAME")
w2 = tf1.compat.v1.get_variable("w2", shape=[5, 5, 32, 64], initializer=tf1.truncated_normal_initializer(mean=0, stddev=0.1))
b2 = tf1.compat.v1.get_variable("b2", shape=[64], initializer=tf1.constant_initializer(value=0.1))
h5 = tf1.nn.conv2d(h4, w2, strides=[1, 1, 1, 1], padding="SAME")
h6 = h5 + b2
h7 = tf1.nn.relu(h6)
h8 = tf1.nn.max_pool2d(h7, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="SAME")
w3 = tf1.compat.v1.get_variable("w3", shape=[7 * 7 * 64, 1024], initializer=tf1.truncated_normal_initializer(mean=0, stddev=0.1))
b3 = tf1.compat.v1.get_variable("b3", shape=[1024], initializer=tf1.constant_initializer(value=0.1))
h9 = tf1.reshape(h8, [-1, 7 * 7 * 64])
h10 = tf1.matmul(h9, w3)
h11 = h10 + b3
h12 = tf1.nn.relu(h11)
w4 = tf1.compat.v1.get_variable("w4", shape=[1024, 10], initializer=tf1.truncated_normal_initializer(mean=0, stddev=0.1))
b4 = tf1.compat.v1.get_variable("b4", shape=[10], initializer=tf1.constant_initializer(value=0.1))
h13 = tf1.matmul(h12, w4)
h14 = h13 + b4
y = tf1.nn.softmax(h14, name="y")
z = tf1.argmax(y, 1, name="z")
crossEntropy = -tf1.reduce_sum(y_ * tf1.math.log(y))
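# note: this hand-written cross entropy is numerically unstable; tf1.nn.softmax_cross_entropy_with_logits on the logits h14 is the stable alternative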
trainStep = tf1.compat.v1.train.AdamOptimizer(1e-4).minimize(crossEntropy)
accuracy = tf1.reduce_mean(tf1.cast(tf1.equal(z, tf1.argmax(y_, 1)), tf1.float32), name="accuracy")
tfConfig = tf1.compat.v1.ConfigProto()
tfConfig.gpu_options.per_process_gpu_memory_fraction = 0.5
sess = tf1.compat.v1.Session(config=tfConfig)
sess.run(tf1.compat.v1.global_variables_initializer())
for i in range(100):
xSample, ySample = getBatch(trainFileList, nTrainBatchSize, True)
trainStep.run(session=sess, feed_dict={x: xSample, y_: ySample})
if i % 10 == 0:
accuracyValue = accuracy.eval(session=sess, feed_dict={x: xSample, y_: ySample})
print("%s, batch %3d, acc = %f" % (dt.now(), 10 + i, accuracyValue))
tf1.saved_model.simple_save(sess, TFModelPath, inputs={'x': x}, outputs={'z': z})
sess.close()
print("Succeeded building model in TensorFlow1!")
# Convert the model into a form usable by TF-TRT -------------------------------
converter = tftrt.TrtGraphConverter(TFModelPath)
graph_def = converter.convert()
converter.save(TRTModelPath)
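# A hedged sketch of optional conversion arguments (parameter names from the TF1 trt_convert API; adjust to the installed version):
#converter = tftrt.TrtGraphConverter(TFModelPath, max_batch_size=nTrainBatchSize, precision_mode="FP16", minimum_segment_size=3)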
os.system("cp %s/variables/* %s/variables/" % (TFModelPath, TRTModelPath))
# Inference with TF-TRT --------------------------------------------------------
tfConfig = tf1.compat.v1.ConfigProto()
tfConfig.gpu_options.per_process_gpu_memory_fraction = 0.5
session = tf1.compat.v1.Session(config=tfConfig)
tf1.saved_model.loader.load(session, [tf1.saved_model.SERVING], TRTModelPath)
data = cv2.imread(inferenceImage, cv2.IMREAD_GRAYSCALE).astype(np.float32).reshape(1, 28, 28, 1)
output = session.run(z, feed_dict={x: data})
print(output)
session.close()
print("Succeeded running model in TF-TRT!")
# Inference with native TensorFlow ---------------------------------------------
"""
tfConfig = tf1.compat.v1.ConfigProto()
tfConfig.gpu_options.per_process_gpu_memory_fraction = 0.5
session = tf1.compat.v1.Session(config=tfConfig)
tf1.saved_model.loader.load(session, [tf1.saved_model.SERVING], TFModelPath)
data = cv2.imread(inferenceImage, cv2.IMREAD_GRAYSCALE).astype(np.float32).reshape(1, 28, 28, 1)
output = session.run(z, feed_dict={x: data})
print(output)
session.close()
"""
| trt-samples-for-hackathon-cn-master | cookbook/06-UseFrameworkTRT/TensorFlow1-TFTRT/main.py |
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
from datetime import datetime as dt
from glob import glob
import cv2
import numpy as np
import torch as t
import torch.nn.functional as F
import torch_tensorrt
from torch.autograd import Variable
from torch.utils import data
np.random.seed(31193)
t.manual_seed(97)
t.cuda.manual_seed_all(97)
t.backends.cudnn.deterministic = True
nTrainBatchSize = 128
nHeight = 28
nWidth = 28
tsFile = "./model.ts"
dataPath = os.path.dirname(os.path.realpath(__file__)) + "/../../00-MNISTData/"
trainFileList = sorted(glob(dataPath + "train/*.jpg"))
testFileList = sorted(glob(dataPath + "test/*.jpg"))
inferenceImage = dataPath + "8.png"
os.system("rm -rf ./*.ps")
np.set_printoptions(precision=3, linewidth=200, suppress=True)
# Create network and train model in pyTorch ------------------------------------
class Net(t.nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = t.nn.Conv2d(1, 32, (5, 5), padding=(2, 2), bias=True)
self.conv2 = t.nn.Conv2d(32, 64, (5, 5), padding=(2, 2), bias=True)
self.fc1 = t.nn.Linear(64 * 7 * 7, 1024, bias=True)
self.fc2 = t.nn.Linear(1024, 10, bias=True)
def forward(self, x):
x = F.max_pool2d(F.relu(self.conv1(x)), (2, 2))
x = F.max_pool2d(F.relu(self.conv2(x)), (2, 2))
x = x.reshape(-1, 64 * 7 * 7)
x = F.relu(self.fc1(x))
y = self.fc2(x)
        return y # ArgMax is not supported in Torch-TensorRT, so we do not add it here
class MyData(t.utils.data.Dataset):
def __init__(self, isTrain=True):
if isTrain:
self.data = trainFileList
else:
self.data = testFileList
def __getitem__(self, index):
imageName = self.data[index]
data = cv2.imread(imageName, cv2.IMREAD_GRAYSCALE)
label = np.zeros(10, dtype=np.float32)
index = int(imageName[-7])
label[index] = 1
return t.from_numpy(data.reshape(1, nHeight, nWidth).astype(np.float32)), t.from_numpy(label)
def __len__(self):
return len(self.data)
model = Net().cuda()
ceLoss = t.nn.CrossEntropyLoss()
opt = t.optim.Adam(model.parameters(), lr=0.001)
trainDataset = MyData(True)
testDataset = MyData(False)
trainLoader = t.utils.data.DataLoader(dataset=trainDataset, batch_size=nTrainBatchSize, shuffle=True)
testLoader = t.utils.data.DataLoader(dataset=testDataset, batch_size=nTrainBatchSize, shuffle=True)
for epoch in range(10):
for xTrain, yTrain in trainLoader:
xTrain = Variable(xTrain).cuda()
yTrain = Variable(yTrain).cuda()
opt.zero_grad()
y_ = model(xTrain)
loss = ceLoss(y_, yTrain)
loss.backward()
opt.step()
with t.no_grad():
acc = 0
n = 0
for xTest, yTest in testLoader:
xTest = Variable(xTest).cuda()
yTest = Variable(yTest).cuda()
y_ = model(xTest)
acc += t.sum(t.argmax(t.softmax(y_, dim=1), dim=1) == t.matmul(yTest, t.Tensor([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]).to("cuda:0"))).cpu().numpy()
n += xTest.shape[0]
print("%s, epoch %2d, loss = %f, test acc = %f" % (dt.now(), epoch + 1, loss.data, acc / n))
# Use Torch-TensorRT -----------------------------------------------------------
tsModel = t.jit.trace(model, t.randn(1, 1, nHeight, nWidth, device="cuda"))
trtModel = torch_tensorrt.compile(tsModel, inputs=[t.randn(1, 1, nHeight, nWidth, device="cuda").float()], enabled_precisions={t.float})
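# A hedged sketch: FP16 compilation of the same traced model (torch_tensorrt.Input assumed available in the installed version):
#trtModelFP16 = torch_tensorrt.compile(tsModel, inputs=[torch_tensorrt.Input((1, 1, nHeight, nWidth), dtype=t.float)], enabled_precisions={t.half})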
data = cv2.imread(inferenceImage, cv2.IMREAD_GRAYSCALE).reshape(1, 1, 28, 28).astype(np.float32)
inputData = t.from_numpy(data).cuda()
outputData = trtModel(inputData) # run inference in TensorRT
print(t.argmax(t.softmax(outputData, dim=1), dim=1))
t.jit.save(trtModel, tsFile) # save the TensorRT-embedded TorchScript model as a .ts file
| trt-samples-for-hackathon-cn-master | cookbook/06-UseFrameworkTRT/Torch-TensorRT/main.py |
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
from datetime import datetime as dt
from glob import glob
import cv2
import numpy as np
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
import tensorflow as tf1
from tensorflow.python.compiler.tensorrt import trt_convert as tftrt
np.random.seed(31193)
tf1.compat.v1.set_random_seed(97)
nTrainBatchSize = 128
nHeight = 28
nWidth = 28
TFModelPath = "./TFModel/"
TRTModelPath = "./TRTModel/"
dataPath = os.path.dirname(os.path.realpath(__file__)) + "/../../00-MNISTData/"
trainFileList = sorted(glob(dataPath + "train/*.jpg"))
testFileList = sorted(glob(dataPath + "test/*.jpg"))
inferenceImage = dataPath + "8.png"
os.system("rm -rf %s/* %s/*" % (TFModelPath, TRTModelPath))
np.set_printoptions(precision=3, linewidth=200, suppress=True)
tf1.compat.v1.disable_eager_execution()
def getBatch(fileList, nSize=1, isTrain=True):
if isTrain:
indexList = np.random.choice(len(fileList), nSize)
else:
nSize = len(fileList)
indexList = np.arange(nSize)
xData = np.zeros([nSize, nHeight, nWidth, 1], dtype=np.float32)
yData = np.zeros([nSize, 10], dtype=np.float32)
for i, index in enumerate(indexList):
imageName = fileList[index]
data = cv2.imread(imageName, cv2.IMREAD_GRAYSCALE)
label = np.zeros(10, dtype=np.float32)
label[int(imageName[-7])] = 1
xData[i] = data.reshape(nHeight, nWidth, 1).astype(np.float32) / 255
yData[i] = label
return xData, yData
# Create network in TensorFlow1 and save it as a .pb file ----------------------
x = tf1.compat.v1.placeholder(tf1.float32, [None, nHeight, nWidth, 1], name="x")
y_ = tf1.compat.v1.placeholder(tf1.float32, [None, 10], name="y_")
w1 = tf1.compat.v1.get_variable("w1", shape=[5, 5, 1, 32], initializer=tf1.truncated_normal_initializer(mean=0, stddev=0.1))
b1 = tf1.compat.v1.get_variable("b1", shape=[32], initializer=tf1.constant_initializer(value=0.1))
h1 = tf1.nn.conv2d(x, w1, strides=[1, 1, 1, 1], padding="SAME")
h2 = h1 + b1
h3 = tf1.nn.relu(h2)
h4 = tf1.nn.max_pool2d(h3, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="SAME")
w2 = tf1.compat.v1.get_variable("w2", shape=[5, 5, 32, 64], initializer=tf1.truncated_normal_initializer(mean=0, stddev=0.1))
b2 = tf1.compat.v1.get_variable("b2", shape=[64], initializer=tf1.constant_initializer(value=0.1))
h5 = tf1.nn.conv2d(h4, w2, strides=[1, 1, 1, 1], padding="SAME")
h6 = h5 + b2
h7 = tf1.nn.relu(h6)
h8 = tf1.nn.max_pool2d(h7, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="SAME")
w3 = tf1.compat.v1.get_variable("w3", shape=[7 * 7 * 64, 1024], initializer=tf1.truncated_normal_initializer(mean=0, stddev=0.1))
b3 = tf1.compat.v1.get_variable("b3", shape=[1024], initializer=tf1.constant_initializer(value=0.1))
h9 = tf1.reshape(h8, [-1, 7 * 7 * 64])
h10 = tf1.matmul(h9, w3)
h11 = h10 + b3
h12 = tf1.nn.relu(h11)
w4 = tf1.compat.v1.get_variable("w4", shape=[1024, 10], initializer=tf1.truncated_normal_initializer(mean=0, stddev=0.1))
b4 = tf1.compat.v1.get_variable("b4", shape=[10], initializer=tf1.constant_initializer(value=0.1))
h13 = tf1.matmul(h12, w4)
h14 = h13 + b4
y = tf1.nn.softmax(h14, name="y")
z = tf1.argmax(y, 1, name="z")
crossEntropy = -tf1.reduce_sum(y_ * tf1.math.log(y))
trainStep = tf1.compat.v1.train.AdamOptimizer(1e-4).minimize(crossEntropy)
accuracy = tf1.reduce_mean(tf1.cast(tf1.equal(z, tf1.argmax(y_, 1)), tf1.float32), name="accuracy")
tfConfig = tf1.compat.v1.ConfigProto()
tfConfig.gpu_options.per_process_gpu_memory_fraction = 0.5
sess = tf1.compat.v1.Session(config=tfConfig)
sess.run(tf1.compat.v1.global_variables_initializer())
for i in range(100):
xSample, ySample = getBatch(trainFileList, nTrainBatchSize, True)
trainStep.run(session=sess, feed_dict={x: xSample, y_: ySample})
if i % 10 == 0:
accuracyValue = accuracy.eval(session=sess, feed_dict={x: xSample, y_: ySample})
print("%s, batch %3d, acc = %f" % (dt.now(), 10 + i, accuracyValue))
tf1.saved_model.simple_save(sess, TFModelPath, inputs={'x': x}, outputs={'z': z})
sess.close()
print("Succeeded building model in TensorFlow1!")
# Convert the model into a form usable by TF-TRT -------------------------------
converter = tftrt.TrtGraphConverter(TFModelPath)
graph_def = converter.convert()
converter.save(TRTModelPath)
os.system("cp %s/variables/* %s/variables/" % (TFModelPath, TRTModelPath))
# Inference with TF-TRT --------------------------------------------------------
tfConfig = tf1.compat.v1.ConfigProto()
tfConfig.gpu_options.per_process_gpu_memory_fraction = 0.5
session = tf1.compat.v1.Session(config=tfConfig)
tf1.saved_model.loader.load(session, [tf1.saved_model.SERVING], TRTModelPath)
data = cv2.imread(inferenceImage, cv2.IMREAD_GRAYSCALE).astype(np.float32).reshape(1, 28, 28, 1)
output = session.run(z, feed_dict={x: data})
print(output)
session.close()
print("Succeeded running model in TF-TRT!")
# Inference with native TensorFlow ---------------------------------------------
"""
tfConfig = tf1.compat.v1.ConfigProto()
tfConfig.gpu_options.per_process_gpu_memory_fraction = 0.5
session = tf1.compat.v1.Session(config=tfConfig)
tf1.saved_model.loader.load(session, [tf1.saved_model.SERVING], TFModelPath)
data = cv2.imread(inferenceImage, cv2.IMREAD_GRAYSCALE).astype(np.float32).reshape(1, 28, 28, 1)
output = session.run(z, feed_dict={x: data})
print(output)
session.close()
"""
| trt-samples-for-hackathon-cn-master | cookbook/06-UseFrameworkTRT/TensorFlow2-TFTRT/main.py |
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
import tensorrt as trt
from cuda import cudart
shape = [4, 5, 6]
data = np.zeros(shape).astype(np.float32)
data[0, 0, 1] = 1
data[0, 2, 3] = 2
data[0, 3, 4] = 3
data[1, 1, 0] = 4
data[1, 1, 1] = 5
data[1, 1, 2] = 6
data[1, 1, 3] = 7
data[1, 1, 4] = 8
data[1, 1, 5] = 9
data[2, 0, 1] = 10
data[2, 1, 1] = 11
data[2, 2, 1] = 12
data[2, 3, 1] = 13
data[2, 4, 1] = 14
np.set_printoptions(precision=3, linewidth=200, suppress=True)
cudart.cudaDeviceSynchronize()
class MyOutputAllocator(trt.IOutputAllocator):
def __init__(self):
print("[MyOutputAllocator::__init__]")
super(MyOutputAllocator, self).__init__()
self.shape = None
self.size = 0
self.address = 0
def reallocate_output(self, tensor_name, memory, size, alignment):
print("[MyOutputAllocator::reallocate_output] TensorName=%s, Memory=%s, Size=%d, Alignment=%d" % (tensor_name, memory, size, alignment))
        if size <= self.size: # the existing buffer is large enough to reuse
return memory
if memory != 0:
status = cudart.cudaFree(memory)
if status != cudart.cudaError_t.cudaSuccess:
print("Failed freeing old memory")
return 0
        status, address = cudart.cudaMalloc(size)
        if status != cudart.cudaError_t.cudaSuccess:
            print("Failed allocating buffer of size %d" % size)
            return 0
        self.size = size
        self.address = address
        return address
def notify_shape(self, tensor_name, shape):
print("[MyOutputAllocator::notify_shape] TensorName=%s, Shape=%s" % (tensor_name, shape))
self.shape = shape
return
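# Call flow during execute_async_v3: TensorRT requests output memory through reallocate_output
# (the requested size may be an upper bound for data-dependent shapes) and reports the final
# dimensions through notify_shape, so the real output shape is only known after inference.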
logger = trt.Logger(trt.Logger.ERROR)
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
profile = builder.create_optimization_profile()
config = builder.create_builder_config()
inputT0 = network.add_input("inputT0", trt.float32, shape)
profile.set_shape(inputT0.name, shape, shape, shape)
config.add_optimization_profile(profile)
nonZeroLayer = network.add_non_zero(inputT0) # use a data-dependent network as example, normal network is also OK
network.mark_output(nonZeroLayer.get_output(0))
engineString = builder.build_serialized_network(network, config)
engine = trt.Runtime(logger).deserialize_cuda_engine(engineString)
nIO = engine.num_io_tensors
lTensorName = [engine.get_tensor_name(i) for i in range(nIO)]
nInput = [engine.get_tensor_mode(lTensorName[i]) for i in range(nIO)].count(trt.TensorIOMode.INPUT)
context = engine.create_execution_context()
myOutputAllocator = MyOutputAllocator()
for i in range(nInput, nIO):
context.set_output_allocator(lTensorName[i], myOutputAllocator) # assign Output Allocator to Context, one Output Allocator for each output tensor
for i in range(nIO):
# context.get_tensor_shape(lTensorName[1]) here returns (3,-1)
print("[%2d]%s->" % (i, "Input " if i < nInput else "Output"), engine.get_tensor_dtype(lTensorName[i]), engine.get_tensor_shape(lTensorName[i]), context.get_tensor_shape(lTensorName[i]), lTensorName[i])
bufferH = []
bufferH.append(data) # only prepare input buffer
bufferD = []
for i in range(nInput): # prepare the input buffer
bufferD.append(cudart.cudaMalloc(bufferH[i].nbytes)[1])
for i in range(nInput, nIO): # use nullptr for output buffer
bufferD.append(int(0))
for i in range(nInput):
cudart.cudaMemcpy(bufferD[i], bufferH[i].ctypes.data, bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice)
for i in range(nIO):
context.set_tensor_address(lTensorName[i], int(bufferD[i]))
context.execute_async_v3(0)
print("After do inference")
for i in range(nIO):
# context.get_tensor_shape(lTensorName[1]) here returns real shape of output tensor
print("[%2d]%s->" % (i, "Input " if i < nInput else "Output"), engine.get_tensor_dtype(lTensorName[i]), engine.get_tensor_shape(lTensorName[i]), context.get_tensor_shape(lTensorName[i]), lTensorName[i])
for i in range(nInput, nIO): # get buffer from Output Allocator
myOutputAllocator = context.get_output_allocator(lTensorName[i])
bufferH.append(np.empty(myOutputAllocator.shape, dtype=trt.nptype(engine.get_tensor_dtype(lTensorName[i]))))
bufferD[i] = myOutputAllocator.address
for i in range(nInput, nIO):
cudart.cudaMemcpy(bufferH[i].ctypes.data, bufferD[i], bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost)
for i in range(nIO):
print(lTensorName[i])
print(bufferH[i])
for b in bufferD:
cudart.cudaFree(b)
| trt-samples-for-hackathon-cn-master | cookbook/02-API/OutputAllocator/main.py |
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
import tensorrt as trt
from cuda import cudart
shape = [1, 1, 28, 28]
np.random.seed(31193)
data = np.random.rand(np.prod(shape)).astype(np.float32).reshape(shape) * 2 - 1
np.set_printoptions(precision=3, linewidth=200, suppress=True)
cudart.cudaDeviceSynchronize()
def getSizeString(xByte):
if xByte < (1 << 10):
return "%5.1f B" % xByte
if xByte < (1 << 20):
return "%5.1fKiB" % (xByte / (1 << 10))
if xByte < (1 << 30):
return "%5.1fMiB" % (xByte / (1 << 20))
return "%5.1fGiB" % (xByte / (1 << 30))
class MyAlgorithmSelector(trt.IAlgorithmSelector):
    def __init__(self, iStrategy=0): # initialize with the index of one of our customized algorithm-selection strategies
super(MyAlgorithmSelector, self).__init__()
self.iStrategy = iStrategy
def select_algorithms(self, layerAlgorithmContext, layerAlgorithmList):
# we print the alternative algorithms of each layer here
nInput = layerAlgorithmContext.num_inputs
nOutput = layerAlgorithmContext.num_outputs
print("Layer %s,in=%d,out=%d" % (layerAlgorithmContext.name, nInput, nOutput))
for i in range(nInput + nOutput):
print(" %s %2d: shape=%s" % ("Input " if i < nInput else "Output", i if i < nInput else i - nInput, layerAlgorithmContext.get_shape(i)))
for i, algorithm in enumerate(layerAlgorithmList):
print(" algorithm%3d:implementation[%10d], tactic[%20d], timing[%7.3fus], workspace[%s]" % ( \
i,
algorithm.algorithm_variant.implementation,
algorithm.algorithm_variant.tactic,
algorithm.timing_msec * 1000,
getSizeString(algorithm.workspace_size)))
        if self.iStrategy == 0: # choose the algorithm with the shortest time, the same as TensorRT's default behavior
timeList = [algorithm.timing_msec for algorithm in layerAlgorithmList]
result = [np.argmin(timeList)]
        elif self.iStrategy == 1: # choose the algorithm with the longest time to get an engine with the worst performance, just for fun :)
timeList = [algorithm.timing_msec for algorithm in layerAlgorithmList]
result = [np.argmax(timeList)]
        elif self.iStrategy == 2: # choose the algorithm using the smallest workspace
workspaceSizeList = [algorithm.workspace_size for algorithm in layerAlgorithmList]
result = [np.argmin(workspaceSizeList)]
        elif self.iStrategy == 3: # choose a certain algorithm we already know
            # This strategy can be a workaround for building exactly the same engine many times, but Timing Cache is more recommended for that purpose.
            # The reason is that select_algorithms is called after the performance test of all algorithms of a layer is finished (you can find algorithm.timing_msec > 0),
            # so it does not save the time of the test.
            # On the contrary, the performance test of the algorithms is skipped when using Timing Cache (though the performance test of Reformatting can not be skipped),
            # so it surely saves a lot of time compared with Algorithm Selector.
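            # A minimal Timing Cache sketch for comparison (assuming a BuilderConfig named config):
            #   cache = config.create_timing_cache(b"")
            #   config.set_timing_cache(cache, ignore_mismatch=False)
            #   ... build engines, then persist cache.serialize() to a file for later runs ...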
if layerAlgorithmContext.name == "(Unnamed Layer* 0) [Convolution] + (Unnamed Layer* 1) [Activation]":
# the number 2147483648 is from VERBOSE log, marking the certain algorithm
result = [index for index, algorithm in enumerate(layerAlgorithmList) if algorithm.algorithm_variant.implementation == 2147483648]
else: # keep all algorithms for other layers
result = list(range(len(layerAlgorithmList)))
else: # default behavior: keep all algorithms
result = list(range(len(layerAlgorithmList)))
return result
def report_algorithms(self, modelAlgorithmContext, modelAlgorithmList): # report the tactic of the whole network
        # there seems to be a bug in report_algorithms that makes algorithm.timing_msec and algorithm.workspace_size always 0?
print("[MyAlgorithmSelector::report_algorithms]")
for i in range(len(modelAlgorithmContext)):
context = modelAlgorithmContext[i]
algorithm = modelAlgorithmList[i]
nInput = context.num_inputs
nOutput = context.num_outputs
print("Layer %s,in=%d,out=%d" % (context.name, nInput, nOutput))
for i in range(nInput + nOutput):
ioInfo = algorithm.get_algorithm_io_info(i)
print(" %s %2d: %s stride=%s, vectorized_dim=%d, components_per_element=%d, shape=%s" % ( \
"Input " if i < nInput else "Output",
i if i < nInput else i - nInput,
ioInfo.dtype,
ioInfo.strides,
ioInfo.vectorized_dim,
ioInfo.components_per_element,
context.get_shape(i)))
print(" algorithm :implementation[%10d], tactic[%20d], timing[%7.3fus], workspace[%s]" % ( \
algorithm.algorithm_variant.implementation,
algorithm.algorithm_variant.tactic,
algorithm.timing_msec * 1000,
getSizeString(algorithm.workspace_size)))
logger = trt.Logger(trt.Logger.INFO)
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
profile = builder.create_optimization_profile()
config = builder.create_builder_config()
config.algorithm_selector = MyAlgorithmSelector(1) # assign Algorithm Selector to BuilderConfig; the number here is the index of our customized strategy to select algorithms
config.set_flag(trt.BuilderFlag.FP16) # add FP16 to get more alternative algorithms
inputTensor = network.add_input("inputT0", trt.float32, [-1] + shape[1:])
profile.set_shape(inputTensor.name, [1] + shape[1:], [2] + shape[1:], [4] + shape[1:])
config.add_optimization_profile(profile)
w = np.ascontiguousarray(np.random.rand(32, 1, 5, 5).astype(np.float32))
b = np.ascontiguousarray(np.random.rand(32, 1, 1).astype(np.float32))
_0 = network.add_convolution_nd(inputTensor, 32, [5, 5], trt.Weights(w), trt.Weights(b))
_0.padding_nd = [2, 2]
_1 = network.add_activation(_0.get_output(0), trt.ActivationType.RELU)
_2 = network.add_pooling_nd(_1.get_output(0), trt.PoolingType.MAX, [2, 2])
_2.stride_nd = [2, 2]
w = np.ascontiguousarray(np.random.rand(64, 32, 5, 5).astype(np.float32))
b = np.ascontiguousarray(np.random.rand(64, 1, 1).astype(np.float32))
_3 = network.add_convolution_nd(_2.get_output(0), 64, [5, 5], trt.Weights(w), trt.Weights(b))
_3.padding_nd = [2, 2]
_4 = network.add_activation(_3.get_output(0), trt.ActivationType.RELU)
_5 = network.add_pooling_nd(_4.get_output(0), trt.PoolingType.MAX, [2, 2])
_5.stride_nd = [2, 2]
_6 = network.add_shuffle(_5.get_output(0))
_6.reshape_dims = (-1, 64 * 7 * 7)
w = np.ascontiguousarray(np.random.rand(64 * 7 * 7, 1024).astype(np.float32))
b = np.ascontiguousarray(np.random.rand(1, 1024).astype(np.float32))
_7 = network.add_constant(w.shape, trt.Weights(w))
_8 = network.add_matrix_multiply(_6.get_output(0), trt.MatrixOperation.NONE, _7.get_output(0), trt.MatrixOperation.NONE)
_9 = network.add_constant(b.shape, trt.Weights(b))
_10 = network.add_elementwise(_8.get_output(0), _9.get_output(0), trt.ElementWiseOperation.SUM)
_11 = network.add_activation(_10.get_output(0), trt.ActivationType.RELU)
w = np.ascontiguousarray(np.random.rand(1024, 10).astype(np.float32))
b = np.ascontiguousarray(np.random.rand(1, 10).astype(np.float32))
_12 = network.add_constant(w.shape, trt.Weights(w))
_13 = network.add_matrix_multiply(_11.get_output(0), trt.MatrixOperation.NONE, _12.get_output(0), trt.MatrixOperation.NONE)
_14 = network.add_constant(b.shape, trt.Weights(b))
_15 = network.add_elementwise(_13.get_output(0), _14.get_output(0), trt.ElementWiseOperation.SUM)
_16 = network.add_softmax(_15.get_output(0))
_16.axes = 1 << 1
_17 = network.add_topk(_16.get_output(0), trt.TopKOperation.MAX, 1, 1 << 1)
network.mark_output(_17.get_output(1))
engineString = builder.build_serialized_network(network, config)
| trt-samples-for-hackathon-cn-master | cookbook/02-API/AlgorithmSelector/main.py |
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import tensorrt as trt
class MyLogger(trt.ILogger): # customized Logger
def __init__(self):
trt.ILogger.__init__(self)
def log(self, severity, msg):
if severity <= self.min_severity:
# int(trt.ILogger.Severity.VERBOSE) == 4
# int(trt.ILogger.Severity.INFO) == 3
# int(trt.ILogger.Severity.WARNING) == 2
# int(trt.ILogger.Severity.ERROR) == 1
# int(trt.ILogger.Severity.INTERNAL_ERROR) == 0
print("My Logger[%s] %s" % (severity, msg)) # customerized log content
logger = MyLogger() # default severity is VERBOSE
print("Build time --------------------------------------------------------------")
logger.min_severity = trt.ILogger.Severity.INFO # use severity INFO during build time
builder = trt.Builder(logger) # assign logger to Builder
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
config = builder.create_builder_config()
inputTensor = network.add_input("inputT0", trt.float32, [3, 4, 5])
identityLayer = network.add_identity(inputTensor)
network.mark_output(identityLayer.get_output(0))
engineString = builder.build_serialized_network(network, config)
print("Run time ----------------------------------------------------------------")
logger.min_severity = trt.ILogger.Severity.VERBOSE # change severity to VERBOSE during run time
engine = trt.Runtime(logger).deserialize_cuda_engine(engineString) # assign logger to Runtime
| trt-samples-for-hackathon-cn-master | cookbook/02-API/Logger/main.py |
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import tensorrt as trt
def printFlagFromBit(bit):
flagList = []
if bit & 1 << int(trt.BuilderFlag.FP16): # 0
flagList.append("FP16")
if bit & 1 << int(trt.BuilderFlag.INT8): # 1
flagList.append("INT8")
if bit & 1 << int(trt.BuilderFlag.DEBUG): # 2
flagList.append("DEBUG")
if bit & 1 << int(trt.BuilderFlag.GPU_FALLBACK): # 3
flagList.append("GPU_FALLBACK")
if bit & 1 << int(trt.BuilderFlag.STRICT_TYPES): # 4
flagList.append("STRICT_TYPES")
if bit & 1 << int(trt.BuilderFlag.REFIT): # 5
flagList.append("REFIT")
if bit & 1 << int(trt.BuilderFlag.DISABLE_TIMING_CACHE): # 6
flagList.append("DISABLE_TIMING_CACHE")
if bit & 1 << int(trt.BuilderFlag.TF32): # 7
flagList.append("TF32")
if bit & 1 << int(trt.BuilderFlag.SPARSE_WEIGHTS): # 8
flagList.append("SPARSE_WEIGHTS")
if bit & 1 << int(trt.BuilderFlag.SAFETY_SCOPE): # 9
flagList.append("SAFETY_SCOPE")
if bit & 1 << int(trt.BuilderFlag.OBEY_PRECISION_CONSTRAINTS): # 10
flagList.append("OBEY_PRECISION_CONSTRAINTS")
if bit & 1 << int(trt.BuilderFlag.PREFER_PRECISION_CONSTRAINTS): # 11
flagList.append("PREFER_PRECISION_CONSTRAINTS")
if bit & 1 << int(trt.BuilderFlag.DIRECT_IO): # 12
flagList.append("DIRECT_IO")
if bit & 1 << int(trt.BuilderFlag.REJECT_EMPTY_ALGORITHMS): # 13
flagList.append("REJECT_EMPTY_ALGORITHMS")
if bit & 1 << int(trt.BuilderFlag.ENABLE_TACTIC_HEURISTIC): # 14
flagList.append("ENABLE_TACTIC_HEURISTIC")
if bit & 1 << int(trt.BuilderFlag.VERSION_COMPATIBLE): # 15
flagList.append("VERSION_COMPATIBLE")
if bit & 1 << int(trt.BuilderFlag.EXCLUDE_LEAN_RUNTIME): # 16
flagList.append("EXCLUDE_LEAN_RUNTIME")
if bit & 1 << int(trt.BuilderFlag.FP8): # 17
flagList.append("FP8")
print(flagList)
return
logger = trt.Logger(trt.Logger.ERROR)
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
profile = builder.create_optimization_profile()
config = builder.create_builder_config()
config.reset() # reset BuilderConfig to default, not required
inputTensor = network.add_input("inputT0", trt.float32, [-1, -1, -1])
profile.set_shape(inputTensor.name, [1, 1, 1], [3, 4, 5], [6, 8, 10])
config.add_optimization_profile(profile)
identityLayer = network.add_identity(inputTensor)
network.mark_output(identityLayer.get_output(0))
print("config.__sizeof__() = %d" % config.__sizeof__())
print("config.__str__() = %s" % config.__str__())
print("\nDevice type part ======================================================")
config.engine_capability = trt.EngineCapability.STANDARD # default without targeting safety runtime, supporting GPU and DLA
#config.engine_capability = trt.EngineCapability.SAFETY # targeting safety runtime, supporting GPU on NVIDIA Drive(R) products
#config.engine_capability = trt.EngineCapability.DLA_STANDALONE # targeting DLA runtime, supporting DLA
#config.engine_capability = trt.EngineCapability.DEFAULT # same as STANDARD, deprecated since TensorRT 8.0
#config.engine_capability = trt.EngineCapability.SAFE_GPU # same as SAFETY, deprecated since TensorRT 8.0
#config.engine_capability = trt.EngineCapability.SAFE_DLA # same as DLA_STANDALONE, deprecated since TensorRT 8.0
print("config.engine_capability = %s" % config.engine_capability)
print("config.default_device_type = %s" % config.default_device_type)
print("config.DLA_core = %d" % config.DLA_core)
print("config.can_run_on_DLA(identityLayer) = %s" % config.can_run_on_DLA(identityLayer))
print("Set device type of certain layer ----------------------------------------")
config.set_device_type(identityLayer, trt.DeviceType.DLA) # device type: [trt.DeviceType.GPU, trt.DeviceType.DLA]
print("config.get_device_type(identityLayer) = %s" % config.get_device_type(identityLayer)) # offload one layer running on certain device
print("config.is_device_type_set(identityLayer) = %s" % config.is_device_type_set(identityLayer))
print("Reset device type of certain layer to default ---------------------------")
config.reset_device_type(identityLayer)
print("config.get_device_type(identityLayer) = %s" % config.get_device_type(identityLayer))
print("\nFlag part =============================================================")
print("config.flags = %d" % config.flags) # check all flags, when running TensorRT on Ampere above, TF32 (1<<7) is set as default
printFlagFromBit(config.flags)
print("Set Flag FP16 -----------------------------------------------------------")
config.set_flag(trt.BuilderFlag.FP16) # set single flag
print("config.get_flag(trt.BuilderFlag.FP16) = %s" % config.get_flag(trt.BuilderFlag.FP16)) # check single flag
printFlagFromBit(config.flags)
print("Clear Flag FP16 ---------------------------------------------------------")
config.clear_flag(trt.BuilderFlag.FP16) # unset single flag
print("config.get_flag(trt.BuilderFlag.FP16) = %s" % config.get_flag(trt.BuilderFlag.FP16)) # check single flag
printFlagFromBit(config.flags)
print("Set Flag by bit operation -----------------------------------------------")
config.flags = 1 << int(trt.BuilderFlag.FP16) | 1 << int(trt.BuilderFlag.INT8) # set multiple flags
printFlagFromBit(config.flags)
config.flags = 0 # unset all flags
printFlagFromBit(config.flags)
print("config.quantization_flags = %d" % config.quantization_flags) # check quantization flag
print("Set flag CALIBRATE_BEFORE_FUSION ----------------------------------------")
config.set_quantization_flag(trt.QuantizationFlag.CALIBRATE_BEFORE_FUSION) # set quantization flag
print("config.get_quantization_flag(trt.QuantizationFlag.CALIBRATE_BEFORE_FUSION) = %s" % config.set_quantization_flag(trt.QuantizationFlag.CALIBRATE_BEFORE_FUSION))
print("config.quantization_flags = %d" % config.quantization_flags)
print("Clear flag CALIBRATE_BEFORE_FUSION --------------------------------------")
config.clear_quantization_flag(trt.QuantizationFlag.CALIBRATE_BEFORE_FUSION) # unset quantization flag
print("config.quantization_flags = %d" % config.quantization_flags)
print("\nPreview feature part ==================================================")
config.set_preview_feature(trt.PreviewFeature.FASTER_DYNAMIC_SHAPES_0805, True)
print("config.get_preview_feature(FASTER_DYNAMIC_SHAPES_0805) = %d" % config.get_preview_feature(trt.PreviewFeature.FASTER_DYNAMIC_SHAPES_0805))
# available values:
#config.set_preview_feature(trt.PreviewFeature.DISABLE_EXTERNAL_TACTIC_SOURCES_FOR_CORE_0805, True)
#config.set_preview_feature(trt.PreviewFeature.PROFILE_SHARING_0806, True)
print("\nEngine build part =====================================================")
print("config.get_memory_pool_limit(trt.MemoryPoolType.WORKSPACE) = %d Byte (%.1f GiB)" % (config.get_memory_pool_limit(trt.MemoryPoolType.WORKSPACE), config.get_memory_pool_limit(trt.MemoryPoolType.WORKSPACE) / (1 << 30))) # all GPU memory is occupied by default
print("config.get_memory_pool_limit(trt.MemoryPoolType.DLA_MANAGED_SRAM) = %d" % config.get_memory_pool_limit(trt.MemoryPoolType.DLA_MANAGED_SRAM))
print("config.get_memory_pool_limit(trt.MemoryPoolType.DLA_LOCAL_DRAM) = %d" % config.get_memory_pool_limit(trt.MemoryPoolType.DLA_LOCAL_DRAM))
print("config.get_memory_pool_limit(trt.MemoryPoolType.DLA_GLOBAL_DRAM) = %d" % config.get_memory_pool_limit(trt.MemoryPoolType.DLA_GLOBAL_DRAM))
print("config.get_memory_pool_limit(trt.MemoryPoolType.TACTIC_DRAM) = %d Byte (%.1f GiB)" % (config.get_memory_pool_limit(trt.MemoryPoolType.TACTIC_DRAM), config.get_memory_pool_limit(trt.MemoryPoolType.TACTIC_DRAM) / (1 << 30)))
print("Set workspace manually---------------------------------------------------")
config.set_memory_pool_limit(trt.MemoryPoolType.WORKSPACE, 1 << 30)
print("config.get_memory_pool_limit(trt.MemoryPoolType.WORKSPACE) = %d Byte (%.1f GiB)" % (config.get_memory_pool_limit(trt.MemoryPoolType.WORKSPACE), config.get_memory_pool_limit(trt.MemoryPoolType.WORKSPACE) / (1 << 30)))
print("config.num_optimization_profiles = %d" % config.num_optimization_profiles)
print("config.builder_optimization_level = %d" % config.builder_optimization_level) # optimzation level of autotuning from 0 (shortest building time) to 5 (best performance)
config.profile_stream = 0 # set the CUDA stream for auto tuning, default value is 0
config.avg_timing_iterations = 10 # average number of times to run each tactic during auto tuning, default value is 1
#config.min_timing_iterations = 1 # minimum number of times to run each tactic during auto tuning, default value is 1, deprecated since TensorRT 8.4
print("config.hardware_compatibility_level = %d" % config.hardware_compatibility_level)
# available values:
#config.hardware_compatibility_level = trt.HardwareCompatibilityLevel.AMPERE_PLUS
#config.hardware_compatibility_level = trt.HardwareCompatibilityLevel.NONE
print("config.max_aux_streams = %d" % config.max_aux_streams)
print("config.plugins_to_serialize =", config.plugins_to_serialize)
engineString = builder.build_serialized_network(network, config)
"""
Member of IBuilderConfig:
++++ shown above
---- not shown above
[no prefix] others
++++DLA_core
----__class__
__del__
__delattr__
__dir__
__doc__
__enter__
__eq__
__exit__
__format__
__ge__
__getattribute__
__gt__
__hash__
__init__
__init_subclass__
__le__
__lt__
__module__
__ne__
__new__
----__pybind11_module_local_v4_gcc_libstdcpp_cxxabi1013__
__reduce__
__reduce_ex__
__repr__
__setattr__
++++__sizeof__
++++__str__
__subclasshook__
++++add_optimization_profile
----algorithm_selector refer to 02-API/AlgorithmSelector
++++avg_timing_iterations
++++builder_optimization_level
++++can_run_on_DLA
++++clear_flag
++++clear_quantization_flag
----create_timing_cache refer to 02-API/TimingCache
++++default_device_type
++++engine_capability
++++flags
----get_calibration_profile refer to 02-API/Int8-PTQ
++++get_device_type
++++get_flag
++++get_memory_pool_limit
++++get_preview_feature
++++get_quantization_flag
----get_tactic_sources refer to 02-API/TacticSource
----get_timing_cache refer to 02-API/TimingCache
++++hardware_compatibility_level
----int8_calibrator needed by INT8 mode, refer to 03-BuildEngineByTensorRTAPI/MNISTExample-pyTorch/main.py
----is_device_type_set
++++max_aux_streams refer to 02-API/AuxStream
----max_workspace_size deprecated since TensorRT 8.0, use get_memory_pool_limit instead
++++min_timing_iterations
++++num_optimization_profiles
++++plugins_to_serialize refer to 05-Plugin/PluginSerialize
++++profile_stream
----profiling_verbosity refer to 02-API/ProfilingVerbosity
++++quantization_flags
++++reset
++++reset_device_type
----set_calibration_profile needed by INT8 mode, refer to 03-BuildEngineByTensorRTAPI/MNISTExample-pyTorch/main.py
++++set_device_type
++++set_flag
++++set_memory_pool_limit
++++set_preview_feature
++++set_quantization_flag
----set_tactic_sources refer to 02-API/TacticSource
----set_timing_cache refer to 02-API/TimingCache
"""
| trt-samples-for-hackathon-cn-master | cookbook/02-API/BuilderConfig/main.py |
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import tensorrt as trt
trtFile = "./model.plan"
class MyErrorRecorder(trt.IErrorRecorder):
def __init__(self):
super(MyErrorRecorder, self).__init__()
self.errorList = []
self.nError = 0
self.nMaxError = 256
def clear(self):
print("[MyErrorRecorder::clear]")
        self.errorList = []
self.nError = 0
return None
def get_error_code(self, index):
print("[MyErrorRecorder::get_error_code]")
if index < 0 or index >= self.nError:
print("Error index")
return trt.ErrorCodeTRT.SUCCESS
return self.errorList[index][0]
def get_error_desc(self, index):
print("[MyErrorRecorder::get_error_desc]")
if index < 0 or index >= self.nError:
print("Error index")
return ""
# Error number in self.errorList[index][0]:
# trt.ErrorCodeTRT.SUCCESS # 0
# trt.ErrorCodeTRT.UNSPECIFIED_ERROR # 1
# trt.ErrorCodeTRT.INTERNAL_ERROR # 2
# trt.ErrorCodeTRT.INVALID_ARGUMENT # 3
# trt.ErrorCodeTRT.INVALID_CONFIG # 4
# trt.ErrorCodeTRT.FAILED_ALLOCATION # 5
# trt.ErrorCodeTRT.FAILED_INITIALIZATION # 6
# trt.ErrorCodeTRT.FAILED_EXECUTION # 7
# trt.ErrorCodeTRT.FAILED_COMPUTATION # 8
# trt.ErrorCodeTRT.INVALID_STATE # 9
# trt.ErrorCodeTRT.UNSUPPORTED_STATE # 10
return self.errorList[index][1]
def has_overflowed(self):
print("[MyErrorRecorder::has_overflowed]")
if self.nError >= self.nMaxError:
print("Error recorder overflowed!")
return True
return False
def num_errors(self):
print("[MyErrorRecorder::num_errors]")
return self.nError
def report_error(self, errorCode, errorDescription):
print("[MyErrorRecorder::report_error]\n\tNumber=%d,Code=%d,Information=%s" % (self.nError, int(errorCode), errorDescription))
self.nError += 1
self.errorList.append([errorCode, errorDescription])
if self.has_overflowed():
print("Error Overflow!")
return
def helloWorld(self): # not a required API, just for fun
return "Hello World!"
myErrorRecorder = MyErrorRecorder()
logger = trt.Logger(trt.Logger.ERROR)
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
profile = builder.create_optimization_profile()
config = builder.create_builder_config()
inputTensor = network.add_input("inputT0", trt.float32, [-1, -1, -1])
profile.set_shape(inputTensor.name, [1, 1, 1], [3, 4, 5], [6, 8, 10])
config.add_optimization_profile(profile)
identityLayer = network.add_identity(inputTensor)
network.mark_output(identityLayer.get_output(0))
engineString = builder.build_serialized_network(network, config)
runtime = trt.Runtime(logger)
runtime.error_recorder = myErrorRecorder # an ErrorRecorder for the runtime phase; it can be assigned to the Runtime, the Engine, or the ExecutionContext
engine = runtime.deserialize_cuda_engine(engineString)
#engine.error_recorder = myErrorRecorder
context = engine.create_execution_context()
#context.error_recorder = myErrorRecorder
print("Runtime.error_recorder:", runtime.error_recorder, runtime.error_recorder.helloWorld())
print("Engine.error_recorder:", engine.error_recorder, engine.error_recorder.helloWorld())
print("Context.error_recorder:", context.error_recorder, context.error_recorder.helloWorld())
context.execute_v2([int(0), int(0)]) # use null pointers for inference on purpose, so TensorRT raises an error
print("Failed doing inference!")
print("Report error after all other work ---------------------------------------")
print("There is %d error" % myErrorRecorder.num_errors())
for i in range(myErrorRecorder.num_errors()):
print("\tNumber=%d,Code=%d,Information=%s" % (i, int(myErrorRecorder.get_error_code(i)), myErrorRecorder.get_error_desc(i)))
myErrorRecorder.clear() # clear all error information
| trt-samples-for-hackathon-cn-master | cookbook/02-API/ErrorRecoder/main-runtime.py |
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import tensorrt as trt
trtFile = "./model.plan"
class MyErrorRecorder(trt.IErrorRecorder):
def __init__(self):
super(MyErrorRecorder, self).__init__()
self.errorList = []
self.nError = 0
self.nMaxError = 256
def clear(self):
print("[MyErrorRecorder::clear]")
self.errorList = []  # clear the stored error list, not just the counter
self.nError = 0
return None
def get_error_code(self, index):
print("[MyErrorRecorder::get_error_code]")
if index < 0 or index >= self.nError:
print("Error index")
return trt.ErrorCodeTRT.SUCCESS
return self.errorList[index][0]
def get_error_desc(self, index):
print("[MyErrorRecorder::get_error_desc]")
if index < 0 or index >= self.nError:
print("Error index")
return ""
# Error number in self.errorList[index][0]:
# trt.ErrorCodeTRT.SUCCESS # 0
# trt.ErrorCodeTRT.UNSPECIFIED_ERROR # 1
# trt.ErrorCodeTRT.INTERNAL_ERROR # 2
# trt.ErrorCodeTRT.INVALID_ARGUMENT # 3
# trt.ErrorCodeTRT.INVALID_CONFIG # 4
# trt.ErrorCodeTRT.FAILED_ALLOCATION # 5
# trt.ErrorCodeTRT.FAILED_INITIALIZATION # 6
# trt.ErrorCodeTRT.FAILED_EXECUTION # 7
# trt.ErrorCodeTRT.FAILED_COMPUTATION # 8
# trt.ErrorCodeTRT.INVALID_STATE # 9
# trt.ErrorCodeTRT.UNSUPPORTED_STATE # 10
return self.errorList[index][1]
def has_overflowed(self):
print("[MyErrorRecorder::has_overflowed]")
if self.nError >= self.nMaxError:
print("Error recorder overflowed!")
return True
return False
def num_errors(self):
print("[MyErrorRecorder::num_errors]")
return self.nError
def report_error(self, errorCode, errorDescription):
print("[MyErrorRecorder::report_error]\n\tNumber=%d,Code=%d,Information=%s" % (self.nError, int(errorCode), errorDescription))
self.nError += 1
self.errorList.append([errorCode, errorDescription])
if self.has_overflowed():
print("Error Overflow!")
return
def helloWorld(self): # not a required API, just for fun
return "Hello World!"
myErrorRecorder = MyErrorRecorder()
logger = trt.Logger(trt.Logger.ERROR)
builder = trt.Builder(logger)
builder.error_recorder = myErrorRecorder # assign ErrorRecorder to Builder
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
profile = builder.create_optimization_profile()
config = builder.create_builder_config()
print("Builder.error_recorder:", builder.error_recorder.helloWorld()) # once assigned, Builder and Network share the same Error Recorder
print("Network.error_recorder:", network.error_recorder.helloWorld())
inputTensor = network.add_input("inputT0", trt.float32, [-1, -1, -1])
profile.set_shape(inputTensor.name, [1, 1, 1], [3, 4, 5], [6, 8, 10])
config.add_optimization_profile(profile)
identityLayer = network.add_identity(inputTensor)
#network.mark_output(identityLayer.get_output(0)) # TensorRT raises an error without this line
print("Report error during building serialized network -------------------------")
engineString = builder.build_serialized_network(network, config)
if engineString == None:
print("Failed building serialized engine!")
print("Report error after all other work -----------------------------------")
print("There is %d error" % myErrorRecorder.num_errors())
for i in range(myErrorRecorder.num_errors()):
print("\tNumber=%d,Code=%d,Information=%s" % (i, int(myErrorRecorder.get_error_code(i)), myErrorRecorder.get_error_desc(i)))
myErrorRecorder.clear() # clear all error information
else:
print("Succeeded building serialized engine!")
| trt-samples-for-hackathon-cn-master | cookbook/02-API/ErrorRecoder/main-buildtime.py |
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import numpy as np
import tensorrt as trt
trtFile = "./model.plan"
shape = [1, 1, 28, 28]
os.system("rm -rf ./*.plan")
logger = trt.Logger(trt.Logger.ERROR)
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
profile = builder.create_optimization_profile()
config = builder.create_builder_config()
config.profiling_verbosity = trt.ProfilingVerbosity.DETAILED # use profiling_verbosity to get more information
inputTensor = network.add_input("inputT0", trt.float32, [-1] + shape[1:])
profile.set_shape(inputTensor.name, [1] + shape[1:], [2] + shape[1:], [4] + shape[1:])
config.add_optimization_profile(profile)
w = np.ascontiguousarray(np.random.rand(32, 1, 5, 5).astype(np.float32))
b = np.ascontiguousarray(np.random.rand(32, 1, 1).astype(np.float32))
_0 = network.add_convolution_nd(inputTensor, 32, [5, 5], trt.Weights(w), trt.Weights(b))
_0.padding_nd = [2, 2]
_1 = network.add_activation(_0.get_output(0), trt.ActivationType.RELU)
_2 = network.add_pooling_nd(_1.get_output(0), trt.PoolingType.MAX, [2, 2])
_2.stride_nd = [2, 2]
w = np.ascontiguousarray(np.random.rand(64, 32, 5, 5).astype(np.float32))
b = np.ascontiguousarray(np.random.rand(64, 1, 1).astype(np.float32))
_3 = network.add_convolution_nd(_2.get_output(0), 64, [5, 5], trt.Weights(w), trt.Weights(b))
_3.padding_nd = [2, 2]
_4 = network.add_activation(_3.get_output(0), trt.ActivationType.RELU)
_5 = network.add_pooling_nd(_4.get_output(0), trt.PoolingType.MAX, [2, 2])
_5.stride_nd = [2, 2]
_6 = network.add_shuffle(_5.get_output(0))
_6.reshape_dims = (-1, 64 * 7 * 7)
w = np.ascontiguousarray(np.random.rand(64 * 7 * 7, 1024).astype(np.float32))
b = np.ascontiguousarray(np.random.rand(1, 1024).astype(np.float32))
_7 = network.add_constant(w.shape, trt.Weights(w))
_8 = network.add_matrix_multiply(_6.get_output(0), trt.MatrixOperation.NONE, _7.get_output(0), trt.MatrixOperation.NONE)
_9 = network.add_constant(b.shape, trt.Weights(b))
_10 = network.add_elementwise(_8.get_output(0), _9.get_output(0), trt.ElementWiseOperation.SUM)
_11 = network.add_activation(_10.get_output(0), trt.ActivationType.RELU)
w = np.ascontiguousarray(np.random.rand(1024, 10).astype(np.float32))
b = np.ascontiguousarray(np.random.rand(1, 10).astype(np.float32))
_12 = network.add_constant(w.shape, trt.Weights(w))
_13 = network.add_matrix_multiply(_11.get_output(0), trt.MatrixOperation.NONE, _12.get_output(0), trt.MatrixOperation.NONE)
_14 = network.add_constant(b.shape, trt.Weights(b))
_15 = network.add_elementwise(_13.get_output(0), _14.get_output(0), trt.ElementWiseOperation.SUM)
_16 = network.add_softmax(_15.get_output(0))
_16.axes = 1 << 1
_17 = network.add_topk(_16.get_output(0), trt.TopKOperation.MAX, 1, 1 << 1)
network.mark_output(_17.get_output(1))
engineString = builder.build_serialized_network(network, config)
engine = trt.Runtime(logger).deserialize_cuda_engine(engineString)
inspector = engine.create_engine_inspector()
print("inspector.execution_context=", inspector.execution_context)
print("inspector.error_recorder=", inspector.error_recorder) # ErrorRecorder can be set into EngineInspector, usage of ErrorRecorder refer to 02-API/ErrorRecorder
print("Engine information:") # engine information is equivalent to put all layer information together
print(inspector.get_engine_information(trt.LayerInformationFormat.ONELINE)) # .txt format
#print(inspector.get_engine_information(trt.LayerInformationFormat.JSON)) # .json format
print("Layer information:")
for i in range(engine.num_layers):
print(inspector.get_layer_information(i, trt.LayerInformationFormat.ONELINE))
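# Hedged follow-up sketch: the JSON variant of the same information can be saved
# for offline analysis; the output file name here is an arbitrary assumption.
import json

engineInformation = json.loads(inspector.get_engine_information(trt.LayerInformationFormat.JSON))
with open("./engineInformation.json", "w") as f:
    json.dump(engineInformation, f, indent=4)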
| trt-samples-for-hackathon-cn-master | cookbook/02-API/EngineInspector/main.py |
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
import tensorrt as trt
from cuda import cudart
nB, nC, nH, nW = 1, 3, 4, 5
data = np.arange(nB * nC * nH * nW, dtype=np.float32).astype(np.float32).reshape(nB, nC, nH, nW)
np.set_printoptions(precision=3, edgeitems=8, linewidth=300, suppress=True)
cudart.cudaDeviceSynchronize()
logger = trt.Logger(trt.Logger.ERROR)
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
#network = builder.create_network((1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)) | (1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_PRECISION))) # EXPLICIT_PRECISION is deprecated since TensorRT 8.5
network.name = "Identity Network"
profile = builder.create_optimization_profile()
config = builder.create_builder_config()
inputT0 = network.add_input("inputT0", trt.float32, (-1, nC, nH, nW))
profile.set_shape(inputT0.name, [1, nC, nH, nW], [nB, nC, nH, nW], [nB * 2, nC, nH, nW])
config.add_optimization_profile(profile)
layer = network.add_identity(inputT0)
network.mark_output(layer.get_output(0))
network.unmark_output(layer.get_output(0))
network.mark_output(layer.get_output(0))
#engineString = builder.build_serialized_network(network, config)
print("network.name = %s" % network.name)
print("network.__len__() = %d" % len(network))
print("network.__sizeof__() = %d" % network.__sizeof__())
print("network.__str__() = %s" % network.__str__())
print("network.num_inputs = %d" % network.num_inputs)
for i in range(network.num_inputs):
print("\tnetwork.get_input(%d) = %s" % (i, network.get_input(i)))
print("network.num_outputs = %d" % network.num_outputs)
for i in range(network.num_outputs):
print("\tnetwork.get_output(%d) = %s" % (i, network.get_output(i)))
print("network.num_layers = %d" % network.num_layers)
for i in range(network.num_layers):
print("\tnetwork.get_layer(%d) = %s" % (i, network.get_layer(i)))
#print("\tnetwork.__getitem__(%d) = %s" % (i, network.__getitem__(i))) # same as get_layer()
print("netwrok.has_explicit_precision = %s" % network.has_explicit_precision)
print("netwrok.has_implicit_batch_dimension = %s" % network.has_implicit_batch_dimension)
"""
Member of INetworkDefinition:
++++ shown above
---- not shown above
[no prefix] others
----__class__
__del__
__delattr__
__dir__
__doc__
__enter__
__eq__
__exit__
__format__
__ge__
__getattribute__
++++__getitem__ same as get_layer
__gt__
__hash__
__init__
__init_subclass__
__le__
++++__len__
__lt__
__module__
__ne__
__new__
__reduce__
__reduce_ex__
__repr__
__setattr__
++++__sizeof__
++++__str__
__subclasshook__
----add_activation all layers refer to 02-API/Layer
----add_assertion
----add_concatenation
----add_constant
----add_convolution
----add_convolution_nd
----add_deconvolution
----add_deconvolution_nd
----add_dequantize
----add_einsum
----add_elementwise
----add_fill
----add_fully_connected
----add_gather
----add_gather_v2
----add_grid_sample
----add_identity
----add_if_conditional
----add_input
----add_loop
----add_lrn
----add_matrix_multiply
----add_nms
----add_non_zero
----add_one_hot
----add_padding
----add_padding_nd
----add_parametric_relu
----add_plugin_v2
----add_pooling
----add_pooling_nd
----add_quantize
----add_ragged_softmax
----add_reduce
----add_resize
----add_rnn_v2
----add_scale
----add_scale_nd
----add_scatter
----add_select
----add_shape
----add_shuffle
----add_slice
----add_softmax
----add_topk
----add_unary
----error_recorder refer to 02-API/ErrorRecorder
++++get_input
++++get_layer
++++get_output
++++has_explicit_precision
++++has_implicit_batch_dimension
++++mark_output
----mark_output_for_shapes refer to 02-API/Layer/ShuffleLayer/DynamicShuffleWithShapeTensor.py
++++name
++++num_inputs
++++num_layers
++++num_outputs
----remove_tensor refer to 02-API/TensorRTGraphSurgeon
----set_weights_name refer to 02-API/Refit
++++unmark_output
----unmark_output_for_shapes unmark_output() for shape tensor, refer to 02-API/Layer/ShuffleLayer/DynamicShuffleWithShapeTensor.py
"""
| trt-samples-for-hackathon-cn-master | cookbook/02-API/Network/main.py |
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import numpy as np
import tensorrt as trt
from cuda import cudart
trtFile = "./model.plan"
data = np.arange(3 * 4 * 5, dtype=np.float32).reshape(3, 4, 5)
logger = trt.Logger(trt.Logger.ERROR)
if os.path.isfile(trtFile):
with open(trtFile, "rb") as f:
engineString = f.read()
if engineString == None:
print("Failed getting serialized engine!")
exit()
print("Succeeded getting serialized engine!")
else:
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
profile = builder.create_optimization_profile()
config = builder.create_builder_config()
config.set_memory_pool_limit(trt.MemoryPoolType.WORKSPACE, 1 << 30)
inputTensor = network.add_input("inputT0", trt.float32, [-1, -1, -1])
profile.set_shape(inputTensor.name, [1, 1, 1], [3, 4, 5], [6, 8, 10])
config.add_optimization_profile(profile)
identityLayer = network.add_identity(inputTensor)
network.mark_output(identityLayer.get_output(0))
engineString = builder.build_serialized_network(network, config)
if engineString == None:
print("Failed building serialized engine!")
exit()
print("Succeeded building serialized engine!")
with open(trtFile, "wb") as f:
f.write(engineString)
print("Succeeded saving .plan file!")
runtime = trt.Runtime(logger)
print("runtime.__sizeof__() = %d" % runtime.__sizeof__())
print("runtime.__str__() = %s" % runtime.__str__())
print("\nRuntime related =======================================================")
print("runtime.logger = %s" % runtime.logger)
print("runtime.DLA_core = %d" % runtime.DLA_core)
print("runtime.num_DLA_cores = %d" % runtime.num_DLA_cores)
print("runtime.engine_host_code_allowed = %s" % runtime.engine_host_code_allowed)
runtime.max_threads = 16 # the maximum number of threads the Runtime may use
runtime.tempfile_control_flags = 1 << int(trt.TempfileControlFlag.ALLOW_IN_MEMORY_FILES) # assign to the runtime attribute (a bitmask of TempfileControlFlag values), not to a bare local variable
# the other available flag:
#runtime.tempfile_control_flags = 1 << int(trt.TempfileControlFlag.ALLOW_TEMPORARY_FILES)
runtime.temporary_directory = "."
engine = runtime.deserialize_cuda_engine(engineString)
"""
Member of IRuntime:
++++ shown above
==== shown in binding part
~~~~ deprecated
---- not shown above
[no prefix] others
++++DLA_core
----__class__
__del__
__delattr__
__dir__
__doc__
__enter__
__eq__
__exit__
__format__
__ge__
__getattribute__
__gt__
__hash__
__init__
__init_subclass__
__le__
__lt__
__module__
__ne__
__new__
----__pybind11_module_local_v4_gcc_libstdcpp_cxxabi1013__
__reduce__
__reduce_ex__
__repr__
__setattr__
++++__sizeof__
++++__str__
__subclasshook__
++++deserialize_cuda_engine
++++engine_host_code_allowed
error_recorder refer to 02-API/ErrorRecoder
get_plugin_registry
gpu_allocator refer to 02-API/GPUAllocator
load_runtime
++++logger
++++max_threads
++++num_DLA_cores
++++tempfile_control_flags
++++temporary_directory
"""
| trt-samples-for-hackathon-cn-master | cookbook/02-API/Runtime/main.py |
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
import tensorrt as trt
from cuda import cudart
nHeight = 28
nWidth = 28
np.random.seed(31193)  # seed before sampling so the data is reproducible
data = np.random.rand(1, 1, nHeight, nWidth).astype(np.float32) * 2 - 1
trtFile = "./model.plan"
np.set_printoptions(precision=3, linewidth=200, suppress=True)
cudart.cudaDeviceSynchronize()
logger = trt.Logger(trt.Logger.VERBOSE)
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
profile = builder.create_optimization_profile()
config = builder.create_builder_config()
config.profiling_verbosity = trt.ProfilingVerbosity.DETAILED # the same as VERBOSE (deprecated since TensorRT 8.4), print detailed layer information including layer names and layer parameters
#config.profiling_verbosity = trt.ProfilingVerbosity.LAYER_NAMES_ONLY # the same as DEFAULT (deprecated since TensorRT 8.4), print only the layer names. This is the default setting.
#config.profiling_verbosity = trt.ProfilingVerbosity.NONE # do not print any layer information.
inputTensor = network.add_input("inputT0", trt.float32, [-1, 1, nHeight, nWidth])
profile.set_shape(inputTensor.name, [1, 1, nHeight, nWidth], [4, 1, nHeight, nWidth], [8, 1, nHeight, nWidth])
config.add_optimization_profile(profile)
w = np.ascontiguousarray(np.random.rand(32, 1, 5, 5).astype(np.float32))
b = np.ascontiguousarray(np.random.rand(32, 1, 1).astype(np.float32))
_0 = network.add_convolution_nd(inputTensor, 32, [5, 5], trt.Weights(w), trt.Weights(b))
_0.padding_nd = [2, 2]
_1 = network.add_activation(_0.get_output(0), trt.ActivationType.RELU)
_2 = network.add_pooling_nd(_1.get_output(0), trt.PoolingType.MAX, [2, 2])
_2.stride_nd = [2, 2]
w = np.ascontiguousarray(np.random.rand(64, 32, 5, 5).astype(np.float32))
b = np.ascontiguousarray(np.random.rand(64, 1, 1).astype(np.float32))
_3 = network.add_convolution_nd(_2.get_output(0), 64, [5, 5], trt.Weights(w), trt.Weights(b))
_3.padding_nd = [2, 2]
_4 = network.add_activation(_3.get_output(0), trt.ActivationType.RELU)
_5 = network.add_pooling_nd(_4.get_output(0), trt.PoolingType.MAX, [2, 2])
_5.stride_nd = [2, 2]
_6 = network.add_shuffle(_5.get_output(0))
_6.reshape_dims = (-1, 64 * 7 * 7)
w = np.ascontiguousarray(np.random.rand(64 * 7 * 7, 1024).astype(np.float32))
b = np.ascontiguousarray(np.random.rand(1, 1024).astype(np.float32))
_7 = network.add_constant(w.shape, trt.Weights(w))
_8 = network.add_matrix_multiply(_6.get_output(0), trt.MatrixOperation.NONE, _7.get_output(0), trt.MatrixOperation.NONE)
_9 = network.add_constant(b.shape, trt.Weights(b))
_10 = network.add_elementwise(_8.get_output(0), _9.get_output(0), trt.ElementWiseOperation.SUM)
_11 = network.add_activation(_10.get_output(0), trt.ActivationType.RELU)
w = np.ascontiguousarray(np.random.rand(1024, 10).astype(np.float32))
b = np.ascontiguousarray(np.random.rand(1, 10).astype(np.float32))
_12 = network.add_constant(w.shape, trt.Weights(w))
_13 = network.add_matrix_multiply(_11.get_output(0), trt.MatrixOperation.NONE, _12.get_output(0), trt.MatrixOperation.NONE)
_14 = network.add_constant(b.shape, trt.Weights(b))
_15 = network.add_elementwise(_13.get_output(0), _14.get_output(0), trt.ElementWiseOperation.SUM)
_16 = network.add_softmax(_15.get_output(0))
_16.axes = 1 << 1
_17 = network.add_topk(_16.get_output(0), trt.TopKOperation.MAX, 1, 1 << 1)
network.mark_output(_17.get_output(1))
engineString = builder.build_serialized_network(network, config)
engine = trt.Runtime(logger).deserialize_cuda_engine(engineString)
nIO = engine.num_io_tensors
lTensorName = [engine.get_tensor_name(i) for i in range(nIO)]
nInput = [engine.get_tensor_mode(lTensorName[i]) for i in range(nIO)].count(trt.TensorIOMode.INPUT)
context = engine.create_execution_context()
context.set_input_shape(lTensorName[0], [1, 1, nHeight, nWidth])
for i in range(nIO):
print("[%2d]%s->" % (i, "Input " if i < nInput else "Output"), engine.get_tensor_dtype(lTensorName[i]), engine.get_tensor_shape(lTensorName[i]), context.get_tensor_shape(lTensorName[i]), lTensorName[i])
bufferH = []
bufferH.append(np.ascontiguousarray(data))
for i in range(nInput, nIO):
bufferH.append(np.empty(context.get_tensor_shape(lTensorName[i]), dtype=trt.nptype(engine.get_tensor_dtype(lTensorName[i]))))
bufferD = []
for i in range(nIO):
bufferD.append(cudart.cudaMalloc(bufferH[i].nbytes)[1])
for i in range(nInput):
cudart.cudaMemcpy(bufferD[i], bufferH[i].ctypes.data, bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice)
for i in range(nIO):
context.set_tensor_address(lTensorName[i], int(bufferD[i]))
context.execute_async_v3(0)
for i in range(nInput, nIO):
cudart.cudaMemcpy(bufferH[i].ctypes.data, bufferD[i], bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost)
for i in range(nIO):
print(lTensorName[i])
print(bufferH[i])
for b in bufferD:
cudart.cudaFree(b)
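# Hedged follow-up sketch: profiling_verbosity also controls how much detail an
# EngineInspector reports; with DETAILED, per-layer parameters become visible.
inspector = engine.create_engine_inspector()
print(inspector.get_engine_information(trt.LayerInformationFormat.ONELINE))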
| trt-samples-for-hackathon-cn-master | cookbook/02-API/ProfilingVerbosity/main.py |
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
import tensorrt as trt
from cuda import cudart
shape = [1, 4, 8, 8]
data = (np.arange(1, 1 + np.prod(shape), dtype=np.float32) / np.prod(shape) * 128).astype(np.float32).reshape(shape)
np.set_printoptions(precision=3, edgeitems=8, linewidth=300, suppress=True)
cudart.cudaDeviceSynchronize()
logger = trt.Logger(trt.Logger.ERROR)
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
profile = builder.create_optimization_profile()
config = builder.create_builder_config()
config.set_flag(trt.BuilderFlag.INT8)
inputT0 = network.add_input("inputT0", trt.float32, [-1] + shape[1:])
profile.set_shape(inputT0.name, [1] + shape[1:], [2] + shape[1:], [4] + shape[1:])
config.add_optimization_profile(profile)
layer = network.add_identity(inputT0)
layer.name = "MyIdentityLayer"
layer.get_output(0).dtype = trt.int8
layer.set_output_type(0, trt.int8)
layer.get_output(0).allowed_formats = 1 << int(trt.TensorFormat.CHW4) # use an uncommon data format
layer.get_output(0).dynamic_range = [-128, 128]
network.mark_output(layer.get_output(0))
engineString = builder.build_serialized_network(network, config)
engine = trt.Runtime(logger).deserialize_cuda_engine(engineString)
print("engine.__len__() = %d" % len(engine))
print("engine.__sizeof__() = %d" % engine.__sizeof__())
print("engine.__str__() = %s" % engine.__str__())
print("\nEngine related ========================================================")
# All member functions with "binding" in name are deprecated since TEnsorRT 8.5
nIO = engine.num_io_tensors
lTensorName = [engine.get_tensor_name(i) for i in range(nIO)]
nInput = [engine.get_tensor_mode(lTensorName[i]) for i in range(nIO)].count(trt.TensorIOMode.INPUT) # count of input / output tensor
nOutput = [engine.get_tensor_mode(lTensorName[i]) for i in range(nIO)].count(trt.TensorIOMode.OUTPUT)
#nIO = engine.num_bindings # deprecated, and this nIO is different from that got by Tensor API, refer to 02-API/MultiOptimizationProfile
#nInput = np.sum([engine.binding_is_input(i) for i in range(engine.num_bindings)])
#nOutput = engine.num_bindings - nInput
print("engine.name = %s" % engine.name)
print("engine.device_memory_size = %d" % engine.device_memory_size)
print("engine.engine_capability = %d" % engine.engine_capability) # refer to 02-API/BuilderConfig
print("engine.hardware_compatibility_level = %d" % engine.hardware_compatibility_level)
print("engine.num_aux_streams = %d" % engine.num_aux_streams)
print("engine.has_implicit_batch_dimension = %s" % engine.has_implicit_batch_dimension)
#print("engine.max_batch_size = %d" % engine.max_batch_size) # used in Implicit Batch mode, deprecated since TensorRT 8.4, use Dyanmic Shape mode instead
print("engine.num_io_tensors = %d" % engine.num_io_tensors)
#print("engine.num_bindings = %d" % engine.num_bindings) # deprecated since TensorRT 8.5
print("engine.num_layers = %d" % engine.num_layers)
print("engine.num_optimization_profiles = %d" % engine.num_optimization_profiles)
print("engine.refittable = %s" % engine.refittable) # refer to 02-API/Refit
print("engine.tactic_sources = %d" % engine.tactic_sources) # refer to 02-API/TacticSource
print("\nLayer related =========================================================")
print("engine.get_tensor_location(%s): %s" % (layer.get_output(0).name, engine.get_tensor_location(layer.get_output(0).name)))
print("\nInput / Output tensor related =========================================")
print("No. Input output: %s 0,%s 1" % (" " * 56, " " * 56))
print("engine.get_tensor_name(): %58s,%58s" % (engine.get_tensor_name(0), engine.get_tensor_name(1)))
#print("get_binding_name(): %58s,%58s" % (engine.get_binding_name(0), engine.get_binding_name(1)))
print("get_tensor_shape(): %58s,%58s" % (engine.get_tensor_shape(lTensorName[0]), engine.get_tensor_shape(lTensorName[1])))
#print("get_binding_shape(): %58s,%58s" % (engine.get_binding_shape(0), engine.get_binding_shape(1)))
print("get_tensor_dtype(): %58s,%58s" % (engine.get_tensor_dtype(lTensorName[0]), engine.get_tensor_dtype(lTensorName[1])))
#print("get_binding_dtype(): %58s,%58s" % (engine.get_binding_dtype(0), engine.get_binding_dtype(1)))
print("get_tensor_format(): %58s,%58s" % (engine.get_tensor_format(lTensorName[0]), engine.get_tensor_format(lTensorName[1])))
#print("get_binding_format(): %58s,%58s" % (engine.get_binding_format(0), engine.get_binding_format(1)))
print("get_tensor_format_desc(): %58s,%58s" % (engine.get_tensor_format_desc(lTensorName[0]), engine.get_tensor_format_desc(lTensorName[1])))
#print("get_binding_format_desc(): %58s,%58s" % (engine.get_binding_format_desc(0), engine.get_binding_format_desc(1)))
print("get_tensor_bytes_per_component(): %58d,%58d" % (engine.get_tensor_bytes_per_component(lTensorName[0]), engine.get_tensor_bytes_per_component(lTensorName[1])))
#print("get_binding_bytes_per_component(): %58d,%58d" % (engine.get_binding_bytes_per_component(0), engine.get_binding_bytes_per_component(1)))
print("get_tensor_components_per_element(): %58d,%58d" % (engine.get_tensor_components_per_element(lTensorName[0]), engine.get_tensor_components_per_element(lTensorName[1])))
#print("get_binding_components_per_element():%58d,%58d" % (engine.get_binding_components_per_element(0), engine.get_binding_components_per_element(1)))
print("get_tensor_vectorized_dim(): %58d,%58d" % (engine.get_tensor_vectorized_dim(lTensorName[0]), engine.get_tensor_vectorized_dim(lTensorName[1])))
#print("get_binding_vectorized_dim(): %58d,%58d" % (engine.get_binding_vectorized_dim(0), engine.get_binding_vectorized_dim(1)))
print("")
print("get_tensor_mode(): %58s,%58s" % (engine.get_tensor_mode(lTensorName[0]), engine.get_tensor_mode(lTensorName[1])))
#print("binding_is_input(): %58s,%58s" % (engine.binding_is_input(0), engine.binding_is_input(1)))
print("get_tensor_location(): %58s,%58s" % (engine.get_tensor_location(lTensorName[0]), engine.get_tensor_location(lTensorName[0])))
print("Comment: Execution input / output tensor is on Device, while Shape input / output tensor is on CPU")
#print("get_location(int): %58s,%58s" % (engine.get_location(0), engine.get_location(1)))
#print("get_location(str): %58s,%58s" % (engine.get_location(lTensorName[0]), engine.get_location(lTensorName[1])))
print("is_shape_inference_io(): %58s,%58s" % (engine.is_shape_inference_io(lTensorName[0]), engine.is_shape_inference_io(lTensorName[0])))
#print("is_execution_binding(): %58s,%58s" % (engine.is_execution_binding(0), engine.is_execution_binding(1)))
#print("is_shape_binding(): %58s,%58s" % (engine.is_shape_binding(0), engine.is_shape_binding(1)))
print("get_tensor_profile_shape(): %58s,%58s" % (engine.get_tensor_profile_shape(lTensorName[0], 0), "Optimization Profile is only for input tensor"))
#print("get_profile_shape(): %58s,%58s" % (engine.get_profile_shape(0, 0), "Optimization Profile is only for input tensor"))
#print("get_profile_shape_input(): %58s,%58s" % ("No input shape tensor in this network", ""))
print("__getitem__(int): %58s,%58s" % (engine[0], engine[1]))
print("__getitem__(str): %58d,%58d" % (engine[lTensorName[0]], engine[lTensorName[1]]))
#print("get_binding_index: %58d,%58d" % (engine.get_binding_index(lTensorName[0]), engine.get_binding_index(lTensorName[1])))
context = engine.create_execution_context()
"""
Member of ICudaEngine:
++++ shown above
==== shown in binding part
~~~~ deprecated
---- not shown above
[no prefix] others
----__class__
__del__
__delattr__
__dir__
__doc__
__enter__
__eq__
__exit__
__format__
__ge__
__getattribute__
++++__getitem__
__gt__
__hash__
__init__
__init_subclass__
__le__
++++__len__
__lt__
__module__
__ne__
__new__
----__pybind11_module_local_v4_gcc_libstdcpp_cxxabi1013__
__reduce__
__reduce_ex__
__repr__
__setattr__
++++__sizeof__
++++__str__
__subclasshook__
++++binding_is_input
----create_engine_inspector refer to 02-API/EngineInspector
++++create_execution_context
----create_execution_context_without_device_memory refer to 09-Advance/CreateExecutionContextWithoutDeviceMemory
++++device_memory_size
++++engine_capability
----error_recorder refer to 02-API/ErrorRecorder
++++get_binding_bytes_per_component
++++get_binding_components_per_element
++++get_binding_dtype
++++get_binding_format
++++get_binding_format_desc
++++get_binding_index
++++get_binding_name
++++get_binding_shape
++++get_binding_vectorized_dim
++++get_location
++++get_profile_shape
++++get_profile_shape_input
++++get_tensor_bytes_per_component
++++get_tensor_components_per_element
++++get_tensor_dtype
++++get_tensor_format
++++get_tensor_format_desc
++++get_tensor_location
++++get_tensor_mode
++++get_tensor_name
++++get_tensor_profile_shape
++++get_tensor_shape
++++get_tensor_vectorized_dim
++++hardware_compatibility_level refer to 02-API/BuilderConfig
++++has_implicit_batch_dimension
++++is_execution_binding
++++is_shape_binding
++++is_shape_inference_io
++++max_batch_size
++++name
++++num_aux_streams refer to 02-API/AuxStream
++++num_bindings
++++num_io_tensors
++++num_layers
++++num_optimization_profiles
----profiling_verbosity refer to 02-API/ProfilingVerbosity
++++refittable
----serialize refer to 01-SimpleDemo/TensorRT8.5
++++tactic_sources
"""
| trt-samples-for-hackathon-cn-master | cookbook/02-API/CudaEngine/main.py |
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
from glob import glob
for layerKind in sorted(glob("./*")):
for pyFile in sorted(glob(layerKind + "/*.py")):
resultFile = layerKind + "/result-" + pyFile.split("/")[-1][:-3] + ".log"
os.system("python3 %s > %s 2>&1" % (pyFile, resultFile))
print("\tFinish %s" % pyFile)
print("Finish %s" % layerKind)
print("Finish all layer!")
| trt-samples-for-hackathon-cn-master | cookbook/02-API/Layer/testAllLayer.py |
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
import tensorrt as trt
from cuda import cudart
nB, nC, nH, nW = 1, 4, 8, 8 # nC % 4 == 0, safe shape
#nB, nC, nH, nW = 1, 3, 8, 8 # nC % 4 != 0, may lose data in FP16 mode CHW4 format
data = (np.arange(1, 1 + nB * nC * nH * nW, dtype=np.float32) / np.prod(nB * nC * nH * nW) * 128).astype(np.float32).reshape(nB, nC, nH, nW)
np.set_printoptions(precision=3, edgeitems=8, linewidth=300, suppress=True)
cudart.cudaDeviceSynchronize()
logger = trt.Logger(trt.Logger.ERROR)
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
profile = builder.create_optimization_profile()
config = builder.create_builder_config()
config.set_flag(trt.BuilderFlag.INT8)
inputT0 = network.add_input("inputT0", trt.float32, (-1, nC, nH, nW))
profile.set_shape(inputT0.name, [1, nC, nH, nW], [nB, nC, nH, nW], [nB * 2, nC, nH, nW])
config.add_optimization_profile(profile)
layer = network.add_identity(inputT0)
layer.name = "Identity Layer"
layer.metadata = "My message" # since TensorRT 8.6
layer.precision = trt.int8
layer.reset_precision()
layer.precision = trt.int8
layer.get_output(0).dtype = trt.int8
layer.set_output_type(0, trt.int8)
layer.reset_output_type(0)
layer.set_output_type(0, trt.int8)
layer.get_output(0).allowed_formats = 1 << int(trt.TensorFormat.CHW4)
layer.get_output(0).dynamic_range = [-128, 128]
network.mark_output(layer.get_output(0))
engineString = builder.build_serialized_network(network, config)
print("layer.name = %s" % layer.name)
print("layer.metadata = %s" % layer.metadata)
print("layer.type = %s" % layer.type)
print("layer.__sizeof__() = %s" % layer.__sizeof__())
print("layer.__str__ = %s" % layer.__str__())
print("layer.num_inputs = %d" % layer.num_inputs)
for i in range(layer.num_inputs):
print("\tlayer.get_input(%d) = %s" % (i, layer.get_input(i)))
print("layer.num_outputs = %d" % layer.num_outputs)
for i in range(layer.num_outputs):
print("\tlayer.get_output(%d) = %s" % (i, layer.get_output(i)))
print("\tlayer.get_output_type(%d) = %s" % (i, layer.get_output_type(i)))
print("\tlayer.output_type_is_set(%d) = %s" % (i, layer.output_type_is_set(i)))
print("layer.precision = %s" % layer.precision)
print("layer.precision_is_set = %s" % layer.precision_is_set)
"""
Member of ILayer:
++++ shown above
---- not shown above
[no prefix] others
----__class__
__delattr__
__dir__
__doc__
__eq__
__format__
__ge__
__getattribute__
__gt__
__hash__
__init__
__init_subclass__
__le__
__lt__
__module__
__ne__
__new__
__reduce__
__reduce_ex__
__repr__
__setattr__
++++__sizeof__
++++__str__
__subclasshook__
++++get_input
++++get_output
++++get_output_type
++++name
++++num_inputs
++++num_outputs
++++output_type_is_set
++++precision
++++precision_is_set
++++reset_precision
----set_input refer to 02-API/Layer/ShuffleLayer/DynamicShuffleWithShapeTensor.py
++++set_output_type
++++type
"""
| trt-samples-for-hackathon-cn-master | cookbook/02-API/Layer/main.py |
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
import tensorrt as trt
from cuda import cudart
nB, nC, nH, nW = 1, 1, 3, 3
data = np.arange(-4, 5, dtype=np.float32).reshape(nB, nC, nH, nW)
np.set_printoptions(precision=3, linewidth=200, suppress=True)
cudart.cudaDeviceSynchronize()
logger = trt.Logger(trt.Logger.ERROR)
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
config = builder.create_builder_config()
inputT0 = network.add_input("inputT0", trt.float32, (nB, nC, nH, nW))
#------------------------------------------------------------------------------- Network
unaryLayer = network.add_unary(inputT0, trt.UnaryOperation.NEG)
unaryLayer.op = trt.UnaryOperation.ABS # reset the unary operation to use
#------------------------------------------------------------------------------- Network
network.mark_output(unaryLayer.get_output(0))
engineString = builder.build_serialized_network(network, config)
engine = trt.Runtime(logger).deserialize_cuda_engine(engineString)
nIO = engine.num_io_tensors
lTensorName = [engine.get_tensor_name(i) for i in range(nIO)]
nInput = [engine.get_tensor_mode(lTensorName[i]) for i in range(nIO)].count(trt.TensorIOMode.INPUT)
context = engine.create_execution_context()
for i in range(nIO):
print("[%2d]%s->" % (i, "Input " if i < nInput else "Output"), engine.get_tensor_dtype(lTensorName[i]), engine.get_tensor_shape(lTensorName[i]), context.get_tensor_shape(lTensorName[i]), lTensorName[i])
bufferH = []
for i in range(nIO):
bufferH.append(np.empty(context.get_tensor_shape(lTensorName[i]), dtype=trt.nptype(engine.get_tensor_dtype(lTensorName[i]))))
bufferD = []
for i in range(nIO):
bufferD.append(cudart.cudaMalloc(bufferH[i].nbytes)[1])
bufferH[0] = data
for i in range(nInput):
cudart.cudaMemcpy(bufferD[i], bufferH[i].ctypes.data, bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice)
for i in range(nIO):
context.set_tensor_address(lTensorName[i], int(bufferD[i]))
context.execute_async_v3(0)
for i in range(nInput, nIO):
cudart.cudaMemcpy(bufferH[i].ctypes.data, bufferD[i], bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost)
for i in range(nIO):
print(lTensorName[i])
print(bufferH[i])
for b in bufferD:
cudart.cudaFree(b)
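# Hedged check sketch: since the op was reset to ABS, the expected result is just
# np.abs(data); comparing against the last host buffer validates the output above.
print("max abs diff vs np.abs(data):", np.max(np.abs(bufferH[-1] - np.abs(data))))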
| trt-samples-for-hackathon-cn-master | cookbook/02-API/Layer/UnaryLayer/Op.py |
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
import tensorrt as trt
from cuda import cudart
nB, nC, nH, nW = 1, 1, 3, 3
data = np.arange(-4, 5, dtype=np.float32).reshape(nB, nC, nH, nW)
np.set_printoptions(precision=3, linewidth=200, suppress=True)
cudart.cudaDeviceSynchronize()
logger = trt.Logger(trt.Logger.ERROR)
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
config = builder.create_builder_config()
inputT0 = network.add_input("inputT0", trt.float32, (nB, nC, nH, nW))
#------------------------------------------------------------------------------- Network
unaryLayer = network.add_unary(inputT0, trt.UnaryOperation.ABS)
#------------------------------------------------------------------------------- Network
network.mark_output(unaryLayer.get_output(0))
engineString = builder.build_serialized_network(network, config)
engine = trt.Runtime(logger).deserialize_cuda_engine(engineString)
nIO = engine.num_io_tensors
lTensorName = [engine.get_tensor_name(i) for i in range(nIO)]
nInput = [engine.get_tensor_mode(lTensorName[i]) for i in range(nIO)].count(trt.TensorIOMode.INPUT)
context = engine.create_execution_context()
for i in range(nIO):
print("[%2d]%s->" % (i, "Input " if i < nInput else "Output"), engine.get_tensor_dtype(lTensorName[i]), engine.get_tensor_shape(lTensorName[i]), context.get_tensor_shape(lTensorName[i]), lTensorName[i])
bufferH = []
for i in range(nIO):
bufferH.append(np.empty(context.get_tensor_shape(lTensorName[i]), dtype=trt.nptype(engine.get_tensor_dtype(lTensorName[i]))))
bufferD = []
for i in range(nIO):
bufferD.append(cudart.cudaMalloc(bufferH[i].nbytes)[1])
bufferH[0] = data
for i in range(nInput):
cudart.cudaMemcpy(bufferD[i], bufferH[i].ctypes.data, bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice)
for i in range(nIO):
context.set_tensor_address(lTensorName[i], int(bufferD[i]))
context.execute_async_v3(0)
for i in range(nInput, nIO):
cudart.cudaMemcpy(bufferH[i].ctypes.data, bufferD[i], bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost)
for i in range(nIO):
print(lTensorName[i])
print(bufferH[i])
for b in bufferD:
cudart.cudaFree(b)
| trt-samples-for-hackathon-cn-master | cookbook/02-API/Layer/UnaryLayer/SimpleExample.py |
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
import tensorrt as trt
from cuda import cudart
nB, nC, nH, nW = 1, 3, 4, 5
data = np.arange(nB * nC * nH * nW, dtype=np.float32).reshape(nB, nC, nH, nW) * 10 - 300 # [0,59] -> [-300, 290]
np.set_printoptions(precision=3, linewidth=200, suppress=True)
cudart.cudaDeviceSynchronize()
logger = trt.Logger(trt.Logger.ERROR)
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
config = builder.create_builder_config()
inputT0 = network.add_input("inputT0", trt.float32, (nB, nC, nH, nW))
#------------------------------------------------------------------------------- Network
castLayer = network.add_cast(inputT0, trt.uint8)
castLayer.get_output(0).dtype = trt.uint8 # the output data type needs this explicit mark
#------------------------------------------------------------------------------- Network
network.mark_output(castLayer.get_output(0))
engineString = builder.build_serialized_network(network, config)
engine = trt.Runtime(logger).deserialize_cuda_engine(engineString)
nIO = engine.num_io_tensors
lTensorName = [engine.get_tensor_name(i) for i in range(nIO)]
nInput = [engine.get_tensor_mode(lTensorName[i]) for i in range(nIO)].count(trt.TensorIOMode.INPUT)
context = engine.create_execution_context()
context.set_input_shape(lTensorName[0], [nB, nC, nH, nW])
for i in range(nIO):
print("[%2d]%s->" % (i, "Input " if i < nInput else "Output"), engine.get_tensor_dtype(lTensorName[i]), engine.get_tensor_shape(lTensorName[i]), context.get_tensor_shape(lTensorName[i]), lTensorName[i])
bufferH = []
for i in range(nIO):
bufferH.append(np.empty(context.get_tensor_shape(lTensorName[i]), dtype=trt.nptype(engine.get_tensor_dtype(lTensorName[i]))))
bufferD = []
for i in range(nIO):
bufferD.append(cudart.cudaMalloc(bufferH[i].nbytes)[1])
bufferH[0] = data
for i in range(nInput):
cudart.cudaMemcpy(bufferD[i], bufferH[i].ctypes.data, bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice)
for i in range(nIO):
context.set_tensor_address(lTensorName[i], int(bufferD[i]))
context.execute_async_v3(0)
for i in range(nInput, nIO):
cudart.cudaMemcpy(bufferH[i].ctypes.data, bufferD[i], bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost)
for i in range(nIO):
print(lTensorName[i])
print(bufferH[i])
for b in bufferD:
cudart.cudaFree(b)
| trt-samples-for-hackathon-cn-master | cookbook/02-API/Layer/CastLayer/SimpleExample.py |
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
import tensorrt as trt
from cuda import cudart
np.random.seed(31193)
nB, nC, nH, nW = 1, 3, 4, 5
data0 = np.ones(nC * nH * nW, dtype=np.float32).reshape(nC, nH, nW)
data1 = np.tile(2 * np.arange(nH, dtype=np.int32), (nC, 1)).reshape(nC, nH, 1)
np.set_printoptions(precision=3, linewidth=200, suppress=True)
cudart.cudaDeviceSynchronize()
logger = trt.Logger(trt.Logger.ERROR)
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
config = builder.create_builder_config()
inputT0 = network.add_input("inputT0", trt.float32, (nC, nH, nW))
inputT1 = network.add_input("inputT1", trt.int32, (nC, nH, 1))
#------------------------------------------------------------------------------- Network
raggedSoftMaxLayer = network.add_ragged_softmax(inputT0, inputT1)
#------------------------------------------------------------------------------- Network
network.mark_output(raggedSoftMaxLayer.get_output(0))
engineString = builder.build_serialized_network(network, config)
engine = trt.Runtime(logger).deserialize_cuda_engine(engineString)
context = engine.create_execution_context()
nInput = np.sum([engine.binding_is_input(i) for i in range(engine.num_bindings)])
nOutput = engine.num_bindings - nInput
bufferH = []
bufferH.append(data0)
bufferH.append(data1)
for i in range(nOutput):
bufferH.append(np.empty(context.get_binding_shape(nInput + i), dtype=trt.nptype(engine.get_binding_dtype(nInput + i))))
bufferD = []
for i in range(engine.num_bindings):
bufferD.append(cudart.cudaMalloc(bufferH[i].nbytes)[1])
for i in range(nInput):
cudart.cudaMemcpy(bufferD[i], np.ascontiguousarray(bufferH[i].reshape(-1)).ctypes.data, bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice)
context.execute_v2(bufferD)
for i in range(nOutput):
cudart.cudaMemcpy(bufferH[nInput + i].ctypes.data, bufferD[nInput + i], bufferH[nInput + i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost)
for i in range(nInput):
print("Input %d:" % i, bufferH[i].shape, "\n", bufferH[i])
for i in range(nOutput):
print("Output %d:" % i, bufferH[nInput + i].shape, "\n", bufferH[nInput + i])
for buffer in bufferD:
cudart.cudaFree(buffer)
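# Hedged NumPy reference (a sketch of the layer's semantic, not TensorRT API):
# softmax is taken over only the first bound[c, h] elements of each row, and the
# remaining positions stay zero; NumPy slicing clips bounds past the row length.
def raggedSoftMaxCPU(x, bound):
    y = np.zeros_like(x)
    for c in range(x.shape[0]):
        for h in range(x.shape[1]):
            n = int(bound[c, h, 0])
            if n > 0:
                e = np.exp(x[c, h, :n])
                y[c, h, :n] = e / np.sum(e)
    return y

print("CPU reference:\n", raggedSoftMaxCPU(data0, data1))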
| trt-samples-for-hackathon-cn-master | cookbook/02-API/Layer/RaggedSoftMaxLayer/SimpleExample.py |