Columns: python_code (string, 0 to 679k chars), repo_name (string, 9 to 41 chars), file_path (string, 6 to 149 chars)
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import numpy as np
import tensorrt as trt
from cuda import cudart

np.random.seed(31193)
nB, nC, nH, nW = 1, 3, 4, 5
data = np.arange(nB * nC * nH * nW, dtype=np.float32).reshape(nB, nC, nH, nW)
np.set_printoptions(precision=3, linewidth=200, suppress=True)
cudart.cudaDeviceSynchronize()

logger = trt.Logger(trt.Logger.ERROR)
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
config = builder.create_builder_config()
inputT0 = network.add_input("inputT0", trt.float32, (nB, nC, nH, nW))
#------------------------------------------------------------------------------- Network
factorShape = data.shape
constantLayer = network.add_constant(factorShape, trt.Weights(np.ascontiguousarray(np.ones(factorShape, dtype=np.float32))))
# constantLayer.get_output(0) here is the transposed version of the factor in the initial example; op1 = TRANSPOSE below transposes it back
matrixMultiplyLayer = network.add_matrix_multiply(inputT0, trt.MatrixOperation.NONE, constantLayer.get_output(0), trt.MatrixOperation.NONE)
matrixMultiplyLayer.op0 = trt.MatrixOperation.NONE
matrixMultiplyLayer.op1 = trt.MatrixOperation.TRANSPOSE
#------------------------------------------------------------------------------- Network
network.mark_output(matrixMultiplyLayer.get_output(0))
engineString = builder.build_serialized_network(network, config)
engine = trt.Runtime(logger).deserialize_cuda_engine(engineString)
context = engine.create_execution_context()
nInput = np.sum([engine.binding_is_input(i) for i in range(engine.num_bindings)])
nOutput = engine.num_bindings - nInput

bufferH = []
bufferH.append(data)
for i in range(nOutput):
    bufferH.append(np.empty(context.get_binding_shape(nInput + i), dtype=trt.nptype(engine.get_binding_dtype(nInput + i))))
bufferD = []
for i in range(engine.num_bindings):
    bufferD.append(cudart.cudaMalloc(bufferH[i].nbytes)[1])

for i in range(nInput):
    cudart.cudaMemcpy(bufferD[i], np.ascontiguousarray(bufferH[i].reshape(-1)).ctypes.data, bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice)

context.execute_v2(bufferD)

for i in range(nOutput):
    cudart.cudaMemcpy(bufferH[nInput + i].ctypes.data, bufferD[nInput + i], bufferH[nInput + i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost)

for i in range(nInput):
    print("Input %d:" % i, bufferH[i].shape, "\n", bufferH[i])
for i in range(nOutput):
    print("Output %d:" % i, bufferH[nInput + i].shape, "\n", bufferH[nInput + i])

for buffer in bufferD:
    cudart.cudaFree(buffer)
trt-samples-for-hackathon-cn-master
cookbook/02-API/Layer/MatrixMultiplyLayer/Op0+Op1.py
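For reference, a minimal NumPy sketch (not part of the repo) of what Op0+Op1.py computes with the same data; op1 = TRANSPOSE corresponds to swapping the last two axes of the second operand before the product:

import numpy as np

data = np.arange(1 * 3 * 4 * 5, dtype=np.float32).reshape(1, 3, 4, 5)
factor = np.ones((1, 3, 4, 5), dtype=np.float32)
ref = np.matmul(data, factor.transpose(0, 1, 3, 2))  # shape (1, 3, 4, 4), each entry a row sum of data
print(ref.shape, ref[0, 0])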
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import numpy as np
import tensorrt as trt
from cuda import cudart

np.random.seed(31193)
nB, nC, nH, nW = 1, 3, 4, 5
data = np.arange(nB * nC * nH * nW, dtype=np.float32).reshape(nB, nC, nH, nW)
np.set_printoptions(precision=3, linewidth=200, suppress=True)
cudart.cudaDeviceSynchronize()

logger = trt.Logger(trt.Logger.ERROR)
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
config = builder.create_builder_config()
inputT0 = network.add_input("inputT0", trt.float32, (nB, nC, nH, nW))
#------------------------------------------------------------------------------- Network
factorShape = data.transpose(0, 1, 3, 2).shape
constantLayer = network.add_constant(factorShape, trt.Weights(np.ascontiguousarray(np.ones(factorShape, dtype=np.float32))))
matrixMultiplyLayer = network.add_matrix_multiply(inputT0, trt.MatrixOperation.NONE, constantLayer.get_output(0), trt.MatrixOperation.NONE)
#------------------------------------------------------------------------------- Network
network.mark_output(matrixMultiplyLayer.get_output(0))
engineString = builder.build_serialized_network(network, config)
engine = trt.Runtime(logger).deserialize_cuda_engine(engineString)
nIO = engine.num_io_tensors
lTensorName = [engine.get_tensor_name(i) for i in range(nIO)]
nInput = [engine.get_tensor_mode(lTensorName[i]) for i in range(nIO)].count(trt.TensorIOMode.INPUT)

context = engine.create_execution_context()
for i in range(nIO):
    print("[%2d]%s->" % (i, "Input " if i < nInput else "Output"), engine.get_tensor_dtype(lTensorName[i]), engine.get_tensor_shape(lTensorName[i]), context.get_tensor_shape(lTensorName[i]), lTensorName[i])

bufferH = []
for i in range(nIO):
    bufferH.append(np.empty(context.get_tensor_shape(lTensorName[i]), dtype=trt.nptype(engine.get_tensor_dtype(lTensorName[i]))))
bufferD = []
for i in range(nIO):
    bufferD.append(cudart.cudaMalloc(bufferH[i].nbytes)[1])

bufferH[0] = data
for i in range(nInput):
    cudart.cudaMemcpy(bufferD[i], bufferH[i].ctypes.data, bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice)

for i in range(nIO):
    context.set_tensor_address(lTensorName[i], int(bufferD[i]))

context.execute_async_v3(0)

for i in range(nInput, nIO):
    cudart.cudaMemcpy(bufferH[i].ctypes.data, bufferD[i], bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost)

for i in range(nIO):
    print(lTensorName[i])
    print(bufferH[i])

for b in bufferD:
    cudart.cudaFree(b)
trt-samples-for-hackathon-cn-master
cookbook/02-API/Layer/MatrixMultiplyLayer/SimpleExample.py
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import numpy as np
import tensorrt as trt
from cuda import cudart

np.random.seed(31193)
nB, nC, nH, nW = 1, 3, 4, 5
data = np.arange(nB * nC * nH * nW, dtype=np.float32).reshape(nB, nC, nH, nW)
np.set_printoptions(precision=3, linewidth=200, suppress=True)
cudart.cudaDeviceSynchronize()

logger = trt.Logger(trt.Logger.ERROR)
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
config = builder.create_builder_config()
inputT0 = network.add_input("inputT0", trt.float32, (nB, nC, nH, nW))
#------------------------------------------------------------------------------- Network
factorShape = data.transpose(0, 1, 3, 2).shape[:-1]  # a vector has one fewer dimension than a matrix
constantLayer = network.add_constant(factorShape, trt.Weights(np.ascontiguousarray(np.ones(factorShape, dtype=np.float32))))
matrixMultiplyLayer = network.add_matrix_multiply(inputT0, trt.MatrixOperation.NONE, constantLayer.get_output(0), trt.MatrixOperation.NONE)
matrixMultiplyLayer.op0 = trt.MatrixOperation.NONE
matrixMultiplyLayer.op1 = trt.MatrixOperation.VECTOR
#------------------------------------------------------------------------------- Network
network.mark_output(matrixMultiplyLayer.get_output(0))
engineString = builder.build_serialized_network(network, config)
engine = trt.Runtime(logger).deserialize_cuda_engine(engineString)
nIO = engine.num_io_tensors
lTensorName = [engine.get_tensor_name(i) for i in range(nIO)]
nInput = [engine.get_tensor_mode(lTensorName[i]) for i in range(nIO)].count(trt.TensorIOMode.INPUT)

context = engine.create_execution_context()
for i in range(nIO):
    print("[%2d]%s->" % (i, "Input " if i < nInput else "Output"), engine.get_tensor_dtype(lTensorName[i]), engine.get_tensor_shape(lTensorName[i]), context.get_tensor_shape(lTensorName[i]), lTensorName[i])

bufferH = []
for i in range(nIO):
    bufferH.append(np.empty(context.get_tensor_shape(lTensorName[i]), dtype=trt.nptype(engine.get_tensor_dtype(lTensorName[i]))))
bufferD = []
for i in range(nIO):
    bufferD.append(cudart.cudaMalloc(bufferH[i].nbytes)[1])

bufferH[0] = data
for i in range(nInput):
    cudart.cudaMemcpy(bufferD[i], bufferH[i].ctypes.data, bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice)

for i in range(nIO):
    context.set_tensor_address(lTensorName[i], int(bufferD[i]))

context.execute_async_v3(0)

for i in range(nInput, nIO):
    cudart.cudaMemcpy(bufferH[i].ctypes.data, bufferD[i], bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost)

for i in range(nIO):
    print(lTensorName[i])
    print(bufferH[i])

for b in bufferD:
    cudart.cudaFree(b)
trt-samples-for-hackathon-cn-master
cookbook/02-API/Layer/MatrixMultiplyLayer/MatrixWithVector.py
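A rough NumPy equivalent of the MatrixOperation.VECTOR case above (a sketch, under the assumption that TensorRT treats the second operand as a batched vector and drops the collapsed axis from the output):

import numpy as np

data = np.arange(1 * 3 * 4 * 5, dtype=np.float32).reshape(1, 3, 4, 5)
vec = np.ones((1, 3, 5), dtype=np.float32)
ref = np.einsum("bchw,bcw->bch", data, vec)  # one matrix-vector product per (b, c), shape (1, 3, 4)
print(ref.shape)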
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import numpy as np
import tensorrt as trt
from cuda import cudart

np.random.seed(31193)
nB, nC, nH, nW = 1, 3, 4, 5
data = np.arange(nB * nC * nH * nW, dtype=np.float32).reshape(nB, nC, nH, nW)
np.set_printoptions(precision=3, linewidth=200, suppress=True)
cudart.cudaDeviceSynchronize()

logger = trt.Logger(trt.Logger.ERROR)
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
config = builder.create_builder_config()
inputT0 = network.add_input("inputT0", trt.float32, (nB, nC, nH, nW))
#------------------------------------------------------------------------------- Network
factorShape = (1, 1) + data.transpose(0, 1, 3, 2).shape[-2:]
constantLayer = network.add_constant(factorShape, trt.Weights(np.ascontiguousarray(np.ones(factorShape, dtype=np.float32))))
matrixMultiplyLayer = network.add_matrix_multiply(inputT0, trt.MatrixOperation.NONE, constantLayer.get_output(0), trt.MatrixOperation.NONE)
#------------------------------------------------------------------------------- Network
network.mark_output(matrixMultiplyLayer.get_output(0))
engineString = builder.build_serialized_network(network, config)
engine = trt.Runtime(logger).deserialize_cuda_engine(engineString)
nIO = engine.num_io_tensors
lTensorName = [engine.get_tensor_name(i) for i in range(nIO)]
nInput = [engine.get_tensor_mode(lTensorName[i]) for i in range(nIO)].count(trt.TensorIOMode.INPUT)

context = engine.create_execution_context()
for i in range(nIO):
    print("[%2d]%s->" % (i, "Input " if i < nInput else "Output"), engine.get_tensor_dtype(lTensorName[i]), engine.get_tensor_shape(lTensorName[i]), context.get_tensor_shape(lTensorName[i]), lTensorName[i])

bufferH = []
for i in range(nIO):
    bufferH.append(np.empty(context.get_tensor_shape(lTensorName[i]), dtype=trt.nptype(engine.get_tensor_dtype(lTensorName[i]))))
bufferD = []
for i in range(nIO):
    bufferD.append(cudart.cudaMalloc(bufferH[i].nbytes)[1])

bufferH[0] = data
for i in range(nInput):
    cudart.cudaMemcpy(bufferD[i], bufferH[i].ctypes.data, bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice)

for i in range(nIO):
    context.set_tensor_address(lTensorName[i], int(bufferD[i]))

context.execute_async_v3(0)

for i in range(nInput, nIO):
    cudart.cudaMemcpy(bufferH[i].ctypes.data, bufferD[i], bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost)

for i in range(nIO):
    print(lTensorName[i])
    print(bufferH[i])

for b in bufferD:
    cudart.cudaFree(b)
trt-samples-for-hackathon-cn-master
cookbook/02-API/Layer/MatrixMultiplyLayer/Broadcast.py
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import numpy as np
import tensorrt as trt
from cuda import cudart

nB, nC, nH, nW = 1, 3, 4, 5
data = np.arange(nC, dtype=np.float32).reshape(nC, 1, 1) * 100 + np.arange(nH).reshape(1, nH, 1) * 10 + np.arange(nW).reshape(1, 1, nW)
data = data.reshape(nB, nC, nH, nW).astype(np.float32)
np.set_printoptions(precision=3, linewidth=200, suppress=True)
cudart.cudaDeviceSynchronize()

logger = trt.Logger(trt.Logger.ERROR)
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
config = builder.create_builder_config()
inputT0 = network.add_input("inputT0", trt.float32, (nB, nC, nH, nW))
#------------------------------------------------------------------------------- Network
shuffleLayer = network.add_shuffle(inputT0)
shuffleLayer.reshape_dims = (-1, 2, 15)  # specify the new shape; at most one -1 is allowed and is computed automatically; the default is inputT0.shape
#------------------------------------------------------------------------------- Network
network.mark_output(shuffleLayer.get_output(0))
engineString = builder.build_serialized_network(network, config)
engine = trt.Runtime(logger).deserialize_cuda_engine(engineString)
context = engine.create_execution_context()
nInput = np.sum([engine.binding_is_input(i) for i in range(engine.num_bindings)])
nOutput = engine.num_bindings - nInput

bufferH = []
bufferH.append(data)
for i in range(nOutput):
    bufferH.append(np.empty(context.get_binding_shape(nInput + i), dtype=trt.nptype(engine.get_binding_dtype(nInput + i))))
bufferD = []
for i in range(engine.num_bindings):
    bufferD.append(cudart.cudaMalloc(bufferH[i].nbytes)[1])

for i in range(nInput):
    cudart.cudaMemcpy(bufferD[i], np.ascontiguousarray(bufferH[i].reshape(-1)).ctypes.data, bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice)

context.execute_v2(bufferD)

for i in range(nOutput):
    cudart.cudaMemcpy(bufferH[nInput + i].ctypes.data, bufferD[nInput + i], bufferH[nInput + i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost)

for i in range(nInput):
    print("Input %d:" % i, bufferH[i].shape, "\n", bufferH[i])
for i in range(nOutput):
    print("Output %d:" % i, bufferH[nInput + i].shape, "\n", bufferH[nInput + i])

for buffer in bufferD:
    cudart.cudaFree(buffer)
trt-samples-for-hackathon-cn-master
cookbook/02-API/Layer/ShuffleLayer/Reshape_dims.py
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import numpy as np
import tensorrt as trt
from cuda import cudart

nB, nC, nH, nW = 1, 3, 4, 5
data = np.arange(nC, dtype=np.float32).reshape(nC, 1, 1) * 100 + np.arange(nH).reshape(1, nH, 1) * 10 + np.arange(nW).reshape(1, 1, nW)
data = data.reshape(nB, nC, nH, nW).astype(np.float32)
np.set_printoptions(precision=3, linewidth=200, suppress=True)
cudart.cudaDeviceSynchronize()

logger = trt.Logger(trt.Logger.ERROR)
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
config = builder.create_builder_config()
inputT0 = network.add_input("inputT0", trt.float32, (nB, nC, nH, nW))
#------------------------------------------------------------------------------- Network
shuffleLayer = network.add_shuffle(inputT0)
shuffleLayer.second_transpose = (0, 2, 1, 3)  # the last transpose, applied after the reshape; by default the dimension order is unchanged
#------------------------------------------------------------------------------- Network
network.mark_output(shuffleLayer.get_output(0))
engineString = builder.build_serialized_network(network, config)
engine = trt.Runtime(logger).deserialize_cuda_engine(engineString)
context = engine.create_execution_context()
nInput = np.sum([engine.binding_is_input(i) for i in range(engine.num_bindings)])
nOutput = engine.num_bindings - nInput

bufferH = []
bufferH.append(data)
for i in range(nOutput):
    bufferH.append(np.empty(context.get_binding_shape(nInput + i), dtype=trt.nptype(engine.get_binding_dtype(nInput + i))))
bufferD = []
for i in range(engine.num_bindings):
    bufferD.append(cudart.cudaMalloc(bufferH[i].nbytes)[1])

for i in range(nInput):
    cudart.cudaMemcpy(bufferD[i], np.ascontiguousarray(bufferH[i].reshape(-1)).ctypes.data, bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice)

context.execute_v2(bufferD)

for i in range(nOutput):
    cudart.cudaMemcpy(bufferH[nInput + i].ctypes.data, bufferD[nInput + i], bufferH[nInput + i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost)

for i in range(nInput):
    print("Input %d:" % i, bufferH[i].shape, "\n", bufferH[i])
for i in range(nOutput):
    print("Output %d:" % i, bufferH[nInput + i].shape, "\n", bufferH[nInput + i])

for buffer in bufferD:
    cudart.cudaFree(buffer)
trt-samples-for-hackathon-cn-master
cookbook/02-API/Layer/ShuffleLayer/Second_transpose.py
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import numpy as np
import tensorrt as trt
from cuda import cudart

nB, nC, nH, nW = 1, 3, 4, 5
data = np.arange(nC, dtype=np.float32).reshape(nC, 1, 1) * 100 + np.arange(nH).reshape(1, nH, 1) * 10 + np.arange(nW).reshape(1, 1, nW)
data = data.reshape(nB, nC, nH, nW).astype(np.float32)
np.set_printoptions(precision=3, linewidth=200, suppress=True)
cudart.cudaDeviceSynchronize()

logger = trt.Logger(trt.Logger.ERROR)
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
config = builder.create_builder_config()
inputT0 = network.add_input("inputT0", trt.float32, (nB, nC, nH, nW))
#------------------------------------------------------------------------------- Network
constantLayer = network.add_constant([0], trt.Weights(trt.float32))  # a static, empty constant layer
shuffleLayer = network.add_shuffle(constantLayer.get_output(0))
shuffleLayer.zero_is_placeholder = False
shuffleLayer.reshape_dims = (1, 3, 4, 0)  # match the shape of the tensor it will be concatenated with
concatenationLayer = network.add_concatenation([inputT0, shuffleLayer.get_output(0)])
concatenationLayer.axis = 3
#------------------------------------------------------------------------------- Network
network.mark_output(concatenationLayer.get_output(0))
engineString = builder.build_serialized_network(network, config)
engine = trt.Runtime(logger).deserialize_cuda_engine(engineString)
context = engine.create_execution_context()
nInput = np.sum([engine.binding_is_input(i) for i in range(engine.num_bindings)])
nOutput = engine.num_bindings - nInput

bufferH = []
bufferH.append(data)
for i in range(nOutput):
    bufferH.append(np.empty(context.get_binding_shape(nInput + i), dtype=trt.nptype(engine.get_binding_dtype(nInput + i))))
bufferD = []
for i in range(engine.num_bindings):
    bufferD.append(cudart.cudaMalloc(bufferH[i].nbytes)[1])

for i in range(nInput):
    cudart.cudaMemcpy(bufferD[i], np.ascontiguousarray(bufferH[i].reshape(-1)).ctypes.data, bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice)

context.execute_v2(bufferD)

for i in range(nOutput):
    cudart.cudaMemcpy(bufferH[nInput + i].ctypes.data, bufferD[nInput + i], bufferH[nInput + i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost)

for i in range(nInput):
    print("Input %d:" % i, bufferH[i].shape, "\n", bufferH[i])
for i in range(nOutput):
    print("Output %d:" % i, bufferH[nInput + i].shape, "\n", bufferH[nInput + i])

for buffer in bufferD:
    cudart.cudaFree(buffer)
trt-samples-for-hackathon-cn-master
cookbook/02-API/Layer/ShuffleLayer/Zero_is_placeholder2.py
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import numpy as np
import tensorrt as trt
from cuda import cudart

nB, nC, nH, nW = 1, 3, 4, 5
data0 = np.arange(nC, dtype=np.float32).reshape(nC, 1, 1) * 100 + np.arange(nH).reshape(1, nH, 1) * 10 + np.arange(nW).reshape(1, 1, nW)
data0 = data0.reshape(nB, nC, nH, nW).astype(np.float32)
data1 = np.array([1, 4, 5, 3], dtype=np.int32)
np.set_printoptions(precision=3, linewidth=200, suppress=True)
cudart.cudaDeviceSynchronize()

logger = trt.Logger(trt.Logger.ERROR)
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
profile = builder.create_optimization_profile()  # an optimization profile is required for the shape input
config = builder.create_builder_config()
config.set_memory_pool_limit(trt.MemoryPoolType.WORKSPACE, 1 << 30)
inputT0 = network.add_input("inputT0", trt.float32, (nB, nC, nH, nW))
inputT1 = network.add_input("inputT1", trt.int32, (4, ))
profile.set_shape_input(inputT1.name, (1, 1, 1, 1), (nB, nC, nH, nW), (5, 5, 5, 5))  # what is set here is not the shape of the shape input but its values; the range only needs to cover the values used later
config.add_optimization_profile(profile)
#------------------------------------------------------------------------------- Network
shuffleLayer = network.add_shuffle(inputT0)
#shuffleLayer.set_input(0, inputT0)
shuffleLayer.set_input(1, inputT1)
#------------------------------------------------------------------------------- Network
network.mark_output(shuffleLayer.get_output(0))
engineString = builder.build_serialized_network(network, config)
engine = trt.Runtime(logger).deserialize_cuda_engine(engineString)
context = engine.create_execution_context()
context.set_shape_input(1, data1)  # bind the real values of the shape tensor at runtime
print("context.all_binding_shapes_specified:", context.all_binding_shapes_specified)
nInput = np.sum([engine.binding_is_input(i) for i in range(engine.num_bindings)])
nOutput = engine.num_bindings - nInput
nIO = engine.num_io_tensors
lTensorName = [engine.get_tensor_name(i) for i in range(nIO)]
for i in range(nIO):
    print("[%2d]%s->" % (i, "Input " if i < nInput else "Output"), engine.get_tensor_dtype(lTensorName[i]), engine.get_tensor_shape(lTensorName[i]), context.get_tensor_shape(lTensorName[i]), lTensorName[i])
for i in range(nIO):  # the same information through the older binding API
    print("[%2d]%s->" % (i, "Input " if i < nInput else "Output"), engine.get_tensor_dtype(lTensorName[i]), engine.get_binding_shape(i), context.get_binding_shape(i), lTensorName[i])

bufferH = []
bufferH.append(data0)
bufferH.append(np.ascontiguousarray(np.zeros([4], dtype=np.int32)))  # the buffer passed for the shape tensor may hold garbage values; the real values were bound with set_shape_input above
for i in range(nOutput):
    bufferH.append(np.empty(context.get_binding_shape(nInput + i), dtype=trt.nptype(engine.get_binding_dtype(nInput + i))))
bufferD = []
for i in range(engine.num_bindings):
    bufferD.append(cudart.cudaMalloc(bufferH[i].nbytes)[1])

for i in range(nInput):
    cudart.cudaMemcpy(bufferD[i], np.ascontiguousarray(bufferH[i].reshape(-1)).ctypes.data, bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice)

context.execute_v2(bufferD)

for i in range(nOutput):
    cudart.cudaMemcpy(bufferH[nInput + i].ctypes.data, bufferD[nInput + i], bufferH[nInput + i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost)

for i in range(nInput):
    print("Input %d:" % i, bufferH[i].shape, "\n", bufferH[i])
for i in range(nOutput):
    print("Output %d:" % i, bufferH[nInput + i].shape, "\n", bufferH[nInput + i])

for buffer in bufferD:
    cudart.cudaFree(buffer)
trt-samples-for-hackathon-cn-master
cookbook/02-API/Layer/ShuffleLayer/DynamicShuffleWithShapeTensor.py
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import numpy as np
import tensorrt as trt
from cuda import cudart

nB, nC, nH, nW = 1, 3, 4, 5
data = np.arange(nC, dtype=np.float32).reshape(nC, 1, 1) * 100 + np.arange(nH).reshape(1, nH, 1) * 10 + np.arange(nW).reshape(1, 1, nW)
data = data.reshape(nB, nC, nH, nW).astype(np.float32)
np.set_printoptions(precision=3, linewidth=200, suppress=True)
cudart.cudaDeviceSynchronize()

logger = trt.Logger(trt.Logger.ERROR)
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
config = builder.create_builder_config()
inputT0 = network.add_input("inputT0", trt.float32, (nB, nC, nH, nW))
#------------------------------------------------------------------------------- Network
shuffleLayer = network.add_shuffle(inputT0)
shuffleLayer.first_transpose = (0, 2, 1, 3)
shuffleLayer.reshape_dims = (1, 4, 5, 3)
shuffleLayer.second_transpose = (0, 2, 1, 3)
#------------------------------------------------------------------------------- Network
network.mark_output(shuffleLayer.get_output(0))
engineString = builder.build_serialized_network(network, config)
engine = trt.Runtime(logger).deserialize_cuda_engine(engineString)
context = engine.create_execution_context()
nInput = np.sum([engine.binding_is_input(i) for i in range(engine.num_bindings)])
nOutput = engine.num_bindings - nInput

bufferH = []
bufferH.append(data)
for i in range(nOutput):
    bufferH.append(np.empty(context.get_binding_shape(nInput + i), dtype=trt.nptype(engine.get_binding_dtype(nInput + i))))
bufferD = []
for i in range(engine.num_bindings):
    bufferD.append(cudart.cudaMalloc(bufferH[i].nbytes)[1])

for i in range(nInput):
    cudart.cudaMemcpy(bufferD[i], np.ascontiguousarray(bufferH[i].reshape(-1)).ctypes.data, bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice)

context.execute_v2(bufferD)

for i in range(nOutput):
    cudart.cudaMemcpy(bufferH[nInput + i].ctypes.data, bufferD[nInput + i], bufferH[nInput + i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost)

for i in range(nInput):
    print("Input %d:" % i, bufferH[i].shape, "\n", bufferH[i])
for i in range(nOutput):
    print("Output %d:" % i, bufferH[nInput + i].shape, "\n", bufferH[nInput + i])

for buffer in bufferD:
    cudart.cudaFree(buffer)
trt-samples-for-hackathon-cn-master
cookbook/02-API/Layer/ShuffleLayer/Combination.py
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import numpy as np
import tensorrt as trt
from cuda import cudart

nB, nC, nH, nW = 1, 3, 4, 5
data = np.arange(nC, dtype=np.float32).reshape(nC, 1, 1) * 100 + np.arange(nH).reshape(1, nH, 1) * 10 + np.arange(nW).reshape(1, 1, nW)
data = data.reshape(nB, nC, nH, nW).astype(np.float32)
np.set_printoptions(precision=3, linewidth=200, suppress=True)
cudart.cudaDeviceSynchronize()

logger = trt.Logger(trt.Logger.ERROR)
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
config = builder.create_builder_config()
inputT0 = network.add_input("inputT0", trt.float32, (nB, nC, nH, nW))  # missing in the original file; required below
#------------------------------------------------------------------------------- Network
constantLayer = network.add_constant([4], np.array([1, 4, 5, 3], dtype=np.int32))  # the static new shape
shuffleLayer = network.add_shuffle(inputT0)
#shuffleLayer.set_input(0, inputT0)  # input 0 is the tensor to be shuffled
shuffleLayer.set_input(1, constantLayer.get_output(0))  # input 1 is the new-shape tensor
#------------------------------------------------------------------------------- Network
network.mark_output(shuffleLayer.get_output(0))
engineString = builder.build_serialized_network(network, config)
engine = trt.Runtime(logger).deserialize_cuda_engine(engineString)
context = engine.create_execution_context()
nInput = np.sum([engine.binding_is_input(i) for i in range(engine.num_bindings)])
nOutput = engine.num_bindings - nInput

bufferH = []
bufferH.append(data)
for i in range(nOutput):
    bufferH.append(np.empty(context.get_binding_shape(nInput + i), dtype=trt.nptype(engine.get_binding_dtype(nInput + i))))
bufferD = []
for i in range(engine.num_bindings):
    bufferD.append(cudart.cudaMalloc(bufferH[i].nbytes)[1])

for i in range(nInput):
    cudart.cudaMemcpy(bufferD[i], np.ascontiguousarray(bufferH[i].reshape(-1)).ctypes.data, bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice)

context.execute_v2(bufferD)

for i in range(nOutput):
    cudart.cudaMemcpy(bufferH[nInput + i].ctypes.data, bufferD[nInput + i], bufferH[nInput + i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost)

for i in range(nInput):
    print("Input %d:" % i, bufferH[i].shape, "\n", bufferH[i])
for i in range(nOutput):
    print("Output %d:" % i, bufferH[nInput + i].shape, "\n", bufferH[nInput + i])

for buffer in bufferD:
    cudart.cudaFree(buffer)
trt-samples-for-hackathon-cn-master
cookbook/02-API/Layer/ShuffleLayer/StaticShuffle.py
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import numpy as np
import tensorrt as trt
from cuda import cudart

nB, nC, nH, nW = 1, 3, 4, 5
data = np.arange(nC, dtype=np.float32).reshape(nC, 1, 1) * 100 + np.arange(nH).reshape(1, nH, 1) * 10 + np.arange(nW).reshape(1, 1, nW)
data = data.reshape(nB, nC, nH, nW).astype(np.float32)
np.set_printoptions(precision=3, linewidth=200, suppress=True)
cudart.cudaDeviceSynchronize()

logger = trt.Logger(trt.Logger.ERROR)
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
config = builder.create_builder_config()
inputT0 = network.add_input("inputT0", trt.float32, (nB, nC, nH, nW))  # missing in the original file; required below
#------------------------------------------------------------------------------- Network
shuffleLayer = network.add_shuffle(inputT0)
#------------------------------------------------------------------------------- Network
network.mark_output(shuffleLayer.get_output(0))
engineString = builder.build_serialized_network(network, config)
engine = trt.Runtime(logger).deserialize_cuda_engine(engineString)
context = engine.create_execution_context()
nInput = np.sum([engine.binding_is_input(i) for i in range(engine.num_bindings)])
nOutput = engine.num_bindings - nInput

bufferH = []
bufferH.append(data)
for i in range(nOutput):
    bufferH.append(np.empty(context.get_binding_shape(nInput + i), dtype=trt.nptype(engine.get_binding_dtype(nInput + i))))
bufferD = []
for i in range(engine.num_bindings):
    bufferD.append(cudart.cudaMalloc(bufferH[i].nbytes)[1])

for i in range(nInput):
    cudart.cudaMemcpy(bufferD[i], np.ascontiguousarray(bufferH[i].reshape(-1)).ctypes.data, bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice)

context.execute_v2(bufferD)

for i in range(nOutput):
    cudart.cudaMemcpy(bufferH[nInput + i].ctypes.data, bufferD[nInput + i], bufferH[nInput + i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost)

for i in range(nInput):
    print("Input %d:" % i, bufferH[i].shape, "\n", bufferH[i])
for i in range(nOutput):
    print("Output %d:" % i, bufferH[nInput + i].shape, "\n", bufferH[nInput + i])

for buffer in bufferD:
    cudart.cudaFree(buffer)
trt-samples-for-hackathon-cn-master
cookbook/02-API/Layer/ShuffleLayer/SimpleExample.py
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import numpy as np
import tensorrt as trt
from cuda import cudart

nB, nC, nH, nW = 1, 3, 4, 5
data = np.arange(nC, dtype=np.float32).reshape(nC, 1, 1) * 100 + np.arange(nH).reshape(1, nH, 1) * 10 + np.arange(nW).reshape(1, 1, nW)
data = data.reshape(nB, nC, nH, nW).astype(np.float32)
np.set_printoptions(precision=3, linewidth=200, suppress=True)
cudart.cudaDeviceSynchronize()

logger = trt.Logger(trt.Logger.ERROR)
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
config = builder.create_builder_config()
inputT0 = network.add_input("inputT0", trt.float32, (nB, nC, nH, nW))
#------------------------------------------------------------------------------- Network
shuffleLayer = network.add_shuffle(inputT0)
shuffleLayer.reshape_dims = (0, 0, -1)
#------------------------------------------------------------------------------- Network
network.mark_output(shuffleLayer.get_output(0))
engineString = builder.build_serialized_network(network, config)
engine = trt.Runtime(logger).deserialize_cuda_engine(engineString)
context = engine.create_execution_context()
nInput = np.sum([engine.binding_is_input(i) for i in range(engine.num_bindings)])
nOutput = engine.num_bindings - nInput

bufferH = []
bufferH.append(data)
for i in range(nOutput):
    bufferH.append(np.empty(context.get_binding_shape(nInput + i), dtype=trt.nptype(engine.get_binding_dtype(nInput + i))))
bufferD = []
for i in range(engine.num_bindings):
    bufferD.append(cudart.cudaMalloc(bufferH[i].nbytes)[1])

for i in range(nInput):
    cudart.cudaMemcpy(bufferD[i], np.ascontiguousarray(bufferH[i].reshape(-1)).ctypes.data, bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice)

context.execute_v2(bufferD)

for i in range(nOutput):
    cudart.cudaMemcpy(bufferH[nInput + i].ctypes.data, bufferD[nInput + i], bufferH[nInput + i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost)

for i in range(nInput):
    print("Input %d:" % i, bufferH[i].shape, "\n", bufferH[i])
for i in range(nOutput):
    print("Output %d:" % i, bufferH[nInput + i].shape, "\n", bufferH[nInput + i])

for buffer in bufferD:
    cudart.cudaFree(buffer)
trt-samples-for-hackathon-cn-master
cookbook/02-API/Layer/ShuffleLayer/Reshape_dims-IncludeZero.py
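In Reshape_dims-IncludeZero.py, with zero_is_placeholder left at its default, a 0 in reshape_dims copies the corresponding input dimension, so (0, 0, -1) on a (1, 3, 4, 5) input keeps the first two dimensions and folds the rest. A quick shape check in NumPy terms (a sketch; NumPy has no 0-placeholder, so the copied dimensions are written out by hand):

import numpy as np

data = np.arange(60, dtype=np.float32).reshape(1, 3, 4, 5)
print(data.reshape(1, 3, -1).shape)  # (1, 3, 20): dims 0 and 1 kept, -1 inferred as 20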
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import numpy as np
import tensorrt as trt
from cuda import cudart

nB, nC, nH, nW = 1, 3, 4, 5
data = np.arange(nC, dtype=np.float32).reshape(nC, 1, 1) * 100 + np.arange(nH).reshape(1, nH, 1) * 10 + np.arange(nW).reshape(1, 1, nW)
data = data.reshape(nB, nC, nH, nW).astype(np.float32)
np.set_printoptions(precision=3, linewidth=200, suppress=True)
cudart.cudaDeviceSynchronize()

logger = trt.Logger(trt.Logger.ERROR)
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
config = builder.create_builder_config()
inputT0 = network.add_input("inputT0", trt.float32, (nB, nC, nH, nW))
#------------------------------------------------------------------------------- Network
shuffleLayer = network.add_shuffle(inputT0)
shuffleLayer.zero_is_placeholder = True  # treat 0 as a placeholder for the input dimension (the default, True)
shuffleLayer.reshape_dims = (0, 0, 0, 0)
#------------------------------------------------------------------------------- Network
network.mark_output(shuffleLayer.get_output(0))
engineString = builder.build_serialized_network(network, config)
engine = trt.Runtime(logger).deserialize_cuda_engine(engineString)
context = engine.create_execution_context()
nInput = np.sum([engine.binding_is_input(i) for i in range(engine.num_bindings)])
nOutput = engine.num_bindings - nInput

bufferH = []
bufferH.append(data)
for i in range(nOutput):
    bufferH.append(np.empty(context.get_binding_shape(nInput + i), dtype=trt.nptype(engine.get_binding_dtype(nInput + i))))
bufferD = []
for i in range(engine.num_bindings):
    bufferD.append(cudart.cudaMalloc(bufferH[i].nbytes)[1])

for i in range(nInput):
    cudart.cudaMemcpy(bufferD[i], np.ascontiguousarray(bufferH[i].reshape(-1)).ctypes.data, bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice)

context.execute_v2(bufferD)

for i in range(nOutput):
    cudart.cudaMemcpy(bufferH[nInput + i].ctypes.data, bufferD[nInput + i], bufferH[nInput + i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost)

for i in range(nInput):
    print("Input %d:" % i, bufferH[i].shape, "\n", bufferH[i])
for i in range(nOutput):
    print("Output %d:" % i, bufferH[nInput + i].shape, "\n", bufferH[nInput + i])

for buffer in bufferD:
    cudart.cudaFree(buffer)
trt-samples-for-hackathon-cn-master
cookbook/02-API/Layer/ShuffleLayer/Zero_is_placeholder.py
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import numpy as np
import tensorrt as trt
from cuda import cudart

nB, nC, nH, nW = 1, 3, 4, 5
data = np.arange(nC, dtype=np.float32).reshape(nC, 1, 1) * 100 + np.arange(nH).reshape(1, nH, 1) * 10 + np.arange(nW).reshape(1, 1, nW)
data = data.reshape(nB, nC, nH, nW).astype(np.float32)
np.set_printoptions(precision=3, linewidth=200, suppress=True)
cudart.cudaDeviceSynchronize()

logger = trt.Logger(trt.Logger.ERROR)
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
profile = builder.create_optimization_profile()
config = builder.create_builder_config()
inputT0 = network.add_input("inputT0", trt.float32, (-1, -1, -1, -1))
profile.set_shape(inputT0.name, [1, 1, 1, 1], [nB, nC, nH, nW], [nB * 2, nC * 2, nH * 2, nW * 2])
config.add_optimization_profile(profile)
#------------------------------------------------------------------------------- Network
oneLayer = network.add_constant([1], np.array([1], dtype=np.int32))
shape0Layer = network.add_shape(inputT0)
shape1Layer = network.add_concatenation([shape0Layer.get_output(0), oneLayer.get_output(0)])
shape1Layer.axis = 0
shuffleLayer = network.add_shuffle(inputT0)  # append one trailing dimension of size 1 to the input tensor
shuffleLayer.set_input(1, shape1Layer.get_output(0))
#shuffleLayer = network.add_shuffle(inputT0)  # wrong practice: in dynamic-shape mode the shape may contain -1 and cannot be used as a new shape
#shuffleLayer.reshape_dims = tuple(inputT0.shape) + (1,)
shape2Layer = network.add_shape(shuffleLayer.get_output(0))
shape3Layer = network.add_slice(shape2Layer.get_output(0), [0], [4], [1])
shuffle2Layer = network.add_shuffle(shuffleLayer.get_output(0))  # remove the trailing dimension of size 1 again
shuffle2Layer.set_input(1, shape3Layer.get_output(0))
#shuffle2Layer = network.add_shuffle(shuffleLayer.get_output(0))  # wrong practice
#shuffle2Layer.reshape_dims = tuple(shuffleLayer.get_output(0))[:-1]
#------------------------------------------------------------------------------- Network
network.mark_output(shuffleLayer.get_output(0))
network.mark_output(shuffle2Layer.get_output(0))
engineString = builder.build_serialized_network(network, config)
engine = trt.Runtime(logger).deserialize_cuda_engine(engineString)
nIO = engine.num_io_tensors
lTensorName = [engine.get_tensor_name(i) for i in range(nIO)]
nInput = [engine.get_tensor_mode(lTensorName[i]) for i in range(nIO)].count(trt.TensorIOMode.INPUT)

context = engine.create_execution_context()
context.set_input_shape(lTensorName[0], data.shape)

bufferH = []
bufferH.append(data)
for i in range(nInput, nIO):
    bufferH.append(np.empty(context.get_tensor_shape(lTensorName[i]), dtype=trt.nptype(engine.get_tensor_dtype(lTensorName[i]))))
bufferD = []
for i in range(nIO):
    bufferD.append(cudart.cudaMalloc(bufferH[i].nbytes)[1])

for i in range(nInput):
    cudart.cudaMemcpy(bufferD[i], bufferH[i].ctypes.data, bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice)

for i in range(nIO):
    context.set_tensor_address(lTensorName[i], int(bufferD[i]))

context.execute_async_v3(0)

for i in range(nInput, nIO):
    cudart.cudaMemcpy(bufferH[i].ctypes.data, bufferD[i], bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost)

for i in range(nIO):
    print(lTensorName[i])
    print(bufferH[i])

for b in bufferD:
    cudart.cudaFree(b)
trt-samples-for-hackathon-cn-master
cookbook/02-API/Layer/ShuffleLayer/DynamicShuffle.py
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import numpy as np
import tensorrt as trt
from cuda import cudart

nB, nC, nH, nW = 1, 3, 4, 5
data = np.arange(nC, dtype=np.float32).reshape(nC, 1, 1) * 100 + np.arange(nH).reshape(1, nH, 1) * 10 + np.arange(nW).reshape(1, 1, nW)
data = data.reshape(nB, nC, nH, nW).astype(np.float32)
np.set_printoptions(precision=3, linewidth=200, suppress=True)
cudart.cudaDeviceSynchronize()

logger = trt.Logger(trt.Logger.ERROR)
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
config = builder.create_builder_config()
inputT0 = network.add_input("inputT0", trt.float32, (nB, nC, nH, nW))  # missing in the original file; required below
#------------------------------------------------------------------------------- Network
shuffleLayer = network.add_shuffle(inputT0)
shuffleLayer.first_transpose = (0, 2, 1, 3)  # the first transpose; the default is the identity permutation (0, 1, 2, ...)
#------------------------------------------------------------------------------- Network
network.mark_output(shuffleLayer.get_output(0))
engineString = builder.build_serialized_network(network, config)
engine = trt.Runtime(logger).deserialize_cuda_engine(engineString)
context = engine.create_execution_context()
nInput = np.sum([engine.binding_is_input(i) for i in range(engine.num_bindings)])
nOutput = engine.num_bindings - nInput

bufferH = []
bufferH.append(data)
for i in range(nOutput):
    bufferH.append(np.empty(context.get_binding_shape(nInput + i), dtype=trt.nptype(engine.get_binding_dtype(nInput + i))))
bufferD = []
for i in range(engine.num_bindings):
    bufferD.append(cudart.cudaMalloc(bufferH[i].nbytes)[1])

for i in range(nInput):
    cudart.cudaMemcpy(bufferD[i], np.ascontiguousarray(bufferH[i].reshape(-1)).ctypes.data, bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice)

context.execute_v2(bufferD)

for i in range(nOutput):
    cudart.cudaMemcpy(bufferH[nInput + i].ctypes.data, bufferD[nInput + i], bufferH[nInput + i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost)

for i in range(nInput):
    print("Input %d:" % i, bufferH[i].shape, "\n", bufferH[i])
for i in range(nOutput):
    print("Output %d:" % i, bufferH[nInput + i].shape, "\n", bufferH[nInput + i])

for buffer in bufferD:
    cudart.cudaFree(buffer)
trt-samples-for-hackathon-cn-master
cookbook/02-API/Layer/ShuffleLayer/First_transpose.py
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import numpy as np
import tensorrt as trt
from cuda import cudart

nB, nC, nH, nW = 1, 3, 4, 5
data0 = np.full([nB, nC, nH, nW], 1, dtype=np.float32).reshape(nB, nC, nH, nW)
data1 = np.full([nB, nC, nH, nW], 2, dtype=np.float32).reshape(nB, nC, nH, nW)
np.set_printoptions(precision=3, linewidth=200, suppress=True)
cudart.cudaDeviceSynchronize()

logger = trt.Logger(trt.Logger.ERROR)
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
config = builder.create_builder_config()
inputT0 = network.add_input("inputT0", trt.float32, (nB, nC, nH, nW))
inputT1 = network.add_input("inputT1", trt.float32, (nB, nC, nH, nW))
#------------------------------------------------------------------------------- Network
elementwiseLayer = network.add_elementwise(inputT0, inputT1, trt.ElementWiseOperation.SUM)
elementwiseLayer.op = trt.ElementWiseOperation.SUB
#------------------------------------------------------------------------------- Network
network.mark_output(elementwiseLayer.get_output(0))
engineString = builder.build_serialized_network(network, config)
engine = trt.Runtime(logger).deserialize_cuda_engine(engineString)
nIO = engine.num_io_tensors
lTensorName = [engine.get_tensor_name(i) for i in range(nIO)]
nInput = [engine.get_tensor_mode(lTensorName[i]) for i in range(nIO)].count(trt.TensorIOMode.INPUT)

context = engine.create_execution_context()
for i in range(nIO):
    print("[%2d]%s->" % (i, "Input " if i < nInput else "Output"), engine.get_tensor_dtype(lTensorName[i]), engine.get_tensor_shape(lTensorName[i]), context.get_tensor_shape(lTensorName[i]), lTensorName[i])

bufferH = []
bufferH.append(np.ascontiguousarray(data0))
bufferH.append(np.ascontiguousarray(data1))
for i in range(nInput, nIO):
    bufferH.append(np.empty(context.get_tensor_shape(lTensorName[i]), dtype=trt.nptype(engine.get_tensor_dtype(lTensorName[i]))))
bufferD = []
for i in range(nIO):
    bufferD.append(cudart.cudaMalloc(bufferH[i].nbytes)[1])

for i in range(nInput):
    cudart.cudaMemcpy(bufferD[i], bufferH[i].ctypes.data, bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice)

for i in range(nIO):
    context.set_tensor_address(lTensorName[i], int(bufferD[i]))

context.execute_async_v3(0)

for i in range(nInput, nIO):
    cudart.cudaMemcpy(bufferH[i].ctypes.data, bufferD[i], bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost)

for i in range(nIO):
    print(lTensorName[i])
    print(bufferH[i])

for b in bufferD:
    cudart.cudaFree(b)
trt-samples-for-hackathon-cn-master
cookbook/02-API/Layer/ElementwiseLayer/Op.py
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import numpy as np
import tensorrt as trt
from cuda import cudart

nB, nC, nH, nW = 1, 3, 4, 5
data0 = np.full([nB, nC, nH, nW], 2, dtype=np.float32).reshape(nB, nC, nH, nW)
data1 = np.full([nB, nC, nH, nW], 3, dtype=np.float32).reshape(nB, nC, nH, nW)
np.set_printoptions(precision=3, linewidth=200, suppress=True)
cudart.cudaDeviceSynchronize()

logger = trt.Logger(trt.Logger.ERROR)
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
config = builder.create_builder_config()
inputT0 = network.add_input("inputT0", trt.float32, (nB, nC, nH, nW))
inputT1 = network.add_input("inputT1", trt.float32, (nB, nC, nH, nW))
#------------------------------------------------------------------------------- Network
elementwiseLayer = network.add_elementwise(inputT0, inputT1, trt.ElementWiseOperation.POW)
#------------------------------------------------------------------------------- Network
network.mark_output(elementwiseLayer.get_output(0))
engineString = builder.build_serialized_network(network, config)
engine = trt.Runtime(logger).deserialize_cuda_engine(engineString)
nIO = engine.num_io_tensors
lTensorName = [engine.get_tensor_name(i) for i in range(nIO)]
nInput = [engine.get_tensor_mode(lTensorName[i]) for i in range(nIO)].count(trt.TensorIOMode.INPUT)

context = engine.create_execution_context()
for i in range(nIO):
    print("[%2d]%s->" % (i, "Input " if i < nInput else "Output"), engine.get_tensor_dtype(lTensorName[i]), engine.get_tensor_shape(lTensorName[i]), context.get_tensor_shape(lTensorName[i]), lTensorName[i])

bufferH = []
bufferH.append(np.ascontiguousarray(data0))
bufferH.append(np.ascontiguousarray(data1))
for i in range(nInput, nIO):
    bufferH.append(np.empty(context.get_tensor_shape(lTensorName[i]), dtype=trt.nptype(engine.get_tensor_dtype(lTensorName[i]))))
bufferD = []
for i in range(nIO):
    bufferD.append(cudart.cudaMalloc(bufferH[i].nbytes)[1])

for i in range(nInput):
    cudart.cudaMemcpy(bufferD[i], bufferH[i].ctypes.data, bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice)

for i in range(nIO):
    context.set_tensor_address(lTensorName[i], int(bufferD[i]))

context.execute_async_v3(0)

for i in range(nInput, nIO):
    cudart.cudaMemcpy(bufferH[i].ctypes.data, bufferD[i], bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost)

for i in range(nIO):
    print(lTensorName[i])
    print(bufferH[i])

for b in bufferD:
    cudart.cudaFree(b)
trt-samples-for-hackathon-cn-master
cookbook/02-API/Layer/ElementwiseLayer/SimpleExample.py
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import numpy as np
import tensorrt as trt
from cuda import cudart

nB, nC, nH, nW = 1, 3, 4, 5
data0 = np.full([nB, nC, 1, nW], 1, dtype=np.float32).reshape(nB, nC, 1, nW)
data1 = np.full([nB, 1, nH, 1], 2, dtype=np.float32).reshape(nB, 1, nH, 1)
np.set_printoptions(precision=3, linewidth=200, suppress=True)
cudart.cudaDeviceSynchronize()

logger = trt.Logger(trt.Logger.ERROR)
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
config = builder.create_builder_config()
#------------------------------------------------------------------------------- Network
inputT0 = network.add_input("inputT0", trt.float32, (nB, nC, 1, nW))
inputT1 = network.add_input("inputT1", trt.float32, (nB, 1, nH, 1))
elementwiseLayer = network.add_elementwise(inputT0, inputT1, trt.ElementWiseOperation.SUM)
#------------------------------------------------------------------------------- Network
network.mark_output(elementwiseLayer.get_output(0))
engineString = builder.build_serialized_network(network, config)
engine = trt.Runtime(logger).deserialize_cuda_engine(engineString)
nIO = engine.num_io_tensors
lTensorName = [engine.get_tensor_name(i) for i in range(nIO)]
nInput = [engine.get_tensor_mode(lTensorName[i]) for i in range(nIO)].count(trt.TensorIOMode.INPUT)

context = engine.create_execution_context()
for i in range(nIO):
    print("[%2d]%s->" % (i, "Input " if i < nInput else "Output"), engine.get_tensor_dtype(lTensorName[i]), engine.get_tensor_shape(lTensorName[i]), context.get_tensor_shape(lTensorName[i]), lTensorName[i])

bufferH = []
bufferH.append(np.ascontiguousarray(data0))
bufferH.append(np.ascontiguousarray(data1))
for i in range(nInput, nIO):
    bufferH.append(np.empty(context.get_tensor_shape(lTensorName[i]), dtype=trt.nptype(engine.get_tensor_dtype(lTensorName[i]))))
bufferD = []
for i in range(nIO):
    bufferD.append(cudart.cudaMalloc(bufferH[i].nbytes)[1])

for i in range(nInput):
    cudart.cudaMemcpy(bufferD[i], bufferH[i].ctypes.data, bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice)

for i in range(nIO):
    context.set_tensor_address(lTensorName[i], int(bufferD[i]))

context.execute_async_v3(0)

for i in range(nInput, nIO):
    cudart.cudaMemcpy(bufferH[i].ctypes.data, bufferD[i], bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost)

for i in range(nIO):
    print(lTensorName[i])
    print(bufferH[i])

for b in bufferD:
    cudart.cudaFree(b)
trt-samples-for-hackathon-cn-master
cookbook/02-API/Layer/ElementwiseLayer/Broadcast.py
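The broadcast rule the sample above relies on matches the standard NumPy one, so the expected result can be checked in one line (a sketch with the sample's own data):

import numpy as np

data0 = np.full([1, 3, 1, 5], 1, dtype=np.float32)
data1 = np.full([1, 1, 4, 1], 2, dtype=np.float32)
print((data0 + data1).shape)  # (1, 3, 4, 5): size-1 dimensions are broadcast, every element is 3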
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import numpy as np
import tensorrt as trt
from cuda import cudart

nB0, nH0, nW0 = 1, 2, 3
nB1, nH1, nW1 = 4, 3, 2
nH2, nW2 = 4, 5
data0 = np.arange(nB0 * nH0 * nW0, dtype=np.float32).reshape(nB0, nH0, nW0)
data1 = np.ones(nB1 * nH1 * nW1, dtype=np.float32).reshape(nB1, nH1, nW1)
data2 = np.ones(nH2 * nW2, dtype=np.float32).reshape(nH2, nW2)
np.set_printoptions(precision=3, linewidth=200, suppress=True)
cudart.cudaDeviceSynchronize()

logger = trt.Logger(trt.Logger.ERROR)
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
config = builder.create_builder_config()
inputT0 = network.add_input("inputT0", trt.float32, (nB0, nH0, nW0))
inputT1 = network.add_input("inputT1", trt.float32, (nB1, nH1, nW1))
inputT2 = network.add_input("inputT2", trt.float32, (nH2, nW2))
#------------------------------------------------------------------------------- Network
einsumLayer = network.add_einsum([inputT0, inputT1, inputT2], "abc,dcb,de->ae")
#------------------------------------------------------------------------------- Network
network.mark_output(einsumLayer.get_output(0))
engineString = builder.build_serialized_network(network, config)
engine = trt.Runtime(logger).deserialize_cuda_engine(engineString)
nIO = engine.num_io_tensors
lTensorName = [engine.get_tensor_name(i) for i in range(nIO)]
nInput = [engine.get_tensor_mode(lTensorName[i]) for i in range(nIO)].count(trt.TensorIOMode.INPUT)

context = engine.create_execution_context()
for i in range(nIO):
    print("[%2d]%s->" % (i, "Input " if i < nInput else "Output"), engine.get_tensor_dtype(lTensorName[i]), engine.get_tensor_shape(lTensorName[i]), context.get_tensor_shape(lTensorName[i]), lTensorName[i])

bufferH = []
bufferH.append(np.ascontiguousarray(data0))
bufferH.append(np.ascontiguousarray(data1))
bufferH.append(np.ascontiguousarray(data2))
for i in range(nInput, nIO):
    bufferH.append(np.empty(context.get_tensor_shape(lTensorName[i]), dtype=trt.nptype(engine.get_tensor_dtype(lTensorName[i]))))
bufferD = []
for i in range(nIO):
    bufferD.append(cudart.cudaMalloc(bufferH[i].nbytes)[1])

for i in range(nInput):
    cudart.cudaMemcpy(bufferD[i], bufferH[i].ctypes.data, bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice)

for i in range(nIO):
    context.set_tensor_address(lTensorName[i], int(bufferD[i]))

context.execute_async_v3(0)

for i in range(nInput, nIO):
    cudart.cudaMemcpy(bufferH[i].ctypes.data, bufferD[i], bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost)

for i in range(nIO):
    print(lTensorName[i])
    print(bufferH[i])

for b in bufferD:
    cudart.cudaFree(b)
trt-samples-for-hackathon-cn-master
cookbook/02-API/Layer/EinsumLayer/TripleTensor.py
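The three-operand equation above can be verified against np.einsum, which uses the same notation; a minimal sketch with the same shapes:

import numpy as np

data0 = np.arange(1 * 2 * 3, dtype=np.float32).reshape(1, 2, 3)
data1 = np.ones([4, 3, 2], dtype=np.float32)
data2 = np.ones([4, 5], dtype=np.float32)
ref = np.einsum("abc,dcb,de->ae", data0, data1, data2)  # b, c and d are contracted; a and e are kept
print(ref.shape)  # (1, 5)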
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import numpy as np
import tensorrt as trt
from cuda import cudart

nB0, nH0, nW0 = 2, 2, 3
nB1, nH1, nW1 = 2, 3, 4
data0 = np.arange(nB0 * nH0 * nW0, dtype=np.float32).reshape(nB0, nH0, nW0)
data1 = np.ones(nB1 * nH1 * nW1, dtype=np.float32).reshape(nB1, nH1, nW1)
np.set_printoptions(precision=3, linewidth=200, suppress=True)
cudart.cudaDeviceSynchronize()

logger = trt.Logger(trt.Logger.ERROR)
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
config = builder.create_builder_config()
inputT0 = network.add_input("inputT0", trt.float32, (nB0, nH0, nW0))
inputT1 = network.add_input("inputT1", trt.float32, (nB1, nH1, nW1))
#------------------------------------------------------------------------------- Network
einsumLayer = network.add_einsum([inputT0, inputT1], "ijk,ikl->ijl")
#------------------------------------------------------------------------------- Network
network.mark_output(einsumLayer.get_output(0))

engineString = builder.build_serialized_network(network, config)
engine = trt.Runtime(logger).deserialize_cuda_engine(engineString)
nIO = engine.num_io_tensors
lTensorName = [engine.get_tensor_name(i) for i in range(nIO)]
nInput = [engine.get_tensor_mode(lTensorName[i]) for i in range(nIO)].count(trt.TensorIOMode.INPUT)

context = engine.create_execution_context()
for i in range(nIO):
    print("[%2d]%s->" % (i, "Input " if i < nInput else "Output"), engine.get_tensor_dtype(lTensorName[i]), engine.get_tensor_shape(lTensorName[i]), context.get_tensor_shape(lTensorName[i]), lTensorName[i])

bufferH = []
bufferH.append(np.ascontiguousarray(data0))
bufferH.append(np.ascontiguousarray(data1))
for i in range(nInput, nIO):
    bufferH.append(np.empty(context.get_tensor_shape(lTensorName[i]), dtype=trt.nptype(engine.get_tensor_dtype(lTensorName[i]))))
bufferD = []
for i in range(nIO):
    bufferD.append(cudart.cudaMalloc(bufferH[i].nbytes)[1])

for i in range(nInput):
    cudart.cudaMemcpy(bufferD[i], bufferH[i].ctypes.data, bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice)

for i in range(nIO):
    context.set_tensor_address(lTensorName[i], int(bufferD[i]))

context.execute_async_v3(0)

for i in range(nInput, nIO):
    cudart.cudaMemcpy(bufferH[i].ctypes.data, bufferD[i], bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost)

for i in range(nIO):
    print(lTensorName[i])
    print(bufferH[i])

for b in bufferD:
    cudart.cudaFree(b)
trt-samples-for-hackathon-cn-master
cookbook/02-API/Layer/EinsumLayer/MatrixMultiplication.py
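The equation "ijk,ikl->ijl" is exactly batched matrix multiplication, which a quick NumPy check confirms (a sketch with my own variable names):

import numpy as np

a = np.arange(2 * 2 * 3, dtype=np.float32).reshape(2, 2, 3)
b = np.ones([2, 3, 4], dtype=np.float32)
print(np.allclose(np.einsum("ijk,ikl->ijl", a, b), a @ b))  # True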
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import numpy as np
import tensorrt as trt
from cuda import cudart

nB, nH, nW = 1, 3, 4
data = np.arange(nB * nH * nW, dtype=np.float32).reshape(nB, nH, nW)
np.set_printoptions(precision=3, linewidth=200, suppress=True)
cudart.cudaDeviceSynchronize()

logger = trt.Logger(trt.Logger.ERROR)
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
config = builder.create_builder_config()
inputT0 = network.add_input("inputT0", trt.float32, (nB, nH, nW))
#------------------------------------------------------------------------------- Network
einsumLayer = network.add_einsum([inputT0], "ijk->ij")
#------------------------------------------------------------------------------- Network
network.mark_output(einsumLayer.get_output(0))

engineString = builder.build_serialized_network(network, config)
engine = trt.Runtime(logger).deserialize_cuda_engine(engineString)
nIO = engine.num_io_tensors
lTensorName = [engine.get_tensor_name(i) for i in range(nIO)]
nInput = [engine.get_tensor_mode(lTensorName[i]) for i in range(nIO)].count(trt.TensorIOMode.INPUT)

context = engine.create_execution_context()
for i in range(nIO):
    print("[%2d]%s->" % (i, "Input " if i < nInput else "Output"), engine.get_tensor_dtype(lTensorName[i]), engine.get_tensor_shape(lTensorName[i]), context.get_tensor_shape(lTensorName[i]), lTensorName[i])

bufferH = []
bufferH.append(np.ascontiguousarray(data))
for i in range(nInput, nIO):
    bufferH.append(np.empty(context.get_tensor_shape(lTensorName[i]), dtype=trt.nptype(engine.get_tensor_dtype(lTensorName[i]))))
bufferD = []
for i in range(nIO):
    bufferD.append(cudart.cudaMalloc(bufferH[i].nbytes)[1])

for i in range(nInput):
    cudart.cudaMemcpy(bufferD[i], bufferH[i].ctypes.data, bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice)

for i in range(nIO):
    context.set_tensor_address(lTensorName[i], int(bufferD[i]))

context.execute_async_v3(0)

for i in range(nInput, nIO):
    cudart.cudaMemcpy(bufferH[i].ctypes.data, bufferD[i], bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost)

for i in range(nIO):
    print(lTensorName[i])
    print(bufferH[i])

for b in bufferD:
    cudart.cudaFree(b)
trt-samples-for-hackathon-cn-master
cookbook/02-API/Layer/EinsumLayer/Reduce.py
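A subscript that appears on the left of "->" but not on the right is summed out, so "ijk->ij" is a reduction over the last axis; a NumPy cross-check:

import numpy as np

x = np.arange(1 * 3 * 4, dtype=np.float32).reshape(1, 3, 4)
print(np.allclose(np.einsum("ijk->ij", x), x.sum(axis=2)))  # True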
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import numpy as np
import tensorrt as trt
from cuda import cudart

nB0, nH0, nW0 = 1, 3, 4
nB1, nH1, nW1 = 2, 3, 5
data0 = np.arange(nB0 * nH0 * nW0, dtype=np.float32).reshape(nB0, nH0, nW0)
data1 = np.arange(nB1 * nH1 * nW1, dtype=np.float32).reshape(nB1, nH1, nW1)
np.set_printoptions(precision=3, linewidth=200, suppress=True)
cudart.cudaDeviceSynchronize()

logger = trt.Logger(trt.Logger.ERROR)
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
config = builder.create_builder_config()
inputT0 = network.add_input("inputT0", trt.float32, (nB0, nH0, nW0))
inputT1 = network.add_input("inputT1", trt.float32, (nB1, nH1, nW1))
#------------------------------------------------------------------------------- Network
einsumLayer = network.add_einsum([inputT0, inputT1], "ijk,ijq->ijq")
einsumLayer.equation = "ijk,pjr->ikpr"  # reset the equation
#------------------------------------------------------------------------------- Network
network.mark_output(einsumLayer.get_output(0))

engineString = builder.build_serialized_network(network, config)
engine = trt.Runtime(logger).deserialize_cuda_engine(engineString)
nIO = engine.num_io_tensors
lTensorName = [engine.get_tensor_name(i) for i in range(nIO)]
nInput = [engine.get_tensor_mode(lTensorName[i]) for i in range(nIO)].count(trt.TensorIOMode.INPUT)

context = engine.create_execution_context()
for i in range(nIO):
    print("[%2d]%s->" % (i, "Input " if i < nInput else "Output"), engine.get_tensor_dtype(lTensorName[i]), engine.get_tensor_shape(lTensorName[i]), context.get_tensor_shape(lTensorName[i]), lTensorName[i])

bufferH = []
bufferH.append(np.ascontiguousarray(data0))
bufferH.append(np.ascontiguousarray(data1))
for i in range(nInput, nIO):
    bufferH.append(np.empty(context.get_tensor_shape(lTensorName[i]), dtype=trt.nptype(engine.get_tensor_dtype(lTensorName[i]))))
bufferD = []
for i in range(nIO):
    bufferD.append(cudart.cudaMalloc(bufferH[i].nbytes)[1])

for i in range(nInput):
    cudart.cudaMemcpy(bufferD[i], bufferH[i].ctypes.data, bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice)

for i in range(nIO):
    context.set_tensor_address(lTensorName[i], int(bufferD[i]))

context.execute_async_v3(0)

for i in range(nInput, nIO):
    cudart.cudaMemcpy(bufferH[i].ctypes.data, bufferD[i], bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost)

for i in range(nIO):
    print(lTensorName[i])
    print(bufferH[i])

for b in bufferD:
    cudart.cudaFree(b)
trt-samples-for-hackathon-cn-master
cookbook/02-API/Layer/EinsumLayer/Equation.py
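The equation passed to the constructor above looks like a placeholder; with subscript i equal to 1 in one input and 2 in the other, it is the reset equation "ijk,pjr->ikpr" (contracting only j) that matches the shapes. A NumPy cross-check of the final equation:

import numpy as np

a = np.arange(1 * 3 * 4, dtype=np.float32).reshape(1, 3, 4)
b = np.arange(2 * 3 * 5, dtype=np.float32).reshape(2, 3, 5)
print(np.einsum("ijk,pjr->ikpr", a, b).shape)  # (1, 4, 2, 5): j is contracted, the rest are kept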
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import numpy as np
import tensorrt as trt
from cuda import cudart

nB, nH, nW = 1, 4, 4
data = np.arange(nB * nH * nW, dtype=np.float32).reshape(nB, nH, nW)
np.set_printoptions(precision=3, linewidth=200, suppress=True)
cudart.cudaDeviceSynchronize()

logger = trt.Logger(trt.Logger.ERROR)
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
config = builder.create_builder_config()
inputT0 = network.add_input("inputT0", trt.float32, (nB, nH, nW))
#------------------------------------------------------------------------------- Network
einsumLayer = network.add_einsum([inputT0], "ijj->ij")
#------------------------------------------------------------------------------- Network
network.mark_output(einsumLayer.get_output(0))

engineString = builder.build_serialized_network(network, config)
engine = trt.Runtime(logger).deserialize_cuda_engine(engineString)
nIO = engine.num_io_tensors
lTensorName = [engine.get_tensor_name(i) for i in range(nIO)]
nInput = [engine.get_tensor_mode(lTensorName[i]) for i in range(nIO)].count(trt.TensorIOMode.INPUT)

context = engine.create_execution_context()
for i in range(nIO):
    print("[%2d]%s->" % (i, "Input " if i < nInput else "Output"), engine.get_tensor_dtype(lTensorName[i]), engine.get_tensor_shape(lTensorName[i]), context.get_tensor_shape(lTensorName[i]), lTensorName[i])

bufferH = []
bufferH.append(np.ascontiguousarray(data))
for i in range(nInput, nIO):
    bufferH.append(np.empty(context.get_tensor_shape(lTensorName[i]), dtype=trt.nptype(engine.get_tensor_dtype(lTensorName[i]))))
bufferD = []
for i in range(nIO):
    bufferD.append(cudart.cudaMalloc(bufferH[i].nbytes)[1])

for i in range(nInput):
    cudart.cudaMemcpy(bufferD[i], bufferH[i].ctypes.data, bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice)

for i in range(nIO):
    context.set_tensor_address(lTensorName[i], int(bufferD[i]))

context.execute_async_v3(0)

for i in range(nInput, nIO):
    cudart.cudaMemcpy(bufferH[i].ctypes.data, bufferD[i], bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost)

for i in range(nIO):
    print(lTensorName[i])
    print(bufferH[i])

for b in bufferD:
    cudart.cudaFree(b)
trt-samples-for-hackathon-cn-master
cookbook/02-API/Layer/EinsumLayer/Diagonal.py
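A subscript repeated within one operand, as in "ijj->ij", selects the diagonal; the NumPy equivalent:

import numpy as np

x = np.arange(1 * 4 * 4, dtype=np.float32).reshape(1, 4, 4)
ref = np.einsum("ijj->ij", x)  # diagonal of each 4x4 matrix in the batch
print(np.allclose(ref, np.diagonal(x, axis1=1, axis2=2)))  # True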
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import numpy as np
import tensorrt as trt
from cuda import cudart

nB, nH, nW = 1, 3, 4
data = np.arange(nB * nH * nW, dtype=np.float32).reshape(nB, nH, nW)
np.set_printoptions(precision=3, linewidth=200, suppress=True)
cudart.cudaDeviceSynchronize()

logger = trt.Logger(trt.Logger.ERROR)
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
config = builder.create_builder_config()
inputT0 = network.add_input("inputT0", trt.float32, (nB, nH, nW))
#------------------------------------------------------------------------------- Network
einsumLayer = network.add_einsum([inputT0], "...j->...j")
#------------------------------------------------------------------------------- Network
network.mark_output(einsumLayer.get_output(0))

engineString = builder.build_serialized_network(network, config)
engine = trt.Runtime(logger).deserialize_cuda_engine(engineString)
nIO = engine.num_io_tensors
lTensorName = [engine.get_tensor_name(i) for i in range(nIO)]
nInput = [engine.get_tensor_mode(lTensorName[i]) for i in range(nIO)].count(trt.TensorIOMode.INPUT)

context = engine.create_execution_context()
for i in range(nIO):
    print("[%2d]%s->" % (i, "Input " if i < nInput else "Output"), engine.get_tensor_dtype(lTensorName[i]), engine.get_tensor_shape(lTensorName[i]), context.get_tensor_shape(lTensorName[i]), lTensorName[i])

bufferH = []
bufferH.append(np.ascontiguousarray(data))
for i in range(nInput, nIO):
    bufferH.append(np.empty(context.get_tensor_shape(lTensorName[i]), dtype=trt.nptype(engine.get_tensor_dtype(lTensorName[i]))))
bufferD = []
for i in range(nIO):
    bufferD.append(cudart.cudaMalloc(bufferH[i].nbytes)[1])

for i in range(nInput):
    cudart.cudaMemcpy(bufferD[i], bufferH[i].ctypes.data, bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice)

for i in range(nIO):
    context.set_tensor_address(lTensorName[i], int(bufferD[i]))

context.execute_async_v3(0)

for i in range(nInput, nIO):
    cudart.cudaMemcpy(bufferH[i].ctypes.data, bufferD[i], bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost)

for i in range(nIO):
    print(lTensorName[i])
    print(bufferH[i])

for b in bufferD:
    cudart.cudaFree(b)
trt-samples-for-hackathon-cn-master
cookbook/02-API/Layer/EinsumLayer/Ellipsis.py
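The ellipsis stands for all leading axes that are not named, so "...j->...j" is an identity mapping here; in NumPy:

import numpy as np

x = np.arange(1 * 3 * 4, dtype=np.float32).reshape(1, 3, 4)
print(np.allclose(np.einsum("...j->...j", x), x))  # True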
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import numpy as np
import tensorrt as trt
from cuda import cudart

nB, nH, nW = 1, 3, 4
data = np.arange(nB * nH * nW, dtype=np.float32).reshape(nB, nH, nW)
np.set_printoptions(precision=3, linewidth=200, suppress=True)
cudart.cudaDeviceSynchronize()

logger = trt.Logger(trt.Logger.ERROR)
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
config = builder.create_builder_config()
inputT0 = network.add_input("inputT0", trt.float32, (nB, nH, nW))
#------------------------------------------------------------------------------- Network
einsumLayer = network.add_einsum([inputT0], "ijk->jki")
#------------------------------------------------------------------------------- Network
network.mark_output(einsumLayer.get_output(0))

engineString = builder.build_serialized_network(network, config)
engine = trt.Runtime(logger).deserialize_cuda_engine(engineString)
nIO = engine.num_io_tensors
lTensorName = [engine.get_tensor_name(i) for i in range(nIO)]
nInput = [engine.get_tensor_mode(lTensorName[i]) for i in range(nIO)].count(trt.TensorIOMode.INPUT)

context = engine.create_execution_context()
for i in range(nIO):
    print("[%2d]%s->" % (i, "Input " if i < nInput else "Output"), engine.get_tensor_dtype(lTensorName[i]), engine.get_tensor_shape(lTensorName[i]), context.get_tensor_shape(lTensorName[i]), lTensorName[i])

bufferH = []
bufferH.append(np.ascontiguousarray(data))
for i in range(nInput, nIO):
    bufferH.append(np.empty(context.get_tensor_shape(lTensorName[i]), dtype=trt.nptype(engine.get_tensor_dtype(lTensorName[i]))))
bufferD = []
for i in range(nIO):
    bufferD.append(cudart.cudaMalloc(bufferH[i].nbytes)[1])

for i in range(nInput):
    cudart.cudaMemcpy(bufferD[i], bufferH[i].ctypes.data, bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice)

for i in range(nIO):
    context.set_tensor_address(lTensorName[i], int(bufferD[i]))

context.execute_async_v3(0)

for i in range(nInput, nIO):
    cudart.cudaMemcpy(bufferH[i].ctypes.data, bufferD[i], bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost)

for i in range(nIO):
    print(lTensorName[i])
    print(bufferH[i])

for b in bufferD:
    cudart.cudaFree(b)
trt-samples-for-hackathon-cn-master
cookbook/02-API/Layer/EinsumLayer/Transpose.py
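Reordering the output subscripts permutes the axes, so "ijk->jki" is a transpose; a NumPy cross-check:

import numpy as np

x = np.arange(1 * 3 * 4, dtype=np.float32).reshape(1, 3, 4)
print(np.allclose(np.einsum("ijk->jki", x), x.transpose(1, 2, 0)))  # True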
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import numpy as np
import tensorrt as trt
from cuda import cudart

nB0, nH0, nW0 = 1, 3, 4
nB1, nH1, nW1 = 2, 3, 5
data0 = np.arange(nB0 * nH0 * nW0, dtype=np.float32).reshape(nB0, nH0, nW0)
data1 = np.arange(nB1 * nH1 * nW1, dtype=np.float32).reshape(nB1, nH1, nW1)
np.set_printoptions(precision=3, linewidth=200, suppress=True)
cudart.cudaDeviceSynchronize()

logger = trt.Logger(trt.Logger.ERROR)
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
config = builder.create_builder_config()
inputT0 = network.add_input("inputT0", trt.float32, (nB0, nH0, nW0))
inputT1 = network.add_input("inputT1", trt.float32, (nB1, nH1, nW1))
#------------------------------------------------------------------------------- Network
einsumLayer = network.add_einsum([inputT0, inputT1], "ijk,pjr->ikpr")
#------------------------------------------------------------------------------- Network
network.mark_output(einsumLayer.get_output(0))

engineString = builder.build_serialized_network(network, config)
engine = trt.Runtime(logger).deserialize_cuda_engine(engineString)
nIO = engine.num_io_tensors
lTensorName = [engine.get_tensor_name(i) for i in range(nIO)]
nInput = [engine.get_tensor_mode(lTensorName[i]) for i in range(nIO)].count(trt.TensorIOMode.INPUT)

context = engine.create_execution_context()
for i in range(nIO):
    print("[%2d]%s->" % (i, "Input " if i < nInput else "Output"), engine.get_tensor_dtype(lTensorName[i]), engine.get_tensor_shape(lTensorName[i]), context.get_tensor_shape(lTensorName[i]), lTensorName[i])

bufferH = []
bufferH.append(np.ascontiguousarray(data0))
bufferH.append(np.ascontiguousarray(data1))
for i in range(nInput, nIO):
    bufferH.append(np.empty(context.get_tensor_shape(lTensorName[i]), dtype=trt.nptype(engine.get_tensor_dtype(lTensorName[i]))))
bufferD = []
for i in range(nIO):
    bufferD.append(cudart.cudaMalloc(bufferH[i].nbytes)[1])

for i in range(nInput):
    cudart.cudaMemcpy(bufferD[i], bufferH[i].ctypes.data, bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice)

for i in range(nIO):
    context.set_tensor_address(lTensorName[i], int(bufferD[i]))

context.execute_async_v3(0)

for i in range(nInput, nIO):
    cudart.cudaMemcpy(bufferH[i].ctypes.data, bufferD[i], bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost)

for i in range(nIO):
    print(lTensorName[i])
    print(bufferH[i])

for b in bufferD:
    cudart.cudaFree(b)
trt-samples-for-hackathon-cn-master
cookbook/02-API/Layer/EinsumLayer/SimpleExample.py
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import numpy as np
import tensorrt as trt
from cuda import cudart

nB0, nH0, nW0 = 1, 1, 4
nB1, nH1, nW1 = 1, 1, 4
#nB0, nH0, nW0 = 1, 2, 4  # substitutive example
#nB1, nH1, nW1 = 1, 3, 4
data0 = np.arange(nB0 * nH0 * nW0, dtype=np.float32).reshape(nB0, nH0, nW0)
data1 = np.ones(nB1 * nH1 * nW1, dtype=np.float32).reshape(nB1, nH1, nW1)
np.set_printoptions(precision=3, linewidth=200, suppress=True)
cudart.cudaDeviceSynchronize()

logger = trt.Logger(trt.Logger.ERROR)
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
config = builder.create_builder_config()
inputT0 = network.add_input("inputT0", trt.float32, (nB0, nH0, nW0))
inputT1 = network.add_input("inputT1", trt.float32, (nB1, nH1, nW1))
#------------------------------------------------------------------------------- Network
einsumLayer = network.add_einsum([inputT0, inputT1], "ijk,pqk->")
#einsumLayer = network.add_einsum([inputT0, inputT1], "ijk,pqk->j")  # substitutive example
#------------------------------------------------------------------------------- Network
network.mark_output(einsumLayer.get_output(0))

engineString = builder.build_serialized_network(network, config)
engine = trt.Runtime(logger).deserialize_cuda_engine(engineString)
nIO = engine.num_io_tensors
lTensorName = [engine.get_tensor_name(i) for i in range(nIO)]
nInput = [engine.get_tensor_mode(lTensorName[i]) for i in range(nIO)].count(trt.TensorIOMode.INPUT)

context = engine.create_execution_context()
for i in range(nIO):
    print("[%2d]%s->" % (i, "Input " if i < nInput else "Output"), engine.get_tensor_dtype(lTensorName[i]), engine.get_tensor_shape(lTensorName[i]), context.get_tensor_shape(lTensorName[i]), lTensorName[i])

bufferH = []
bufferH.append(np.ascontiguousarray(data0))
bufferH.append(np.ascontiguousarray(data1))
for i in range(nInput, nIO):
    bufferH.append(np.empty(context.get_tensor_shape(lTensorName[i]), dtype=trt.nptype(engine.get_tensor_dtype(lTensorName[i]))))
bufferD = []
for i in range(nIO):
    bufferD.append(cudart.cudaMalloc(bufferH[i].nbytes)[1])

for i in range(nInput):
    cudart.cudaMemcpy(bufferD[i], bufferH[i].ctypes.data, bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice)

for i in range(nIO):
    context.set_tensor_address(lTensorName[i], int(bufferD[i]))

context.execute_async_v3(0)

for i in range(nInput, nIO):
    cudart.cudaMemcpy(bufferH[i].ctypes.data, bufferD[i], bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost)

for i in range(nIO):
    print(lTensorName[i])
    print(bufferH[i])

for b in bufferD:
    cudart.cudaFree(b)
trt-samples-for-hackathon-cn-master
cookbook/02-API/Layer/EinsumLayer/Dot.py
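An empty right-hand side, as in "ijk,pqk->", contracts k and sums away every remaining axis, giving a scalar dot product; a NumPy cross-check with the same data:

import numpy as np

a = np.arange(1 * 1 * 4, dtype=np.float32).reshape(1, 1, 4)
b = np.ones([1, 1, 4], dtype=np.float32)
print(np.einsum("ijk,pqk->", a, b))  # 6.0 = 0 + 1 + 2 + 3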
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import numpy as np
import tensorrt as trt
from cuda import cudart

nB, nC, nH, nW = 1, 3, 3, 3
data = np.tile(np.arange(-4, 5, dtype=np.float32).reshape(1, nH, nW), (nC, 1, 1))
np.set_printoptions(precision=3, linewidth=200, suppress=True)
cudart.cudaDeviceSynchronize()

logger = trt.Logger(trt.Logger.ERROR)
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
config = builder.create_builder_config()
inputT0 = network.add_input("inputT0", trt.float32, (nB, nC, nH, nW))
#------------------------------------------------------------------------------- Network
slopeLayer = network.add_constant((1, nC, 1, 1), np.array([0.5, 1, 2], dtype=np.float32))  # slope tensor; it only needs to be broadcastable to the shape of inputT0, so the slope can be controlled at the global, per-dimension, or per-element level
parametricReLULayer = network.add_parametric_relu(inputT0, slopeLayer.get_output(0))
#------------------------------------------------------------------------------- Network
network.mark_output(parametricReLULayer.get_output(0))

engineString = builder.build_serialized_network(network, config)
engine = trt.Runtime(logger).deserialize_cuda_engine(engineString)
context = engine.create_execution_context()
nInput = np.sum([engine.binding_is_input(i) for i in range(engine.num_bindings)])
nOutput = engine.num_bindings - nInput

bufferH = []
bufferH.append(data)
for i in range(nOutput):
    bufferH.append(np.empty(context.get_binding_shape(nInput + i), dtype=trt.nptype(engine.get_binding_dtype(nInput + i))))
bufferD = []
for i in range(engine.num_bindings):
    bufferD.append(cudart.cudaMalloc(bufferH[i].nbytes)[1])

for i in range(nInput):
    cudart.cudaMemcpy(bufferD[i], np.ascontiguousarray(bufferH[i].reshape(-1)).ctypes.data, bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice)

context.execute_v2(bufferD)

for i in range(nOutput):
    cudart.cudaMemcpy(bufferH[nInput + i].ctypes.data, bufferD[nInput + i], bufferH[nInput + i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost)

for i in range(nInput):
    print("Input %d:" % i, bufferH[i].shape, "\n", bufferH[i])
for i in range(nOutput):
    print("Output %d:" % i, bufferH[nInput + i].shape, "\n", bufferH[nInput + i])

for buffer in bufferD:
    cudart.cudaFree(buffer)
trt-samples-for-hackathon-cn-master
cookbook/02-API/Layer/ParametricReLULayer/SimpleExample.py
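The layer above applies PReLU with one slope per channel; a NumPy sketch of the same computation (variable names are mine):

import numpy as np

x = np.tile(np.arange(-4, 5, dtype=np.float32).reshape(1, 3, 3), (3, 1, 1))  # same data as the sample
slope = np.array([0.5, 1, 2], dtype=np.float32).reshape(3, 1, 1)             # one slope per channel
ref = np.where(x > 0, x, slope * x)  # PReLU: y = x for x > 0, slope * x otherwise
print(ref[0], ref[2], sep="\n")      # channel 0 halves the negatives, channel 2 doubles them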
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import numpy as np
import tensorrt as trt
from cuda import cudart

shape = [1, 3, 4, 5]
data = np.arange(np.prod(shape), dtype=np.float32).reshape(shape)
np.set_printoptions(precision=3, linewidth=200, suppress=True)
cudart.cudaDeviceSynchronize()

logger = trt.Logger(trt.Logger.ERROR)
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
config = builder.create_builder_config()
inputT0 = network.add_input("inputT0", trt.float32, (shape))
#------------------------------------------------------------------------------- Network
concatenationLayer = network.add_concatenation([inputT0, inputT0])
concatenationLayer.axis = 0
#------------------------------------------------------------------------------- Network
network.mark_output(concatenationLayer.get_output(0))

engineString = builder.build_serialized_network(network, config)
engine = trt.Runtime(logger).deserialize_cuda_engine(engineString)
nIO = engine.num_io_tensors
lTensorName = [engine.get_tensor_name(i) for i in range(nIO)]
nInput = [engine.get_tensor_mode(lTensorName[i]) for i in range(nIO)].count(trt.TensorIOMode.INPUT)

context = engine.create_execution_context()
for i in range(nIO):
    print("[%2d]%s->" % (i, "Input " if i < nInput else "Output"), engine.get_tensor_dtype(lTensorName[i]), engine.get_tensor_shape(lTensorName[i]), context.get_tensor_shape(lTensorName[i]), lTensorName[i])

bufferH = []
bufferH.append(np.ascontiguousarray(data))
for i in range(nInput, nIO):
    bufferH.append(np.empty(context.get_tensor_shape(lTensorName[i]), dtype=trt.nptype(engine.get_tensor_dtype(lTensorName[i]))))
bufferD = []
for i in range(nIO):
    bufferD.append(cudart.cudaMalloc(bufferH[i].nbytes)[1])

for i in range(nInput):
    cudart.cudaMemcpy(bufferD[i], bufferH[i].ctypes.data, bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice)

for i in range(nIO):
    context.set_tensor_address(lTensorName[i], int(bufferD[i]))

context.execute_async_v3(0)

for i in range(nInput, nIO):
    cudart.cudaMemcpy(bufferH[i].ctypes.data, bufferD[i], bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost)

for i in range(nIO):
    print(lTensorName[i])
    print(bufferH[i])

for b in bufferD:
    cudart.cudaFree(b)
trt-samples-for-hackathon-cn-master
cookbook/02-API/Layer/ConcatenationLayer/Axis.py
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import numpy as np
import tensorrt as trt
from cuda import cudart

shape = [1, 3, 4, 5]
data = np.arange(np.prod(shape), dtype=np.float32).reshape(shape)
np.set_printoptions(precision=3, linewidth=200, suppress=True)
cudart.cudaDeviceSynchronize()

logger = trt.Logger(trt.Logger.ERROR)
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
config = builder.create_builder_config()
inputT0 = network.add_input("inputT0", trt.float32, (shape))
#------------------------------------------------------------------------------- Network
concatenationLayer = network.add_concatenation([inputT0, inputT0])
#------------------------------------------------------------------------------- Network
network.mark_output(concatenationLayer.get_output(0))

engineString = builder.build_serialized_network(network, config)
engine = trt.Runtime(logger).deserialize_cuda_engine(engineString)
nIO = engine.num_io_tensors
lTensorName = [engine.get_tensor_name(i) for i in range(nIO)]
nInput = [engine.get_tensor_mode(lTensorName[i]) for i in range(nIO)].count(trt.TensorIOMode.INPUT)

context = engine.create_execution_context()
for i in range(nIO):
    print("[%2d]%s->" % (i, "Input " if i < nInput else "Output"), engine.get_tensor_dtype(lTensorName[i]), engine.get_tensor_shape(lTensorName[i]), context.get_tensor_shape(lTensorName[i]), lTensorName[i])

bufferH = []
bufferH.append(np.ascontiguousarray(data))
for i in range(nInput, nIO):
    bufferH.append(np.empty(context.get_tensor_shape(lTensorName[i]), dtype=trt.nptype(engine.get_tensor_dtype(lTensorName[i]))))
bufferD = []
for i in range(nIO):
    bufferD.append(cudart.cudaMalloc(bufferH[i].nbytes)[1])

for i in range(nInput):
    cudart.cudaMemcpy(bufferD[i], bufferH[i].ctypes.data, bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice)

for i in range(nIO):
    context.set_tensor_address(lTensorName[i], int(bufferD[i]))

context.execute_async_v3(0)

for i in range(nInput, nIO):
    cudart.cudaMemcpy(bufferH[i].ctypes.data, bufferD[i], bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost)

for i in range(nIO):
    print(lTensorName[i])
    print(bufferH[i])

for b in bufferD:
    cudart.cudaFree(b)
trt-samples-for-hackathon-cn-master
cookbook/02-API/Layer/ConcatenationLayer/SimpleExample.py
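The effect of the axis attribute (set explicitly in Axis.py above, left at the layer default in SimpleExample.py, whose default can be read off the shape it prints) is easy to see with np.concatenate; a minimal sketch:

import numpy as np

x = np.arange(np.prod([1, 3, 4, 5]), dtype=np.float32).reshape(1, 3, 4, 5)
print(np.concatenate([x, x], axis=0).shape)  # (2, 3, 4, 5), the axis chosen in Axis.py
print(np.concatenate([x, x], axis=1).shape)  # (1, 6, 4, 5)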
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import numpy as np
import tensorrt as trt
from cuda import cudart

nB, nC, nH, nW = 1, 3, 4, 5
nCOut = 2
data = np.arange(nC * nH * nW, dtype=np.float32).reshape(nB, nC, nH, nW)
weight = np.ones(nC * nH * nW, dtype=np.float32)
weight = np.ascontiguousarray(np.concatenate([weight, -weight], 0).reshape(nCOut, nC, nH, nW))
bias = np.ascontiguousarray(np.zeros(nCOut, dtype=np.float32))
np.set_printoptions(precision=3, linewidth=200, suppress=True)
cudart.cudaDeviceSynchronize()

logger = trt.Logger(trt.Logger.ERROR)
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
config = builder.create_builder_config()
config.set_flag(trt.BuilderFlag.INT8)  # need INT8 mode
inputT0 = network.add_input("inputT0", trt.float32, (nB, nC, nH, nW))
#------------------------------------------------------------------------------- Network
constantLayer0 = network.add_constant([], np.array([1], dtype=np.float32))
constantLayer1 = network.add_constant([], np.array([1], dtype=np.float32))
quantizeLayer0 = network.add_quantize(inputT0, constantLayer0.get_output(0))
quantizeLayer0.axis = 0
dequantizeLayer0 = network.add_dequantize(quantizeLayer0.get_output(0), constantLayer1.get_output(0))
dequantizeLayer0.axis = 0
fullyConnectedLayer = network.add_fully_connected(dequantizeLayer0.get_output(0), nCOut, trt.Weights(weight), trt.Weights(bias))
#------------------------------------------------------------------------------- Network
network.mark_output(fullyConnectedLayer.get_output(0))

engineString = builder.build_serialized_network(network, config)
engine = trt.Runtime(logger).deserialize_cuda_engine(engineString)
nIO = engine.num_io_tensors
lTensorName = [engine.get_tensor_name(i) for i in range(nIO)]
nInput = [engine.get_tensor_mode(lTensorName[i]) for i in range(nIO)].count(trt.TensorIOMode.INPUT)

context = engine.create_execution_context()
for i in range(nIO):
    print("[%2d]%s->" % (i, "Input " if i < nInput else "Output"), engine.get_tensor_dtype(lTensorName[i]), engine.get_tensor_shape(lTensorName[i]), context.get_tensor_shape(lTensorName[i]), lTensorName[i])

bufferH = []
bufferH.append(np.ascontiguousarray(data))
for i in range(nInput, nIO):
    bufferH.append(np.empty(context.get_tensor_shape(lTensorName[i]), dtype=trt.nptype(engine.get_tensor_dtype(lTensorName[i]))))
bufferD = []
for i in range(nIO):
    bufferD.append(cudart.cudaMalloc(bufferH[i].nbytes)[1])

for i in range(nInput):
    cudart.cudaMemcpy(bufferD[i], bufferH[i].ctypes.data, bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice)

for i in range(nIO):
    context.set_tensor_address(lTensorName[i], int(bufferD[i]))

context.execute_async_v3(0)

for i in range(nInput, nIO):
    cudart.cudaMemcpy(bufferH[i].ctypes.data, bufferD[i], bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost)

for i in range(nIO):
    print(lTensorName[i])
    print(bufferH[i])

for b in bufferD:
    cudart.cudaFree(b)
trt-samples-for-hackathon-cn-master
cookbook/02-API/Layer/FullyConnectedLayer/Set_input+INT8QDQ.py
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import numpy as np
import tensorrt as trt
from cuda import cudart

nB, nC, nH, nW = 1, 3, 4, 5
nCOut = 2
data = np.arange(nB * nC * nH * nW, dtype=np.float32).reshape(nB, nC, nH, nW)
weight = np.ones(nC * nH * nW, dtype=np.float32)
weight = np.ascontiguousarray(np.concatenate([weight, -weight], 0).reshape(nCOut, nC * nH * nW))
bias = np.ascontiguousarray(np.zeros(nCOut, dtype=np.float32))
np.set_printoptions(precision=3, linewidth=200, suppress=True)
cudart.cudaDeviceSynchronize()

logger = trt.Logger(trt.Logger.ERROR)
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
config = builder.create_builder_config()
inputT0 = network.add_input("inputT0", trt.float32, (nB, nC, nH, nW))
#------------------------------------------------------------------------------- Network
fullyConnectedLayer = network.add_fully_connected(inputT0, nCOut, trt.Weights(weight), trt.Weights(bias))
#------------------------------------------------------------------------------- Network
network.mark_output(fullyConnectedLayer.get_output(0))

engineString = builder.build_serialized_network(network, config)
engine = trt.Runtime(logger).deserialize_cuda_engine(engineString)
nIO = engine.num_io_tensors
lTensorName = [engine.get_tensor_name(i) for i in range(nIO)]
nInput = [engine.get_tensor_mode(lTensorName[i]) for i in range(nIO)].count(trt.TensorIOMode.INPUT)

context = engine.create_execution_context()
for i in range(nIO):
    print("[%2d]%s->" % (i, "Input " if i < nInput else "Output"), engine.get_tensor_dtype(lTensorName[i]), engine.get_tensor_shape(lTensorName[i]), context.get_tensor_shape(lTensorName[i]), lTensorName[i])

bufferH = []
bufferH.append(np.ascontiguousarray(data))
for i in range(nInput, nIO):
    bufferH.append(np.empty(context.get_tensor_shape(lTensorName[i]), dtype=trt.nptype(engine.get_tensor_dtype(lTensorName[i]))))
bufferD = []
for i in range(nIO):
    bufferD.append(cudart.cudaMalloc(bufferH[i].nbytes)[1])

for i in range(nInput):
    cudart.cudaMemcpy(bufferD[i], bufferH[i].ctypes.data, bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice)

for i in range(nIO):
    context.set_tensor_address(lTensorName[i], int(bufferD[i]))

context.execute_async_v3(0)

for i in range(nInput, nIO):
    cudart.cudaMemcpy(bufferH[i].ctypes.data, bufferD[i], bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost)

for i in range(nIO):
    print(lTensorName[i])
    print(bufferH[i])

for b in bufferD:
    cudart.cudaFree(b)
trt-samples-for-hackathon-cn-master
cookbook/02-API/Layer/FullyConnectedLayer/SimpleExample.py
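The fully connected layer flattens everything after the batch dimension and computes X @ W^T + bias; a NumPy cross-check with the same weights (variable names are mine):

import numpy as np

nB, nC, nH, nW, nCOut = 1, 3, 4, 5, 2
x = np.arange(nB * nC * nH * nW, dtype=np.float32).reshape(nB, nC * nH * nW)  # flattened input
w = np.concatenate([np.ones([1, nC * nH * nW]), -np.ones([1, nC * nH * nW])], 0).astype(np.float32)
print(x @ w.T)  # [[ 1770. -1770.]], matching the layer output up to its (nB, nCOut, 1, 1) shape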
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import numpy as np
import tensorrt as trt
from cuda import cudart

nB, nC, nH, nW = 1, 3, 4, 5
nCOut = 2
data = np.arange(nB * nC * nH * nW, dtype=np.float32).reshape(nB, nC, nH, nW)
weight = np.ones(nC * nH * nW, dtype=np.float32)
weight = np.ascontiguousarray(np.concatenate([weight, -weight], 0).reshape(nCOut, nC * nH * nW))
bias = np.ascontiguousarray(np.zeros(nCOut, dtype=np.float32))
np.set_printoptions(precision=3, linewidth=200, suppress=True)
cudart.cudaDeviceSynchronize()

logger = trt.Logger(trt.Logger.ERROR)
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
config = builder.create_builder_config()
inputT0 = network.add_input("inputT0", trt.float32, (nB, nC, nH, nW))
#------------------------------------------------------------------------------- Network
placeHolder = np.zeros(1, dtype=np.float32)
fullyConnectedLayer = network.add_fully_connected(inputT0, 1, placeHolder, placeHolder)
fullyConnectedLayer.num_output_channels = nCOut
fullyConnectedLayer.kernel = weight
fullyConnectedLayer.bias = bias
#------------------------------------------------------------------------------- Network
network.mark_output(fullyConnectedLayer.get_output(0))

engineString = builder.build_serialized_network(network, config)
engine = trt.Runtime(logger).deserialize_cuda_engine(engineString)
nIO = engine.num_io_tensors
lTensorName = [engine.get_tensor_name(i) for i in range(nIO)]
nInput = [engine.get_tensor_mode(lTensorName[i]) for i in range(nIO)].count(trt.TensorIOMode.INPUT)

context = engine.create_execution_context()
for i in range(nIO):
    print("[%2d]%s->" % (i, "Input " if i < nInput else "Output"), engine.get_tensor_dtype(lTensorName[i]), engine.get_tensor_shape(lTensorName[i]), context.get_tensor_shape(lTensorName[i]), lTensorName[i])

bufferH = []
bufferH.append(np.ascontiguousarray(data))
for i in range(nInput, nIO):
    bufferH.append(np.empty(context.get_tensor_shape(lTensorName[i]), dtype=trt.nptype(engine.get_tensor_dtype(lTensorName[i]))))
bufferD = []
for i in range(nIO):
    bufferD.append(cudart.cudaMalloc(bufferH[i].nbytes)[1])

for i in range(nInput):
    cudart.cudaMemcpy(bufferD[i], bufferH[i].ctypes.data, bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice)

for i in range(nIO):
    context.set_tensor_address(lTensorName[i], int(bufferD[i]))

context.execute_async_v3(0)

for i in range(nInput, nIO):
    cudart.cudaMemcpy(bufferH[i].ctypes.data, bufferD[i], bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost)

for i in range(nIO):
    print(lTensorName[i])
    print(bufferH[i])

for b in bufferD:
    cudart.cudaFree(b)
trt-samples-for-hackathon-cn-master
cookbook/02-API/Layer/FullyConnectedLayer/Num_output_channels+Kernel+Bias.py
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import numpy as np
import tensorrt as trt
from cuda import cudart

np.random.seed(31193)
np.set_printoptions(precision=3, linewidth=200, suppress=True)
cudart.cudaDeviceSynchronize()

shape = [1, 3, 4, 5]
shapeSB = [1, 1] + shape[2:]  # shape = [1, 1, 4, 5]
data0 = np.arange(np.prod(shape[2:]), dtype=np.float32).reshape(1, 1, *shape[2:])
data1 = 100 - np.arange(np.prod(shape[2:]), dtype=np.float32).reshape(1, 1, *shape[2:])
data2 = np.ones(shape[2:], dtype=np.float32).reshape(1, 1, *shape[2:])
data = np.concatenate([data0, data1, data2], axis=1)
scale = np.full(shapeSB, 1, dtype=np.float32)
bias = np.full(shapeSB, 0, dtype=np.float32)

logger = trt.Logger(trt.Logger.ERROR)
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
config = builder.create_builder_config()
inputT0 = network.add_input("inputT0", trt.float32, shape)
inputT1 = network.add_input("inputT1", trt.float32, shapeSB)
inputT2 = network.add_input("inputT2", trt.float32, shapeSB)
#------------------------------------------------------------------------------- Network
normalizationLayer = network.add_normalization(inputT0, inputT1, inputT2, 1 << 2 | 1 << 3)
#normalizationLayer.epsilon = 1  # set epsilon, we can set it as 1 to compare the difference of the output
#normalizationLayer.axes = 1 << 3  # reset the axes mask after the constructor
#normalizationLayer.compute_precision = trt.float32  # set the precision of computation
#------------------------------------------------------------------------------- Network
network.mark_output(normalizationLayer.get_output(0))

engineString = builder.build_serialized_network(network, config)
engine = trt.Runtime(logger).deserialize_cuda_engine(engineString)
nIO = engine.num_io_tensors
lTensorName = [engine.get_tensor_name(i) for i in range(nIO)]
nInput = [engine.get_tensor_mode(lTensorName[i]) for i in range(nIO)].count(trt.TensorIOMode.INPUT)

context = engine.create_execution_context()
for i in range(nIO):
    print("[%2d]%s->" % (i, "Input " if i < nInput else "Output"), engine.get_tensor_dtype(lTensorName[i]), engine.get_tensor_shape(lTensorName[i]), context.get_tensor_shape(lTensorName[i]), lTensorName[i])

bufferH = []
bufferH.append(data)
bufferH.append(scale)
bufferH.append(bias)
for i in range(nInput, nIO):
    bufferH.append(np.empty(context.get_tensor_shape(lTensorName[i]), dtype=trt.nptype(engine.get_tensor_dtype(lTensorName[i]))))
bufferD = []
for i in range(nIO):
    bufferD.append(cudart.cudaMalloc(bufferH[i].nbytes)[1])

for i in range(nInput):
    cudart.cudaMemcpy(bufferD[i], bufferH[i].ctypes.data, bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice)

for i in range(nIO):
    context.set_tensor_address(lTensorName[i], int(bufferD[i]))

context.execute_async_v3(0)

for i in range(nInput, nIO):
    cudart.cudaMemcpy(bufferH[i].ctypes.data, bufferD[i], bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost)

for i in range(nIO):
    print(lTensorName[i])
    print(bufferH[i])

for b in bufferD:
    cudart.cudaFree(b)
trt-samples-for-hackathon-cn-master
cookbook/02-API/Layer/NormalizationLayer/LayerNormalization.py
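The axes mask 1 << 2 | 1 << 3 normalizes over the last two axes; a NumPy sketch of the same computation (eps is my assumption for the layer's default epsilon, which the sample leaves unset):

import numpy as np

x = np.random.rand(1, 3, 4, 5).astype(np.float32)
eps = 1e-5  # assumed epsilon, not taken from the sample
mean = x.mean(axis=(2, 3), keepdims=True)
var = x.var(axis=(2, 3), keepdims=True)
y = (x - mean) / np.sqrt(var + eps)  # scale = 1 and bias = 0, as in the sample
print(y.mean(axis=(2, 3)), y.std(axis=(2, 3)))  # ~0 and ~1 for every (batch, channel) slice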
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import numpy as np
import tensorrt as trt
from cuda import cudart

np.random.seed(31193)
np.set_printoptions(precision=3, linewidth=200, suppress=True)
cudart.cudaDeviceSynchronize()

shape = [1, 4, 3, 5]
nGroup = 2
shapeSB = [1, nGroup, 1, 1]
data0 = np.arange(np.prod(shape[2:]), dtype=np.float32).reshape(1, 1, *shape[2:])
data1 = np.arange(np.prod(shape[2:]), dtype=np.float32).reshape(1, 1, *shape[2:]) + 100
data2 = np.zeros(shape[2:], dtype=np.float32).reshape(1, 1, *shape[2:])
data3 = np.ones(shape[2:], dtype=np.float32).reshape(1, 1, *shape[2:])
data = np.concatenate([data0, data1, data2, data3], axis=1)
scale = np.full(shapeSB, 1, dtype=np.float32)
bias = np.full(shapeSB, 0, dtype=np.float32)

logger = trt.Logger(trt.Logger.ERROR)
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
config = builder.create_builder_config()
inputT0 = network.add_input("inputT0", trt.float32, shape)
inputT1 = network.add_input("inputT1", trt.float32, shapeSB)
inputT2 = network.add_input("inputT2", trt.float32, shapeSB)
#------------------------------------------------------------------------------- Network
normalizationLayer = network.add_normalization(inputT0, inputT1, inputT2, 1 << 2 | 1 << 3)
normalizationLayer.num_groups = nGroup  # set number of groups
#normalizationLayer.epsilon = 1  # set epsilon, we can set it as 1 to compare the difference of the output
#normalizationLayer.axes = 1 << 3  # reset the axes mask after the constructor
#normalizationLayer.compute_precision = trt.float32  # set the precision of computation
#------------------------------------------------------------------------------- Network
network.mark_output(normalizationLayer.get_output(0))

engineString = builder.build_serialized_network(network, config)
engine = trt.Runtime(logger).deserialize_cuda_engine(engineString)
nIO = engine.num_io_tensors
lTensorName = [engine.get_tensor_name(i) for i in range(nIO)]
nInput = [engine.get_tensor_mode(lTensorName[i]) for i in range(nIO)].count(trt.TensorIOMode.INPUT)

context = engine.create_execution_context()
for i in range(nIO):
    print("[%2d]%s->" % (i, "Input " if i < nInput else "Output"), engine.get_tensor_dtype(lTensorName[i]), engine.get_tensor_shape(lTensorName[i]), context.get_tensor_shape(lTensorName[i]), lTensorName[i])

bufferH = []
bufferH.append(data)
bufferH.append(scale)
bufferH.append(bias)
for i in range(nInput, nIO):
    bufferH.append(np.empty(context.get_tensor_shape(lTensorName[i]), dtype=trt.nptype(engine.get_tensor_dtype(lTensorName[i]))))
bufferD = []
for i in range(nIO):
    bufferD.append(cudart.cudaMalloc(bufferH[i].nbytes)[1])

for i in range(nInput):
    cudart.cudaMemcpy(bufferD[i], bufferH[i].ctypes.data, bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice)

for i in range(nIO):
    context.set_tensor_address(lTensorName[i], int(bufferD[i]))

context.execute_async_v3(0)

for i in range(nInput, nIO):
    cudart.cudaMemcpy(bufferH[i].ctypes.data, bufferD[i], bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost)

for i in range(nIO):
    print(lTensorName[i])
    print(bufferH[i])

for b in bufferD:
    cudart.cudaFree(b)
trt-samples-for-hackathon-cn-master
cookbook/02-API/Layer/NormalizationLayer/GroupNormalization.py
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import numpy as np
import tensorrt as trt
from cuda import cudart

np.random.seed(31193)
np.set_printoptions(precision=3, linewidth=200, suppress=True)
cudart.cudaDeviceSynchronize()

shape = [1, 3, 4, 5]
shapeSB = [1] + shape[1:2] + [1, 1]  # shape = [1, 3, 1, 1]
data0 = np.arange(np.prod(shape[2:]), dtype=np.float32).reshape(1, 1, *shape[2:])
data1 = 100 - np.arange(np.prod(shape[2:]), dtype=np.float32).reshape(1, 1, *shape[2:])
data2 = np.ones(shape[2:], dtype=np.float32).reshape(1, 1, *shape[2:])
data = np.concatenate([data0, data1, data2], axis=1)
scale = np.full(shapeSB, 1, dtype=np.float32)
bias = np.full(shapeSB, 0, dtype=np.float32)

logger = trt.Logger(trt.Logger.ERROR)
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
config = builder.create_builder_config()
inputT0 = network.add_input("inputT0", trt.float32, shape)
inputT1 = network.add_input("inputT1", trt.float32, shapeSB)
inputT2 = network.add_input("inputT2", trt.float32, shapeSB)
#------------------------------------------------------------------------------- Network
normalizationLayer = network.add_normalization(inputT0, inputT1, inputT2, 1 << 2 | 1 << 3)
#normalizationLayer.epsilon = 1  # set epsilon, we can set it as 1 to compare the difference of the output
#normalizationLayer.axes = 1 << 3  # reset the axes mask after the constructor
#normalizationLayer.compute_precision = trt.float32  # set the precision of computation
#------------------------------------------------------------------------------- Network
network.mark_output(normalizationLayer.get_output(0))

engineString = builder.build_serialized_network(network, config)
engine = trt.Runtime(logger).deserialize_cuda_engine(engineString)
nIO = engine.num_io_tensors
lTensorName = [engine.get_tensor_name(i) for i in range(nIO)]
nInput = [engine.get_tensor_mode(lTensorName[i]) for i in range(nIO)].count(trt.TensorIOMode.INPUT)

context = engine.create_execution_context()
for i in range(nIO):
    print("[%2d]%s->" % (i, "Input " if i < nInput else "Output"), engine.get_tensor_dtype(lTensorName[i]), engine.get_tensor_shape(lTensorName[i]), context.get_tensor_shape(lTensorName[i]), lTensorName[i])

bufferH = []
bufferH.append(data)
bufferH.append(scale)
bufferH.append(bias)
for i in range(nInput, nIO):
    bufferH.append(np.empty(context.get_tensor_shape(lTensorName[i]), dtype=trt.nptype(engine.get_tensor_dtype(lTensorName[i]))))
bufferD = []
for i in range(nIO):
    bufferD.append(cudart.cudaMalloc(bufferH[i].nbytes)[1])

for i in range(nInput):
    cudart.cudaMemcpy(bufferD[i], bufferH[i].ctypes.data, bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice)

for i in range(nIO):
    context.set_tensor_address(lTensorName[i], int(bufferD[i]))

context.execute_async_v3(0)

for i in range(nInput, nIO):
    cudart.cudaMemcpy(bufferH[i].ctypes.data, bufferD[i], bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost)

for i in range(nIO):
    print(lTensorName[i])
    print(bufferH[i])

for b in bufferD:
    cudart.cudaFree(b)
trt-samples-for-hackathon-cn-master
cookbook/02-API/Layer/NormalizationLayer/InstanceNormalization.py
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import numpy as np
import tensorrt as trt
from cuda import cudart

nB, nC, nH, nW = 1, 1, 3, 3
nCOut, nKernelHeight, nKernelWidth = 1, 3, 3
data = np.arange(1, 1 + nB * nC * nH * nW, dtype=np.float32).reshape(nB, nC, nH, nW)
weight = np.asanyarray(np.power(10, range(4, -5, -1), dtype=np.float32))
bias = np.ascontiguousarray(np.zeros(nCOut, dtype=np.float32))
np.set_printoptions(precision=3, linewidth=200, suppress=True)
cudart.cudaDeviceSynchronize()

logger = trt.Logger(trt.Logger.ERROR)
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
config = builder.create_builder_config()
inputT0 = network.add_input("inputT0", trt.float32, (nB, nC, nH, nW))
#------------------------------------------------------------------------------- Network
deconvolutionLayer = network.add_deconvolution_nd(inputT0, nCOut, (nKernelHeight, nKernelWidth), trt.Weights(weight), trt.Weights(bias))
deconvolutionLayer.stride_nd = (2, 2)  # add stride to observe the result easily
deconvolutionLayer.padding_mode = trt.PaddingMode.SAME_UPPER
#deconvolutionLayer.padding_mode = trt.PaddingMode.SAME_LOWER
#deconvolutionLayer.padding_mode = trt.PaddingMode.EXPLICIT_ROUND_UP
#deconvolutionLayer.padding_mode = trt.PaddingMode.EXPLICIT_ROUND_DOWN
#deconvolutionLayer.padding_mode = trt.PaddingMode.CAFFE_ROUND_UP
#deconvolutionLayer.padding_mode = trt.PaddingMode.CAFFE_ROUND_DOWN
#------------------------------------------------------------------------------- Network
network.mark_output(deconvolutionLayer.get_output(0))

engineString = builder.build_serialized_network(network, config)
engine = trt.Runtime(logger).deserialize_cuda_engine(engineString)
nIO = engine.num_io_tensors
lTensorName = [engine.get_tensor_name(i) for i in range(nIO)]
nInput = [engine.get_tensor_mode(lTensorName[i]) for i in range(nIO)].count(trt.TensorIOMode.INPUT)

context = engine.create_execution_context()
for i in range(nIO):
    print("[%2d]%s->" % (i, "Input " if i < nInput else "Output"), engine.get_tensor_dtype(lTensorName[i]), engine.get_tensor_shape(lTensorName[i]), context.get_tensor_shape(lTensorName[i]), lTensorName[i])

bufferH = []
bufferH.append(np.ascontiguousarray(data))
for i in range(nInput, nIO):
    bufferH.append(np.empty(context.get_tensor_shape(lTensorName[i]), dtype=trt.nptype(engine.get_tensor_dtype(lTensorName[i]))))
bufferD = []
for i in range(nIO):
    bufferD.append(cudart.cudaMalloc(bufferH[i].nbytes)[1])

for i in range(nInput):
    cudart.cudaMemcpy(bufferD[i], bufferH[i].ctypes.data, bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice)

for i in range(nIO):
    context.set_tensor_address(lTensorName[i], int(bufferD[i]))

context.execute_async_v3(0)

for i in range(nInput, nIO):
    cudart.cudaMemcpy(bufferH[i].ctypes.data, bufferD[i], bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost)

for i in range(nIO):
    print(lTensorName[i])
    print(bufferH[i])

for b in bufferD:
    cudart.cudaFree(b)
trt-samples-for-hackathon-cn-master
cookbook/02-API/Layer/DeconvolutionNdLayer/Pading_mode.py
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import numpy as np
import tensorrt as trt
from cuda import cudart

nB, nC, nH, nW = 1, 2, 3, 3
nGroup = 2
nCOut, nKernelHeight, nKernelWidth = nGroup, 3, 3
data = np.tile(np.arange(1, 1 + nB * nC * nH * nW, dtype=np.float32).reshape(1, nC, nH, nW), (nB, 1, 1, 1))
weight = np.power(10, range(4, -5, -1), dtype=np.float32)
weight = np.ascontiguousarray(np.concatenate([weight, -weight], 0))
bias = np.ascontiguousarray(np.zeros(nCOut, dtype=np.float32))

np.set_printoptions(precision=3, linewidth=200, suppress=True)
cudart.cudaDeviceSynchronize()

logger = trt.Logger(trt.Logger.ERROR)
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
config = builder.create_builder_config()
inputT0 = network.add_input("inputT0", trt.float32, (nB, nC, nH, nW))
#------------------------------------------------------------------------------- Network
deconvolutionLayer = network.add_deconvolution_nd(inputT0, nCOut, (nKernelHeight, nKernelWidth), trt.Weights(weight), trt.Weights(bias))
deconvolutionLayer.num_groups = nGroup
#------------------------------------------------------------------------------- Network
network.mark_output(deconvolutionLayer.get_output(0))
engineString = builder.build_serialized_network(network, config)
engine = trt.Runtime(logger).deserialize_cuda_engine(engineString)
nIO = engine.num_io_tensors
lTensorName = [engine.get_tensor_name(i) for i in range(nIO)]
nInput = [engine.get_tensor_mode(lTensorName[i]) for i in range(nIO)].count(trt.TensorIOMode.INPUT)

context = engine.create_execution_context()
for i in range(nIO):
    print("[%2d]%s->" % (i, "Input " if i < nInput else "Output"), engine.get_tensor_dtype(lTensorName[i]), engine.get_tensor_shape(lTensorName[i]), context.get_tensor_shape(lTensorName[i]), lTensorName[i])

bufferH = []
bufferH.append(np.ascontiguousarray(data))
for i in range(nInput, nIO):
    bufferH.append(np.empty(context.get_tensor_shape(lTensorName[i]), dtype=trt.nptype(engine.get_tensor_dtype(lTensorName[i]))))
bufferD = []
for i in range(nIO):
    bufferD.append(cudart.cudaMalloc(bufferH[i].nbytes)[1])

for i in range(nInput):
    cudart.cudaMemcpy(bufferD[i], bufferH[i].ctypes.data, bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice)

for i in range(nIO):
    context.set_tensor_address(lTensorName[i], int(bufferD[i]))

context.execute_async_v3(0)

for i in range(nInput, nIO):
    cudart.cudaMemcpy(bufferH[i].ctypes.data, bufferD[i], bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost)

for i in range(nIO):
    print(lTensorName[i])
    print(bufferH[i])

for b in bufferD:
    cudart.cudaFree(b)
trt-samples-for-hackathon-cn-master
cookbook/02-API/Layer/DeconvolutionNdLayer/Num_groups.py
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import numpy as np
import tensorrt as trt
from cuda import cudart

nB, nC, nH, nW = 1, 1, 3, 3
nCOut, nKernelHeight, nKernelWidth = 1, 3, 3
data = np.arange(1, 1 + nB * nC * nH * nW, dtype=np.float32).reshape(nB, nC, nH, nW)
weight = np.asanyarray(np.power(10, range(4, -5, -1), dtype=np.float32))
bias = np.ascontiguousarray(np.zeros(nCOut, dtype=np.float32))

np.set_printoptions(precision=3, linewidth=200, suppress=True)
cudart.cudaDeviceSynchronize()

logger = trt.Logger(trt.Logger.ERROR)
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
config = builder.create_builder_config()
inputT0 = network.add_input("inputT0", trt.float32, (nB, nC, nH, nW))
#------------------------------------------------------------------------------- Network
hPre = wPre = 1
deconvolutionLayer = network.add_deconvolution_nd(inputT0, nCOut, (nKernelHeight, nKernelWidth), trt.Weights(weight), trt.Weights(bias))
deconvolutionLayer.pre_padding = (hPre, wPre)
#------------------------------------------------------------------------------- Network
network.mark_output(deconvolutionLayer.get_output(0))
engineString = builder.build_serialized_network(network, config)
engine = trt.Runtime(logger).deserialize_cuda_engine(engineString)
nIO = engine.num_io_tensors
lTensorName = [engine.get_tensor_name(i) for i in range(nIO)]
nInput = [engine.get_tensor_mode(lTensorName[i]) for i in range(nIO)].count(trt.TensorIOMode.INPUT)

context = engine.create_execution_context()
for i in range(nIO):
    print("[%2d]%s->" % (i, "Input " if i < nInput else "Output"), engine.get_tensor_dtype(lTensorName[i]), engine.get_tensor_shape(lTensorName[i]), context.get_tensor_shape(lTensorName[i]), lTensorName[i])

bufferH = []
bufferH.append(np.ascontiguousarray(data))
for i in range(nInput, nIO):
    bufferH.append(np.empty(context.get_tensor_shape(lTensorName[i]), dtype=trt.nptype(engine.get_tensor_dtype(lTensorName[i]))))
bufferD = []
for i in range(nIO):
    bufferD.append(cudart.cudaMalloc(bufferH[i].nbytes)[1])

for i in range(nInput):
    cudart.cudaMemcpy(bufferD[i], bufferH[i].ctypes.data, bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice)

for i in range(nIO):
    context.set_tensor_address(lTensorName[i], int(bufferD[i]))

context.execute_async_v3(0)

for i in range(nInput, nIO):
    cudart.cudaMemcpy(bufferH[i].ctypes.data, bufferD[i], bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost)

for i in range(nIO):
    print(lTensorName[i])
    print(bufferH[i])

for b in bufferD:
    cudart.cudaFree(b)
trt-samples-for-hackathon-cn-master
cookbook/02-API/Layer/DeconvolutionNdLayer/Pre_padding.py
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import numpy as np
import tensorrt as trt
from cuda import cudart

nB, nC, nH, nW = 1, 1, 3, 3
nCOut, nKernelHeight, nKernelWidth = 1, 3, 3
data = np.arange(1, 1 + nB * nC * nH * nW, dtype=np.float32).reshape(nB, nC, nH, nW)
weight = np.asanyarray(np.power(10, range(4, -5, -1), dtype=np.float32))
bias = np.ascontiguousarray(np.zeros(nCOut, dtype=np.float32))

np.set_printoptions(precision=3, linewidth=200, suppress=True)
cudart.cudaDeviceSynchronize()

logger = trt.Logger(trt.Logger.ERROR)
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
config = builder.create_builder_config()
inputT0 = network.add_input("inputT0", trt.float32, (nB, nC, nH, nW))
#------------------------------------------------------------------------------- Network
placeHolder = np.zeros(1, dtype=np.float32)
deconvolutionLayer = network.add_deconvolution_nd(inputT0, 1, (1, 1), placeHolder)
deconvolutionLayer.num_output_maps = nCOut
deconvolutionLayer.kernel_size_nd = (nKernelHeight, nKernelWidth)
deconvolutionLayer.kernel = weight
deconvolutionLayer.bias = bias
#------------------------------------------------------------------------------- Network
network.mark_output(deconvolutionLayer.get_output(0))
engineString = builder.build_serialized_network(network, config)
engine = trt.Runtime(logger).deserialize_cuda_engine(engineString)
nIO = engine.num_io_tensors
lTensorName = [engine.get_tensor_name(i) for i in range(nIO)]
nInput = [engine.get_tensor_mode(lTensorName[i]) for i in range(nIO)].count(trt.TensorIOMode.INPUT)

context = engine.create_execution_context()
for i in range(nIO):
    print("[%2d]%s->" % (i, "Input " if i < nInput else "Output"), engine.get_tensor_dtype(lTensorName[i]), engine.get_tensor_shape(lTensorName[i]), context.get_tensor_shape(lTensorName[i]), lTensorName[i])

bufferH = []
bufferH.append(np.ascontiguousarray(data))
for i in range(nInput, nIO):
    bufferH.append(np.empty(context.get_tensor_shape(lTensorName[i]), dtype=trt.nptype(engine.get_tensor_dtype(lTensorName[i]))))
bufferD = []
for i in range(nIO):
    bufferD.append(cudart.cudaMalloc(bufferH[i].nbytes)[1])

for i in range(nInput):
    cudart.cudaMemcpy(bufferD[i], bufferH[i].ctypes.data, bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice)

for i in range(nIO):
    context.set_tensor_address(lTensorName[i], int(bufferD[i]))

context.execute_async_v3(0)

for i in range(nInput, nIO):
    cudart.cudaMemcpy(bufferH[i].ctypes.data, bufferD[i], bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost)

for i in range(nIO):
    print(lTensorName[i])
    print(bufferH[i])

for b in bufferD:
    cudart.cudaFree(b)
trt-samples-for-hackathon-cn-master
cookbook/02-API/Layer/DeconvolutionNdLayer/Num_output_Maps+Kernel_size_nd+Kernel+Bias.py
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import numpy as np
import tensorrt as trt
from cuda import cudart

nB, nC, nH, nW = 1, 1, 3, 3
nCOut, nKernelHeight, nKernelWidth = 1, 3, 3
data = np.arange(1, 1 + nB * nC * nH * nW, dtype=np.float32).reshape(nB, nC, nH, nW)
weight = np.ascontiguousarray(np.power(10, range(4, -5, -1), dtype=np.float32))
bias = np.ascontiguousarray(np.zeros(nCOut, dtype=np.float32))

np.set_printoptions(precision=3, linewidth=200, suppress=True)
cudart.cudaDeviceSynchronize()

logger = trt.Logger(trt.Logger.ERROR)
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
config = builder.create_builder_config()
inputT0 = network.add_input("inputT0", trt.float32, (nB, nC, nH, nW))
#------------------------------------------------------------------------------- Network
deconvolutionLayer = network.add_deconvolution_nd(inputT0, nCOut, (nKernelHeight, nKernelWidth), trt.Weights(weight), trt.Weights(bias))
deconvolutionLayer.dilation_nd = (2, 2)  # set the dilation this example is named for; (2, 2) is an assumed demonstration value
#------------------------------------------------------------------------------- Network
network.mark_output(deconvolutionLayer.get_output(0))
engineString = builder.build_serialized_network(network, config)
engine = trt.Runtime(logger).deserialize_cuda_engine(engineString)
nIO = engine.num_io_tensors
lTensorName = [engine.get_tensor_name(i) for i in range(nIO)]
nInput = [engine.get_tensor_mode(lTensorName[i]) for i in range(nIO)].count(trt.TensorIOMode.INPUT)

context = engine.create_execution_context()
for i in range(nIO):
    print("[%2d]%s->" % (i, "Input " if i < nInput else "Output"), engine.get_tensor_dtype(lTensorName[i]), engine.get_tensor_shape(lTensorName[i]), context.get_tensor_shape(lTensorName[i]), lTensorName[i])

bufferH = []
bufferH.append(np.ascontiguousarray(data))
for i in range(nInput, nIO):
    bufferH.append(np.empty(context.get_tensor_shape(lTensorName[i]), dtype=trt.nptype(engine.get_tensor_dtype(lTensorName[i]))))
bufferD = []
for i in range(nIO):
    bufferD.append(cudart.cudaMalloc(bufferH[i].nbytes)[1])

for i in range(nInput):
    cudart.cudaMemcpy(bufferD[i], bufferH[i].ctypes.data, bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice)

for i in range(nIO):
    context.set_tensor_address(lTensorName[i], int(bufferD[i]))

context.execute_async_v3(0)

for i in range(nInput, nIO):
    cudart.cudaMemcpy(bufferH[i].ctypes.data, bufferD[i], bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost)

for i in range(nIO):
    print(lTensorName[i])
    print(bufferH[i])

for b in bufferD:
    cudart.cudaFree(b)
trt-samples-for-hackathon-cn-master
cookbook/02-API/Layer/DeconvolutionNdLayer/Dilation_nd.py
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import numpy as np
import tensorrt as trt
from cuda import cudart

nB, nC, nH, nW = 1, 1, 3, 3
nCOut, nKernelHeight, nKernelWidth = 1, 3, 3
data = np.arange(1, 1 + nB * nC * nH * nW, dtype=np.float32).reshape(nB, nC, nH, nW)
weight = np.ascontiguousarray(np.power(10, range(4, -5, -1), dtype=np.float32))
bias = np.ascontiguousarray(np.zeros(nCOut, dtype=np.float32))

np.set_printoptions(precision=3, linewidth=200, suppress=True)
cudart.cudaDeviceSynchronize()

logger = trt.Logger(trt.Logger.ERROR)
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
config = builder.create_builder_config()
config.set_flag(trt.BuilderFlag.INT8)  # need INT8 mode
inputT0 = network.add_input("inputT0", trt.float32, (nB, nC, nH, nW))
#------------------------------------------------------------------------------- Network
constantLayer0 = network.add_constant([], np.array([1], dtype=np.float32))
constantLayer1 = network.add_constant([], np.array([1], dtype=np.float32))
weightLayer = network.add_constant([nCOut, nC, nKernelHeight, nKernelWidth], weight)

quantizeLayer0 = network.add_quantize(inputT0, constantLayer0.get_output(0))
quantizeLayer0.axis = 0
dequantizeLayer0 = network.add_dequantize(quantizeLayer0.get_output(0), constantLayer1.get_output(0))
dequantizeLayer0.axis = 0
quantizeLayer1 = network.add_quantize(weightLayer.get_output(0), constantLayer0.get_output(0))
quantizeLayer1.axis = 0
dequantizeLayer1 = network.add_dequantize(quantizeLayer1.get_output(0), constantLayer1.get_output(0))
dequantizeLayer1.axis = 0

deconvolutionLayer = network.add_deconvolution_nd(dequantizeLayer0.get_output(0), nCOut, (nKernelHeight, nKernelWidth), trt.Weights())  # set weight as empty in the constructor
deconvolutionLayer.set_input(1, dequantizeLayer1.get_output(0))
#------------------------------------------------------------------------------- Network
network.mark_output(deconvolutionLayer.get_output(0))
engineString = builder.build_serialized_network(network, config)
engine = trt.Runtime(logger).deserialize_cuda_engine(engineString)
nIO = engine.num_io_tensors
lTensorName = [engine.get_tensor_name(i) for i in range(nIO)]
nInput = [engine.get_tensor_mode(lTensorName[i]) for i in range(nIO)].count(trt.TensorIOMode.INPUT)

context = engine.create_execution_context()
for i in range(nIO):
    print("[%2d]%s->" % (i, "Input " if i < nInput else "Output"), engine.get_tensor_dtype(lTensorName[i]), engine.get_tensor_shape(lTensorName[i]), context.get_tensor_shape(lTensorName[i]), lTensorName[i])

bufferH = []
bufferH.append(np.ascontiguousarray(data))
for i in range(nInput, nIO):
    bufferH.append(np.empty(context.get_tensor_shape(lTensorName[i]), dtype=trt.nptype(engine.get_tensor_dtype(lTensorName[i]))))
bufferD = []
for i in range(nIO):
    bufferD.append(cudart.cudaMalloc(bufferH[i].nbytes)[1])

for i in range(nInput):
    cudart.cudaMemcpy(bufferD[i], bufferH[i].ctypes.data, bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice)

for i in range(nIO):
    context.set_tensor_address(lTensorName[i], int(bufferD[i]))

context.execute_async_v3(0)

for i in range(nInput, nIO):
    cudart.cudaMemcpy(bufferH[i].ctypes.data, bufferD[i], bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost)

for i in range(nIO):
    print(lTensorName[i])
    print(bufferH[i])

for b in bufferD:
    cudart.cudaFree(b)
trt-samples-for-hackathon-cn-master
cookbook/02-API/Layer/DeconvolutionNdLayer/Set_input+INT8QDQ.py
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import numpy as np
import tensorrt as trt
from cuda import cudart

nB, nC, nH, nW = 1, 1, 3, 3
nCOut, nKernelHeight, nKernelWidth = 1, 3, 3
data = np.arange(1, 1 + nB * nC * nH * nW, dtype=np.float32).reshape(nB, nC, nH, nW)
weight = np.asanyarray(np.power(10, range(4, -5, -1), dtype=np.float32))
bias = np.ascontiguousarray(np.zeros(nCOut, dtype=np.float32))

np.set_printoptions(precision=3, linewidth=200, suppress=True)
cudart.cudaDeviceSynchronize()

logger = trt.Logger(trt.Logger.ERROR)
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
config = builder.create_builder_config()
inputT0 = network.add_input("inputT0", trt.float32, (nB, nC, nH, nW))
#------------------------------------------------------------------------------- Network
hPost = wPost = 1
deconvolutionLayer = network.add_deconvolution_nd(inputT0, nCOut, (nKernelHeight, nKernelWidth), trt.Weights(weight), trt.Weights(bias))
deconvolutionLayer.post_padding = (hPost, wPost)
#------------------------------------------------------------------------------- Network
network.mark_output(deconvolutionLayer.get_output(0))
engineString = builder.build_serialized_network(network, config)
engine = trt.Runtime(logger).deserialize_cuda_engine(engineString)
nIO = engine.num_io_tensors
lTensorName = [engine.get_tensor_name(i) for i in range(nIO)]
nInput = [engine.get_tensor_mode(lTensorName[i]) for i in range(nIO)].count(trt.TensorIOMode.INPUT)

context = engine.create_execution_context()
for i in range(nIO):
    print("[%2d]%s->" % (i, "Input " if i < nInput else "Output"), engine.get_tensor_dtype(lTensorName[i]), engine.get_tensor_shape(lTensorName[i]), context.get_tensor_shape(lTensorName[i]), lTensorName[i])

bufferH = []
bufferH.append(np.ascontiguousarray(data))
for i in range(nInput, nIO):
    bufferH.append(np.empty(context.get_tensor_shape(lTensorName[i]), dtype=trt.nptype(engine.get_tensor_dtype(lTensorName[i]))))
bufferD = []
for i in range(nIO):
    bufferD.append(cudart.cudaMalloc(bufferH[i].nbytes)[1])

for i in range(nInput):
    cudart.cudaMemcpy(bufferD[i], bufferH[i].ctypes.data, bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice)

for i in range(nIO):
    context.set_tensor_address(lTensorName[i], int(bufferD[i]))

context.execute_async_v3(0)

for i in range(nInput, nIO):
    cudart.cudaMemcpy(bufferH[i].ctypes.data, bufferD[i], bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost)

for i in range(nIO):
    print(lTensorName[i])
    print(bufferH[i])

for b in bufferD:
    cudart.cudaFree(b)
trt-samples-for-hackathon-cn-master
cookbook/02-API/Layer/DeconvolutionNdLayer/Post_padding.py
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import numpy as np
import tensorrt as trt
from cuda import cudart

nB, nC, nH, nW = 1, 2, 6, 9
nCOut, nKernelHeight, nKernelWidth = 1, 3, 3
data = np.tile(np.arange(1, 1 + nKernelHeight * nKernelWidth, dtype=np.float32).reshape(nKernelHeight, nKernelWidth), (nC, nH // nKernelHeight, nW // nKernelWidth)).reshape(nB, 1, nC, nH, nW)
weight = np.power(10, range(4, -5, -1), dtype=np.float32).reshape(nCOut, nKernelHeight, nKernelWidth)
weight = np.ascontiguousarray(np.concatenate([weight, -weight], 0))
bias = np.ascontiguousarray(np.zeros(nCOut, dtype=np.float32))

np.set_printoptions(precision=3, linewidth=200, suppress=True)
cudart.cudaDeviceSynchronize()

logger = trt.Logger(trt.Logger.ERROR)
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
config = builder.create_builder_config()
inputT0 = network.add_input("inputT0", trt.float32, (nB, 1, nC, nH, nW))  # the input tensor must have at least 5 dimensions
#------------------------------------------------------------------------------- Network
deconvolutionLayer = network.add_deconvolution_nd(inputT0, nCOut, weight.shape, trt.Weights(weight), trt.Weights(bias))  # the deconvolution kernel is 3-dimensional
#------------------------------------------------------------------------------- Network
network.mark_output(deconvolutionLayer.get_output(0))
engineString = builder.build_serialized_network(network, config)
engine = trt.Runtime(logger).deserialize_cuda_engine(engineString)
nIO = engine.num_io_tensors
lTensorName = [engine.get_tensor_name(i) for i in range(nIO)]
nInput = [engine.get_tensor_mode(lTensorName[i]) for i in range(nIO)].count(trt.TensorIOMode.INPUT)

context = engine.create_execution_context()
for i in range(nIO):
    print("[%2d]%s->" % (i, "Input " if i < nInput else "Output"), engine.get_tensor_dtype(lTensorName[i]), engine.get_tensor_shape(lTensorName[i]), context.get_tensor_shape(lTensorName[i]), lTensorName[i])

bufferH = []
bufferH.append(np.ascontiguousarray(data))
for i in range(nInput, nIO):
    bufferH.append(np.empty(context.get_tensor_shape(lTensorName[i]), dtype=trt.nptype(engine.get_tensor_dtype(lTensorName[i]))))
bufferD = []
for i in range(nIO):
    bufferD.append(cudart.cudaMalloc(bufferH[i].nbytes)[1])

for i in range(nInput):
    cudart.cudaMemcpy(bufferD[i], bufferH[i].ctypes.data, bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice)

for i in range(nIO):
    context.set_tensor_address(lTensorName[i], int(bufferD[i]))

context.execute_async_v3(0)

for i in range(nInput, nIO):
    cudart.cudaMemcpy(bufferH[i].ctypes.data, bufferD[i], bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost)

for i in range(nIO):
    print(lTensorName[i])
    print(bufferH[i])

for b in bufferD:
    cudart.cudaFree(b)
trt-samples-for-hackathon-cn-master
cookbook/02-API/Layer/DeconvolutionNdLayer/Deconvolution3D.py
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import numpy as np
import tensorrt as trt
from cuda import cudart

nB, nC, nH, nW = 1, 1, 3, 3
nCOut, nKernelHeight, nKernelWidth = 1, 3, 3
data = np.arange(1, 1 + nB * nC * nH * nW, dtype=np.float32).reshape(nB, nC, nH, nW)
weight = np.asanyarray(np.power(10, range(4, -5, -1), dtype=np.float32))
bias = np.ascontiguousarray(np.zeros(nCOut, dtype=np.float32))

np.set_printoptions(precision=3, linewidth=200, suppress=True)
cudart.cudaDeviceSynchronize()

logger = trt.Logger(trt.Logger.ERROR)
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
config = builder.create_builder_config()
inputT0 = network.add_input("inputT0", trt.float32, (nB, nC, nH, nW))
#------------------------------------------------------------------------------- Network
deconvolutionLayer = network.add_deconvolution_nd(inputT0, nCOut, (nKernelHeight, nKernelWidth), trt.Weights(weight), trt.Weights(bias))
#------------------------------------------------------------------------------- Network
network.mark_output(deconvolutionLayer.get_output(0))
engineString = builder.build_serialized_network(network, config)
engine = trt.Runtime(logger).deserialize_cuda_engine(engineString)
nIO = engine.num_io_tensors
lTensorName = [engine.get_tensor_name(i) for i in range(nIO)]
nInput = [engine.get_tensor_mode(lTensorName[i]) for i in range(nIO)].count(trt.TensorIOMode.INPUT)

context = engine.create_execution_context()
for i in range(nIO):
    print("[%2d]%s->" % (i, "Input " if i < nInput else "Output"), engine.get_tensor_dtype(lTensorName[i]), engine.get_tensor_shape(lTensorName[i]), context.get_tensor_shape(lTensorName[i]), lTensorName[i])

bufferH = []
bufferH.append(np.ascontiguousarray(data))
for i in range(nInput, nIO):
    bufferH.append(np.empty(context.get_tensor_shape(lTensorName[i]), dtype=trt.nptype(engine.get_tensor_dtype(lTensorName[i]))))
bufferD = []
for i in range(nIO):
    bufferD.append(cudart.cudaMalloc(bufferH[i].nbytes)[1])

for i in range(nInput):
    cudart.cudaMemcpy(bufferD[i], bufferH[i].ctypes.data, bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice)

for i in range(nIO):
    context.set_tensor_address(lTensorName[i], int(bufferD[i]))

context.execute_async_v3(0)

for i in range(nInput, nIO):
    cudart.cudaMemcpy(bufferH[i].ctypes.data, bufferD[i], bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost)

for i in range(nIO):
    print(lTensorName[i])
    print(bufferH[i])

for b in bufferD:
    cudart.cudaFree(b)
trt-samples-for-hackathon-cn-master
cookbook/02-API/Layer/DeconvolutionNdLayer/SimpleExample.py
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import numpy as np
import tensorrt as trt
from cuda import cudart

nB, nC, nH, nW = 1, 1, 3, 3
nCOut, nKernelHeight, nKernelWidth = 1, 3, 3
data = np.arange(1, 1 + nB * nC * nH * nW, dtype=np.float32).reshape(nB, nC, nH, nW)
weight = np.asanyarray(np.power(10, range(4, -5, -1), dtype=np.float32))
bias = np.ascontiguousarray(np.zeros(nCOut, dtype=np.float32))

np.set_printoptions(precision=3, linewidth=200, suppress=True)
cudart.cudaDeviceSynchronize()

logger = trt.Logger(trt.Logger.ERROR)
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
config = builder.create_builder_config()
inputT0 = network.add_input("inputT0", trt.float32, (nB, nC, nH, nW))
#------------------------------------------------------------------------------- Network
hP = wP = 1
deconvolutionLayer = network.add_deconvolution_nd(inputT0, nCOut, (nKernelHeight, nKernelWidth), trt.Weights(weight), trt.Weights(bias))
deconvolutionLayer.padding_nd = (hP, wP)
#------------------------------------------------------------------------------- Network
network.mark_output(deconvolutionLayer.get_output(0))
engineString = builder.build_serialized_network(network, config)
engine = trt.Runtime(logger).deserialize_cuda_engine(engineString)
nIO = engine.num_io_tensors
lTensorName = [engine.get_tensor_name(i) for i in range(nIO)]
nInput = [engine.get_tensor_mode(lTensorName[i]) for i in range(nIO)].count(trt.TensorIOMode.INPUT)

context = engine.create_execution_context()
for i in range(nIO):
    print("[%2d]%s->" % (i, "Input " if i < nInput else "Output"), engine.get_tensor_dtype(lTensorName[i]), engine.get_tensor_shape(lTensorName[i]), context.get_tensor_shape(lTensorName[i]), lTensorName[i])

bufferH = []
bufferH.append(np.ascontiguousarray(data))
for i in range(nInput, nIO):
    bufferH.append(np.empty(context.get_tensor_shape(lTensorName[i]), dtype=trt.nptype(engine.get_tensor_dtype(lTensorName[i]))))
bufferD = []
for i in range(nIO):
    bufferD.append(cudart.cudaMalloc(bufferH[i].nbytes)[1])

for i in range(nInput):
    cudart.cudaMemcpy(bufferD[i], bufferH[i].ctypes.data, bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice)

for i in range(nIO):
    context.set_tensor_address(lTensorName[i], int(bufferD[i]))

context.execute_async_v3(0)

for i in range(nInput, nIO):
    cudart.cudaMemcpy(bufferH[i].ctypes.data, bufferD[i], bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost)

for i in range(nIO):
    print(lTensorName[i])
    print(bufferH[i])

for b in bufferD:
    cudart.cudaFree(b)
trt-samples-for-hackathon-cn-master
cookbook/02-API/Layer/DeconvolutionNdLayer/Padding_nd.py
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import numpy as np
import tensorrt as trt
from cuda import cudart

nB, nC, nH, nW = 1, 1, 3, 3
nCOut, nKernelHeight, nKernelWidth = 1, 3, 3
data = np.arange(1, 1 + nB * nC * nH * nW, dtype=np.float32).reshape(nB, nC, nH, nW)
weight = np.asanyarray(np.power(10, range(4, -5, -1), dtype=np.float32))
bias = np.ascontiguousarray(np.zeros(nCOut, dtype=np.float32))

np.set_printoptions(precision=3, linewidth=200, suppress=True)
cudart.cudaDeviceSynchronize()

logger = trt.Logger(trt.Logger.ERROR)
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
config = builder.create_builder_config()
inputT0 = network.add_input("inputT0", trt.float32, (nB, nC, nH, nW))
#------------------------------------------------------------------------------- Network
hS = wS = 2
deconvolutionLayer = network.add_deconvolution_nd(inputT0, nCOut, (nKernelHeight, nKernelWidth), trt.Weights(weight), trt.Weights(bias))
deconvolutionLayer.stride_nd = (hS, wS)
#------------------------------------------------------------------------------- Network
network.mark_output(deconvolutionLayer.get_output(0))
engineString = builder.build_serialized_network(network, config)
engine = trt.Runtime(logger).deserialize_cuda_engine(engineString)
nIO = engine.num_io_tensors
lTensorName = [engine.get_tensor_name(i) for i in range(nIO)]
nInput = [engine.get_tensor_mode(lTensorName[i]) for i in range(nIO)].count(trt.TensorIOMode.INPUT)

context = engine.create_execution_context()
for i in range(nIO):
    print("[%2d]%s->" % (i, "Input " if i < nInput else "Output"), engine.get_tensor_dtype(lTensorName[i]), engine.get_tensor_shape(lTensorName[i]), context.get_tensor_shape(lTensorName[i]), lTensorName[i])

bufferH = []
bufferH.append(np.ascontiguousarray(data))
for i in range(nInput, nIO):
    bufferH.append(np.empty(context.get_tensor_shape(lTensorName[i]), dtype=trt.nptype(engine.get_tensor_dtype(lTensorName[i]))))
bufferD = []
for i in range(nIO):
    bufferD.append(cudart.cudaMalloc(bufferH[i].nbytes)[1])

for i in range(nInput):
    cudart.cudaMemcpy(bufferD[i], bufferH[i].ctypes.data, bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice)

for i in range(nIO):
    context.set_tensor_address(lTensorName[i], int(bufferD[i]))

context.execute_async_v3(0)

for i in range(nInput, nIO):
    cudart.cudaMemcpy(bufferH[i].ctypes.data, bufferD[i], bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost)

for i in range(nIO):
    print(lTensorName[i])
    print(bufferH[i])

for b in bufferD:
    cudart.cudaFree(b)
trt-samples-for-hackathon-cn-master
cookbook/02-API/Layer/DeconvolutionNdLayer/Stride_nd.py
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import numpy as np
import tensorrt as trt
from cuda import cudart

nB, nC, nH, nW = 1, 3, 4, 5
data = np.arange(nB * nC * nH * nW, dtype=np.float32).reshape(nB, nC, nH, nW)

np.set_printoptions(precision=3, linewidth=200, suppress=True)
cudart.cudaDeviceSynchronize()

logger = trt.Logger(trt.Logger.ERROR)
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
config = builder.create_builder_config()
config.set_flag(trt.BuilderFlag.FP16)  # FP16 and INT8 outputs need the corresponding builder flags
config.set_flag(trt.BuilderFlag.INT8)
inputT0 = network.add_input("inputT0", trt.float32, (nB, nC, nH, nW))
#------------------------------------------------------------------------------- Network
convertToFloat16Layer = network.add_identity(inputT0)
convertToFloat16Layer.get_output(0).dtype = trt.float16

convertToInt32Layer = network.add_identity(inputT0)
convertToInt32Layer.get_output(0).dtype = trt.int32

convertToInt8Layer = network.add_identity(inputT0)
convertToInt8Layer.get_output(0).dtype = trt.int8
convertToInt8Layer.get_output(0).set_dynamic_range(0, 127)  # INT8 mode needs a dynamic range or calibration
#------------------------------------------------------------------------------- Network
network.mark_output(convertToFloat16Layer.get_output(0))
network.mark_output(convertToInt32Layer.get_output(0))
network.mark_output(convertToInt8Layer.get_output(0))
engineString = builder.build_serialized_network(network, config)
engine = trt.Runtime(logger).deserialize_cuda_engine(engineString)
nIO = engine.num_io_tensors
lTensorName = [engine.get_tensor_name(i) for i in range(nIO)]
nInput = [engine.get_tensor_mode(lTensorName[i]) for i in range(nIO)].count(trt.TensorIOMode.INPUT)

context = engine.create_execution_context()
context.set_input_shape(lTensorName[0], [nB, nC, nH, nW])
for i in range(nIO):
    print("[%2d]%s->" % (i, "Input " if i < nInput else "Output"), engine.get_tensor_dtype(lTensorName[i]), engine.get_tensor_shape(lTensorName[i]), context.get_tensor_shape(lTensorName[i]), lTensorName[i])

bufferH = []
for i in range(nIO):
    bufferH.append(np.empty(context.get_tensor_shape(lTensorName[i]), dtype=trt.nptype(engine.get_tensor_dtype(lTensorName[i]))))
bufferD = []
for i in range(nIO):
    bufferD.append(cudart.cudaMalloc(bufferH[i].nbytes)[1])

bufferH[0] = data

for i in range(nInput):
    cudart.cudaMemcpy(bufferD[i], bufferH[i].ctypes.data, bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice)

for i in range(nIO):
    context.set_tensor_address(lTensorName[i], int(bufferD[i]))

context.execute_async_v3(0)

for i in range(nInput, nIO):
    cudart.cudaMemcpy(bufferH[i].ctypes.data, bufferD[i], bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost)

for i in range(nIO):
    print(lTensorName[i])
    print(bufferH[i])

for b in bufferD:
    cudart.cudaFree(b)
trt-samples-for-hackathon-cn-master
cookbook/02-API/Layer/IdentityLayer/DataTypeConversion.py
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import numpy as np
import tensorrt as trt
from cuda import cudart

nB, nC, nH, nW = 1, 3, 4, 5
data = np.arange(nB * nC * nH * nW, dtype=np.float32).reshape(nB, nC, nH, nW)

np.set_printoptions(precision=3, linewidth=200, suppress=True)
cudart.cudaDeviceSynchronize()

logger = trt.Logger(trt.Logger.ERROR)
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
config = builder.create_builder_config()
inputT0 = network.add_input("inputT0", trt.float32, (nB, nC, nH, nW))
#------------------------------------------------------------------------------- Network
identityLayer = network.add_identity(inputT0)
#------------------------------------------------------------------------------- Network
network.mark_output(identityLayer.get_output(0))
engineString = builder.build_serialized_network(network, config)
engine = trt.Runtime(logger).deserialize_cuda_engine(engineString)
nIO = engine.num_io_tensors
lTensorName = [engine.get_tensor_name(i) for i in range(nIO)]
nInput = [engine.get_tensor_mode(lTensorName[i]) for i in range(nIO)].count(trt.TensorIOMode.INPUT)

context = engine.create_execution_context()
context.set_input_shape(lTensorName[0], [nB, nC, nH, nW])
for i in range(nIO):
    print("[%2d]%s->" % (i, "Input " if i < nInput else "Output"), engine.get_tensor_dtype(lTensorName[i]), engine.get_tensor_shape(lTensorName[i]), context.get_tensor_shape(lTensorName[i]), lTensorName[i])

bufferH = []
for i in range(nIO):
    bufferH.append(np.empty(context.get_tensor_shape(lTensorName[i]), dtype=trt.nptype(engine.get_tensor_dtype(lTensorName[i]))))
bufferD = []
for i in range(nIO):
    bufferD.append(cudart.cudaMalloc(bufferH[i].nbytes)[1])

bufferH[0] = data

for i in range(nInput):
    cudart.cudaMemcpy(bufferD[i], bufferH[i].ctypes.data, bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice)

for i in range(nIO):
    context.set_tensor_address(lTensorName[i], int(bufferD[i]))

context.execute_async_v3(0)

for i in range(nInput, nIO):
    cudart.cudaMemcpy(bufferH[i].ctypes.data, bufferD[i], bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost)

for i in range(nIO):
    print(lTensorName[i])
    print(bufferH[i])

for b in bufferD:
    cudart.cudaFree(b)
trt-samples-for-hackathon-cn-master
cookbook/02-API/Layer/IdentityLayer/SimpleExample.py
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import numpy as np
import tensorrt as trt
from cuda import cudart

shape = [1, 3, 4, 5]

np.set_printoptions(precision=3, linewidth=200, suppress=True)
cudart.cudaDeviceSynchronize()

logger = trt.Logger(trt.Logger.ERROR)
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
config = builder.create_builder_config()
inputT0 = network.add_input("inputT0", trt.float32, shape)
#------------------------------------------------------------------------------- Network
_H1 = network.add_shape(inputT0)
_H2 = network.add_slice(_H1.get_output(0), [3], [1], [1])
_C1 = network.add_constant([1], np.array([5], dtype=np.int32))  # check the condition inputT0.shape[3] == 5, no error with this
#_C1 = network.add_constant([1], np.array([4], dtype=np.int32))  # check the condition inputT0.shape[3] == 4, which certainly fails at build time
_H3 = network.add_elementwise(_H2.get_output(0), _C1.get_output(0), trt.ElementWiseOperation.EQUAL)  # do the check with an elementwise layer
_H4 = network.add_identity(_H3.get_output(0))
_H4.get_output(0).dtype = trt.bool
_HA = network.add_assertion(_H4.get_output(0), "inputT0.shape[3] != 5")  # the assertion layer takes a Bool input tensor and has no output tensor
_H5 = network.add_identity(_H4.get_output(0))
_H5.get_output(0).dtype = trt.int32
#------------------------------------------------------------------------------- Network
network.mark_output(_H5.get_output(0))
engineString = builder.build_serialized_network(network, config)
print("%s building serialized network!" % ("Failed" if engineString is None else "Succeeded"))
trt-samples-for-hackathon-cn-master
cookbook/02-API/Layer/AssertionLayer/BuildtimeCheck.py
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import numpy as np
import tensorrt as trt
from cuda import cudart

shape = [1, 3, 4, 5]
data0 = np.arange(np.prod(shape), dtype=np.float32).reshape(shape)
data1 = np.arange(np.prod(shape[:2]), dtype=np.float32).reshape(shape[:2])
data2 = np.arange(shape[0] * (shape[1] + 1), dtype=np.float32).reshape(shape[0], shape[1] + 1)

np.set_printoptions(precision=3, linewidth=200, suppress=True)
cudart.cudaDeviceSynchronize()

logger = trt.Logger(trt.Logger.ERROR)
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
profile = builder.create_optimization_profile()
config = builder.create_builder_config()
inputT0 = network.add_input("inputT0", trt.float32, [-1, -1] + shape[2:])
profile.set_shape(inputT0.name, [1, 1] + shape[2:], shape, [2, 6] + shape[2:])
inputT1 = network.add_input("inputT1", trt.float32, (-1, -1))
profile.set_shape(inputT1.name, [1, 1], shape[:2], [2, 6])
config.add_optimization_profile(profile)
#------------------------------------------------------------------------------- Network
_H1 = network.add_shape(inputT0)
_H2 = network.add_slice(_H1.get_output(0), [1], [1], [1])
_H3 = network.add_shape(inputT1)
_H4 = network.add_slice(_H3.get_output(0), [1], [1], [1])
_H5 = network.add_elementwise(_H2.get_output(0), _H4.get_output(0), trt.ElementWiseOperation.EQUAL)  # check condition inputT0.shape[1] == inputT1.shape[1]
_H6 = network.add_identity(_H5.get_output(0))
_H6.get_output(0).dtype = trt.bool
_HA = network.add_assertion(_H6.get_output(0), "inputT0.shape[1] != inputT1.shape[1]")
_H7 = network.add_identity(_H5.get_output(0))
_H7.get_output(0).dtype = trt.int32
#------------------------------------------------------------------------------- Network
network.mark_output(_H7.get_output(0))
engineString = builder.build_serialized_network(network, config)
engine = trt.Runtime(logger).deserialize_cuda_engine(engineString)
nIO = engine.num_io_tensors
lTensorName = [engine.get_tensor_name(i) for i in range(nIO)]
nInput = [engine.get_tensor_mode(lTensorName[i]) for i in range(nIO)].count(trt.TensorIOMode.INPUT)

context = engine.create_execution_context()
context.set_input_shape(lTensorName[0], data0.shape)
context.set_input_shape(lTensorName[1], data1.shape)  # inputT0[1,3,4,5] <-> inputT1[1,3], no error with this shape
#context.set_input_shape(lTensorName[1], data2.shape)  # inputT0[1,3,4,5] <-> inputT1[1,4], error with this shape
for i in range(nIO):
    print("[%2d]%s->" % (i, "Input " if i < nInput else "Output"), engine.get_tensor_dtype(lTensorName[i]), engine.get_tensor_shape(lTensorName[i]), context.get_tensor_shape(lTensorName[i]), lTensorName[i])
trt-samples-for-hackathon-cn-master
cookbook/02-API/Layer/AssertionLayer/RuntimeCheck.py
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import numpy as np
import tensorrt as trt
from cuda import cudart

shape = [1, 3, 4, 5]

np.set_printoptions(precision=3, linewidth=200, suppress=True)
cudart.cudaDeviceSynchronize()

logger = trt.Logger(trt.Logger.ERROR)
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
config = builder.create_builder_config()
inputT0 = network.add_input("inputT0", trt.float32, shape)
#------------------------------------------------------------------------------- Network
_H1 = network.add_shape(inputT0)
_H2 = network.add_slice(_H1.get_output(0), [3], [1], [1])
_C1 = network.add_constant([1], np.array([4], dtype=np.int32))
_H3 = network.add_elementwise(_H2.get_output(0), _C1.get_output(0), trt.ElementWiseOperation.EQUAL)
_H4 = network.add_identity(_H3.get_output(0))
_H4.get_output(0).dtype = trt.bool
_HA = network.add_assertion(_H4.get_output(0), "inputT0.shape[3] != 5!")
_HA.message = "Edited message!"
_H5 = network.add_identity(_H4.get_output(0))
_H5.get_output(0).dtype = trt.int32
#------------------------------------------------------------------------------- Network
network.mark_output(_H5.get_output(0))
engineString = builder.build_serialized_network(network, config)
print("%s building serialized network!" % ("Failed" if engineString is None else "Succeeded"))
trt-samples-for-hackathon-cn-master
cookbook/02-API/Layer/AssertionLayer/Message.py
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import numpy as np
import tensorrt as trt
from cuda import cudart

shape = [1, 3, 4, 5]
data = np.arange(np.prod(shape), dtype=np.float32).reshape(shape)

np.set_printoptions(precision=3, linewidth=200, suppress=True)
cudart.cudaDeviceSynchronize()

logger = trt.Logger(trt.Logger.ERROR)
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
config = builder.create_builder_config()
#------------------------------------------------------------------------------- Network
constantLayer = network.add_constant([1], np.array([1], dtype=np.float32))
constantLayer.weights = trt.Weights(data)  # set the weight of the constant tensor
constantLayer.shape = data.shape  # set the shape of the constant tensor
#------------------------------------------------------------------------------- Network
network.mark_output(constantLayer.get_output(0))
engineString = builder.build_serialized_network(network, config)
engine = trt.Runtime(logger).deserialize_cuda_engine(engineString)
nIO = engine.num_io_tensors
lTensorName = [engine.get_tensor_name(i) for i in range(nIO)]
nInput = [engine.get_tensor_mode(lTensorName[i]) for i in range(nIO)].count(trt.TensorIOMode.INPUT)

context = engine.create_execution_context()
for i in range(nIO):
    print("[%2d]%s->" % (i, "Input " if i < nInput else "Output"), engine.get_tensor_dtype(lTensorName[i]), engine.get_tensor_shape(lTensorName[i]), context.get_tensor_shape(lTensorName[i]), lTensorName[i])

bufferH = []
for i in range(nInput, nIO):
    bufferH.append(np.empty(context.get_tensor_shape(lTensorName[i]), dtype=trt.nptype(engine.get_tensor_dtype(lTensorName[i]))))
bufferD = []
for i in range(nIO):
    bufferD.append(cudart.cudaMalloc(bufferH[i].nbytes)[1])

for i in range(nInput):
    cudart.cudaMemcpy(bufferD[i], bufferH[i].ctypes.data, bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice)

for i in range(nIO):
    context.set_tensor_address(lTensorName[i], int(bufferD[i]))

context.execute_async_v3(0)

for i in range(nInput, nIO):
    cudart.cudaMemcpy(bufferH[i].ctypes.data, bufferD[i], bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost)

for i in range(nIO):
    print(lTensorName[i])
    print(bufferH[i])

for b in bufferD:
    cudart.cudaFree(b)
trt-samples-for-hackathon-cn-master
cookbook/02-API/Layer/ConstantLayer/Weight+Shape.py
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import numpy as np
import tensorrt as trt
from cuda import cudart

shape = [1, 3, 4, 5]
data = np.arange(np.prod(shape), dtype=np.float32).reshape(shape).astype(np.int8)

np.set_printoptions(precision=3, linewidth=200, suppress=True)
cudart.cudaDeviceSynchronize()

logger = trt.Logger(trt.Logger.ERROR)
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
config = builder.create_builder_config()
config.set_flag(trt.BuilderFlag.INT8)  # use flags to make Int8 constant layer work
config.set_flag(trt.BuilderFlag.OBEY_PRECISION_CONSTRAINTS)
#------------------------------------------------------------------------------- Network
constantLayer = network.add_constant(data.shape, trt.Weights(data))
constantLayer.get_output(0).dtype = trt.int8
constantLayer.get_output(0).dynamic_range = [-64, 64]
constantLayer1 = network.add_constant([], np.array([1], dtype=np.float32))  # Int8 Constant layer can only be used before Dequantize layer
dequantizeLayer = network.add_dequantize(constantLayer.get_output(0), constantLayer1.get_output(0))
#------------------------------------------------------------------------------- Network
network.mark_output(dequantizeLayer.get_output(0))
engineString = builder.build_serialized_network(network, config)
engine = trt.Runtime(logger).deserialize_cuda_engine(engineString)
nIO = engine.num_io_tensors
lTensorName = [engine.get_tensor_name(i) for i in range(nIO)]
nInput = [engine.get_tensor_mode(lTensorName[i]) for i in range(nIO)].count(trt.TensorIOMode.INPUT)

context = engine.create_execution_context()
for i in range(nIO):
    print("[%2d]%s->" % (i, "Input " if i < nInput else "Output"), engine.get_tensor_dtype(lTensorName[i]), engine.get_tensor_shape(lTensorName[i]), context.get_tensor_shape(lTensorName[i]), lTensorName[i])

bufferH = []
for i in range(nInput, nIO):
    bufferH.append(np.empty(context.get_tensor_shape(lTensorName[i]), dtype=trt.nptype(engine.get_tensor_dtype(lTensorName[i]))))
bufferD = []
for i in range(nIO):
    bufferD.append(cudart.cudaMalloc(bufferH[i].nbytes)[1])

for i in range(nInput):
    cudart.cudaMemcpy(bufferD[i], bufferH[i].ctypes.data, bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice)

for i in range(nIO):
    context.set_tensor_address(lTensorName[i], int(bufferD[i]))

context.execute_async_v3(0)

for i in range(nInput, nIO):
    cudart.cudaMemcpy(bufferH[i].ctypes.data, bufferD[i], bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost)

for i in range(nIO):
    print(lTensorName[i])
    print(bufferH[i])

for b in bufferD:
    cudart.cudaFree(b)
trt-samples-for-hackathon-cn-master
cookbook/02-API/Layer/ConstantLayer/DataTypeInt8.py
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import numpy as np
import tensorrt as trt
from cuda import cudart

shape = [1, 3, 4, 5]
data = np.arange(np.prod(shape), dtype=np.float32).reshape(shape)

np.set_printoptions(precision=3, linewidth=200, suppress=True)
cudart.cudaDeviceSynchronize()

logger = trt.Logger(trt.Logger.ERROR)
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
config = builder.create_builder_config()
#------------------------------------------------------------------------------- Network
constantLayer = network.add_constant(data.shape, trt.Weights(data))
#------------------------------------------------------------------------------- Network
network.mark_output(constantLayer.get_output(0))
engineString = builder.build_serialized_network(network, config)
engine = trt.Runtime(logger).deserialize_cuda_engine(engineString)
nIO = engine.num_io_tensors
lTensorName = [engine.get_tensor_name(i) for i in range(nIO)]
nInput = [engine.get_tensor_mode(lTensorName[i]) for i in range(nIO)].count(trt.TensorIOMode.INPUT)

context = engine.create_execution_context()
for i in range(nIO):
    print("[%2d]%s->" % (i, "Input " if i < nInput else "Output"), engine.get_tensor_dtype(lTensorName[i]), engine.get_tensor_shape(lTensorName[i]), context.get_tensor_shape(lTensorName[i]), lTensorName[i])

bufferH = []
for i in range(nInput, nIO):
    bufferH.append(np.empty(context.get_tensor_shape(lTensorName[i]), dtype=trt.nptype(engine.get_tensor_dtype(lTensorName[i]))))
bufferD = []
for i in range(nIO):
    bufferD.append(cudart.cudaMalloc(bufferH[i].nbytes)[1])

for i in range(nInput):
    cudart.cudaMemcpy(bufferD[i], bufferH[i].ctypes.data, bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice)

for i in range(nIO):
    context.set_tensor_address(lTensorName[i], int(bufferD[i]))

context.execute_async_v3(0)

for i in range(nInput, nIO):
    cudart.cudaMemcpy(bufferH[i].ctypes.data, bufferD[i], bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost)

for i in range(nIO):
    print(lTensorName[i])
    print(bufferH[i])

for b in bufferD:
    cudart.cudaFree(b)
trt-samples-for-hackathon-cn-master
cookbook/02-API/Layer/ConstantLayer/SimpleExample.py
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import numpy as np
import tensorrt as trt
from cuda import cudart

shape = [1, 3, 4, 5]
data = np.arange(np.prod(shape), dtype=np.float32).reshape(shape)

np.set_printoptions(precision=3, linewidth=200, suppress=True)
cudart.cudaDeviceSynchronize()

logger = trt.Logger(trt.Logger.ERROR)
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
config = builder.create_builder_config()
#------------------------------------------------------------------------------- Network
constantLayer = network.add_constant(data.shape, trt.Weights(data.astype(bool)))  # convert to Bool
constantLayer.get_output(0).dtype = trt.bool  # set data type of output tensor
#------------------------------------------------------------------------------- Network
network.mark_output(constantLayer.get_output(0))
engineString = builder.build_serialized_network(network, config)
engine = trt.Runtime(logger).deserialize_cuda_engine(engineString)
nIO = engine.num_io_tensors
lTensorName = [engine.get_tensor_name(i) for i in range(nIO)]
nInput = [engine.get_tensor_mode(lTensorName[i]) for i in range(nIO)].count(trt.TensorIOMode.INPUT)

context = engine.create_execution_context()
for i in range(nIO):
    print("[%2d]%s->" % (i, "Input " if i < nInput else "Output"), engine.get_tensor_dtype(lTensorName[i]), engine.get_tensor_shape(lTensorName[i]), context.get_tensor_shape(lTensorName[i]), lTensorName[i])

bufferH = []
for i in range(nInput, nIO):
    bufferH.append(np.empty(context.get_tensor_shape(lTensorName[i]), dtype=trt.nptype(engine.get_tensor_dtype(lTensorName[i]))))
bufferD = []
for i in range(nIO):
    bufferD.append(cudart.cudaMalloc(bufferH[i].nbytes)[1])

for i in range(nInput):
    cudart.cudaMemcpy(bufferD[i], bufferH[i].ctypes.data, bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice)

for i in range(nIO):
    context.set_tensor_address(lTensorName[i], int(bufferD[i]))

context.execute_async_v3(0)

for i in range(nInput, nIO):
    cudart.cudaMemcpy(bufferH[i].ctypes.data, bufferD[i], bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost)

for i in range(nIO):
    print(lTensorName[i])
    print(bufferH[i])

for b in bufferD:
    cudart.cudaFree(b)
trt-samples-for-hackathon-cn-master
cookbook/02-API/Layer/ConstantLayer/DataTypeBool.py
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import numpy as np
import tensorrt as trt
from cuda import cudart

nB, nC, nH, nW = 1, 3, 4, 5
data = np.ones([nB, nC, nH, nW], dtype=np.float32) * np.arange(1, 1 + nC, dtype=np.float32).reshape(1, nC, 1, 1)

np.set_printoptions(precision=3, linewidth=200, suppress=True)
cudart.cudaDeviceSynchronize()

logger = trt.Logger(trt.Logger.ERROR)
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
config = builder.create_builder_config()
inputT0 = network.add_input("inputT0", trt.float32, (nB, nC, nH, nW))
#-------------------------------------------------------------------------------
loop = network.add_loop()
iteratorLayer = loop.add_iterator(inputT0, 1, False)
limit = network.add_constant((), np.array([nC], dtype=np.int32))
loop.add_trip_limit(limit.get_output(0), trt.TripLimit.COUNT)
_H0 = network.add_constant([1, nH, nW], np.ones(nH * nW, dtype=np.float32))
rLayer = loop.add_recurrence(_H0.get_output(0))
_H1 = network.add_elementwise(rLayer.get_output(0), iteratorLayer.get_output(0), trt.ElementWiseOperation.SUM)
rLayer.set_input(1, _H1.get_output(0))
lengthLayer = network.add_constant((), np.array([nC], dtype=np.int32))
loopOutput0 = loop.add_loop_output(_H1.get_output(0), trt.LoopOutput.CONCATENATE, 1)  # the index argument is changed here; only the CONCATENATE and REVERSE modes are shown, because the argument is ignored in LAST_VALUE mode
loopOutput0.set_input(1, lengthLayer.get_output(0))
loopOutput1 = loop.add_loop_output(_H1.get_output(0), trt.LoopOutput.REVERSE, 1)
loopOutput1.set_input(1, lengthLayer.get_output(0))
#-------------------------------------------------------------------------------
network.mark_output(loopOutput0.get_output(0))
network.mark_output(loopOutput1.get_output(0))
engineString = builder.build_serialized_network(network, config)
engine = trt.Runtime(logger).deserialize_cuda_engine(engineString)
context = engine.create_execution_context()
nInput = np.sum([engine.binding_is_input(i) for i in range(engine.num_bindings)])
nOutput = engine.num_bindings - nInput

bufferH = []
bufferH.append(data)
for i in range(nOutput):
    bufferH.append(np.empty(context.get_binding_shape(nInput + i), dtype=trt.nptype(engine.get_binding_dtype(nInput + i))))
bufferD = []
for i in range(engine.num_bindings):
    bufferD.append(cudart.cudaMalloc(bufferH[i].nbytes)[1])

for i in range(nInput):
    cudart.cudaMemcpy(bufferD[i], np.ascontiguousarray(bufferH[i].reshape(-1)).ctypes.data, bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice)

context.execute_v2(bufferD)

for i in range(nOutput):
    cudart.cudaMemcpy(bufferH[nInput + i].ctypes.data, bufferD[nInput + i], bufferH[nInput + i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost)

for i in range(nInput):
    print("Input %d:" % i, bufferH[i].shape, "\n", bufferH[i])
for i in range(nOutput):
    print("Output %d:" % i, bufferH[nInput + i].shape, "\n", bufferH[nInput + i])

for buffer in bufferD:
    cudart.cudaFree(buffer)
trt-samples-for-hackathon-cn-master
cookbook/02-API/Layer/LoopStructure/Iterator5.py
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import numpy as np
import tensorrt as trt
from cuda import cudart

nBatchSize, nSequenceLength, nInputDim = 3, 4, 7
nHiddenDim = 5
data = np.ones([nBatchSize, nSequenceLength, nInputDim], dtype=np.float32)
weightAllX = np.ones((nHiddenDim, nInputDim), dtype=np.float32)  # weight matrix (X->H)
weightAllH = np.ones((nHiddenDim, nHiddenDim), dtype=np.float32)  # weight matrix (H->H)
biasAllX = np.zeros(nHiddenDim, dtype=np.float32)  # bias (X->H)
biasAllH = np.zeros(nHiddenDim, dtype=np.float32)  # bias (H->H)

np.set_printoptions(precision=3, linewidth=200, suppress=True)
cudart.cudaDeviceSynchronize()

logger = trt.Logger(trt.Logger.ERROR)
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
config = builder.create_builder_config()
inputT0 = network.add_input("inputT0", trt.float32, (nBatchSize, nSequenceLength, nInputDim))  # single-input network
#-------------------------------------------------------------------------------
def gate(network, x, wx, hiddenStateLayer, wh, b, isSigmoid):
    _h0 = network.add_matrix_multiply(x, trt.MatrixOperation.NONE, wx, trt.MatrixOperation.NONE)
    _h1 = network.add_matrix_multiply(hiddenStateLayer, trt.MatrixOperation.NONE, wh, trt.MatrixOperation.NONE)
    _h2 = network.add_elementwise(_h0.get_output(0), _h1.get_output(0), trt.ElementWiseOperation.SUM)
    _h3 = network.add_elementwise(_h2.get_output(0), b, trt.ElementWiseOperation.SUM)
    _h4 = network.add_activation(_h3.get_output(0), [trt.ActivationType.TANH, trt.ActivationType.SIGMOID][int(isSigmoid)])
    return _h4

weightAllXLayer = network.add_constant([nInputDim, nHiddenDim], trt.Weights(np.ascontiguousarray(weightAllX.transpose())))
weightAllHLayer = network.add_constant([nHiddenDim, nHiddenDim], trt.Weights(np.ascontiguousarray(weightAllH.transpose())))
biasAllLayer = network.add_constant([1, nHiddenDim], trt.Weights(np.ascontiguousarray(biasAllX + biasAllH)))
hidden0Layer = network.add_constant([nBatchSize, nHiddenDim], trt.Weights(np.ascontiguousarray(np.ones(nBatchSize * nHiddenDim, dtype=np.float32))))  # initial hidden state
cell0Layer = network.add_constant([nBatchSize, nHiddenDim], trt.Weights(np.ascontiguousarray(np.zeros(nBatchSize * nHiddenDim, dtype=np.float32))))  # initial cell state
length = network.add_constant((), np.array([nSequenceLength], dtype=np.int32))

loop = network.add_loop()
loop.add_trip_limit(length.get_output(0), trt.TripLimit.COUNT)
iteratorLayer = loop.add_iterator(inputT0, 1, False)  # each iteration throws one slice of inputT0 along dimension 1, shape (nBatchSize,nInputDim); a bidirectional LSTM needs an additional reversed iterator
hiddenStateLayer = loop.add_recurrence(hidden0Layer.get_output(0))  # a loop may hold several recurrence variables
cellStateLayer = loop.add_recurrence(cell0Layer.get_output(0))

gateI = gate(network, iteratorLayer.get_output(0), weightAllXLayer.get_output(0), hiddenStateLayer.get_output(0), weightAllHLayer.get_output(0), biasAllLayer.get_output(0), True)
gateC = gate(network, iteratorLayer.get_output(0), weightAllXLayer.get_output(0), hiddenStateLayer.get_output(0), weightAllHLayer.get_output(0), biasAllLayer.get_output(0), False)
gateF = gate(network, iteratorLayer.get_output(0), weightAllXLayer.get_output(0), hiddenStateLayer.get_output(0), weightAllHLayer.get_output(0), biasAllLayer.get_output(0), True)
gateO = gate(network, iteratorLayer.get_output(0), weightAllXLayer.get_output(0), hiddenStateLayer.get_output(0), weightAllHLayer.get_output(0), biasAllLayer.get_output(0), True)

_h5 = network.add_elementwise(gateF.get_output(0), cellStateLayer.get_output(0), trt.ElementWiseOperation.PROD)
_h6 = network.add_elementwise(gateI.get_output(0), gateC.get_output(0), trt.ElementWiseOperation.PROD)
newCellStateLayer = network.add_elementwise(_h5.get_output(0), _h6.get_output(0), trt.ElementWiseOperation.SUM)
_h7 = network.add_activation(newCellStateLayer.get_output(0), trt.ActivationType.TANH)
newHiddenStateLayer = network.add_elementwise(gateO.get_output(0), _h7.get_output(0), trt.ElementWiseOperation.PROD)

hiddenStateLayer.set_input(1, newHiddenStateLayer.get_output(0))
cellStateLayer.set_input(1, newCellStateLayer.get_output(0))

loopOutput0 = loop.add_loop_output(hiddenStateLayer.get_output(0), trt.LoopOutput.LAST_VALUE, 0)  # shape (nBatchSize,nHiddenDim): nBatchSize independent outputs, each a hidden state of nHiddenDim coordinates
loopOutput1 = loop.add_loop_output(newHiddenStateLayer.get_output(0), trt.LoopOutput.CONCATENATE, 1)  # shape (nBatchSize,nSequenceLength,nHiddenDim): nBatchSize independent outputs, each holding nSequenceLength hidden states of nHiddenDim coordinates
loopOutput1.set_input(1, length.get_output(0))
loopOutput2 = loop.add_loop_output(cellStateLayer.get_output(0), trt.LoopOutput.LAST_VALUE, 0)  # shape (nBatchSize,nHiddenDim): nBatchSize independent outputs, each a cell state of nHiddenDim coordinates
#-------------------------------------------------------------------------------
network.mark_output(loopOutput0.get_output(0))
network.mark_output(loopOutput1.get_output(0))
network.mark_output(loopOutput2.get_output(0))
engineString = builder.build_serialized_network(network, config)
engine = trt.Runtime(logger).deserialize_cuda_engine(engineString)
context = engine.create_execution_context()
nInput = np.sum([engine.binding_is_input(i) for i in range(engine.num_bindings)])
nOutput = engine.num_bindings - nInput

bufferH = []
bufferH.append(data)
for i in range(nOutput):
    bufferH.append(np.empty(context.get_binding_shape(nInput + i), dtype=trt.nptype(engine.get_binding_dtype(nInput + i))))
bufferD = []
for i in range(engine.num_bindings):
    bufferD.append(cudart.cudaMalloc(bufferH[i].nbytes)[1])

for i in range(nInput):
    cudart.cudaMemcpy(bufferD[i], np.ascontiguousarray(bufferH[i].reshape(-1)).ctypes.data, bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice)

context.execute_v2(bufferD)

for i in range(nOutput):
    cudart.cudaMemcpy(bufferH[nInput + i].ctypes.data, bufferD[nInput + i], bufferH[nInput + i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost)

for i in range(nInput):
    print("Input %d:" % i, bufferH[i].shape, "\n", bufferH[i])
for i in range(nOutput):
    print("Output %d:" % i, bufferH[nInput + i].shape, "\n", bufferH[nInput + i])

for buffer in bufferD:
    cudart.cudaFree(buffer)
trt-samples-for-hackathon-cn-master
cookbook/02-API/Layer/LoopStructure/StaticBidirectionalLSTM.py
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import numpy as np
import tensorrt as trt
from cuda import cudart

nBatchSize, nSequenceLength, nInputDim = 3, 4, 7
nHiddenDim = 5
data = np.ones([nBatchSize, nSequenceLength, nInputDim], dtype=np.float32)
weightAllX = np.ones((nHiddenDim, nInputDim), dtype=np.float32)  # weight of X->H
weightAllH = np.ones((nHiddenDim, nHiddenDim), dtype=np.float32)  # weight of H->H
biasAllX = np.zeros(nHiddenDim, dtype=np.float32)  # bias of X->H
biasAllH = np.zeros(nHiddenDim, dtype=np.float32)  # bias of H->H

np.set_printoptions(precision=3, linewidth=200, suppress=True)
cudart.cudaDeviceSynchronize()

logger = trt.Logger(trt.Logger.ERROR)
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
config = builder.create_builder_config()
inputT0 = network.add_input("inputT0", trt.float32, (nBatchSize, nSequenceLength, nInputDim))
#-------------------------------------------------------------------------------
def gate(network, x, wx, hiddenStateLayer, wh, b, isSigmoid):
    _h0 = network.add_matrix_multiply(x, trt.MatrixOperation.NONE, wx, trt.MatrixOperation.NONE)
    _h1 = network.add_matrix_multiply(hiddenStateLayer, trt.MatrixOperation.NONE, wh, trt.MatrixOperation.NONE)
    _h2 = network.add_elementwise(_h0.get_output(0), _h1.get_output(0), trt.ElementWiseOperation.SUM)
    _h3 = network.add_elementwise(_h2.get_output(0), b, trt.ElementWiseOperation.SUM)
    _h4 = network.add_activation(_h3.get_output(0), [trt.ActivationType.TANH, trt.ActivationType.SIGMOID][int(isSigmoid)])
    return _h4

weightAllXLayer = network.add_constant([nInputDim, nHiddenDim], trt.Weights(np.ascontiguousarray(weightAllX.transpose())))
weightAllHLayer = network.add_constant([nHiddenDim, nHiddenDim], trt.Weights(np.ascontiguousarray(weightAllH.transpose())))
biasAllLayer = network.add_constant([1, nHiddenDim], trt.Weights(np.ascontiguousarray(biasAllX + biasAllH)))
hidden0Layer = network.add_constant([nBatchSize, nHiddenDim], trt.Weights(np.ascontiguousarray(np.ones(nBatchSize * nHiddenDim, dtype=np.float32))))  # initial hidden state
cell0Layer = network.add_constant([nBatchSize, nHiddenDim], trt.Weights(np.ascontiguousarray(np.zeros(nBatchSize * nHiddenDim, dtype=np.float32))))  # initial cell state
length = network.add_constant((), np.array([nSequenceLength], dtype=np.int32))

loop = network.add_loop()
loop.add_trip_limit(length.get_output(0), trt.TripLimit.COUNT)
iteratorLayer = loop.add_iterator(inputT0, 1, False)  # each iteration throws one slice of inputT0 along dimension 1, shape (nBatchSize,nInputDim); a bidirectional LSTM needs an additional reversed iterator
hiddenStateLayer = loop.add_recurrence(hidden0Layer.get_output(0))  # a loop may hold several recurrence variables
cellStateLayer = loop.add_recurrence(cell0Layer.get_output(0))

gateI = gate(network, iteratorLayer.get_output(0), weightAllXLayer.get_output(0), hiddenStateLayer.get_output(0), weightAllHLayer.get_output(0), biasAllLayer.get_output(0), True)
gateC = gate(network, iteratorLayer.get_output(0), weightAllXLayer.get_output(0), hiddenStateLayer.get_output(0), weightAllHLayer.get_output(0), biasAllLayer.get_output(0), False)
gateF = gate(network, iteratorLayer.get_output(0), weightAllXLayer.get_output(0), hiddenStateLayer.get_output(0), weightAllHLayer.get_output(0), biasAllLayer.get_output(0), True)
gateO = gate(network, iteratorLayer.get_output(0), weightAllXLayer.get_output(0), hiddenStateLayer.get_output(0), weightAllHLayer.get_output(0), biasAllLayer.get_output(0), True)

_h5 = network.add_elementwise(gateF.get_output(0), cellStateLayer.get_output(0), trt.ElementWiseOperation.PROD)
_h6 = network.add_elementwise(gateI.get_output(0), gateC.get_output(0), trt.ElementWiseOperation.PROD)
newCellStateLayer = network.add_elementwise(_h5.get_output(0), _h6.get_output(0), trt.ElementWiseOperation.SUM)
_h7 = network.add_activation(newCellStateLayer.get_output(0), trt.ActivationType.TANH)
newHiddenStateLayer = network.add_elementwise(gateO.get_output(0), _h7.get_output(0), trt.ElementWiseOperation.PROD)

hiddenStateLayer.set_input(1, newHiddenStateLayer.get_output(0))
cellStateLayer.set_input(1, newCellStateLayer.get_output(0))

loopOutput0 = loop.add_loop_output(hiddenStateLayer.get_output(0), trt.LoopOutput.LAST_VALUE, 0)  # shape (nBatchSize,nHiddenDim): nBatchSize independent outputs, each a hidden state of nHiddenDim coordinates
loopOutput1 = loop.add_loop_output(newHiddenStateLayer.get_output(0), trt.LoopOutput.CONCATENATE, 1)  # shape (nBatchSize,nSequenceLength,nHiddenDim): nBatchSize independent outputs, each holding nSequenceLength hidden states of nHiddenDim coordinates
loopOutput1.set_input(1, length.get_output(0))
loopOutput2 = loop.add_loop_output(cellStateLayer.get_output(0), trt.LoopOutput.LAST_VALUE, 0)  # shape (nBatchSize,nHiddenDim): nBatchSize independent outputs, each a cell state of nHiddenDim coordinates
#-------------------------------------------------------------------------------
network.mark_output(loopOutput0.get_output(0))
network.mark_output(loopOutput1.get_output(0))
network.mark_output(loopOutput2.get_output(0))
engineString = builder.build_serialized_network(network, config)
engine = trt.Runtime(logger).deserialize_cuda_engine(engineString)
context = engine.create_execution_context()
nInput = np.sum([engine.binding_is_input(i) for i in range(engine.num_bindings)])
nOutput = engine.num_bindings - nInput

bufferH = []
bufferH.append(data)
for i in range(nOutput):
    bufferH.append(np.empty(context.get_binding_shape(nInput + i), dtype=trt.nptype(engine.get_binding_dtype(nInput + i))))
bufferD = []
for i in range(engine.num_bindings):
    bufferD.append(cudart.cudaMalloc(bufferH[i].nbytes)[1])

for i in range(nInput):
    cudart.cudaMemcpy(bufferD[i], np.ascontiguousarray(bufferH[i].reshape(-1)).ctypes.data, bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice)

context.execute_v2(bufferD)

for i in range(nOutput):
    cudart.cudaMemcpy(bufferH[nInput + i].ctypes.data, bufferD[nInput + i], bufferH[nInput + i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost)

for i in range(nInput):
    print("Input %d:" % i, bufferH[i].shape, "\n", bufferH[i])
for i in range(nOutput):
    print("Output %d:" % i, bufferH[nInput + i].shape, "\n", bufferH[nInput + i])

for buffer in bufferD:
    cudart.cudaFree(buffer)
trt-samples-for-hackathon-cn-master
cookbook/02-API/Layer/LoopStructure/StaticUnidirectionalLSTM.py
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import numpy as np
import tensorrt as trt
from cuda import cudart

nB, nC, nH, nW = 1, 3, 4, 5
data = np.ones([nB, nC, nH, nW], dtype=np.float32) * np.arange(1, 1 + nC, dtype=np.float32).reshape(1, nC, 1, 1)

np.set_printoptions(precision=3, linewidth=200, suppress=True)
cudart.cudaDeviceSynchronize()

logger = trt.Logger(trt.Logger.ERROR)
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
config = builder.create_builder_config()
inputT0 = network.add_input("inputT0", trt.float32, (nB, nC, nH, nW))
#-------------------------------------------------------------------------------
loop = network.add_loop()
iteratorLayer = loop.add_iterator(inputT0, 1, True)  # build a reversed iterator that throws one slice of shape (1,nH,nW) along the C dimension per iteration, in reverse order
limit = network.add_constant((), np.array([nC], dtype=np.int32))
loop.add_trip_limit(limit.get_output(0), trt.TripLimit.COUNT)
_H0 = network.add_constant([1, nH, nW], np.ones(nH * nW, dtype=np.float32))
rLayer = loop.add_recurrence(_H0.get_output(0))
_H1 = network.add_elementwise(rLayer.get_output(0), iteratorLayer.get_output(0), trt.ElementWiseOperation.SUM)
rLayer.set_input(1, _H1.get_output(0))
loopOutput0 = loop.add_loop_output(rLayer.get_output(0), trt.LoopOutput.LAST_VALUE, 0)
loopOutput1 = loop.add_loop_output(_H1.get_output(0), trt.LoopOutput.CONCATENATE, 0)
lengthLayer = network.add_constant((), np.array([nC], dtype=np.int32))
loopOutput1.set_input(1, lengthLayer.get_output(0))
#-------------------------------------------------------------------------------
network.mark_output(loopOutput0.get_output(0))
network.mark_output(loopOutput1.get_output(0))
engineString = builder.build_serialized_network(network, config)
engine = trt.Runtime(logger).deserialize_cuda_engine(engineString)
context = engine.create_execution_context()
nInput = np.sum([engine.binding_is_input(i) for i in range(engine.num_bindings)])
nOutput = engine.num_bindings - nInput

bufferH = []
bufferH.append(data)
for i in range(nOutput):
    bufferH.append(np.empty(context.get_binding_shape(nInput + i), dtype=trt.nptype(engine.get_binding_dtype(nInput + i))))
bufferD = []
for i in range(engine.num_bindings):
    bufferD.append(cudart.cudaMalloc(bufferH[i].nbytes)[1])

for i in range(nInput):
    cudart.cudaMemcpy(bufferD[i], np.ascontiguousarray(bufferH[i].reshape(-1)).ctypes.data, bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice)

context.execute_v2(bufferD)

for i in range(nOutput):
    cudart.cudaMemcpy(bufferH[nInput + i].ctypes.data, bufferD[nInput + i], bufferH[nInput + i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost)

for i in range(nInput):
    print("Input %d:" % i, bufferH[i].shape, "\n", bufferH[i])
for i in range(nOutput):
    print("Output %d:" % i, bufferH[nInput + i].shape, "\n", bufferH[nInput + i])

for buffer in bufferD:
    cudart.cudaFree(buffer)
trt-samples-for-hackathon-cn-master
cookbook/02-API/Layer/LoopStructure/Iterator4.py
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import numpy as np
import tensorrt as trt
from cuda import cudart

nBatchSize, nSequenceLength, nInputDim = 3, 4, 7  # input tensor shape
nHiddenDim = 5  # hidden layer width
data = np.ones([nBatchSize, nSequenceLength, nInputDim], dtype=np.float32)
weightX = np.ones((nHiddenDim, nInputDim), dtype=np.float32)  # weight matrix (X->H)
weightH = np.ones((nHiddenDim, nHiddenDim), dtype=np.float32)  # weight matrix (H->H)
biasX = np.zeros(nHiddenDim, dtype=np.float32)  # bias (X->H)
biasH = np.zeros(nHiddenDim, dtype=np.float32)  # bias (H->H)

np.set_printoptions(precision=3, linewidth=200, suppress=True)
cudart.cudaDeviceSynchronize()

logger = trt.Logger(trt.Logger.ERROR)
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
config = builder.create_builder_config()
inputT0 = network.add_input("inputT0", trt.float32, (nBatchSize, nSequenceLength, nInputDim))
#------------------------------------------------------------------------------- Network
weightXLayer = network.add_constant([nInputDim, nHiddenDim], weightX.transpose().reshape(-1))
weightHLayer = network.add_constant([nHiddenDim, nHiddenDim], weightH.transpose().reshape(-1))
biasLayer = network.add_constant([nBatchSize, nHiddenDim], np.tile(biasX + biasH, (nBatchSize, 1)))
hidden0Layer = network.add_constant([nBatchSize, nHiddenDim], np.ones(nBatchSize * nHiddenDim, dtype=np.float32))  # initial hidden state; note the shape differs from that of the RNNV2 layer
lengthLayer = network.add_constant((), np.array([nSequenceLength], dtype=np.int32))  # length of the kept results

loop = network.add_loop()
loop.add_trip_limit(lengthLayer.get_output(0), trt.TripLimit.COUNT)
iteratorLayer = loop.add_iterator(inputT0, 1, False)  # each iteration throws one slice of inputT0 along dimension 1, shape (nBatchSize,nInputDim)
rLayer = loop.add_recurrence(hidden0Layer.get_output(0))

_H0 = network.add_matrix_multiply(iteratorLayer.get_output(0), trt.MatrixOperation.NONE, weightXLayer.get_output(0), trt.MatrixOperation.NONE)
_H1 = network.add_matrix_multiply(rLayer.get_output(0), trt.MatrixOperation.NONE, weightHLayer.get_output(0), trt.MatrixOperation.NONE)
_H2 = network.add_elementwise(_H0.get_output(0), _H1.get_output(0), trt.ElementWiseOperation.SUM)
_H3 = network.add_elementwise(_H2.get_output(0), biasLayer.get_output(0), trt.ElementWiseOperation.SUM)
_H4 = network.add_activation(_H3.get_output(0), trt.ActivationType.RELU)
rLayer.set_input(1, _H4.get_output(0))

loopOutput0 = loop.add_loop_output(rLayer.get_output(0), trt.LoopOutput.LAST_VALUE, 0)  # shape (nBatchSize,nHiddenDim): nBatchSize independent outputs, each a single final hidden state of nHiddenDim coordinates
loopOutput1 = loop.add_loop_output(_H4.get_output(0), trt.LoopOutput.CONCATENATE, 1)  # shape (nBatchSize,nSequenceLength,nHiddenDim): nBatchSize independent outputs, each holding nSequenceLength hidden states of nHiddenDim coordinates
loopOutput1.set_input(1, lengthLayer.get_output(0))
#------------------------------------------------------------------------------- Network
network.mark_output(loopOutput0.get_output(0))
network.mark_output(loopOutput1.get_output(0))
engineString = builder.build_serialized_network(network, config)
engine = trt.Runtime(logger).deserialize_cuda_engine(engineString)
context = engine.create_execution_context()
nInput = np.sum([engine.binding_is_input(i) for i in range(engine.num_bindings)])
nOutput = engine.num_bindings - nInput

bufferH = []
bufferH.append(data)
for i in range(nOutput):
    bufferH.append(np.empty(context.get_binding_shape(nInput + i), dtype=trt.nptype(engine.get_binding_dtype(nInput + i))))
bufferD = []
for i in range(engine.num_bindings):
    bufferD.append(cudart.cudaMalloc(bufferH[i].nbytes)[1])

for i in range(nInput):
    cudart.cudaMemcpy(bufferD[i], np.ascontiguousarray(bufferH[i].reshape(-1)).ctypes.data, bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice)

context.execute_v2(bufferD)

for i in range(nOutput):
    cudart.cudaMemcpy(bufferH[nInput + i].ctypes.data, bufferD[nInput + i], bufferH[nInput + i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost)

for i in range(nInput):
    print("Input %d:" % i, bufferH[i].shape, "\n", bufferH[i])
for i in range(nOutput):
    print("Output %d:" % i, bufferH[nInput + i].shape, "\n", bufferH[nInput + i])

for buffer in bufferD:
    cudart.cudaFree(buffer)
trt-samples-for-hackathon-cn-master
cookbook/02-API/Layer/LoopStructure/ReLURNN.py
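The loop in ReLURNN.py realizes the recurrence h_t = ReLU(x_t W_X + h_(t-1) W_H + b). A minimal NumPy sketch (mine, not a cookbook file) mirroring the script's all-ones weights and zero biases, with analogues of both loop outputs:

import numpy as np

nBatchSize, nSequenceLength, nInputDim, nHiddenDim = 3, 4, 7, 5
x = np.ones([nBatchSize, nSequenceLength, nInputDim], np.float32)
h = np.ones([nBatchSize, nHiddenDim], np.float32)   # initial hidden state
wX = np.ones([nInputDim, nHiddenDim], np.float32)   # already transposed, as in add_constant
wH = np.ones([nHiddenDim, nHiddenDim], np.float32)
b = np.zeros([nBatchSize, nHiddenDim], np.float32)

allH = []
for t in range(nSequenceLength):                     # TripLimit.COUNT over the sequence axis
    h = np.maximum(x[:, t, :] @ wX + h @ wH + b, 0)  # ReLU(x_t @ W_X + h @ W_H + b)
    allH.append(h)

print(h)                             # analogue of LoopOutput.LAST_VALUE
print(np.stack(allH, axis=1).shape)  # analogue of LoopOutput.CONCATENATE with index 1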
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import numpy as np
import tensorrt as trt
from cuda import cudart

nBatchSize, nSequenceLength, nInputDim = 3, 4, 7
nHiddenDim = 5
x = np.ones([nBatchSize, nSequenceLength, nInputDim], dtype=np.float32)
h0 = np.ones([nBatchSize, nHiddenDim], dtype=np.float32)  # initial hidden state
c0 = np.zeros([nBatchSize, nHiddenDim], dtype=np.float32)  # initial cell state
weightAllX = np.ones((nHiddenDim, nInputDim), dtype=np.float32)  # weight of X->H
weightAllH = np.ones((nHiddenDim, nHiddenDim), dtype=np.float32)  # weight of H->H
biasAllX = np.zeros(nHiddenDim, dtype=np.float32)  # bias of X->H
biasAllH = np.zeros(nHiddenDim, dtype=np.float32)  # bias of H->H

np.set_printoptions(precision=3, linewidth=200, suppress=True)
cudart.cudaDeviceSynchronize()

logger = trt.Logger(trt.Logger.ERROR)
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
profile = builder.create_optimization_profile()
config = builder.create_builder_config()
config.set_memory_pool_limit(trt.MemoryPoolType.WORKSPACE, 4 << 30)

inputT0 = network.add_input("inputT0", trt.float32, (-1, -1, nInputDim))  # 3 inputs of x, h0, c0
inputT1 = network.add_input("inputT1", trt.float32, (-1, nHiddenDim))
inputT2 = network.add_input("inputT2", trt.float32, (-1, nHiddenDim))
profile.set_shape(inputT0.name, [1, 1, nInputDim], [nBatchSize, nSequenceLength, nInputDim], [nBatchSize * 2, nSequenceLength * 2, nInputDim])
profile.set_shape(inputT1.name, [1, nHiddenDim], [nBatchSize, nHiddenDim], [nBatchSize * 2, nHiddenDim])
profile.set_shape(inputT2.name, [1, nHiddenDim], [nBatchSize, nHiddenDim], [nBatchSize * 2, nHiddenDim])
config.add_optimization_profile(profile)
#------------------------------------------------------------------------------- Network
def gate(network, xTensor, wx, hTensor, wh, b, isSigmoid):
    _h0 = network.add_matrix_multiply(xTensor, trt.MatrixOperation.NONE, wx, trt.MatrixOperation.NONE)
    _h1 = network.add_matrix_multiply(hTensor, trt.MatrixOperation.NONE, wh, trt.MatrixOperation.NONE)
    _h2 = network.add_elementwise(_h0.get_output(0), _h1.get_output(0), trt.ElementWiseOperation.SUM)
    _h3 = network.add_elementwise(_h2.get_output(0), b, trt.ElementWiseOperation.SUM)
    _h4 = network.add_activation(_h3.get_output(0), [trt.ActivationType.TANH, trt.ActivationType.SIGMOID][int(isSigmoid)])
    return _h4

weightAllXLayer = network.add_constant([nInputDim, nHiddenDim], trt.Weights(np.ascontiguousarray(weightAllX.transpose())))
weightAllHLayer = network.add_constant([nHiddenDim, nHiddenDim], trt.Weights(np.ascontiguousarray(weightAllH.transpose())))
biasAllLayer = network.add_constant([1, nHiddenDim], trt.Weights(np.ascontiguousarray(biasAllX + biasAllH)))

_t0 = network.add_shape(inputT0)
_t1 = network.add_slice(_t0.get_output(0), [1], [1], [1])
_t2 = network.add_shuffle(_t1.get_output(0))  # scalar tensor needed for the two kinds of loop condition
_t2.reshape_dims = ()

loop = network.add_loop()
loop.add_trip_limit(_t2.get_output(0), trt.TripLimit.COUNT)
iteratorLayer = loop.add_iterator(inputT0, 1, False)  # one slice of inputT0 [nBatchSize, nInputDim] is indexed for computation each time; a bidirectional LSTM would need an additional reversed iterator
hiddenStateLayer = loop.add_recurrence(inputT1)  # initial hidden state and cell state; there are multiple loop variables in a loop
cellStateLayer = loop.add_recurrence(inputT2)

gateI = gate(network, iteratorLayer.get_output(0), weightAllXLayer.get_output(0), hiddenStateLayer.get_output(0), weightAllHLayer.get_output(0), biasAllLayer.get_output(0), True)
gateF = gate(network, iteratorLayer.get_output(0), weightAllXLayer.get_output(0), hiddenStateLayer.get_output(0), weightAllHLayer.get_output(0), biasAllLayer.get_output(0), True)
gateC = gate(network, iteratorLayer.get_output(0), weightAllXLayer.get_output(0), hiddenStateLayer.get_output(0), weightAllHLayer.get_output(0), biasAllLayer.get_output(0), False)
gateO = gate(network, iteratorLayer.get_output(0), weightAllXLayer.get_output(0), hiddenStateLayer.get_output(0), weightAllHLayer.get_output(0), biasAllLayer.get_output(0), True)

_h5 = network.add_elementwise(gateF.get_output(0), cellStateLayer.get_output(0), trt.ElementWiseOperation.PROD)
_h6 = network.add_elementwise(gateI.get_output(0), gateC.get_output(0), trt.ElementWiseOperation.PROD)
newCellStateLayer = network.add_elementwise(_h5.get_output(0), _h6.get_output(0), trt.ElementWiseOperation.SUM)
_h7 = network.add_activation(newCellStateLayer.get_output(0), trt.ActivationType.TANH)
newHiddenStateLayer = network.add_elementwise(gateO.get_output(0), _h7.get_output(0), trt.ElementWiseOperation.PROD)

hiddenStateLayer.set_input(1, newHiddenStateLayer.get_output(0))
cellStateLayer.set_input(1, newCellStateLayer.get_output(0))

loopOutput0 = loop.add_loop_output(hiddenStateLayer.get_output(0), trt.LoopOutput.LAST_VALUE, 0)  # shape [nBatchSize,nHiddenDim]
loopOutput1 = loop.add_loop_output(newHiddenStateLayer.get_output(0), trt.LoopOutput.CONCATENATE, 1)  # shape [nBatchSize,nSequenceLength,nHiddenDim]
loopOutput1.set_input(1, _t2.get_output(0))
loopOutput2 = loop.add_loop_output(cellStateLayer.get_output(0), trt.LoopOutput.LAST_VALUE, 0)  # shape [nBatchSize,nHiddenDim]
#------------------------------------------------------------------------------- Network
network.mark_output(loopOutput0.get_output(0))
network.mark_output(loopOutput1.get_output(0))
network.mark_output(loopOutput2.get_output(0))
engineString = builder.build_serialized_network(network, config)
engine = trt.Runtime(logger).deserialize_cuda_engine(engineString)
nIO = engine.num_io_tensors
lTensorName = [engine.get_tensor_name(i) for i in range(nIO)]
nInput = [engine.get_tensor_mode(lTensorName[i]) for i in range(nIO)].count(trt.TensorIOMode.INPUT)

context = engine.create_execution_context()
context.set_input_shape(lTensorName[0], x.shape)
context.set_input_shape(lTensorName[1], h0.shape)
context.set_input_shape(lTensorName[2], c0.shape)

bufferH = []
bufferH.append(x)
bufferH.append(h0)
bufferH.append(c0)
for i in range(nInput, nIO):
    bufferH.append(np.empty(context.get_tensor_shape(lTensorName[i]), dtype=trt.nptype(engine.get_tensor_dtype(lTensorName[i]))))
bufferD = []
for i in range(nIO):
    bufferD.append(cudart.cudaMalloc(bufferH[i].nbytes)[1])

for i in range(nInput):
    cudart.cudaMemcpy(bufferD[i], bufferH[i].ctypes.data, bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice)

for i in range(nIO):
    context.set_tensor_address(lTensorName[i], int(bufferD[i]))

context.execute_async_v3(0)

for i in range(nInput, nIO):
    cudart.cudaMemcpy(bufferH[i].ctypes.data, bufferD[i], bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost)

for i in range(nIO):
    print(lTensorName[i])
    print(bufferH[i])

for b in bufferD:
    cudart.cudaFree(b)
trt-samples-for-hackathon-cn-master
cookbook/02-API/Layer/LoopStructure/DynamicUnidirectionalLSTM.py
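The gate() helper above assembles the standard LSTM cell equations. A compact single-step NumPy reference (my sketch, reusing the script's all-ones weights and zero biases, where all four gates happen to share the same weights):

import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

nBatchSize, nInputDim, nHiddenDim = 3, 7, 5
x = np.ones([nBatchSize, nInputDim], np.float32)    # one slice thrown by the iterator
h = np.ones([nBatchSize, nHiddenDim], np.float32)   # previous hidden state
c = np.zeros([nBatchSize, nHiddenDim], np.float32)  # previous cell state
wX = np.ones([nInputDim, nHiddenDim], np.float32)
wH = np.ones([nHiddenDim, nHiddenDim], np.float32)
b = np.zeros([1, nHiddenDim], np.float32)

z = x @ wX + h @ wH + b                       # all four gates share these weights in the demo
i, f, o = sigmoid(z), sigmoid(z), sigmoid(z)  # input / forget / output gates
g = np.tanh(z)                                # cell candidate
c = f * c + i * g                             # new cell state
h = o * np.tanh(c)                            # new hidden state
print(h.shape, c.shape)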
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import numpy as np
import tensorrt as trt
from cuda import cudart

nB, nC, nH, nW = 1, 3, 4, 5
data = np.ones([nB, nC, nH, nW], dtype=np.float32)
length = 7

np.set_printoptions(precision=3, linewidth=200, suppress=True)
cudart.cudaDeviceSynchronize()

logger = trt.Logger(trt.Logger.ERROR)
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
config = builder.create_builder_config()
inputT0 = network.add_input("inputT0", trt.float32, (nB, nC, nH, nW))
#-------------------------------------------------------------------------------
loop = network.add_loop()  # add the Loop structure
rLayer = loop.add_recurrence(inputT0)  # entrance of the loop

_H1 = network.add_reduce(rLayer.get_output(0), trt.ReduceOperation.MAX, (1 << 0) + (1 << 1) + (1 << 2) + (1 << 3), False)  # reduce the loop-carried tensor to one element (all elements are equal here) and check whether it is less than 6
_H2 = network.add_constant((), np.array([6], dtype=np.float32))
_H3 = network.add_elementwise(_H2.get_output(0), _H1.get_output(0), trt.ElementWiseOperation.SUB)
_H4 = network.add_activation(_H3.get_output(0), trt.ActivationType.RELU)
_H5 = network.add_identity(_H4.get_output(0))
_H5.set_output_type(0, trt.bool)
_H5.get_output(0).dtype = trt.bool
loop.add_trip_limit(_H5.get_output(0), trt.TripLimit.WHILE)  # cast the comparison result to BOOL and hand it to TripLimit

_H0 = network.add_scale(rLayer.get_output(0), trt.ScaleMode.UNIFORM, np.array([1], dtype=np.float32), np.array([1], dtype=np.float32), np.array([1], dtype=np.float32))  # loop body: add 1 to every input element
rLayer.set_input(1, _H0.get_output(0))

loopOutput0 = loop.add_loop_output(rLayer.get_output(0), trt.LoopOutput.LAST_VALUE, 0)  # first kind of loop output: keep only the final result; the index argument is ignored
loopOutput1 = loop.add_loop_output(rLayer.get_output(0), trt.LoopOutput.CONCATENATE, 0)  # second kind of loop output: keep all intermediate results; passing rLayer keeps "the results of iterations 0 to t-1" (like a while loop), while passing _H0 keeps "the results of iterations 1 to t" (like a do-while loop, not recommended as it may be buggy)
lengthLayer = network.add_constant((), np.array([length], dtype=np.int32))
loopOutput1.set_input(1, lengthLayer.get_output(0))  # set the length to keep: if the value v of the tensor passed here satisfies v <= t, the first v iterations are kept; if v > t, the extra part is padded with 0
#-------------------------------------------------------------------------------
network.mark_output(loopOutput0.get_output(0))
network.mark_output(loopOutput1.get_output(0))
engineString = builder.build_serialized_network(network, config)
engine = trt.Runtime(logger).deserialize_cuda_engine(engineString)
context = engine.create_execution_context()
nInput = np.sum([engine.binding_is_input(i) for i in range(engine.num_bindings)])
nOutput = engine.num_bindings - nInput

bufferH = []
bufferH.append(data)
for i in range(nOutput):
    bufferH.append(np.empty(context.get_binding_shape(nInput + i), dtype=trt.nptype(engine.get_binding_dtype(nInput + i))))
bufferD = []
for i in range(engine.num_bindings):
    bufferD.append(cudart.cudaMalloc(bufferH[i].nbytes)[1])

for i in range(nInput):
    cudart.cudaMemcpy(bufferD[i], np.ascontiguousarray(bufferH[i].reshape(-1)).ctypes.data, bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice)

context.execute_v2(bufferD)

for i in range(nOutput):
    cudart.cudaMemcpy(bufferH[nInput + i].ctypes.data, bufferD[nInput + i], bufferH[nInput + i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost)

for i in range(nInput):
    print("Input %d:" % i, bufferH[i].shape, "\n", bufferH[i])
for i in range(nOutput):
    print("Output %d:" % i, bufferH[nInput + i].shape, "\n", bufferH[nInput + i])

for buffer in bufferD:
    cudart.cudaFree(buffer)
trt-samples-for-hackathon-cn-master
cookbook/02-API/Layer/LoopStructure/While+Output.py
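For reference, a NumPy sketch (mine, not from the repository) emulating the loop semantics built above: the body adds 1 while max(x) < 6, LAST_VALUE returns the final tensor, and CONCATENATE on rLayer stacks the pre-iteration values, zero-padded to the requested length:

import numpy as np

nB, nC, nH, nW = 1, 3, 4, 5
length = 7
x = np.ones([nB, nC, nH, nW], np.float32)

history = []
while x.max() < 6:            # TripLimit.WHILE: relu(6 - max(x)) cast to bool
    history.append(x.copy())  # passing rLayer keeps iterations 0 .. t-1
    x = x + 1                 # loop body: the Scale layer adds 1

concat = np.zeros([length, nB, nC, nH, nW], np.float32)
concat[:len(history)] = np.stack(history)  # zero padding where length > t
print(x[0, 0, 0, 0], len(history))         # LAST_VALUE is 6 after 5 iterations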
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import numpy as np
import tensorrt as trt
from cuda import cudart

nB, nC, nH, nW = 1, 3, 4, 5
data = np.ones([nB, nC, nH, nW], dtype=np.float32)
t = np.array([6], dtype=np.int32)  # number of iterations

np.set_printoptions(precision=3, linewidth=200, suppress=True)
cudart.cudaDeviceSynchronize()

logger = trt.Logger(trt.Logger.ERROR)
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
profile = builder.create_optimization_profile()
config = builder.create_builder_config()
inputT0 = network.add_input("inputT0", trt.float32, (nB, nC, nH, nW))
inputT1 = network.add_input("inputT1", trt.int32, ())  # set number of iterations as input tensor at runtime
profile.set_shape_input(inputT1.name, (1, ), (6, ), (10, ))  # set value (rather than shape)
config.add_optimization_profile(profile)
#------------------------------------------------------------------------------- Network
loop = network.add_loop()
loop.add_trip_limit(inputT1, trt.TripLimit.COUNT)
rLayer = loop.add_recurrence(inputT0)
_H0 = network.add_elementwise(rLayer.get_output(0), rLayer.get_output(0), trt.ElementWiseOperation.SUM)
rLayer.set_input(1, _H0.get_output(0))
loopOutput0 = loop.add_loop_output(rLayer.get_output(0), trt.LoopOutput.LAST_VALUE, 0)
loopOutput1 = loop.add_loop_output(_H0.get_output(0), trt.LoopOutput.CONCATENATE, 0)
loopOutput1.set_input(1, inputT1)
#------------------------------------------------------------------------------- Network
network.mark_output(loopOutput0.get_output(0))
network.mark_output(loopOutput1.get_output(0))
engineString = builder.build_serialized_network(network, config)
engine = trt.Runtime(logger).deserialize_cuda_engine(engineString)
context = engine.create_execution_context()
context.set_shape_input(1, t)  # bind the real value of the shape tensor at runtime
nInput = np.sum([engine.binding_is_input(i) for i in range(engine.num_bindings)])
nOutput = engine.num_bindings - nInput

bufferH = []
bufferH.append(data)
bufferH.append(np.ascontiguousarray(np.zeros([1], dtype=np.int32).reshape(-1)))  # the host buffer for the shape tensor may hold garbage values
for i in range(nOutput):
    bufferH.append(np.empty(context.get_binding_shape(nInput + i), dtype=trt.nptype(engine.get_binding_dtype(nInput + i))))
bufferD = []
for i in range(engine.num_bindings):
    bufferD.append(cudart.cudaMalloc(bufferH[i].nbytes)[1])

for i in range(nInput):
    cudart.cudaMemcpy(bufferD[i], np.ascontiguousarray(bufferH[i].reshape(-1)).ctypes.data, bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice)

context.execute_v2(bufferD)

for i in range(nOutput):
    cudart.cudaMemcpy(bufferH[nInput + i].ctypes.data, bufferD[nInput + i], bufferH[nInput + i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost)

for i in range(nInput):
    print("Input %d:" % i, bufferH[i].shape, "\n", bufferH[i])
for i in range(nOutput):
    print("Output %d:" % i, bufferH[nInput + i].shape, "\n", bufferH[nInput + i])

for buffer in bufferD:
    cudart.cudaFree(buffer)
trt-samples-for-hackathon-cn-master
cookbook/02-API/Layer/LoopStructure/For+Set_shape_input.py
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import numpy as np
import tensorrt as trt
from cuda import cudart

nB, nC, nH, nW = 1, 3, 4, 5
data = np.ones([nB, nC, nH, nW], dtype=np.float32)
length = 7

np.set_printoptions(precision=3, linewidth=200, suppress=True)
cudart.cudaDeviceSynchronize()

logger = trt.Logger(trt.Logger.ERROR)
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
config = builder.create_builder_config()
inputT0 = network.add_input("inputT0", trt.float32, (nB, nC, nH, nW))
#-------------------------------------------------------------------------------
loop = network.add_loop()
rLayer = loop.add_recurrence(inputT0)

# Case 1:
_H1 = network.add_reduce(rLayer.get_output(0), trt.ReduceOperation.MAX, (1 << 0) + (1 << 1) + (1 << 2) + (1 << 3), False)
_H2 = network.add_constant((), np.array([64], dtype=np.float32))
_H3 = network.add_elementwise(_H2.get_output(0), _H1.get_output(0), trt.ElementWiseOperation.SUB)
_H4 = network.add_activation(_H3.get_output(0), trt.ActivationType.RELU)
_H5 = network.add_identity(_H4.get_output(0))
_H5.get_output(0).dtype = trt.bool
_H6 = _H5
# Case 2:
"""
_H1 = network.add_slice(rLayer.get_output(0),[0,0,0,0],[1,1,1,1],[1,1,1,1])
_H2 = network.add_reduce(_H1.get_output(0),trt.ReduceOperation.MAX,(1<<0)+(1<<1)+(1<<2)+(1<<3),False)
_H3 = network.add_constant((),np.array([64],dtype=np.float32))
_H4 = network.add_elementwise(_H3.get_output(0),_H2.get_output(0),trt.ElementWiseOperation.SUB)
_H5 = network.add_activation(_H4.get_output(0),trt.ActivationType.RELU)
_H6 = network.add_identity(_H5.get_output(0))
_H6.get_output(0).dtype = trt.bool
"""

loop.add_trip_limit(_H6.get_output(0), trt.TripLimit.WHILE)

_H0 = network.add_elementwise(rLayer.get_output(0), rLayer.get_output(0), trt.ElementWiseOperation.SUM)
rLayer.set_input(1, _H0.get_output(0))

loopOutput0 = loop.add_loop_output(rLayer.get_output(0), trt.LoopOutput.LAST_VALUE, 0)
loopOutput1 = loop.add_loop_output(rLayer.get_output(0), trt.LoopOutput.CONCATENATE, 0)
lengthLayer = network.add_constant((), np.array([length], dtype=np.int32))
loopOutput1.set_input(1, lengthLayer.get_output(0))
#-------------------------------------------------------------------------------
network.mark_output(loopOutput0.get_output(0))
network.mark_output(loopOutput1.get_output(0))
engineString = builder.build_serialized_network(network, config)
engine = trt.Runtime(logger).deserialize_cuda_engine(engineString)
context = engine.create_execution_context()
nInput = np.sum([engine.binding_is_input(i) for i in range(engine.num_bindings)])
nOutput = engine.num_bindings - nInput

bufferH = []
bufferH.append(data)
for i in range(nOutput):
    bufferH.append(np.empty(context.get_binding_shape(nInput + i), dtype=trt.nptype(engine.get_binding_dtype(nInput + i))))
bufferD = []
for i in range(engine.num_bindings):
    bufferD.append(cudart.cudaMalloc(bufferH[i].nbytes)[1])

for i in range(nInput):
    cudart.cudaMemcpy(bufferD[i], np.ascontiguousarray(bufferH[i].reshape(-1)).ctypes.data, bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice)

context.execute_v2(bufferD)

for i in range(nOutput):
    cudart.cudaMemcpy(bufferH[nInput + i].ctypes.data, bufferD[nInput + i], bufferH[nInput + i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost)

for i in range(nInput):
    print("Input %d:" % i, bufferH[i].shape, "\n", bufferH[i])
for i in range(nOutput):
    print("Output %d:" % i, bufferH[nInput + i].shape, "\n", bufferH[nInput + i])

for buffer in bufferD:
    cudart.cudaFree(buffer)
trt-samples-for-hackathon-cn-master
cookbook/02-API/Layer/LoopStructure/While+Error.py
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import numpy as np
import tensorrt as trt
from cuda import cudart

nB, nC, nH, nW = 1, 3, 4, 5
data = np.ones([nB, nC, nH, nW], dtype=np.float32) * np.arange(1, 1 + nC, dtype=np.float32).reshape(1, nC, 1, 1)

np.set_printoptions(precision=3, linewidth=200, suppress=True)
cudart.cudaDeviceSynchronize()

logger = trt.Logger(trt.Logger.ERROR)
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
config = builder.create_builder_config()
inputT0 = network.add_input("inputT0", trt.float32, (nB, nC, nH, nW))
#-------------------------------------------------------------------------------
loop = network.add_loop()
iteratorLayer = loop.add_iterator(inputT0, 1, False)  # build an iterator that throws one slice of shape (1,nH,nW) along the C dimension per iteration, in forward order
iteratorLayer.axis = 1  # reset the axis to iterate over; the outermost dimension is 0, increasing toward inner dimensions
print(iteratorLayer.reverse)  # whether slices are thrown in reverse order (see later examples); read-only, prints False here

limit = network.add_constant((), np.array([nC], dtype=np.int32))
loop.add_trip_limit(limit.get_output(0), trt.TripLimit.COUNT)

_H0 = network.add_constant([1, nH, nW], np.ones(nH * nW, dtype=np.float32))  # the loop-body input tensor before the first iteration must be initialized outside the loop; here it acts as the initial value of the sum
rLayer = loop.add_recurrence(_H0.get_output(0))
_H1 = network.add_elementwise(rLayer.get_output(0), iteratorLayer.get_output(0), trt.ElementWiseOperation.SUM)
rLayer.set_input(1, _H1.get_output(0))

loopOutput0 = loop.add_loop_output(rLayer.get_output(0), trt.LoopOutput.LAST_VALUE, 0)  # keep only the final output; the index argument is ignored
loopOutput1 = loop.add_loop_output(_H1.get_output(0), trt.LoopOutput.CONCATENATE, 0)  # keep all intermediate outputs; other values of index are possible (examples follow)
lengthLayer = network.add_constant((), np.array([nC], dtype=np.int32))
loopOutput1.set_input(1, lengthLayer.get_output(0))
#-------------------------------------------------------------------------------
network.mark_output(loopOutput0.get_output(0))
network.mark_output(loopOutput1.get_output(0))
engineString = builder.build_serialized_network(network, config)
engine = trt.Runtime(logger).deserialize_cuda_engine(engineString)
context = engine.create_execution_context()
nInput = np.sum([engine.binding_is_input(i) for i in range(engine.num_bindings)])
nOutput = engine.num_bindings - nInput

bufferH = []
bufferH.append(data)
for i in range(nOutput):
    bufferH.append(np.empty(context.get_binding_shape(nInput + i), dtype=trt.nptype(engine.get_binding_dtype(nInput + i))))
bufferD = []
for i in range(engine.num_bindings):
    bufferD.append(cudart.cudaMalloc(bufferH[i].nbytes)[1])

for i in range(nInput):
    cudart.cudaMemcpy(bufferD[i], np.ascontiguousarray(bufferH[i].reshape(-1)).ctypes.data, bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice)

context.execute_v2(bufferD)

for i in range(nOutput):
    cudart.cudaMemcpy(bufferH[nInput + i].ctypes.data, bufferD[nInput + i], bufferH[nInput + i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost)

for i in range(nInput):
    print("Input %d:" % i, bufferH[i].shape, "\n", bufferH[i])
for i in range(nOutput):
    print("Output %d:" % i, bufferH[nInput + i].shape, "\n", bufferH[nInput + i])

for buffer in bufferD:
    cudart.cudaFree(buffer)
trt-samples-for-hackathon-cn-master
cookbook/02-API/Layer/LoopStructure/Iterator.py
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import numpy as np
import tensorrt as trt
from cuda import cudart

nB, nC, nH, nW = 1, 3, 4, 5
t = np.array([6], dtype=np.int32)  # number of iterations
data = np.ones([nB, nC, nH, nW], dtype=np.float32)

np.set_printoptions(precision=3, linewidth=200, suppress=True)
cudart.cudaDeviceSynchronize()

logger = trt.Logger(trt.Logger.ERROR)
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
config = builder.create_builder_config()
inputT0 = network.add_input("inputT0", trt.float32, (nB, nC, nH, nW))
#------------------------------------------------------------------------------- Network
loop = network.add_loop()  # add Loop structure

limit = network.add_constant((), np.array([t], dtype=np.int32))  # count of iterations, here as a buildtime constant
loop.add_trip_limit(limit.get_output(0), trt.TripLimit.COUNT)  # use Count-Loop (similar to for-loop)

rLayer = loop.add_recurrence(inputT0)  # entrance of the Loop
_H0 = network.add_elementwise(rLayer.get_output(0), rLayer.get_output(0), trt.ElementWiseOperation.SUM)  # body of the Loop
#rLayer.set_input(0,inputT0)  # the zeroth input tensor of rLayer is inputT0 itself
rLayer.set_input(1, _H0.get_output(0))  # the first input tensor of rLayer is the output tensor of the body of the Loop

loopOutput0 = loop.add_loop_output(rLayer.get_output(0), trt.LoopOutput.LAST_VALUE, 0)  # the first kind of output: only keep the final output of the Loop; the parameter index is ignored
loopOutput1 = loop.add_loop_output(_H0.get_output(0), trt.LoopOutput.CONCATENATE, 0)  # the second kind of output: keep all output of the Loop
# keep the results of iterations 1 to t if passing _H0 to loop.add_loop_output, or the results of iterations 0 to t-1 if passing rLayer to loop.add_loop_output
loopOutput1.set_input(1, limit.get_output(0))  # set the count of iterations kept in the output: if the value v given here satisfies v <= t, the first v iterations are kept; 0 padding is used for the part where v > t
#------------------------------------------------------------------------------- Network
network.mark_output(loopOutput0.get_output(0))
network.mark_output(loopOutput1.get_output(0))
engineString = builder.build_serialized_network(network, config)
engine = trt.Runtime(logger).deserialize_cuda_engine(engineString)
nIO = engine.num_io_tensors
lTensorName = [engine.get_tensor_name(i) for i in range(nIO)]
nInput = [engine.get_tensor_mode(lTensorName[i]) for i in range(nIO)].count(trt.TensorIOMode.INPUT)

context = engine.create_execution_context()
context.set_input_shape(lTensorName[0], [nB, nC, nH, nW])
for i in range(nIO):
    print("[%2d]%s->" % (i, "Input " if i < nInput else "Output"), engine.get_tensor_dtype(lTensorName[i]), engine.get_tensor_shape(lTensorName[i]), context.get_tensor_shape(lTensorName[i]), lTensorName[i])

bufferH = []
for i in range(nIO):
    bufferH.append(np.empty(context.get_tensor_shape(lTensorName[i]), dtype=trt.nptype(engine.get_tensor_dtype(lTensorName[i]))))
bufferD = []
for i in range(nIO):
    bufferD.append(cudart.cudaMalloc(bufferH[i].nbytes)[1])

bufferH[0] = data
for i in range(nInput):
    cudart.cudaMemcpy(bufferD[i], bufferH[i].ctypes.data, bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice)

for i in range(nIO):
    context.set_tensor_address(lTensorName[i], int(bufferD[i]))

context.execute_async_v3(0)

for i in range(nInput, nIO):
    cudart.cudaMemcpy(bufferH[i].ctypes.data, bufferD[i], bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost)

for i in range(nIO):
    print(lTensorName[i])
    print(bufferH[i])

for b in bufferD:
    cudart.cudaFree(b)
trt-samples-for-hackathon-cn-master
cookbook/02-API/Layer/LoopStructure/For+Output.py
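A NumPy sketch (again mine, not a cookbook file) of what this COUNT loop computes: the recurrence doubles the tensor each trip, so LAST_VALUE yields 2**t, and CONCATENATE on _H0 stacks the post-iteration values 1 through t:

import numpy as np

nB, nC, nH, nW = 1, 3, 4, 5
t = 6
x = np.ones([nB, nC, nH, nW], np.float32)

history = []
for _ in range(t):            # TripLimit.COUNT
    x = x + x                 # loop body: elementwise SUM of the recurrence with itself
    history.append(x.copy())  # passing _H0 keeps iterations 1 .. t

print(x[0, 0, 0, 0])            # LAST_VALUE: every element equals 2 ** t = 64
print(np.stack(history).shape)  # CONCATENATE with index 0 and length t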
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import numpy as np
import tensorrt as trt
from cuda import cudart

nB, nC, nH, nW = 1, 3, 4, 5
data = np.ones([nB, nC, nH, nW], dtype=np.float32) * np.arange(1, 1 + nC, dtype=np.float32).reshape(1, nC, 1, 1)

np.set_printoptions(precision=3, linewidth=200, suppress=True)
cudart.cudaDeviceSynchronize()

logger = trt.Logger(trt.Logger.ERROR)
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
config = builder.create_builder_config()
inputT0 = network.add_input("inputT0", trt.float32, (nB, nC, nH, nW))
#-------------------------------------------------------------------------------
loop = network.add_loop()
iteratorLayer = loop.add_iterator(inputT0, 3, False)  # the axis argument is changed to 3
limit = network.add_constant((), np.array([nW], dtype=np.int32))  # the trip count becomes nW
loop.add_trip_limit(limit.get_output(0), trt.TripLimit.COUNT)
_H0 = network.add_constant([1, nC, nH], np.ones(nC * nH, dtype=np.float32))  # loop-body input tensor; its shape becomes [1,nC,nH]
rLayer = loop.add_recurrence(_H0.get_output(0))
_H1 = network.add_elementwise(rLayer.get_output(0), iteratorLayer.get_output(0), trt.ElementWiseOperation.SUM)
rLayer.set_input(1, _H1.get_output(0))
loopOutput0 = loop.add_loop_output(rLayer.get_output(0), trt.LoopOutput.LAST_VALUE, 0)
loopOutput1 = loop.add_loop_output(_H1.get_output(0), trt.LoopOutput.CONCATENATE, 0)
lengthLayer = network.add_constant((), np.array([nW], dtype=np.int32))  # the kept length becomes nW
loopOutput1.set_input(1, lengthLayer.get_output(0))
#-------------------------------------------------------------------------------
network.mark_output(loopOutput0.get_output(0))
network.mark_output(loopOutput1.get_output(0))
engineString = builder.build_serialized_network(network, config)
engine = trt.Runtime(logger).deserialize_cuda_engine(engineString)
context = engine.create_execution_context()
nInput = np.sum([engine.binding_is_input(i) for i in range(engine.num_bindings)])
nOutput = engine.num_bindings - nInput

bufferH = []
bufferH.append(data)
for i in range(nOutput):
    bufferH.append(np.empty(context.get_binding_shape(nInput + i), dtype=trt.nptype(engine.get_binding_dtype(nInput + i))))
bufferD = []
for i in range(engine.num_bindings):
    bufferD.append(cudart.cudaMalloc(bufferH[i].nbytes)[1])

for i in range(nInput):
    cudart.cudaMemcpy(bufferD[i], np.ascontiguousarray(bufferH[i].reshape(-1)).ctypes.data, bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice)

context.execute_v2(bufferD)

for i in range(nOutput):
    cudart.cudaMemcpy(bufferH[nInput + i].ctypes.data, bufferD[nInput + i], bufferH[nInput + i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost)

for i in range(nInput):
    print("Input %d:" % i, bufferH[i].shape, "\n", bufferH[i])
for i in range(nOutput):
    print("Output %d:" % i, bufferH[nInput + i].shape, "\n", bufferH[nInput + i])

for buffer in bufferD:
    cudart.cudaFree(buffer)
trt-samples-for-hackathon-cn-master
cookbook/02-API/Layer/LoopStructure/Iterator3.py
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import numpy as np
import tensorrt as trt
from cuda import cudart

nB, nC, nH, nW = 1, 3, 4, 5
data = np.ones([nB, nC, nH, nW], dtype=np.float32) * np.arange(1, 1 + nC, dtype=np.float32).reshape(1, nC, 1, 1)
np.set_printoptions(precision=3, linewidth=200, suppress=True)
cudart.cudaDeviceSynchronize()

logger = trt.Logger(trt.Logger.ERROR)
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
config = builder.create_builder_config()
inputT0 = network.add_input("inputT0", trt.float32, (nB, nC, nH, nW))
#-------------------------------------------------------------------------------
loop = network.add_loop()
iteratorLayer = loop.add_iterator(inputT0, 2, False)  # iterate over axis 2

limit = network.add_constant((), np.array([nH], dtype=np.int32))  # trip count becomes nH
loop.add_trip_limit(limit.get_output(0), trt.TripLimit.COUNT)

_H0 = network.add_constant([1, nC, nW], np.ones(nC * nW, dtype=np.float32))  # initial value of the loop body, shape becomes [1, nC, nW]
rLayer = loop.add_recurrence(_H0.get_output(0))
_H1 = network.add_elementwise(rLayer.get_output(0), iteratorLayer.get_output(0), trt.ElementWiseOperation.SUM)
rLayer.set_input(1, _H1.get_output(0))

loopOutput0 = loop.add_loop_output(rLayer.get_output(0), trt.LoopOutput.LAST_VALUE, 0)
loopOutput1 = loop.add_loop_output(_H1.get_output(0), trt.LoopOutput.CONCATENATE, 0)
lengthLayer = network.add_constant((), np.array([nH], dtype=np.int32))  # length of the saved output becomes nH
loopOutput1.set_input(1, lengthLayer.get_output(0))
#-------------------------------------------------------------------------------
network.mark_output(loopOutput0.get_output(0))
network.mark_output(loopOutput1.get_output(0))
engineString = builder.build_serialized_network(network, config)
engine = trt.Runtime(logger).deserialize_cuda_engine(engineString)
context = engine.create_execution_context()
nInput = np.sum([engine.binding_is_input(i) for i in range(engine.num_bindings)])
nOutput = engine.num_bindings - nInput

bufferH = []
bufferH.append(data)
for i in range(nOutput):
    bufferH.append(np.empty(context.get_binding_shape(nInput + i), dtype=trt.nptype(engine.get_binding_dtype(nInput + i))))
bufferD = []
for i in range(engine.num_bindings):
    bufferD.append(cudart.cudaMalloc(bufferH[i].nbytes)[1])

for i in range(nInput):
    cudart.cudaMemcpy(bufferD[i], np.ascontiguousarray(bufferH[i].reshape(-1)).ctypes.data, bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice)

context.execute_v2(bufferD)

for i in range(nOutput):
    cudart.cudaMemcpy(bufferH[nInput + i].ctypes.data, bufferD[nInput + i], bufferH[nInput + i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost)

for i in range(nInput):
    print("Input %d:" % i, bufferH[i].shape, "\n", bufferH[i])
for i in range(nOutput):
    print("Output %d:" % i, bufferH[nInput + i].shape, "\n", bufferH[nInput + i])

for buffer in bufferD:
    cudart.cudaFree(buffer)
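# Hedged NumPy sketch (an added illustration, not part of the original sample) of
# what the loop above computes: a running sum over the H-axis slices of the input.
# "ref" and "out1" are hypothetical helper names introduced here.
ref = np.ones([1, nC, nW], dtype=np.float32)  # same initial value as _H0
out1 = []
for h in range(nH):
    ref = ref + data[:, :, h, :]  # one loop iteration
    out1.append(ref)
print(ref)                     # matches trt.LoopOutput.LAST_VALUE
print(np.stack(out1, axis=0))  # matches trt.LoopOutput.CONCATENATE on axis 0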
trt-samples-for-hackathon-cn-master
cookbook/02-API/Layer/LoopStructure/Iterator2.py
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import numpy as np
import tensorrt as trt
from cuda import cudart

nB, nC, nH, nW = 1, 3, 3, 3
data = np.arange(1, 1 + nC * nH * nW, dtype=np.float32).reshape(nB, nC, nH, nW)
np.set_printoptions(precision=3, linewidth=200, suppress=True)
cudart.cudaDeviceSynchronize()

logger = trt.Logger(trt.Logger.ERROR)
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
config = builder.create_builder_config()
inputT0 = network.add_input("inputT0", trt.float32, (nB, nC, nH, nW))
#------------------------------------------------------------------------------- Network
softMaxLayer = network.add_softmax(inputT0)
#------------------------------------------------------------------------------- Network
network.mark_output(softMaxLayer.get_output(0))
engineString = builder.build_serialized_network(network, config)
engine = trt.Runtime(logger).deserialize_cuda_engine(engineString)
nIO = engine.num_io_tensors
lTensorName = [engine.get_tensor_name(i) for i in range(nIO)]
nInput = [engine.get_tensor_mode(lTensorName[i]) for i in range(nIO)].count(trt.TensorIOMode.INPUT)

context = engine.create_execution_context()
context.set_input_shape(lTensorName[0], data.shape)

bufferH = []
bufferH.append(data)
for i in range(nInput, nIO):
    bufferH.append(np.empty(context.get_tensor_shape(lTensorName[i]), dtype=trt.nptype(engine.get_tensor_dtype(lTensorName[i]))))
bufferD = []
for i in range(nIO):
    bufferD.append(cudart.cudaMalloc(bufferH[i].nbytes)[1])

for i in range(nInput):
    cudart.cudaMemcpy(bufferD[i], bufferH[i].ctypes.data, bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice)

for i in range(nIO):
    context.set_tensor_address(lTensorName[i], int(bufferD[i]))

context.execute_async_v3(0)

for i in range(nInput, nIO):
    cudart.cudaMemcpy(bufferH[i].ctypes.data, bufferD[i], bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost)

for i in range(nIO):
    print(lTensorName[i])
    print(bufferH[i])

for b in bufferD:
    cudart.cudaFree(b)
trt-samples-for-hackathon-cn-master
cookbook/02-API/Layer/SoftmaxLayer/SimpleExample.py
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import numpy as np
import tensorrt as trt
from cuda import cudart

nB, nC, nH, nW = 1, 3, 3, 3
data = np.arange(1, 1 + nC * nH * nW, dtype=np.float32).reshape(nB, nC, nH, nW)
np.set_printoptions(precision=3, linewidth=200, suppress=True)
cudart.cudaDeviceSynchronize()

logger = trt.Logger(trt.Logger.ERROR)
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
config = builder.create_builder_config()
inputT0 = network.add_input("inputT0", trt.float32, (nB, nC, nH, nW))
#------------------------------------------------------------------------------- Network
axesIndex = 0
softMaxLayer = network.add_softmax(inputT0)
softMaxLayer.axes = 1 << axesIndex  # axis to compute softmax on, default value is 1 << max(0, len(inputT0.shape) - 3)
#------------------------------------------------------------------------------- Network
network.mark_output(softMaxLayer.get_output(0))
engineString = builder.build_serialized_network(network, config)
engine = trt.Runtime(logger).deserialize_cuda_engine(engineString)
nIO = engine.num_io_tensors
lTensorName = [engine.get_tensor_name(i) for i in range(nIO)]
nInput = [engine.get_tensor_mode(lTensorName[i]) for i in range(nIO)].count(trt.TensorIOMode.INPUT)

context = engine.create_execution_context()
context.set_input_shape(lTensorName[0], data.shape)

bufferH = []
bufferH.append(data)
for i in range(nInput, nIO):
    bufferH.append(np.empty(context.get_tensor_shape(lTensorName[i]), dtype=trt.nptype(engine.get_tensor_dtype(lTensorName[i]))))
bufferD = []
for i in range(nIO):
    bufferD.append(cudart.cudaMalloc(bufferH[i].nbytes)[1])

for i in range(nInput):
    cudart.cudaMemcpy(bufferD[i], bufferH[i].ctypes.data, bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice)

for i in range(nIO):
    context.set_tensor_address(lTensorName[i], int(bufferD[i]))

context.execute_async_v3(0)

for i in range(nInput, nIO):
    cudart.cudaMemcpy(bufferH[i].ctypes.data, bufferD[i], bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost)

for i in range(nIO):
    print(lTensorName[i])
    print(bufferH[i])

for b in bufferD:
    cudart.cudaFree(b)
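# Hedged NumPy cross-check (an added illustration, not part of the original
# sample): reproduce the softmax over the axis selected above. With axesIndex = 0
# (the batch axis of size nB = 1), every output element is expected to be 1.0.
e = np.exp(data - data.max(axis=axesIndex, keepdims=True))
print(e / e.sum(axis=axesIndex, keepdims=True))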
trt-samples-for-hackathon-cn-master
cookbook/02-API/Layer/SoftmaxLayer/Axes.py
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import numpy as np
import tensorrt as trt
from cuda import cudart

shape = [1, 4, 8, 8]
data = np.arange(np.prod(shape), dtype=np.float32).reshape(shape)
np.set_printoptions(precision=3, edgeitems=8, linewidth=300, suppress=True)
cudart.cudaDeviceSynchronize()

logger = trt.Logger(trt.Logger.VERBOSE)
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
profile = builder.create_optimization_profile()
config = builder.create_builder_config()
config.set_flag(trt.BuilderFlag.INT8)

inputT0 = network.add_input("inputT0", trt.float32, [-1] + shape[1:])
profile.set_shape(inputT0.name, [1] + shape[1:], [2] + shape[1:], [4] + shape[1:])
config.add_optimization_profile(profile)

layer = network.add_identity(inputT0)
layer.precision = trt.int8
layer.get_output(0).dtype = trt.int8
layer.set_output_type(0, trt.int8)
layer.get_output(0).allowed_formats = 1 << int(trt.TensorFormat.CHW4)
layer.get_output(0).dynamic_range = [-128, 128]

network.mark_output(layer.get_output(0))

engineString = builder.build_serialized_network(network, config)
engine = trt.Runtime(logger).deserialize_cuda_engine(engineString)
nIO = engine.num_io_tensors
lTensorName = [engine.get_tensor_name(i) for i in range(nIO)]
nInput = [engine.get_tensor_mode(lTensorName[i]) for i in range(nIO)].count(trt.TensorIOMode.INPUT)  # count of input tensors
nOutput = [engine.get_tensor_mode(lTensorName[i]) for i in range(nIO)].count(trt.TensorIOMode.OUTPUT)  # count of output tensors

context = engine.create_execution_context()
print("context.__sizeof__() = %d" % context.__sizeof__())
print("context.__str__() = %s" % context.__str__())

print("\nContext related =======================================================")
context.debug_sync = True  # debug switch, one VERBOSE log entry is emitted for each inference
context.nvtx_verbosity = trt.ProfilingVerbosity.LAYER_NAMES_ONLY  # default value
#context.nvtx_verbosity = trt.ProfilingVerbosity.NONE
#context.nvtx_verbosity = trt.ProfilingVerbosity.DETAILED
#context.nvtx_verbosity = trt.ProfilingVerbosity.DEFAULT  # same as LAYER_NAMES_ONLY, deprecated since TensorRT 8.5
#context.nvtx_verbosity = trt.ProfilingVerbosity.VERBOSE  # same as DETAILED, deprecated since TensorRT 8.5
print("context.name = %s" % context.name)
print("context.engine = %s" % context.engine)
print("context.enqueue_emits_profile = %s" % context.enqueue_emits_profile)  # refer to 02-API/Profiler
print("context.active_optimization_profile = %d" % context.active_optimization_profile)
print("context.persistent_cache_limit = %d" % context.persistent_cache_limit)

print("\nInput / Output tensor related =========================================")
print("context.infer_shapes() = %s" % context.infer_shapes())  # get names of the tensors whose shape / value still needs to be set
print("context.all_binding_shapes_specified = %s" % context.all_binding_shapes_specified)  # only works for set_binding_shape(), not for set_input_shape()
print("context.all_shape_inputs_specified = %s" % context.all_shape_inputs_specified)  # only works for set_input_shape(), not for set_shape_input()

for i in range(nIO):
    print("[%2d]%s->" % (i, "Input " if i < nInput else "Output"), context.get_tensor_shape(lTensorName[i]))
    #print("[%2d]%s->" % (i, "Input " if i < nInput else "Output"), context.get_binding_shape(i))  # deprecated Binding API

context.set_input_shape(lTensorName[0], shape)
#context.set_binding_shape(0, shape)

print("context.infer_shapes() = %s" % context.infer_shapes())  # now all input tensors are set

for i in range(nIO):
    print("[%2d]%s->" % (i, "Input " if i < nInput else "Output"), context.get_tensor_shape(lTensorName[i]), context.get_tensor_strides(lTensorName[i]))
    #print("[%2d]%s->" % (i, "Input " if i < nInput else "Output"), context.get_binding_shape(i), context.get_strides(i))
    #print("[%2d]%s->" % (i, "Input " if i < nInput else "Output"), context.get_shape(i))  # no input shape tensor in this code example

for i in range(nInput, nIO):
    print("[%2d]Output->" % i, context.get_max_output_size(lTensorName[i]))  # usually used for data-dependent output shapes, refer to 02-API/Layer/NonzeroLayer

bufferH = []
bufferH.append(np.ascontiguousarray(data))
for i in range(nInput, nIO):
    bufferH.append(np.empty(context.get_tensor_shape(lTensorName[i]), dtype=trt.nptype(engine.get_tensor_dtype(lTensorName[i]))))
bufferD = []
for i in range(nIO):
    bufferD.append(cudart.cudaMalloc(bufferH[i].nbytes)[1])

for i in range(nInput):  # copy input data from host buffer into device buffer
    cudart.cudaMemcpy(bufferD[i], bufferH[i].ctypes.data, bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice)

for i in range(nIO):
    context.set_tensor_address(lTensorName[i], int(bufferD[i]))
    print(context.get_tensor_address(lTensorName[i]))

#context.execute(shape[0], bufferD)  # deprecated since TensorRT 7.0, only for Implicit Batch mode
#context.execute_async(shape[0], bufferD)  # deprecated since TensorRT 7.0, only for Implicit Batch mode
context.execute_v2(bufferD)
context.execute_async_v2(bufferD, 0)
context.execute_async_v3(0)  # since TensorRT 8.5

for i in range(nInput, nIO):
    cudart.cudaMemcpy(bufferH[i].ctypes.data, bufferD[i], bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost)

for i in range(nInput):
    print("Input %d:" % i, bufferH[i].shape, "\n", bufferH[i])
for i in range(nInput, nIO):
    print("Output %d:" % i, bufferH[i].shape, "\n", bufferH[i])

print("Restore to Linear:")
print(bufferH[-1].reshape(np.prod(shape[:3]) * 2, shape[-1] // 2).transpose(1, 0).reshape(shape))

for buffer in bufferD:
    cudart.cudaFree(buffer)
"""
Member of IExecutionContext:
++++        shown above
====        shown in binding part
~~~~        deprecated
----        not shown above
[no prefix] others

----__class__
__del__
__delattr__
__dir__
__doc__
__enter__
__eq__
__exit__
__format__
__ge__
__getattribute__
__gt__
__hash__
__init__
__init_subclass__
__le__
__lt__
__module__
__ne__
__new__
----__pybind11_module_local_v4_gcc_libstdcpp_cxxabi1013__
__reduce__
__reduce_ex__
__repr__
__setattr__
++++__sizeof__
++++__str__
__subclasshook__
++++active_optimization_profile
++++all_binding_shapes_specified
++++all_shape_inputs_specified
++++debug_sync
----device_memory                   unreadable attribute
++++engine
++++enqueue_emits_profile
----error_recorder                  refer to 02-API/ErrorRecorder
++++execute
++++execute_async
++++execute_async_v2
++++execute_async_v3
++++execute_v2
++++get_binding_shape
----get_input_consumed_event        refer to 02-API/Event
++++get_max_output_size
----get_output_allocator            refer to 02-API/OutputAllocator
----get_shape                       refer to 02-API/OptimizationProfile/main-ShapeInput.py
++++get_strides
++++get_tensor_address
++++get_tensor_shape
++++get_tensor_strides
++++infer_shapes
++++name
++++nvtx_verbosity
++++persistent_cache_limit
----profiler                        refer to 9-Advance/Profiler
----report_to_profiler              refer to 9-Advance/Profiler
----set_aux_streams                 refer to 9-Advance/AuxStream
++++set_binding_shape
----set_input_consumed_event        refer to 02-API/Event
++++set_input_shape
----set_optimization_profile_async  refer to 02-API/MultiOptimizationProfile
----set_output_allocator            refer to 02-API/OutputAllocator
----set_shape_input                 refer to 02-API/OptimizationProfile/main-ShapeInput-BindingAPI.py
++++set_tensor_address
----temporary_allocator             refer to 02-API/GPUAllocator
"""
trt-samples-for-hackathon-cn-master
cookbook/02-API/ExecutionContext/main.py
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

from collections import OrderedDict

import numpy as np
import onnx
import onnx_graphsurgeon as gs

onnxFile = "./model.onnx"

tensor0 = gs.Variable("tensor-0", np.float32, ["B", 1, 28, 28])

constant32x1 = gs.Constant("constant32x1", np.ascontiguousarray(np.random.rand(32, 1, 5, 5).reshape(32, 1, 5, 5).astype(np.float32) * 2 - 1))
constant32 = gs.Constant("constant32", np.ascontiguousarray(np.random.rand(32).reshape(32).astype(np.float32) * 2 - 1))
constant64x32 = gs.Constant("constant64x32", np.ascontiguousarray(np.random.rand(64, 32, 5, 5).reshape(64, 32, 5, 5).astype(np.float32) * 2 - 1))
constant64 = gs.Constant("constant64", np.ascontiguousarray(np.random.rand(64).reshape(64).astype(np.float32) * 2 - 1))
constantM1Comma3136 = gs.Constant("constantM1Comma3136", np.ascontiguousarray(np.array([-1, 7 * 7 * 64], dtype=np.int64)))
constant3136x1024 = gs.Constant("constant3136x1024", np.ascontiguousarray(np.random.rand(3136, 1024).reshape(3136, 1024).astype(np.float32) * 2 - 1))
constant1024 = gs.Constant("constant1024", np.ascontiguousarray(np.random.rand(1024).reshape(1024).astype(np.float32) * 2 - 1))
constant1024x10 = gs.Constant("constant1024x10", np.ascontiguousarray(np.random.rand(1024, 10).reshape(1024, 10).astype(np.float32) * 2 - 1))
constant10 = gs.Constant("constant10", np.ascontiguousarray(np.random.rand(10).reshape(10).astype(np.float32) * 2 - 1))

graphNodeList = []

tensor1 = gs.Variable("tensor-1", np.float32, None)
node1 = gs.Node("Conv", "Conv-1", inputs=[tensor0, constant32x1, constant32], outputs=[tensor1])
node1.attrs = OrderedDict([["kernel_shape", [5, 5]], ["pads", [2, 2, 2, 2]]])
graphNodeList.append(node1)

tensor2 = gs.Variable("tensor-2", np.float32, None)
node2 = gs.Node("Relu", "ReLU-2", inputs=[tensor1], outputs=[tensor2])
graphNodeList.append(node2)

tensor3 = gs.Variable("tensor-3", np.float32, None)
node3 = gs.Node("MaxPool", "MaxPool-3", inputs=[tensor2], outputs=[tensor3])
node3.attrs = OrderedDict([["kernel_shape", [2, 2]], ["pads", [0, 0, 0, 0]], ["strides", [2, 2]]])
graphNodeList.append(node3)

tensor4 = gs.Variable("tensor-4", np.float32, None)
node4 = gs.Node("Conv", "Conv-4", inputs=[tensor3, constant64x32, constant64], outputs=[tensor4])
node4.attrs = OrderedDict([["kernel_shape", [5, 5]], ["pads", [2, 2, 2, 2]]])
graphNodeList.append(node4)

tensor5 = gs.Variable("tensor-5", np.float32, None)
node5 = gs.Node("Relu", "ReLU-5", inputs=[tensor4], outputs=[tensor5])
graphNodeList.append(node5)

tensor6 = gs.Variable("tensor-6", np.float32, None)
node6 = gs.Node("MaxPool", "MaxPool-6", inputs=[tensor5], outputs=[tensor6])
node6.attrs = OrderedDict([["kernel_shape", [2, 2]], ["pads", [0, 0, 0, 0]], ["strides", [2, 2]]])
graphNodeList.append(node6)

tensor7 = gs.Variable("tensor-7", np.float32, None)
node7 = gs.Node("Transpose", "Transpose-7", inputs=[tensor6], outputs=[tensor7], attrs=OrderedDict([("perm", [0, 2, 3, 1])]))
graphNodeList.append(node7)

tensor8 = gs.Variable("tensor-8", np.float32, None)
node8 = gs.Node("Reshape", "Reshape-8", inputs=[tensor7, constantM1Comma3136], outputs=[tensor8])
graphNodeList.append(node8)

tensor9 = gs.Variable("tensor-9", np.float32, None)
node9 = gs.Node("MatMul", "MatMul-9", inputs=[tensor8, constant3136x1024], outputs=[tensor9])
graphNodeList.append(node9)

tensor10 = gs.Variable("tensor-10", np.float32, None)
node10 = gs.Node("Add", "Add-10", inputs=[tensor9, constant1024], outputs=[tensor10])
graphNodeList.append(node10)

tensor11 = gs.Variable("tensor-11", np.float32, None)
node11 = gs.Node("Relu", "ReLU-11", inputs=[tensor10], outputs=[tensor11])
graphNodeList.append(node11)

tensor12 = gs.Variable("tensor-12", np.float32, None)
node12 = gs.Node("MatMul", "MatMul-12", inputs=[tensor11, constant1024x10], outputs=[tensor12])
graphNodeList.append(node12)

tensor13 = gs.Variable("tensor-13", np.float32, None)
node13 = gs.Node("Add", "Add-13", inputs=[tensor12, constant10], outputs=[tensor13])
graphNodeList.append(node13)

tensor14 = gs.Variable("tensor-14", np.float32, None)
node14 = gs.Node("Softmax", "Softmax-14", inputs=[tensor13], outputs=[tensor14], attrs=OrderedDict([("axis", 1)]))
graphNodeList.append(node14)

tensor15 = gs.Variable("tensor-15", np.int64, None)
node15 = gs.Node("ArgMax", "ArgMax-15", inputs=[tensor14], outputs=[tensor15], attrs=OrderedDict([("axis", 1), ("keepdims", 0)]))
graphNodeList.append(node15)

graph = gs.Graph(nodes=graphNodeList, inputs=[tensor0], outputs=[tensor15])
graph.cleanup().toposort()
onnx.save(gs.export_onnx(graph), onnxFile)
print("Succeeded creating %s" % onnxFile)
trt-samples-for-hackathon-cn-master
cookbook/02-API/ONNXParser/getOnnxModel.py
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import os

import numpy as np
import tensorrt as trt

shape = [1, 1, 28, 28]
onnxFile = "./model.onnx"

logger = trt.Logger(trt.Logger.INFO)
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
profile = builder.create_optimization_profile()
config = builder.create_builder_config()

os.chdir("/w/gitlab/tensorrt-cookbook/02-API/ONNXParser")

parser = trt.OnnxParser(network, logger)

# check whether a certain operator is supported by the ONNX parser
print("parser.supports_operator('LayerNormalization') = %s" % parser.supports_operator("LayerNormalization"))
print("parser.get_used_vc_plugin_libraries() = %s" % parser.get_used_vc_plugin_libraries())

if True:  # four equivalent methods to parse an ONNX file
    res = parser.parse_from_file(onnxFile)  # parse from file
else:
    with open(onnxFile, "rb") as model:
        modelString = model.read()
        # three equivalent methods to parse an ONNX byte stream, but supports_model can provide more information
        # both parse and supports_model accept an optional parameter "path" pointing to the directory of the separated weight files (used when the ONNX file is larger than 2 GiB)
        res = parser.parse(modelString)
        #res, information = parser.supports_model(modelString)  # in my opinion, supports_model should just tell me the result (True / False) without parsing the model into the network
        #print(information)
        #res = parser.parse_with_weight_descriptors(modelString)

if not res:
    print("Failed parsing %s" % onnxFile)
    for i in range(parser.num_errors):  # get error information
        error = parser.get_error(i)
        print(error)  # print error information
        print("node=%s" % error.node())
        print("code=%s" % error.code())
        print("desc=%s" % error.desc())
        print("file=%s" % error.file())
        print("func=%s" % error.func())
        print("line=%s" % error.line())
    parser.clear_errors()  # clear the error information, not required
    exit()
print("Succeeded parsing %s" % onnxFile)

inputTensor = network.get_input(0)
profile.set_shape(inputTensor.name, [1] + shape[1:], [2] + shape[1:], [4] + shape[1:])
config.add_optimization_profile(profile)

engineString = builder.build_serialized_network(network, config)
print("%s building serialized network" % ("Failed" if engineString is None else "Succeeded"))
trt-samples-for-hackathon-cn-master
cookbook/02-API/ONNXParser/main.py
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import tensorrt as trt

logger = trt.Logger(trt.Logger.ERROR)
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
profile = builder.create_optimization_profile()
config = builder.create_builder_config()

inputTensor = network.add_input("inputT0", trt.float32, [-1, -1, -1])
profile.set_shape(inputTensor.name, [1, 1, 1], [3, 4, 5], [6, 8, 10])
config.add_optimization_profile(profile)

identityLayer = network.add_identity(inputTensor)
network.mark_output(identityLayer.get_output(0))

engineString = builder.build_serialized_network(network, config)

print(engineString.dtype, engineString.nbytes)  # print information of the HostMemory
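# A minimal follow-up sketch (an assumption about typical usage, not part of the
# original sample): the HostMemory object supports the buffer protocol, so it can
# be written to disk and deserialized later by trt.Runtime.
with open("./model.plan", "wb") as f:
    f.write(engineString)
print("Serialized engine written to ./model.plan")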
trt-samples-for-hackathon-cn-master
cookbook/02-API/HostMemory/main.py
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import os
from datetime import datetime as dt
from glob import glob

import cv2
import numpy as np
import onnx
import onnx_graphsurgeon as gs
import tensorrt as trt
import torch as t
import torch.nn.functional as F
from cuda import cudart
from torch.autograd import Variable

np.random.seed(31193)
t.manual_seed(97)
t.cuda.manual_seed_all(97)
t.backends.cudnn.deterministic = True
nTrainBatchSize = 128
nInferBatchSize = 1
nHeight = 28
nWidth = 28
onnxFile0 = "./model0.onnx"
onnxFile1 = "./model1.onnx"
trtFile = "./model.plan"
dataPath = os.path.dirname(os.path.realpath(__file__)) + "/../../00-MNISTData/"
trainFileList = sorted(glob(dataPath + "train/*.jpg"))
testFileList = sorted(glob(dataPath + "test/*.jpg"))
inferenceImage = dataPath + "8.png"

os.system("rm -rf ./*.onnx ./*.plan")
np.set_printoptions(precision=3, linewidth=200, suppress=True)
cudart.cudaDeviceSynchronize()

# Create network and train model in pyTorch ------------------------------------
class Net(t.nn.Module):

    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = t.nn.Conv2d(1, 32, (5, 5), padding=(2, 2), bias=True)
        self.conv2 = t.nn.Conv2d(32, 64, (5, 5), padding=(2, 2), bias=True)
        self.fc1 = t.nn.Linear(64 * 7 * 7, 1024, bias=True)
        self.fc2 = t.nn.Linear(1024, 10, bias=True)

    def forward(self, x):
        x = F.max_pool2d(F.relu(self.conv1(x)), (2, 2))
        x = F.max_pool2d(F.relu(self.conv2(x)), (2, 2))
        x = x.reshape(-1, 64 * 7 * 7)
        x = F.relu(self.fc1(x))
        y = self.fc2(x)
        z = F.softmax(y, dim=1)
        z = t.argmax(z, dim=1)
        return y, z

class MyData(t.utils.data.Dataset):

    def __init__(self, isTrain=True):
        if isTrain:
            self.data = trainFileList
        else:
            self.data = testFileList

    def __getitem__(self, index):
        imageName = self.data[index]
        data = cv2.imread(imageName, cv2.IMREAD_GRAYSCALE)
        label = np.zeros(10, dtype=np.float32)
        index = int(imageName[-7])
        label[index] = 1
        return t.from_numpy(data.reshape(1, nHeight, nWidth).astype(np.float32)), t.from_numpy(label)

    def __len__(self):
        return len(self.data)

model = Net().cuda()

# Export an untrained model as ONNX file ---------------------------------------
t.onnx.export( \
    model,
    t.randn(1, 1, nHeight, nWidth, device="cuda"),
    onnxFile0,
    input_names=["x"],
    output_names=["y", "z"],
    do_constant_folding=True,
    verbose=True,
    keep_initializers_as_inputs=True,
    opset_version=12,
    dynamic_axes={"x": {0: "nBatchSize"}, "z": {0: "nBatchSize"}})

ceLoss = t.nn.CrossEntropyLoss()
opt = t.optim.Adam(model.parameters(), lr=0.001)
trainDataset = MyData(True)
testDataset = MyData(False)
trainLoader = t.utils.data.DataLoader(dataset=trainDataset, batch_size=nTrainBatchSize, shuffle=True)
testLoader = t.utils.data.DataLoader(dataset=testDataset, batch_size=nTrainBatchSize, shuffle=True)

for epoch in range(10):
    for xTrain, yTrain in trainLoader:
        xTrain = Variable(xTrain).cuda()
        yTrain = Variable(yTrain).cuda()
        opt.zero_grad()
        y_, z = model(xTrain)
        loss = ceLoss(y_, yTrain)
        loss.backward()
        opt.step()

    with t.no_grad():
        acc = 0
        n = 0
        for xTest, yTest in testLoader:
            xTest = Variable(xTest).cuda()
            yTest = Variable(yTest).cuda()
            y_, z = model(xTest)
            acc += t.sum(z == t.matmul(yTest, t.Tensor([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]).to("cuda:0"))).cpu().numpy()
            n += xTest.shape[0]
        print("%s, epoch %2d, loss = %f, test acc = %f" % (dt.now(), epoch + 1, loss.data, acc / n))

print("Succeeded building model in pyTorch!")

# Export model as ONNX file ----------------------------------------------------
t.onnx.export( \
    model,
    t.randn(1, 1, nHeight, nWidth, device="cuda"),
    onnxFile1,
    input_names=["x"],
    output_names=["y", "z"],
    do_constant_folding=True,
    verbose=True,
    keep_initializers_as_inputs=True,
    opset_version=12,
    dynamic_axes={"x": {0: "nBatchSize"}, "z": {0: "nBatchSize"}})
print("Succeeded converting model into ONNX!")

# Dynamic Shape mode + Refit is supported since TensorRT 8.5; otherwise we must use a Static Shape model
a, b, c = [int(i) for i in trt.__version__.split(".")]
if a < 8 or a == 8 and b < 5:
    for file in [onnxFile0, onnxFile1]:
        graph = gs.import_onnx(onnx.load(file))
        graph.inputs[0].shape = [nInferBatchSize, 1, 28, 28]
        graph.cleanup()
        onnx.save(gs.export_onnx(graph), file)
    print("Succeeded converting model into static shape!")

# Parse network, rebuild network and do inference in TensorRT ------------------
def run():
    logger = trt.Logger(trt.Logger.WARNING)
    if os.path.isfile(trtFile):
        with open(trtFile, "rb") as f:
            engine = trt.Runtime(logger).deserialize_cuda_engine(f.read())
        if engine == None:
            print("Failed loading engine!")
            exit()
        print("Succeeded loading engine!")
        onnxFile = onnxFile1  # we already have model.plan, so load model1.onnx to refit the engine
    else:
        onnxFile = onnxFile0  # we do not have model.plan, so load model0.onnx to build the engine

    builder = trt.Builder(logger)
    network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
    config = builder.create_builder_config()
    config.set_flag(trt.BuilderFlag.REFIT)
    parser = trt.OnnxParser(network, logger)
    if not os.path.exists(onnxFile):
        print("Failed finding ONNX file!")
        exit()
    print("Succeeded finding ONNX file!")
    with open(onnxFile, "rb") as model:
        if not parser.parse(model.read()):
            print("Failed parsing .onnx file!")
            for error in range(parser.num_errors):
                print(parser.get_error(error))
            exit()
        print("Succeeded parsing .onnx file!")

    if os.path.isfile(trtFile):  # Refit
        refitter = trt.Refitter(engine, logger)
        layerNameList, weightRoleList = refitter.get_all()  # get name and role of the refittable weights
        #layerNameList = refitter.get_all_weights()  # only get name of the refittable weights
        #tensorDynamicRangeList = refitter.get_tensors_with_dynamic_range()  # get dynamic range of all tensors in INT8 mode
        print("[Name,\tRole]")
        for name, role in zip(layerNameList, weightRoleList):
            print("[%s,%s]" % (name, role))

        for i in range(network.num_layers):
            layer = network.get_layer(i)
            if layer.name in layerNameList:
                if layer.type == trt.LayerType.CONVOLUTION:
                    layer.__class__ = trt.IConvolutionLayer
                    refitter.set_weights(layer.name, trt.WeightsRole.KERNEL, layer.kernel)
                    refitter.set_weights(layer.name, trt.WeightsRole.BIAS, layer.bias)
                    #oldRange = refitter.get_dynamic_range(layer.get_output(0).name)  # get tensor's old dynamic range
                    #refitter.set_dynamic_range(layer.get_output(0).name, layer.get_output(0).dynamic_range)  # set tensor's dynamic range in INT8 mode
                    #refitter.set_dynamic_range(layer.get_output(0).name, oldRange)  # or use old dynamic range as new range
                elif layer.type == trt.LayerType.DECONVOLUTION:
                    layer.__class__ = trt.IDeconvolutionLayer
                    refitter.set_weights(layer.name, trt.WeightsRole.KERNEL, layer.kernel)
                    refitter.set_weights(layer.name, trt.WeightsRole.BIAS, layer.bias)
                    #refitter.set_dynamic_range(layer.get_output(0).name, layer.get_output(0).dynamic_range)
                    #refitter.set_dynamic_range(layer.get_output(0).name, oldRange)
                elif layer.type == trt.LayerType.FULLY_CONNECTED:
                    layer.__class__ = trt.IFullyConnectedLayer
                    refitter.set_weights(layer.name, trt.WeightsRole.KERNEL, layer.kernel)
                    #refitter.set_dynamic_range(layer.get_output(0).name, layer.get_output(0).dynamic_range)
                    #refitter.set_dynamic_range(layer.get_output(0).name, oldRange)
                elif layer.type == trt.LayerType.CONSTANT:
                    layer.__class__ = trt.IConstantLayer
                    refitter.set_weights(layer.name, trt.WeightsRole.CONSTANT, layer.weights)
                    #refitter.set_dynamic_range(layer.get_output(0).name, layer.get_output(0).dynamic_range)
                    #refitter.set_dynamic_range(layer.get_output(0).name, oldRange)
                elif layer.type == trt.LayerType.SCALE:
                    layer.__class__ = trt.IScaleLayer
                    refitter.set_weights(layer.name, trt.WeightsRole.SHIFT, layer.shift)
                    refitter.set_weights(layer.name, trt.WeightsRole.SCALE, layer.scale)
                    #refitter.set_dynamic_range(layer.get_output(0).name, layer.get_output(0).dynamic_range)
                    #refitter.set_dynamic_range(layer.get_output(0).name, oldRange)
                else:  # more branches might be needed
                    pass

        refitter.refit_cuda_engine()

    else:  # build engine
        """
        # print the network, for debug
        for i in range(network.num_layers):
            layer = network.get_layer(i)
            print(i, "%s,in=%d,out=%d,%s" % (str(layer.type)[10:], layer.num_inputs, layer.num_outputs, layer.name))
            for j in range(layer.num_inputs):
                tensor = layer.get_input(j)
                if tensor == None:
                    print("\tInput  %2d:" % j, "None")
                else:
                    print("\tInput  %2d:%s,%s,%s" % (j, tensor.shape, str(tensor.dtype)[9:], tensor.name))
            for j in range(layer.num_outputs):
                tensor = layer.get_output(j)
                if tensor == None:
                    print("\tOutput %2d:" % j, "None")
                else:
                    print("\tOutput %2d:%s,%s,%s" % (j, tensor.shape, str(tensor.dtype)[9:], tensor.name))
        """
        profile = builder.create_optimization_profile()
        inputTensor = network.get_input(0)
        profile.set_shape(inputTensor.name, [1, 1, nHeight, nWidth], [4, 1, nHeight, nWidth], [8, 1, nHeight, nWidth])
        config.add_optimization_profile(profile)

        engineString = builder.build_serialized_network(network, config)
        with open(trtFile, "wb") as f:
            f.write(engineString)
        engine = trt.Runtime(logger).deserialize_cuda_engine(engineString)

    nIO = engine.num_io_tensors
    lTensorName = [engine.get_tensor_name(i) for i in range(nIO)]
    nInput = [engine.get_tensor_mode(lTensorName[i]) for i in range(nIO)].count(trt.TensorIOMode.INPUT)

    context = engine.create_execution_context()
    context.set_input_shape(lTensorName[0], [1, 1, nHeight, nWidth])
    for i in range(nIO):
        print("[%2d]%s->" % (i, "Input " if i < nInput else "Output"), engine.get_tensor_dtype(lTensorName[i]), engine.get_tensor_shape(lTensorName[i]), context.get_tensor_shape(lTensorName[i]), lTensorName[i])

    data = cv2.imread(inferenceImage, cv2.IMREAD_GRAYSCALE).astype(np.float32)
    data = np.tile(data, [nInferBatchSize, 1, 1, 1])
    bufferH = []
    bufferH.append(np.ascontiguousarray(data))
    for i in range(nInput, nIO):
        bufferH.append(np.empty(context.get_tensor_shape(lTensorName[i]), dtype=trt.nptype(engine.get_tensor_dtype(lTensorName[i]))))
    bufferD = []
    for i in range(nIO):
        bufferD.append(cudart.cudaMalloc(bufferH[i].nbytes)[1])

    for i in range(nInput):
        cudart.cudaMemcpy(bufferD[i], bufferH[i].ctypes.data, bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice)

    for i in range(nIO):
        context.set_tensor_address(lTensorName[i], int(bufferD[i]))

    context.execute_async_v3(0)

    for i in range(nInput, nIO):
        cudart.cudaMemcpy(bufferH[i].ctypes.data, bufferD[i], bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost)

    for i in range(nIO):
        print(lTensorName[i])
        print(bufferH[i])

    for b in bufferD:
        cudart.cudaFree(b)

    print("Succeeded running model in TensorRT!")

run()  # build engine
run()  # refit engine
trt-samples-for-hackathon-cn-master
cookbook/02-API/Refit/main-OnnxByParser.py
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import os

import numpy as np
import tensorrt as trt
from cuda import cudart

nB, nC, nH, nW = 1, 1, 6, 9
nCOut, nKH, nKW = 1, 3, 3
data = np.tile(np.arange(1, 1 + nKH * nKW, dtype=np.float32).reshape(nKH, nKW), (nC, nH // nKH, nW // nKW)).reshape(nC, nH, nW)
weight = np.power(10, range(4, -5, -1), dtype=np.float32).reshape(nCOut, nKH, nKW)
bias = np.zeros(nCOut, dtype=np.float32)
trtFile = "./model.plan"

def run(bRefit):
    logger = trt.Logger(trt.Logger.ERROR)
    if os.path.isfile(trtFile):
        with open(trtFile, "rb") as f:
            engine = trt.Runtime(logger).deserialize_cuda_engine(f.read())
        if engine == None:
            print("Failed loading engine!")
            return
        print("Succeeded loading engine!")
    else:
        builder = trt.Builder(logger)
        network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
        profile = builder.create_optimization_profile()
        config = builder.create_builder_config()
        config.set_flag(trt.BuilderFlag.REFIT)

        inputT0 = network.add_input("inputT0", trt.float32, (-1, nC, nH, nW))  # Dynamic Shape mode + Refit is supported since TensorRT 8.5; otherwise we must use a Static Shape model
        profile.set_shape(inputT0.name, [1, nC, nH, nW], [2, nC, nH, nW], [4, nC, nH, nW])
        config.add_optimization_profile(profile)

        fakeWeight = np.zeros([nCOut, nC, nKH, nKW], dtype=np.float32)
        fakeBias = np.zeros([nCOut], dtype=np.float32)
        convolutionLayer = network.add_convolution_nd(inputT0, nCOut, (nKH, nKW), fakeWeight, fakeBias)
        convolutionLayer.name = "conv"
        network.mark_output(convolutionLayer.get_output(0))

        engineString = builder.build_serialized_network(network, config)
        if engineString == None:
            print("Failed building engine!")
            return
        print("Succeeded building engine!")
        with open(trtFile, "wb") as f:
            f.write(engineString)
        engine = trt.Runtime(logger).deserialize_cuda_engine(engineString)

    if bRefit == 0:
        print("Before refit ----------------------------------------------------")
    else:
        print("Refit -----------------------------------------------------------")
        refitter = trt.Refitter(engine, logger)
        refitter.set_weights("conv", trt.WeightsRole.KERNEL, weight)
        refitter.set_weights("conv", trt.WeightsRole.BIAS, bias)

        [missingLayerList, weightRoleList] = refitter.get_missing()  # get name and role of the missing weights
        #missingLayerList = refitter.get_missing_weights()  # only get name of the missing weights
        for layer, role in zip(missingLayerList, weightRoleList):
            print("[", layer, "-", role, "]")

        if refitter.refit_cuda_engine() == False:
            print("Failed refitting engine!")
            return

    nIO = engine.num_io_tensors
    lTensorName = [engine.get_tensor_name(i) for i in range(nIO)]
    nInput = [engine.get_tensor_mode(lTensorName[i]) for i in range(nIO)].count(trt.TensorIOMode.INPUT)

    context = engine.create_execution_context()
    context.set_input_shape(lTensorName[0], [nB, nC, nH, nW])
    for i in range(nIO):
        print("[%2d]%s->" % (i, "Input " if i < nInput else "Output"), engine.get_tensor_dtype(lTensorName[i]), engine.get_tensor_shape(lTensorName[i]), context.get_tensor_shape(lTensorName[i]), lTensorName[i])

    bufferH = []
    bufferH.append(np.ascontiguousarray(data))
    for i in range(nInput, nIO):
        bufferH.append(np.empty(context.get_tensor_shape(lTensorName[i]), dtype=trt.nptype(engine.get_tensor_dtype(lTensorName[i]))))
    bufferD = []
    for i in range(nIO):
        bufferD.append(cudart.cudaMalloc(bufferH[i].nbytes)[1])

    for i in range(nInput):
        cudart.cudaMemcpy(bufferD[i], bufferH[i].ctypes.data, bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice)

    for i in range(nIO):
        context.set_tensor_address(lTensorName[i], int(bufferD[i]))

    context.execute_async_v3(0)

    for i in range(nInput, nIO):
        cudart.cudaMemcpy(bufferH[i].ctypes.data, bufferD[i], bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost)

    for i in range(nIO):
        print(lTensorName[i])
        print(bufferH[i])

    for b in bufferD:
        cudart.cudaFree(b)

if __name__ == "__main__":
    os.system("rm -rf ./*.plan")
    np.set_printoptions(precision=3, linewidth=200, suppress=True)
    cudart.cudaDeviceSynchronize()
    run(0)  # build engine and run with the fake weights
    run(1)  # refit engine and run with the real weights
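    # Hedged NumPy cross-check (an added sketch, not part of the original sample):
    # reproduce the valid-padding 2D correlation that the refitted convolution
    # performs on channel 0; "ref" is a hypothetical helper name.
    ref = np.zeros([nH - nKH + 1, nW - nKW + 1], dtype=np.float32)
    for y in range(ref.shape[0]):
        for x in range(ref.shape[1]):
            ref[y, x] = np.sum(data[0, y:y + nKH, x:x + nKW] * weight[0]) + bias[0]
    print(ref)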
trt-samples-for-hackathon-cn-master
cookbook/02-API/Refit/main-set_weights.py
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import os

import numpy as np
import tensorrt as trt
from cuda import cudart

nB, nC, nH, nW = 1, 1, 6, 9
nCOut, nKH, nKW = 1, 3, 3
data = np.tile(np.arange(1, 1 + nKH * nKW, dtype=np.float32).reshape(nKH, nKW), (nC, nH // nKH, nW // nKW)).reshape(nC, nH, nW)
weight = np.power(10, range(4, -5, -1), dtype=np.float32).reshape(nCOut, nKH, nKW)
bias = np.zeros(nCOut, dtype=np.float32)
trtFile = "./model.plan"

def run(bRefit):
    logger = trt.Logger(trt.Logger.ERROR)
    if os.path.isfile(trtFile):
        with open(trtFile, "rb") as f:
            engine = trt.Runtime(logger).deserialize_cuda_engine(f.read())
        if engine == None:
            print("Failed loading engine!")
            return
        print("Succeeded loading engine!")
    else:
        builder = trt.Builder(logger)
        network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
        profile = builder.create_optimization_profile()
        config = builder.create_builder_config()
        config.set_flag(trt.BuilderFlag.REFIT)

        inputT0 = network.add_input("inputT0", trt.float32, (-1, nC, nH, nW))  # Dynamic Shape mode + Refit is supported since TensorRT 8.5; otherwise we must use a Static Shape model
        profile.set_shape(inputT0.name, [1, nC, nH, nW], [2, nC, nH, nW], [4, nC, nH, nW])
        config.add_optimization_profile(profile)

        fakeWeight = np.zeros([nCOut, nC, nKH, nKW], dtype=np.float32)
        fakeBias = np.zeros([nCOut], dtype=np.float32)
        convolutionLayer = network.add_convolution_nd(inputT0, nCOut, (nKH, nKW), fakeWeight, fakeBias)
        convolutionLayer.name = "conv"
        network.set_weights_name(convolutionLayer.kernel, "conv-w")
        network.set_weights_name(convolutionLayer.bias, "conv-b")
        network.mark_output(convolutionLayer.get_output(0))

        engineString = builder.build_serialized_network(network, config)
        if engineString == None:
            print("Failed building engine!")
            return
        print("Succeeded building engine!")
        with open(trtFile, "wb") as f:
            f.write(engineString)
        engine = trt.Runtime(logger).deserialize_cuda_engine(engineString)

    if bRefit == 0:
        print("Before refit ----------------------------------------------------")
    else:
        print("Refit -----------------------------------------------------------")
        refitter = trt.Refitter(engine, logger)
        refitter.set_named_weights("conv-w", weight)
        refitter.set_named_weights("conv-b", bias)

        [missingLayer, weightRole] = refitter.get_missing()  # get name and role of the missing weights
        #missingLayerList = refitter.get_missing_weights()  # only get name of the missing weights
        for layer, role in zip(missingLayer, weightRole):
            print("[", layer, "-", role, "]")

        if refitter.refit_cuda_engine() == False:
            print("Failed refitting engine!")
            return

    nIO = engine.num_io_tensors
    lTensorName = [engine.get_tensor_name(i) for i in range(nIO)]
    nInput = [engine.get_tensor_mode(lTensorName[i]) for i in range(nIO)].count(trt.TensorIOMode.INPUT)

    context = engine.create_execution_context()
    context.set_input_shape(lTensorName[0], [nB, nC, nH, nW])
    for i in range(nIO):
        print("[%2d]%s->" % (i, "Input " if i < nInput else "Output"), engine.get_tensor_dtype(lTensorName[i]), engine.get_tensor_shape(lTensorName[i]), context.get_tensor_shape(lTensorName[i]), lTensorName[i])

    bufferH = []
    bufferH.append(np.ascontiguousarray(data))
    for i in range(nInput, nIO):
        bufferH.append(np.empty(context.get_tensor_shape(lTensorName[i]), dtype=trt.nptype(engine.get_tensor_dtype(lTensorName[i]))))
    bufferD = []
    for i in range(nIO):
        bufferD.append(cudart.cudaMalloc(bufferH[i].nbytes)[1])

    for i in range(nInput):
        cudart.cudaMemcpy(bufferD[i], bufferH[i].ctypes.data, bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice)

    for i in range(nIO):
        context.set_tensor_address(lTensorName[i], int(bufferD[i]))

    context.execute_async_v3(0)

    for i in range(nInput, nIO):
        cudart.cudaMemcpy(bufferH[i].ctypes.data, bufferD[i], bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost)

    for i in range(nIO):
        print(lTensorName[i])
        print(bufferH[i])

    for b in bufferD:
        cudart.cudaFree(b)

if __name__ == "__main__":
    os.system("rm -rf ./*.plan")
    np.set_printoptions(precision=3, linewidth=200, suppress=True)
    cudart.cudaDeviceSynchronize()
    run(0)  # build engine and run with the fake weights
    run(1)  # refit engine and run with the real weights
trt-samples-for-hackathon-cn-master
cookbook/02-API/Refit/main-set_named_weights.py
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import os
from datetime import datetime as dt
from glob import glob

import cv2
import numpy as np
import onnx
import onnx_graphsurgeon as gs
import tensorrt as trt
import torch as t
import torch.nn.functional as F
from cuda import cudart
from torch.autograd import Variable

np.random.seed(31193)
t.manual_seed(97)
t.cuda.manual_seed_all(97)
t.backends.cudnn.deterministic = True
nTrainBatchSize = 128
nInferBatchSize = 1
nHeight = 28
nWidth = 28
onnxFile0 = "./model0.onnx"
onnxFile1 = "./model1.onnx"
weightFile = "para.npz"
trtFile = "./model.plan"
dataPath = os.path.dirname(os.path.realpath(__file__)) + "/../../00-MNISTData/"
trainFileList = sorted(glob(dataPath + "train/*.jpg"))
testFileList = sorted(glob(dataPath + "test/*.jpg"))
inferenceImage = dataPath + "8.png"

os.system("rm -rf ./*.onnx ./*.plan")
np.set_printoptions(precision=3, linewidth=200, suppress=True)
cudart.cudaDeviceSynchronize()

# Create network and train model in pyTorch ------------------------------------
class Net(t.nn.Module):

    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = t.nn.Conv2d(1, 32, (5, 5), padding=(2, 2), bias=True)
        self.conv2 = t.nn.Conv2d(32, 64, (5, 5), padding=(2, 2), bias=True)
        self.fc1 = t.nn.Linear(64 * 7 * 7, 1024, bias=True)
        self.fc2 = t.nn.Linear(1024, 10, bias=True)

    def forward(self, x):
        x = F.max_pool2d(F.relu(self.conv1(x)), (2, 2))
        x = F.max_pool2d(F.relu(self.conv2(x)), (2, 2))
        x = x.reshape(-1, 64 * 7 * 7)
        x = F.relu(self.fc1(x))
        y = self.fc2(x)
        z = F.softmax(y, dim=1)
        z = t.argmax(z, dim=1)
        return y, z

class MyData(t.utils.data.Dataset):

    def __init__(self, isTrain=True):
        if isTrain:
            self.data = trainFileList
        else:
            self.data = testFileList

    def __getitem__(self, index):
        imageName = self.data[index]
        data = cv2.imread(imageName, cv2.IMREAD_GRAYSCALE)
        label = np.zeros(10, dtype=np.float32)
        index = int(imageName[-7])
        label[index] = 1
        return t.from_numpy(data.reshape(1, nHeight, nWidth).astype(np.float32)), t.from_numpy(label)

    def __len__(self):
        return len(self.data)

model = Net().cuda()

# Export an untrained model as ONNX file ---------------------------------------
t.onnx.export( \
    model,
    t.randn(1, 1, nHeight, nWidth, device="cuda"),
    onnxFile0,
    input_names=["x"],
    output_names=["y", "z"],
    do_constant_folding=True,
    verbose=True,
    keep_initializers_as_inputs=True,
    opset_version=12,
    dynamic_axes={"x": {0: "nBatchSize"}, "z": {0: "nBatchSize"}})

ceLoss = t.nn.CrossEntropyLoss()
opt = t.optim.Adam(model.parameters(), lr=0.001)
trainDataset = MyData(True)
testDataset = MyData(False)
trainLoader = t.utils.data.DataLoader(dataset=trainDataset, batch_size=nTrainBatchSize, shuffle=True)
testLoader = t.utils.data.DataLoader(dataset=testDataset, batch_size=nTrainBatchSize, shuffle=True)

for epoch in range(10):
    for xTrain, yTrain in trainLoader:
        xTrain = Variable(xTrain).cuda()
        yTrain = Variable(yTrain).cuda()
        opt.zero_grad()
        y_, z = model(xTrain)
        loss = ceLoss(y_, yTrain)
        loss.backward()
        opt.step()

    with t.no_grad():
        acc = 0
        n = 0
        for xTest, yTest in testLoader:
            xTest = Variable(xTest).cuda()
            yTest = Variable(yTest).cuda()
            y_, z = model(xTest)
            acc += t.sum(z == t.matmul(yTest, t.Tensor([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]).to("cuda:0"))).cpu().numpy()
            n += xTest.shape[0]
        print("%s, epoch %2d, loss = %f, test acc = %f" % (dt.now(), epoch + 1, loss.data, acc / n))

print("Succeeded building model in pyTorch!")

# Export model as ONNX file ----------------------------------------------------
t.onnx.export( \
    model,
    t.randn(1, 1, nHeight, nWidth, device="cuda"),
    onnxFile1,
    input_names=["x"],
    output_names=["y", "z"],
    do_constant_folding=True,
    verbose=True,
    keep_initializers_as_inputs=True,
    opset_version=12,
    dynamic_axes={"x": {0: "nBatchSize"}, "z": {0: "nBatchSize"}})
print("Succeeded converting model into ONNX!")

# Dynamic Shape mode + Refit is supported since TensorRT 8.5; otherwise we must use a Static Shape model
a, b, c = [int(i) for i in trt.__version__.split(".")]
if a < 8 or a == 8 and b < 5:
    for file in [onnxFile0, onnxFile1]:
        graph = gs.import_onnx(onnx.load(file))
        graph.inputs[0].shape = [nInferBatchSize, 1, 28, 28]
        graph.cleanup()
        onnx.save(gs.export_onnx(graph), file)
    print("Succeeded converting model into static shape!")

# use trtexec to find the weights that need transposing ------------------------
output = os.popen("trtexec --onnx=%s --refit --buildOnly 2>&1 | grep 'Refitter API,'" % onnxFile1)
nameList = []
permutationList = []
for line in output.readlines():
    print(line)
    name = line.split(" ")[5]
    index0 = line.find("of (") + 4
    index1 = line.find(")! If")
    permutation = line[index0:index1]
    tempList = [int(i) for i in permutation.split(",")]
    nameList.append(name)
    permutationList.append(tempList)

graph = gs.import_onnx(onnx.load(onnxFile1))  # save the weights

para = {}
for index, (name, tensor) in enumerate(graph.tensors().items()):
    print("Tensor%4d: name=%s, desc=%s" % (index, name, tensor))
    if str(tensor)[:8] == "Constant":
        if name in nameList:
            print("Weight %s transpose!" % name)
            index = nameList.index(name)
            value = tensor.values.transpose(permutationList[index])
            if value.dtype == np.int64:
                value = value.astype(np.int32)
            para[name] = value
            #para[name] = tensor.values
        else:
            print("Weight %s save!" % name)
            value = tensor.values
            if value.dtype == np.int64:
                value = value.astype(np.int32)
            para[name] = value

np.savez(weightFile, **para)
del para  # not required in practice

# Parse network, rebuild network and do inference in TensorRT ------------------
def run():
    logger = trt.Logger(trt.Logger.WARNING)
    if os.path.isfile(trtFile):  # Refit engine
        with open(trtFile, "rb") as f:
            engine = trt.Runtime(logger).deserialize_cuda_engine(f.read())
        if engine == None:
            print("Failed loading engine!")
            exit()
        print("Succeeded loading engine!")

        para = np.load(weightFile)
        refitter = trt.Refitter(engine, logger)
        layerNameList, weightRoleList = refitter.get_all()
        for name, role in zip(layerNameList, weightRoleList):
            print("LayerName:%s,WeightRole:%s" % (name, role))

        # update the weights
        # NOTE: np.ascontiguousarray MUST be used when converting a numpy array to trt.Weights
        # the names of the weights might differ
        refitter.set_weights("Conv_0", trt.WeightsRole.KERNEL, np.ascontiguousarray(para["conv1.weight"]))
        refitter.set_weights("Conv_0", trt.WeightsRole.BIAS, np.ascontiguousarray(para["conv1.bias"]))
        refitter.set_weights("Conv_3", trt.WeightsRole.KERNEL, np.ascontiguousarray(para["conv2.weight"]))
        refitter.set_weights("Conv_3", trt.WeightsRole.BIAS, np.ascontiguousarray(para["conv2.bias"]))
        a, b, c = [int(i) for i in trt.__version__.split(".")]
        if a < 8 or a == 8 and b < 4:  # TensorRT 8.2
            refitter.set_weights("Gemm_8", trt.WeightsRole.KERNEL, np.ascontiguousarray(para["fc1.weight"]))
            refitter.set_weights("Gemm_8", trt.WeightsRole.BIAS, np.ascontiguousarray(para["fc1.bias"]))
            refitter.set_weights("Gemm_10", trt.WeightsRole.KERNEL, np.ascontiguousarray(para["fc2.weight"]))
            refitter.set_weights("Gemm_10", trt.WeightsRole.BIAS, np.ascontiguousarray(para["fc2.bias"]))
        else:  # TensorRT 8.4
            refitter.set_weights("fc1.weight", trt.WeightsRole.CONSTANT, np.ascontiguousarray(para["fc1.weight"]))
            refitter.set_weights("fc1.bias", trt.WeightsRole.CONSTANT, np.ascontiguousarray(para["fc1.bias"]))
            refitter.set_weights("fc2.weight", trt.WeightsRole.CONSTANT, np.ascontiguousarray(para["fc2.weight"]))
            refitter.set_weights("fc2.bias", trt.WeightsRole.CONSTANT, np.ascontiguousarray(para["fc2.bias"]))

        refitter.refit_cuda_engine()

    else:  # Build engine
        onnxFile = onnxFile0
        builder = trt.Builder(logger)
        network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
        config = builder.create_builder_config()
        config.set_flag(trt.BuilderFlag.REFIT)
        parser = trt.OnnxParser(network, logger)
        if not os.path.exists(onnxFile):
            print("Failed finding ONNX file!")
            exit()
        print("Succeeded finding ONNX file!")
        with open(onnxFile, "rb") as model:
            if not parser.parse(model.read()):
                print("Failed parsing .onnx file!")
                for error in range(parser.num_errors):
                    print(parser.get_error(error))
                exit()
            print("Succeeded parsing .onnx file!")
        """
        # print the network, for debug
        for i in range(network.num_layers):
            layer = network.get_layer(i)
            print(i, "%s,in=%d,out=%d,%s" % (str(layer.type)[10:], layer.num_inputs, layer.num_outputs, layer.name))
            for j in range(layer.num_inputs):
                tensor = layer.get_input(j)
                if tensor == None:
                    print("\tInput  %2d:" % j, "None")
                else:
                    print("\tInput  %2d:%s,%s,%s" % (j, tensor.shape, str(tensor.dtype)[9:], tensor.name))
            for j in range(layer.num_outputs):
                tensor = layer.get_output(j)
                if tensor == None:
                    print("\tOutput %2d:" % j, "None")
                else:
                    print("\tOutput %2d:%s,%s,%s" % (j, tensor.shape, str(tensor.dtype)[9:], tensor.name))
        """
        profile = builder.create_optimization_profile()
        inputTensor = network.get_input(0)
        profile.set_shape(inputTensor.name, [1, 1, nHeight, nWidth], [4, 1, nHeight, nWidth], [8, 1, nHeight, nWidth])
        config.add_optimization_profile(profile)

        engineString = builder.build_serialized_network(network, config)
        with open(trtFile, "wb") as f:
            f.write(engineString)
        engine = trt.Runtime(logger).deserialize_cuda_engine(engineString)

    nIO = engine.num_io_tensors
    lTensorName = [engine.get_tensor_name(i) for i in range(nIO)]
    nInput = [engine.get_tensor_mode(lTensorName[i]) for i in range(nIO)].count(trt.TensorIOMode.INPUT)

    context = engine.create_execution_context()
    context.set_input_shape(lTensorName[0], [1, 1, nHeight, nWidth])
    for i in range(nIO):
        print("[%2d]%s->" % (i, "Input " if i < nInput else "Output"), engine.get_tensor_dtype(lTensorName[i]), engine.get_tensor_shape(lTensorName[i]), context.get_tensor_shape(lTensorName[i]), lTensorName[i])

    data = cv2.imread(inferenceImage, cv2.IMREAD_GRAYSCALE).astype(np.float32)
    data = np.tile(data, [nInferBatchSize, 1, 1, 1])
    bufferH = []
    bufferH.append(np.ascontiguousarray(data))
    for i in range(nInput, nIO):
        bufferH.append(np.empty(context.get_tensor_shape(lTensorName[i]), dtype=trt.nptype(engine.get_tensor_dtype(lTensorName[i]))))
    bufferD = []
    for i in range(nIO):
        bufferD.append(cudart.cudaMalloc(bufferH[i].nbytes)[1])

    for i in range(nInput):
        cudart.cudaMemcpy(bufferD[i], bufferH[i].ctypes.data, bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice)

    for i in range(nIO):
        context.set_tensor_address(lTensorName[i], int(bufferD[i]))

    context.execute_async_v3(0)

    for i in range(nInput, nIO):
        cudart.cudaMemcpy(bufferH[i].ctypes.data, bufferD[i], bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost)

    for i in range(nIO):
        print(lTensorName[i])
        print(bufferH[i])

    for b in bufferD:
        cudart.cudaFree(b)

    print("Succeeded running model in TensorRT!")

run()  # build engine
run()  # refit engine
trt-samples-for-hackathon-cn-master
cookbook/02-API/Refit/main-OnnxByWeight.py
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
import tensorrt as trt
from cuda import cudart

logger = trt.Logger(trt.Logger.ERROR)
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
profile = builder.create_optimization_profile()
config = builder.create_builder_config()
inputTensor = network.add_input("inputT0", trt.float32, [-1, -1, -1])
profile.set_shape(inputTensor.name, [1, 1, 1], [3, 4, 5], [6, 8, 10])
print("OptimizationProfile is available? %s" % bool(profile))  # an equivalent API: print(profile.__nonzero__())
config.add_optimization_profile(profile)
identityLayer = network.add_identity(inputTensor)
network.mark_output(identityLayer.get_output(0))
engineString = builder.build_serialized_network(network, config)
engine = trt.Runtime(logger).deserialize_cuda_engine(engineString)
nIO = engine.num_io_tensors
lTensorName = [engine.get_tensor_name(i) for i in range(nIO)]
nInput = [engine.get_tensor_mode(lTensorName[i]) for i in range(nIO)].count(trt.TensorIOMode.INPUT)
context = engine.create_execution_context()

def run(shape):
    context.set_input_shape(lTensorName[0], shape)
    for i in range(nIO):
        print("[%2d]%s->" % (i, "Input " if i < nInput else "Output"), engine.get_tensor_dtype(lTensorName[i]), engine.get_tensor_shape(lTensorName[i]), context.get_tensor_shape(lTensorName[i]), lTensorName[i])
    bufferH = []
    bufferH.append(np.ascontiguousarray(np.arange(np.prod(shape), dtype=np.float32).reshape(shape)))
    for i in range(nInput, nIO):
        bufferH.append(np.empty(context.get_tensor_shape(lTensorName[i]), dtype=trt.nptype(engine.get_tensor_dtype(lTensorName[i]))))
    bufferD = []
    for i in range(nIO):
        bufferD.append(cudart.cudaMalloc(bufferH[i].nbytes)[1])
    for i in range(nInput):
        cudart.cudaMemcpy(bufferD[i], bufferH[i].ctypes.data, bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice)
    for i in range(nIO):
        context.set_tensor_address(lTensorName[i], int(bufferD[i]))
    context.execute_async_v3(0)
    for i in range(nInput, nIO):
        cudart.cudaMemcpy(bufferH[i].ctypes.data, bufferD[i], bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost)
    for i in range(nIO):
        print(lTensorName[i])
        print(bufferH[i])
    for b in bufferD:
        cudart.cudaFree(b)
    return

# do inference with a shape
run([3, 4, 5])
# do inference with another shape
run([6, 8, 10])
trt-samples-for-hackathon-cn-master
cookbook/02-API/OptimizationProfile/main-TensorInput.py
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# For TensorRT <8.5 with deprecated Binding API
import numpy as np
import tensorrt as trt
from cuda import cudart

logger = trt.Logger(trt.Logger.ERROR)
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
profile = builder.create_optimization_profile()
config = builder.create_builder_config()
inputT0 = network.add_input("inputT0", trt.float32, [3, 4, 5])
inputT1 = network.add_input("inputT1", trt.int32, [3])
profile.set_shape_input(inputT1.name, [1, 1, 1], [3, 4, 5], [5, 5, 5])
config.add_optimization_profile(profile)
shuffleLayer = network.add_shuffle(inputT0)
shuffleLayer.set_input(1, inputT1)
network.mark_output(shuffleLayer.get_output(0))
engineString = builder.build_serialized_network(network, config)
engine = trt.Runtime(logger).deserialize_cuda_engine(engineString)
nIO = engine.num_bindings
nInput = np.sum([engine.binding_is_input(i) for i in range(engine.num_bindings)])
context = engine.create_execution_context()

def run(shape):
    context.set_shape_input(1, shape)  # set the shape input tensor using a CPU buffer
    for i in range(nIO):
        print("[%2d]%s->" % (i, "Input " if i < nInput else "Output"), ("ShapeTensor    " if engine.is_shape_binding(i) else "ExecutionTensor"), engine.get_binding_dtype(i), engine.get_binding_shape(i), context.get_binding_shape(i), engine.get_binding_name(i))
    bufferH = []
    bufferH.append(np.ascontiguousarray(np.arange(np.prod(shape), dtype=np.float32).reshape(shape)))
    bufferH.append([])  # placeholder for the input shape tensor, we do not need to pass it to GPU
    # we could also use a dummy input shape tensor "bufferH.append(np.ascontiguousarray([0], dtype=np.int32))" here to avoid the 3 if-condition statements "if engine.is_shape_binding(i)" below
    for i in range(nInput, nIO):
        bufferH.append(np.empty(context.get_binding_shape(i), dtype=trt.nptype(engine.get_binding_dtype(i))))
    bufferD = []
    for i in range(nIO):
        if engine.is_shape_binding(i):
            bufferD.append(int(0))
        else:
            bufferD.append(cudart.cudaMalloc(bufferH[i].nbytes)[1])
    for i in range(nInput):
        if engine.is_shape_binding(i):
            continue
        cudart.cudaMemcpy(bufferD[i], bufferH[i].ctypes.data, bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice)
    context.execute_async_v2(bufferD, 0)
    for i in range(nInput, nIO):
        if engine.is_shape_binding(i):
            continue
        cudart.cudaMemcpy(bufferH[i].ctypes.data, bufferD[i], bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost)
    for i in range(nIO):
        print(i)
        print(bufferH[i])
    for b in bufferD:
        cudart.cudaFree(b)
    return

# do inference with a shape
run([3, 4, 5])
# do inference with another shape
run([5, 4, 3])
trt-samples-for-hackathon-cn-master
cookbook/02-API/OptimizationProfile/main-ShapeInput-BindingAPI.py
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
import tensorrt as trt
from cuda import cudart

logger = trt.Logger(trt.Logger.ERROR)
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
profile = builder.create_optimization_profile()
config = builder.create_builder_config()
inputT0 = network.add_input("inputT0", trt.float32, [3, 4, 5])
inputT1 = network.add_input("inputT1", trt.int32, [3])
profile.set_shape_input(inputT1.name, [1, 1, 1], [3, 4, 5], [5, 5, 5])
print("OptimizationProfile is available? %s" % bool(profile))  # an equivalent API: print(profile.__nonzero__())
config.add_optimization_profile(profile)
shuffleLayer = network.add_shuffle(inputT0)
shuffleLayer.set_input(1, inputT1)
network.mark_output(shuffleLayer.get_output(0))
engineString = builder.build_serialized_network(network, config)
engine = trt.Runtime(logger).deserialize_cuda_engine(engineString)
nIO = engine.num_io_tensors
lTensorName = [engine.get_tensor_name(i) for i in range(nIO)]
nInput = [engine.get_tensor_mode(lTensorName[i]) for i in range(nIO)].count(trt.TensorIOMode.INPUT)
context = engine.create_execution_context()

def run(shape):
    context.set_tensor_address(lTensorName[1], np.array(shape, dtype=np.int32).ctypes.data)  # set the input shape tensor using a CPU buffer
    for i in range(nIO):
        print("[%2d]%s->" % (i, "Input " if i < nInput else "Output"), ("ShapeTensor    " if engine.is_shape_inference_io(lTensorName[i]) else "ExecutionTensor"), engine.get_tensor_dtype(lTensorName[i]), engine.get_tensor_shape(lTensorName[i]), context.get_tensor_shape(lTensorName[i]), lTensorName[i])
    bufferH = []
    bufferH.append(np.ascontiguousarray(np.arange(3 * 4 * 5, dtype=np.float32).reshape(3, 4, 5)))
    bufferH.append([])  # placeholder for the input shape tensor, we do not need to pass it to GPU
    # we could also use a dummy input shape tensor "bufferH.append(np.ascontiguousarray([0], dtype=np.int32))" here to avoid the 4 if-condition statements "if engine.is_shape_inference_io(lTensorName[i])" below
    for i in range(nInput, nIO):
        bufferH.append(np.empty(context.get_tensor_shape(lTensorName[i]), dtype=trt.nptype(engine.get_tensor_dtype(lTensorName[i]))))
    bufferD = []
    for i in range(nIO):
        if engine.is_shape_inference_io(lTensorName[i]):  # skip the input shape tensor
            bufferD.append(int(0))
        else:
            bufferD.append(cudart.cudaMalloc(bufferH[i].nbytes)[1])
    for i in range(nInput):
        if engine.is_shape_inference_io(lTensorName[i]):  # skip the input shape tensor
            continue
        cudart.cudaMemcpy(bufferD[i], bufferH[i].ctypes.data, bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice)
    for i in range(nIO):
        if engine.is_shape_inference_io(lTensorName[i]):  # skip the input shape tensor
            continue
        context.set_tensor_address(lTensorName[i], int(bufferD[i]))
    context.execute_async_v3(0)
    for i in range(nInput, nIO):
        if engine.is_shape_inference_io(lTensorName[i]):  # skip the input shape tensor
            continue
        cudart.cudaMemcpy(bufferH[i].ctypes.data, bufferD[i], bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost)
    for i in range(nIO):
        print(lTensorName[i])
        print(bufferH[i])
    for b in bufferD:
        cudart.cudaFree(b)
    return

# do inference with a shape
run([3, 4, 5])
# do inference with another shape
run([5, 4, 3])
trt-samples-for-hackathon-cn-master
cookbook/02-API/OptimizationProfile/main-ShapeInput.py
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
import tensorrt as trt
from cuda import cudart

nHeight = 28
nWidth = 28
np.random.seed(31193)  # seed before generating data, so the sample is reproducible
data = np.random.rand(1, 1, nHeight, nWidth).astype(np.float32).reshape(1, 1, nHeight, nWidth) * 2 - 1
np.set_printoptions(precision=3, linewidth=200, suppress=True)
cudart.cudaDeviceSynchronize()

class MyProfiler(trt.IProfiler):

    def __init__(self):
        super(MyProfiler, self).__init__()

    def report_layer_time(self, layerName, ms):
        print("Timing: %8.3fus -> %s" % (ms * 1000, layerName))

def run(bEmitProfile):
    print("Run with bEmitProfile=%s --------------------------------------------" % bEmitProfile)
    logger = trt.Logger(trt.Logger.ERROR)
    builder = trt.Builder(logger)
    network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
    profile = builder.create_optimization_profile()
    config = builder.create_builder_config()
    inputTensor = network.add_input("inputT0", trt.float32, [-1, 1, nHeight, nWidth])
    profile.set_shape(inputTensor.name, [1, 1, nHeight, nWidth], [4, 1, nHeight, nWidth], [8, 1, nHeight, nWidth])
    config.add_optimization_profile(profile)
    w = np.ascontiguousarray(np.random.rand(32, 1, 5, 5).astype(np.float32))
    b = np.ascontiguousarray(np.random.rand(32, 1, 1).astype(np.float32))
    _0 = network.add_convolution_nd(inputTensor, 32, [5, 5], trt.Weights(w), trt.Weights(b))
    _0.padding_nd = [2, 2]
    _1 = network.add_activation(_0.get_output(0), trt.ActivationType.RELU)
    _2 = network.add_pooling_nd(_1.get_output(0), trt.PoolingType.MAX, [2, 2])
    _2.stride_nd = [2, 2]
    w = np.ascontiguousarray(np.random.rand(64, 32, 5, 5).astype(np.float32))
    b = np.ascontiguousarray(np.random.rand(64, 1, 1).astype(np.float32))
    _3 = network.add_convolution_nd(_2.get_output(0), 64, [5, 5], trt.Weights(w), trt.Weights(b))
    _3.padding_nd = [2, 2]
    _4 = network.add_activation(_3.get_output(0), trt.ActivationType.RELU)
    _5 = network.add_pooling_nd(_4.get_output(0), trt.PoolingType.MAX, [2, 2])
    _5.stride_nd = [2, 2]
    _6 = network.add_shuffle(_5.get_output(0))
    _6.reshape_dims = (-1, 64 * 7 * 7)
    w = np.ascontiguousarray(np.random.rand(64 * 7 * 7, 1024).astype(np.float32))
    b = np.ascontiguousarray(np.random.rand(1, 1024).astype(np.float32))
    _7 = network.add_constant(w.shape, trt.Weights(w))
    _8 = network.add_matrix_multiply(_6.get_output(0), trt.MatrixOperation.NONE, _7.get_output(0), trt.MatrixOperation.NONE)
    _9 = network.add_constant(b.shape, trt.Weights(b))
    _10 = network.add_elementwise(_8.get_output(0), _9.get_output(0), trt.ElementWiseOperation.SUM)
    _11 = network.add_activation(_10.get_output(0), trt.ActivationType.RELU)
    w = np.ascontiguousarray(np.random.rand(1024, 10).astype(np.float32))
    b = np.ascontiguousarray(np.random.rand(1, 10).astype(np.float32))
    _12 = network.add_constant(w.shape, trt.Weights(w))
    _13 = network.add_matrix_multiply(_11.get_output(0), trt.MatrixOperation.NONE, _12.get_output(0), trt.MatrixOperation.NONE)
    _14 = network.add_constant(b.shape, trt.Weights(b))
    _15 = network.add_elementwise(_13.get_output(0), _14.get_output(0), trt.ElementWiseOperation.SUM)
    _16 = network.add_softmax(_15.get_output(0))
    _16.axes = 1 << 1
    _17 = network.add_topk(_16.get_output(0), trt.TopKOperation.MAX, 1, 1 << 1)
    network.mark_output(_17.get_output(1))
    engineString = builder.build_serialized_network(network, config)
    engine = trt.Runtime(logger).deserialize_cuda_engine(engineString)
    nIO = engine.num_io_tensors
    lTensorName = [engine.get_tensor_name(i) for i in range(nIO)]
    nInput = [engine.get_tensor_mode(lTensorName[i]) for i in range(nIO)].count(trt.TensorIOMode.INPUT)
    context = engine.create_execution_context()
    context.set_input_shape(lTensorName[0], [1, 1, nHeight, nWidth])
    context.enqueue_emits_profile = bEmitProfile  # default value is True (every inference is reported to the profiler); we can turn it off and decide manually which inference should be reported
    context.profiler = MyProfiler()  # assign the profiler to the context
    bufferH = []
    bufferH.append(np.ascontiguousarray(data))
    for i in range(nInput, nIO):
        bufferH.append(np.empty(context.get_tensor_shape(lTensorName[i]), dtype=trt.nptype(engine.get_tensor_dtype(lTensorName[i]))))
    bufferD = []
    for i in range(nIO):
        bufferD.append(cudart.cudaMalloc(bufferH[i].nbytes)[1])
    for i in range(nInput):
        cudart.cudaMemcpy(bufferD[i], bufferH[i].ctypes.data, bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice)
    for i in range(nIO):
        context.set_tensor_address(lTensorName[i], int(bufferD[i]))
    context.execute_async_v3(0)  # the profiler's report_layer_time is called during execution
    if not bEmitProfile:
        context.report_to_profiler()  # in manual mode, use this API to request a report of the last execution; otherwise the profiler ignores that execution
    context.execute_async_v3(0)  # do inference again: two executions are reported if bEmitProfile == True, but only one if bEmitProfile == False
    for i in range(nInput, nIO):
        cudart.cudaMemcpy(bufferH[i].ctypes.data, bufferD[i], bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost)
    for b in bufferD:
        cudart.cudaFree(b)

if __name__ == "__main__":
    run(True)
    run(False)
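# [Added note, not part of the original sample] report_layer_time() is free to do
# more than print; a hypothetical variant that accumulates total per-run time:
#     class SumProfiler(trt.IProfiler):
#         def __init__(self):
#             super().__init__()
#             self.totalMs = 0.0
#         def report_layer_time(self, layerName, ms):
#             self.totalMs += ms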
trt-samples-for-hackathon-cn-master
cookbook/02-API/Profiler/main.py
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
import tensorrt as trt
from cuda import cudart

nB, nC, nH, nW = 1, 3, 4, 5
data = (np.arange(nB * nC * nH * nW, dtype=np.float32) / np.prod(nB * nC * nH * nW) * 128).astype(np.float32).reshape(nB, nC, nH, nW)

def formatToString(formatBitMask):
    # use a chain of "if" (not "elif") so that every format contained in the bit mask is reported
    output = ""
    if formatBitMask & (1 << int(trt.TensorFormat.LINEAR)):  # 0
        output += "LINEAR,"
    if formatBitMask & (1 << int(trt.TensorFormat.CHW2)):  # 1
        output += "CHW2,"
    if formatBitMask & (1 << int(trt.TensorFormat.HWC8)):  # 2
        output += "HWC8,"
    if formatBitMask & (1 << int(trt.TensorFormat.CHW4)):  # 3
        output += "CHW4,"
    if formatBitMask & (1 << int(trt.TensorFormat.CHW16)):  # 4
        output += "CHW16,"
    if formatBitMask & (1 << int(trt.TensorFormat.CHW32)):  # 5
        output += "CHW32,"
    if formatBitMask & (1 << int(trt.TensorFormat.DHWC8)):  # 6
        output += "DHWC8,"
    if formatBitMask & (1 << int(trt.TensorFormat.CDHW32)):  # 7
        output += "CDHW32,"
    if formatBitMask & (1 << int(trt.TensorFormat.HWC)):  # 8
        output += "HWC,"
    if formatBitMask & (1 << int(trt.TensorFormat.DLA_LINEAR)):  # 9
        output += "DLA_LINEAR,"
    if formatBitMask & (1 << int(trt.TensorFormat.DLA_HWC4)):  # 10
        output += "DLA_HWC4,"
    if formatBitMask & (1 << int(trt.TensorFormat.HWC16)):  # 11
        output += "HWC16,"
    if len(output) == 0:
        output = "None"
    else:
        output = output[:-1]
    return output

np.set_printoptions(precision=3, edgeitems=8, linewidth=300, suppress=True)
cudart.cudaDeviceSynchronize()
logger = trt.Logger(trt.Logger.ERROR)
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
profile = builder.create_optimization_profile()
config = builder.create_builder_config()
config.set_flag(trt.BuilderFlag.INT8)
inputT0 = network.add_input("inputT0", trt.float32, (-1, nC, nH, nW))
inputT0.set_dimension_name(0, "Batch Size")
profile.set_shape(inputT0.name, [1, nC, nH, nW], [nB, nC, nH, nW], [nB * 2, nC, nH, nW])
config.add_optimization_profile(profile)
layer = network.add_identity(inputT0)
layer.precision = trt.int8
layer.set_output_type(0, trt.int8)
tensor = layer.get_output(0)
tensor.name = "Identity Layer Output Tensor 0"
tensor.dtype = trt.int8
tensor.allowed_formats = 1 << int(trt.TensorFormat.CHW4)
tensor.dynamic_range = [-128, 128]
tensor.reset_dynamic_range()
tensor.dynamic_range = [0, 128]
network.mark_output(tensor)
print("tensor.name = %s" % tensor.name)
print("tensor.shape = %s" % tensor.shape)
print("tensor.location = %s" % tensor.location)
print("tensor.__sizeof__() = %s" % tensor.__sizeof__())
print("tensor.__str__() = %s" % tensor.__str__())
print("tensor.broadcast_across_batch = %s" % tensor.broadcast_across_batch)
print("tensor.dtype = %s" % tensor.dtype)
print("tensor.allowed_formats = %s" % formatToString(tensor.allowed_formats))
print("tensor.dynamic_range = [%d, %d]" % (tensor.dynamic_range[0], tensor.dynamic_range[1]))
print("tensor.is_execution_tensor = %s" % tensor.is_execution_tensor)
print("tensor.is_shape_tensor = %s" % tensor.is_shape_tensor)
print("tensor.is_network_input = %s" % tensor.is_network_input)
print("tensor.is_network_output = %s" % tensor.is_network_output)
print("inputT0.get_dimension_name() = %s" % inputT0.get_dimension_name(0))  # only for input tensor
"""
Member of ITensor:
++++        shown above
----        not shown above
[no prefix] others

----__class__
__delattr__
__dir__
__doc__
__eq__
__format__
__ge__
__getattribute__
__gt__
__hash__
__init__
__init_subclass__
__le__
__lt__
__module__
__ne__
__new__
__reduce__
__reduce_ex__
__repr__
__setattr__
++++__sizeof__
++++__str__
__subclasshook__
++++allowed_formats
++++broadcast_across_batch
++++dtype
++++dynamic_range
++++get_dimension_name
++++is_execution_tensor
++++is_network_input
++++is_network_output
++++is_shape_tensor
++++location
++++name
++++reset_dynamic_range
++++set_dimension_name
++++set_dynamic_range
++++shape
"""
trt-samples-for-hackathon-cn-master
cookbook/02-API/Tensor/main.py
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
import nvtx
import tensorrt as trt
from cuda import cudart

np.random.seed(31193)
nGEMM = 10
nMKN = 128
logger = trt.Logger(trt.Logger.VERBOSE)
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
profile = builder.create_optimization_profile()
config = builder.create_builder_config()
config.max_aux_streams = 2
inputList = []
for i in range(nGEMM + 1):
    inputT = network.add_input("inputT" + str(i), trt.float32, [-1, 4, nMKN, nMKN])
    profile.set_shape(inputT.name, [1, 4, nMKN, nMKN], [4, 4, nMKN, nMKN], [8, 4, nMKN, nMKN])
    inputList.append(inputT)
config.add_optimization_profile(profile)
tempTensor0 = inputList[0]
tempTensor1 = inputList[0]
for i in range(1, nGEMM + 1):
    tempLayer0 = network.add_matrix_multiply(tempTensor0, trt.MatrixOperation.NONE, inputList[i], trt.MatrixOperation.NONE)
    tempTensor0 = tempLayer0.get_output(0)
    tempLayer1 = network.add_matrix_multiply(tempTensor1, trt.MatrixOperation.NONE, inputList[nGEMM + 1 - i], trt.MatrixOperation.NONE)
    tempTensor1 = tempLayer1.get_output(0)
network.mark_output(tempTensor0)
network.mark_output(tempTensor1)
engineString = builder.build_serialized_network(network, config)
engine = trt.Runtime(logger).deserialize_cuda_engine(engineString)
nIO = engine.num_io_tensors
lTensorName = [engine.get_tensor_name(i) for i in range(nIO)]
nInput = [engine.get_tensor_mode(lTensorName[i]) for i in range(nIO)].count(trt.TensorIOMode.INPUT)
print("Engine.num_aux_streams=%d" % engine.num_aux_streams)
context = engine.create_execution_context()
for i in range(nInput):
    context.set_input_shape(lTensorName[i], [4, 4, nMKN, nMKN])
for i in range(nIO):
    print("[%2d]%s->" % (i, "Input " if i < nInput else "Output"), engine.get_tensor_dtype(lTensorName[i]), engine.get_tensor_shape(lTensorName[i]), context.get_tensor_shape(lTensorName[i]), lTensorName[i])
context.set_aux_streams([cudart.cudaStreamCreate()[1] for i in range(max(1, engine.num_aux_streams))])  # optional, TensorRT will create the remaining cudaStreams besides the ones we assign to the context
bufferH = []
for i in range(nInput):
    bufferH.append(np.ascontiguousarray(np.random.rand(np.prod([4, 4, nMKN, nMKN]))).astype(np.float32).reshape(4, 4, nMKN, nMKN) * 2 - 1)
for i in range(nInput, nIO):
    bufferH.append(np.empty(context.get_tensor_shape(lTensorName[i]), dtype=trt.nptype(engine.get_tensor_dtype(lTensorName[i]))))
bufferD = []
for i in range(nIO):
    bufferD.append(cudart.cudaMalloc(bufferH[i].nbytes)[1])
for i in range(nInput):
    cudart.cudaMemcpy(bufferD[i], bufferH[i].ctypes.data, bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice)
for i in range(nIO):
    context.set_tensor_address(lTensorName[i], int(bufferD[i]))
# warm up
context.execute_async_v3(0)
with nvtx.annotate("Inference", color="green"):
    context.execute_async_v3(0)
for i in range(nInput, nIO):
    cudart.cudaMemcpy(bufferH[i].ctypes.data, bufferD[i], bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost)
for i in range(nIO):
    print(lTensorName[i])
    print(np.sum(bufferH[i]))
for b in bufferD:
    cudart.cudaFree(b)
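# [Added note, not part of the original sample] the auxiliary streams created
# above with cudart.cudaStreamCreate() are never released in this sample; a
# long-running application would keep the handles and free them, e.g.:
#     for stream in auxStreamList:  # hypothetical list holding the created streams
#         cudart.cudaStreamDestroy(stream)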
trt-samples-for-hackathon-cn-master
cookbook/02-API/AuxStream/main.py
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import numpy as np
import tensorrt as trt
from cuda import cudart

class MyInt8EntropyCalibrator2(trt.IInt8EntropyCalibrator2):  # the most common use

    def __init__(self, nCalibration, nBatchSize, tensorDictionary, cacheFile):
        super().__init__()
        # some parameters can be passed from TensorRT build time
        self.nCalibration = nCalibration
        self.nBatchSize = nBatchSize
        self.td = tensorDictionary
        self.cacheFile = cacheFile
        self.oneBatch = self.generator()
        for name in self.td.keys():  # add size and device buffer for each input tensor
            item = self.td[name]
            item["size"] = trt.volume((self.nBatchSize, ) + tuple(item["shape"])) * item["dataType"].itemsize
            item["buffer"] = int(cudart.cudaMalloc(item["size"])[1])

    def __del__(self):
        for name in self.td.keys():
            cudart.cudaFree(self.td[name]["buffer"])

    def generator(self):
        for i in range(self.nCalibration):
            print("> calibration %d" % i)  # for debug
            dataDictionary = {}
            for name in self.td.keys():  # create calibration data by name
                data = np.random.rand(np.prod(self.td[name]["shape"])).astype(trt.nptype(self.td[name]["dataType"])).reshape(self.td[name]["shape"])
                dataDictionary[name] = np.ascontiguousarray(data)
            yield dataDictionary

    def get_batch_size(self):  # necessary API
        #print("[MyCalibrator::get_batch_size]")  # for debug
        return self.nBatchSize

    def get_batch(self, nameList=None, inputNodeName=None):  # necessary API
        #print("[MyCalibrator::get_batch]")  # for debug
        assert (set(nameList) == set(self.td.keys()))
        try:
            dataDictionary = next(self.oneBatch)
            bufferD = [None for i in range(len(self.td))]
            for i, name in enumerate(nameList):
                bufferD[i] = self.td[name]["buffer"]
                cudart.cudaMemcpy(bufferD[i], dataDictionary[name].ctypes.data, self.td[name]["size"], cudart.cudaMemcpyKind.cudaMemcpyHostToDevice)
            return bufferD
        except StopIteration:
            return None

    def read_calibration_cache(self):  # necessary API
        #print("[MyCalibrator::read_calibration_cache]")  # for debug
        if os.path.exists(self.cacheFile):
            print("Succeeded finding %s" % self.cacheFile)
            with open(self.cacheFile, "rb") as f:
                cache = f.read()
            return cache
        else:
            print("Failed finding %s" % self.cacheFile)
            return

    def write_calibration_cache(self, cache):  # necessary API
        #print("[MyCalibrator::write_calibration_cache]")  # for debug
        with open(self.cacheFile, "wb") as f:
            f.write(cache)
        print("Succeeded saving %s" % self.cacheFile)
        return

# Other calibrators, not recommended for use
class MyInt8Calibrator(trt.IInt8Calibrator):

    def __init__(self, nCalibration, nBatchSize, tensorDictionary, cacheFile):
        super().__init__()

    def __del__(self):
        pass

    def get_batch_size(self):  # necessary API
        return 1

    def get_batch(self, nameList=None, inputNodeName=None):  # necessary API
        return None

    def read_calibration_cache(self):  # necessary API
        return

    def write_calibration_cache(self, cache):  # necessary API
        return

class MyInt8LegacyCalibrator(trt.IInt8LegacyCalibrator):

    def __init__(self, nCalibration, nBatchSize, tensorDictionary, cacheFile):
        super().__init__()

    def __del__(self):
        pass

    def get_batch_size(self):  # necessary API
        return 1

    def get_batch(self, nameList=None, inputNodeName=None):  # necessary API
        return None

    def read_calibration_cache(self):  # necessary API
        return

    def write_calibration_cache(self, cache):  # necessary API
        return

class MyInt8EntropyCalibrator(trt.IInt8EntropyCalibrator):

    def __init__(self, nCalibration, nBatchSize, tensorDictionary, cacheFile):
        super().__init__()

    def __del__(self):
        pass

    def get_batch_size(self):  # necessary API
        return 1

    def get_batch(self, nameList=None, inputNodeName=None):  # necessary API
        return None

    def read_calibration_cache(self):  # necessary API
        return

    def write_calibration_cache(self, cache):  # necessary API
        return

class MyInt8MinMaxCalibrator(trt.IInt8MinMaxCalibrator):

    def __init__(self, nCalibration, nBatchSize, tensorDictionary, cacheFile):
        super().__init__()

    def __del__(self):
        pass

    def get_batch_size(self):  # necessary API
        return 1

    def get_batch(self, nameList=None, inputNodeName=None):  # necessary API
        return None

    def read_calibration_cache(self):  # necessary API
        return

    def write_calibration_cache(self, cache):  # necessary API
        return

if __name__ == "__main__":  # for unit test
    cudart.cudaDeviceSynchronize()
    # fixed: pass a tensor dictionary matching the constructor signature above
    td = {"inputT0": {"shape": [1, 28, 28], "dataType": trt.float32}}
    m = MyInt8EntropyCalibrator2(10, 1, td, "./model.INT8Cache")
    m.get_batch(["inputT0"])
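# [Added note, not part of the original sample] a minimal sketch of how this
# calibrator is typically attached to a build (see 02-API/INT8-PTQ/main.py for
# the full example):
#     config.set_flag(trt.BuilderFlag.INT8)
#     config.int8_calibrator = MyInt8EntropyCalibrator2(10, 1, td, "./model.INT8Cache")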
trt-samples-for-hackathon-cn-master
cookbook/02-API/INT8-PTQ/calibrator.py
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import calibrator
import numpy as np
import tensorrt as trt
from cuda import cudart

shape = [1, 1, 28, 28]
nCalibration = 10
cacheFile = "model.INT8Cache"
os.system("rm -rf %s" % cacheFile)
np.random.seed(31193)
np.set_printoptions(precision=3, linewidth=200, suppress=True)
cudart.cudaDeviceSynchronize()
logger = trt.Logger(trt.Logger.ERROR)
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
profile = builder.create_optimization_profile()
config = builder.create_builder_config()
tensorDictionary = {}  # build a dictionary to pass information to the calibrator
tensorDictionary["inputT0"] = {"shape": shape[1:], "dataType": trt.float32}  # remove the Batch dimension of the input tensors to fit the old Calibrator API
myCalibrator = calibrator.MyInt8EntropyCalibrator2(nCalibration, 1, tensorDictionary, cacheFile)  # use a calibrator inherited from trt.IInt8EntropyCalibrator2
print("myCalibrator.get_algorithm() =", myCalibrator.get_algorithm())  # print the algorithm the calibrator uses
config.set_flag(trt.BuilderFlag.INT8)  # set INT8 mode and the corresponding calibrator
config.int8_calibrator = myCalibrator
inputTensor = network.add_input("inputT0", trt.float32, [-1] + shape[1:])
profile.set_shape(inputTensor.name, [1] + shape[1:], [2] + shape[1:], [4] + shape[1:])
config.add_optimization_profile(profile)
w = np.ascontiguousarray(np.random.rand(32, 1, 5, 5).astype(np.float32))
b = np.ascontiguousarray(np.random.rand(32).astype(np.float32))
_0 = network.add_convolution_nd(inputTensor, 32, [5, 5], w, b)
_0.padding_nd = [2, 2]
_1 = network.add_activation(_0.get_output(0), trt.ActivationType.RELU)
_2 = network.add_pooling_nd(_1.get_output(0), trt.PoolingType.MAX, [2, 2])
_2.stride_nd = [2, 2]
w = np.ascontiguousarray(np.random.rand(64, 32, 5, 5).astype(np.float32))
b = np.ascontiguousarray(np.random.rand(64).astype(np.float32))
_3 = network.add_convolution_nd(_2.get_output(0), 64, [5, 5], w, b)
_3.padding_nd = [2, 2]
_4 = network.add_activation(_3.get_output(0), trt.ActivationType.RELU)
_5 = network.add_pooling_nd(_4.get_output(0), trt.PoolingType.MAX, [2, 2])
_5.stride_nd = [2, 2]
_6 = network.add_shuffle(_5.get_output(0))
_6.first_transpose = (0, 2, 3, 1)
_6.reshape_dims = (-1, 64 * 7 * 7)
w = np.ascontiguousarray(np.random.rand(64 * 7 * 7, 1024).astype(np.float32))
b = np.ascontiguousarray(np.random.rand(1, 1024).astype(np.float32))
_7 = network.add_constant(w.shape, trt.Weights(w))
_8 = network.add_matrix_multiply(_6.get_output(0), trt.MatrixOperation.NONE, _7.get_output(0), trt.MatrixOperation.NONE)
_9 = network.add_constant(b.shape, trt.Weights(b))
_10 = network.add_elementwise(_8.get_output(0), _9.get_output(0), trt.ElementWiseOperation.SUM)
_11 = network.add_activation(_10.get_output(0), trt.ActivationType.RELU)
w = np.ascontiguousarray(np.random.rand(1024, 10).astype(np.float32))
b = np.ascontiguousarray(np.random.rand(1, 10).astype(np.float32))
_12 = network.add_constant(w.shape, trt.Weights(w))
_13 = network.add_matrix_multiply(_11.get_output(0), trt.MatrixOperation.NONE, _12.get_output(0), trt.MatrixOperation.NONE)
_14 = network.add_constant(b.shape, trt.Weights(b))
_15 = network.add_elementwise(_13.get_output(0), _14.get_output(0), trt.ElementWiseOperation.SUM)
_16 = network.add_softmax(_15.get_output(0))
_16.axes = 1 << 1
_17 = network.add_topk(_16.get_output(0), trt.TopKOperation.MAX, 1, 1 << 1)
network.mark_output(_17.get_output(1))
engineString = builder.build_serialized_network(network, config)
print("Finish!")
trt-samples-for-hackathon-cn-master
cookbook/02-API/INT8-PTQ/main.py
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
from time import time
import numpy as np
import tensorrt as trt
from cuda import cudart

trtFile = "./model.plan"
nB, nC, nH, nW = 1, 1, 28, 28
np.random.seed(31193)  # seed before generating data, so the sample is reproducible
data = np.random.rand(nB, nC, nH, nW).astype(np.float32) * 2 - 1

def run(bUseCUDNN):
    logger = trt.Logger(trt.Logger.INFO)
    builder = trt.Builder(logger)
    network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
    profile = builder.create_optimization_profile()
    config = builder.create_builder_config()
    if bUseCUDNN:
        config.set_tactic_sources(1 << int(trt.TacticSource.CUBLAS) | 1 << int(trt.TacticSource.CUBLAS_LT) | 1 << int(trt.TacticSource.CUDNN) | 1 << int(trt.TacticSource.EDGE_MASK_CONVOLUTIONS))
    else:
        config.set_tactic_sources(1 << int(trt.TacticSource.CUBLAS) | 1 << int(trt.TacticSource.CUBLAS_LT) | 1 << int(trt.TacticSource.EDGE_MASK_CONVOLUTIONS))
    inputTensor = network.add_input("inputT0", trt.float32, [-1, nC, nH, nW])
    profile.set_shape(inputTensor.name, [nB, nC, nH, nW], [nB, nC, nH, nW], [nB * 2, nC, nH, nW])
    config.add_optimization_profile(profile)
    w = np.ascontiguousarray(np.random.rand(32, 1, 5, 5).astype(np.float32))
    b = np.ascontiguousarray(np.random.rand(32).astype(np.float32))
    _0 = network.add_convolution_nd(inputTensor, 32, [5, 5], w, b)
    _0.padding_nd = [2, 2]
    _1 = network.add_activation(_0.get_output(0), trt.ActivationType.RELU)
    _2 = network.add_pooling_nd(_1.get_output(0), trt.PoolingType.MAX, [2, 2])
    _2.stride_nd = [2, 2]
    w = np.ascontiguousarray(np.random.rand(64, 32, 5, 5).astype(np.float32))
    b = np.ascontiguousarray(np.random.rand(64).astype(np.float32))
    _3 = network.add_convolution_nd(_2.get_output(0), 64, [5, 5], w, b)
    _3.padding_nd = [2, 2]
    _4 = network.add_activation(_3.get_output(0), trt.ActivationType.RELU)
    _5 = network.add_pooling_nd(_4.get_output(0), trt.PoolingType.MAX, [2, 2])
    _5.stride_nd = [2, 2]
    _6 = network.add_shuffle(_5.get_output(0))
    _6.first_transpose = (0, 2, 3, 1)
    _6.reshape_dims = (-1, 64 * 7 * 7)
    w = np.ascontiguousarray(np.random.rand(64 * 7 * 7, 1024).astype(np.float32))
    b = np.ascontiguousarray(np.random.rand(1, 1024).astype(np.float32))
    _7 = network.add_constant(w.shape, trt.Weights(w))
    _8 = network.add_matrix_multiply(_6.get_output(0), trt.MatrixOperation.NONE, _7.get_output(0), trt.MatrixOperation.NONE)
    _9 = network.add_constant(b.shape, trt.Weights(b))
    _10 = network.add_elementwise(_8.get_output(0), _9.get_output(0), trt.ElementWiseOperation.SUM)
    _11 = network.add_activation(_10.get_output(0), trt.ActivationType.RELU)
    w = np.ascontiguousarray(np.random.rand(1024, 10).astype(np.float32))
    b = np.ascontiguousarray(np.random.rand(1, 10).astype(np.float32))
    _12 = network.add_constant(w.shape, trt.Weights(w))
    _13 = network.add_matrix_multiply(_11.get_output(0), trt.MatrixOperation.NONE, _12.get_output(0), trt.MatrixOperation.NONE)
    _14 = network.add_constant(b.shape, trt.Weights(b))
    _15 = network.add_elementwise(_13.get_output(0), _14.get_output(0), trt.ElementWiseOperation.SUM)
    _16 = network.add_softmax(_15.get_output(0))
    _16.axes = 1 << 1
    _17 = network.add_topk(_16.get_output(0), trt.TopKOperation.MAX, 1, 1 << 1)
    network.mark_output(_17.get_output(1))
    engineString = builder.build_serialized_network(network, config)
    engine = trt.Runtime(logger).deserialize_cuda_engine(engineString)
    nIO = engine.num_io_tensors
    lTensorName = [engine.get_tensor_name(i) for i in range(nIO)]
    nInput = [engine.get_tensor_mode(lTensorName[i]) for i in range(nIO)].count(trt.TensorIOMode.INPUT)
    context = engine.create_execution_context()
    context.set_input_shape(lTensorName[0], [nB, nC, nH, nW])
    for i in range(nIO):
        print("[%2d]%s->" % (i, "Input " if i < nInput else "Output"), engine.get_tensor_dtype(lTensorName[i]), engine.get_tensor_shape(lTensorName[i]), context.get_tensor_shape(lTensorName[i]), lTensorName[i])
    bufferH = []
    bufferH.append(np.ascontiguousarray(data))
    for i in range(nInput, nIO):
        bufferH.append(np.empty(context.get_tensor_shape(lTensorName[i]), dtype=trt.nptype(engine.get_tensor_dtype(lTensorName[i]))))
    bufferD = []
    for i in range(nIO):
        bufferD.append(cudart.cudaMalloc(bufferH[i].nbytes)[1])
    for i in range(nInput):
        cudart.cudaMemcpy(bufferD[i], bufferH[i].ctypes.data, bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice)
    for i in range(nIO):
        context.set_tensor_address(lTensorName[i], int(bufferD[i]))
    context.execute_async_v3(0)
    for i in range(nInput, nIO):
        cudart.cudaMemcpy(bufferH[i].ctypes.data, bufferD[i], bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost)
    for i in range(nIO):
        print(lTensorName[i])
        print(bufferH[i])
    t0 = time()
    for i in range(10):
        context.execute_async_v3(0)
    t1 = time()
    print("Timing:%f ms" % ((t1 - t0) * 1000))
    for b in bufferD:
        cudart.cudaFree(b)

if __name__ == "__main__":
    os.system("rm -rf ./*.plan")
    np.set_printoptions(precision=3, linewidth=200, suppress=True)
    cudart.cudaDeviceSynchronize()
    run(True)  # build with all tactic sources
    run(False)  # build without cuDNN
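# [Added note, not part of the original sample] the tactic sources currently
# enabled in a BuilderConfig can be read back as a bit mask, e.g.:
#     mask = config.get_tactic_sources()
#     print("cuDNN enabled? %s" % bool(mask & (1 << int(trt.TacticSource.CUDNN))))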
trt-samples-for-hackathon-cn-master
cookbook/02-API/TacticSource/main.py
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import tensorrt as trt

logger = trt.Logger(trt.Logger.ERROR)
builder = trt.Builder(logger)
builder.reset()  # reset the Builder to its default state, not required
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
# other available value:
#builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_PRECISION))  # deprecated by BuilderFlag since TensorRT 8.0
config = builder.create_builder_config()
inputTensor = network.add_input("inputT0", trt.float32, [3, 4, 5])
identityLayer = network.add_identity(inputTensor)
network.mark_output(identityLayer.get_output(0))
print("builder.__sizeof__() = %d" % builder.__sizeof__())
print("builder.__str__() = %s" % builder.__str__())
print("\nDevice type part ======================================================")
print("builder.platform_has_tf32 = %s" % builder.platform_has_tf32)
print("builder.platform_has_fast_fp16 = %s" % builder.platform_has_fast_fp16)
print("builder.platform_has_fast_int8 = %s" % builder.platform_has_fast_int8)
print("builder.num_DLA_cores = %d" % builder.num_DLA_cores)
print("builder.max_DLA_batch_size = %d" % builder.max_DLA_batch_size)
print("\nEngine build part =====================================================")
print("builder.logger = %s" % builder.logger)
print("builder.is_network_supported() = %s" % builder.is_network_supported(network, config))
print("builder.get_plugin_registry().plugin_creator_list =", builder.get_plugin_registry().plugin_creator_list)
builder.max_threads = 16  # the maximum number of threads that can be used by the Builder
#builder.max_batch_size = 8  # used in Implicit Batch Mode, deprecated since TensorRT 8.4, use Dynamic Shape Mode instead
#builder.max_workspace_size = 1 << 30  # deprecated since TensorRT 8.4, use BuilderConfig.set_memory_pool_limit instead
engineString = builder.build_serialized_network(network, config)
#engine = builder.build_engine(network, config)  # deprecated since TensorRT 8.0, use build_serialized_network instead
#engine = builder.build_cuda_engine(network)  # deprecated since TensorRT 7.0, use build_serialized_network instead
"""
Member of IBuilder:
++++        shown above
----        not shown above
[no prefix] others

----__class__
__del__
__delattr__
__dir__
__doc__
__enter__
__eq__
__exit__
__format__
__ge__
__getattribute__
__gt__
__hash__
__init__
__init_subclass__
__le__
__lt__
__module__
__ne__
__new__
----__pybind11_module_local_v4_gcc_libstdcpp_cxxabi1013__
__reduce__
__reduce_ex__
__repr__
__setattr__
++++__sizeof__
++++__str__
__subclasshook__
++++build_engine
++++build_serialized_network
++++create_builder_config
++++create_network
++++create_optimization_profile
----error_recorder refer to 02-API/ErrorRecorder
get_plugin_registry
----gpu_allocator refer to 02-API/GPUAllocator
++++is_network_supported
++++max_DLA_batch_size
++++max_batch_size
++++max_threads
++++num_DLA_cores
++++platform_has_fast_fp16
++++platform_has_fast_int8
++++platform_has_tf32
++++reset
"""
trt-samples-for-hackathon-cn-master
cookbook/02-API/Builder/main.py
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
import tensorrt as trt
from cuda import cudart

trtFile = "./model.plan"
timeCacheFile = "./model.cache"
shape = [1, 1, 28, 28]
np.random.seed(31193)  # seed before generating data, so the sample is reproducible
data = np.random.rand(np.prod(shape)).astype(np.float32).reshape(shape) * 2 - 1

class MyGpuAllocator(trt.IGpuAllocator):

    def __init__(self):
        print("[MyGpuAllocator::__init__]()")
        super(MyGpuAllocator, self).__init__()
        self.sizeList = []
        self.addressList = []
        self.flagList = []

    def allocate(self, size, alignment, flag):
        print("[MyGpuAllocator::allocate] Size=%d, Alignment=%d, Flag=%d" % (size, alignment, flag))
        status, address = cudart.cudaMalloc(size)
        if status != cudart.cudaError_t.cudaSuccess:
            print("Failed allocating size %d" % size)
            return 0
        self.sizeList.append(size)
        self.addressList.append(address)
        self.flagList.append(bool(flag))  # flag == True means the size is flexible (reallocate could be called), this is inconsistent with int(trt.AllocatorFlag.RESIZABLE) == 0
        return address

    def deallocate(self, address):  # named "free" in older versions of this API, deprecated since TensorRT 8.0
        print("[MyGpuAllocator::deallocate] Address=%d" % address)
        try:
            index = self.addressList.index(address)
        except:
            print("Failed finding address %d in addressList" % address)
            return False
        status = cudart.cudaFree(address)
        if status[0] != cudart.cudaError_t.cudaSuccess:
            print("Failed deallocating address %d" % address)
            return False
        del self.sizeList[index]
        del self.addressList[index]
        del self.flagList[index]
        return True

    def reallocate(self, oldAddress, alignment, newSize):
        print("[MyGpuAllocator::reallocate] OldAddress=%d, Alignment=%d, NewSize=%d" % (oldAddress, alignment, newSize))
        try:
            index = self.addressList.index(oldAddress)
        except:
            print("Failed finding address %d in addressList" % oldAddress)
            return 0
        if self.flagList[index] == False:
            print("Old buffer is not resizeable")
            return 0
        if newSize <= self.sizeList[index]:  # not larger than the old size
            print("New size is not larger than the old one")
            return oldAddress
        newAddress = self.allocate(newSize, alignment, self.flagList[index])
        if newAddress == 0:
            print("Failed reallocating new buffer")
            return 0
        status = cudart.cudaMemcpy(newAddress, oldAddress, self.sizeList[index], cudart.cudaMemcpyKind.cudaMemcpyDeviceToDevice)
        if status[0] != cudart.cudaError_t.cudaSuccess:
            print("Failed copying memory from buffer %d to %d" % (oldAddress, newAddress))
            return oldAddress
        status = self.deallocate(oldAddress)
        if status == False:
            print("Failed deallocating old buffer %d" % oldAddress)
            return newAddress
        return newAddress

np.set_printoptions(precision=3, linewidth=200, suppress=True)
cudart.cudaDeviceSynchronize()
logger = trt.Logger(trt.Logger.ERROR)
builder = trt.Builder(logger)
builder.gpu_allocator = MyGpuAllocator()  # assign the GPU Allocator to the Builder
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
profile = builder.create_optimization_profile()
config = builder.create_builder_config()
inputTensor = network.add_input("inputT0", trt.float32, [-1] + shape[1:])
profile.set_shape(inputTensor.name, [1] + shape[1:], [2] + shape[1:], [4] + shape[1:])
config.add_optimization_profile(profile)
w = np.ascontiguousarray(np.random.rand(32, 1, 5, 5).astype(np.float32))
b = np.ascontiguousarray(np.random.rand(32, 1, 1).astype(np.float32))
_0 = network.add_convolution_nd(inputTensor, 32, [5, 5], trt.Weights(w), trt.Weights(b))
_0.padding_nd = [2, 2]
_1 = network.add_activation(_0.get_output(0), trt.ActivationType.RELU)
_2 = network.add_pooling_nd(_1.get_output(0), trt.PoolingType.MAX, [2, 2])
_2.stride_nd = [2, 2]
w = np.ascontiguousarray(np.random.rand(64, 32, 5, 5).astype(np.float32))
b = np.ascontiguousarray(np.random.rand(64, 1, 1).astype(np.float32))
_3 = network.add_convolution_nd(_2.get_output(0), 64, [5, 5], trt.Weights(w), trt.Weights(b))
_3.padding_nd = [2, 2]
_4 = network.add_activation(_3.get_output(0), trt.ActivationType.RELU)
_5 = network.add_pooling_nd(_4.get_output(0), trt.PoolingType.MAX, [2, 2])
_5.stride_nd = [2, 2]
_6 = network.add_shuffle(_5.get_output(0))
_6.reshape_dims = (-1, 64 * 7 * 7)
w = np.ascontiguousarray(np.random.rand(64 * 7 * 7, 1024).astype(np.float32))
b = np.ascontiguousarray(np.random.rand(1, 1024).astype(np.float32))
_7 = network.add_constant(w.shape, trt.Weights(w))
_8 = network.add_matrix_multiply(_6.get_output(0), trt.MatrixOperation.NONE, _7.get_output(0), trt.MatrixOperation.NONE)
_9 = network.add_constant(b.shape, trt.Weights(b))
_10 = network.add_elementwise(_8.get_output(0), _9.get_output(0), trt.ElementWiseOperation.SUM)
_11 = network.add_activation(_10.get_output(0), trt.ActivationType.RELU)
w = np.ascontiguousarray(np.random.rand(1024, 10).astype(np.float32))
b = np.ascontiguousarray(np.random.rand(1, 10).astype(np.float32))
_12 = network.add_constant(w.shape, trt.Weights(w))
_13 = network.add_matrix_multiply(_11.get_output(0), trt.MatrixOperation.NONE, _12.get_output(0), trt.MatrixOperation.NONE)
_14 = network.add_constant(b.shape, trt.Weights(b))
_15 = network.add_elementwise(_13.get_output(0), _14.get_output(0), trt.ElementWiseOperation.SUM)
_16 = network.add_softmax(_15.get_output(0))
_16.axes = 1 << 1
_17 = network.add_topk(_16.get_output(0), trt.TopKOperation.MAX, 1, 1 << 1)
network.mark_output(_17.get_output(1))
engineString = builder.build_serialized_network(network, config)
runtime = trt.Runtime(logger)
runtime.gpu_allocator = MyGpuAllocator()  # assign the GPU Allocator to the Runtime or the ExecutionContext
engine = runtime.deserialize_cuda_engine(engineString)
nIO = engine.num_io_tensors
lTensorName = [engine.get_tensor_name(i) for i in range(nIO)]
nInput = [engine.get_tensor_mode(lTensorName[i]) for i in range(nIO)].count(trt.TensorIOMode.INPUT)
context = engine.create_execution_context()
#context.temporary_allocator = MyGpuAllocator()  # assign the GPU Allocator to the Runtime or the ExecutionContext
context.set_input_shape(lTensorName[0], shape)
for i in range(nIO):
    print("[%2d]%s->" % (i, "Input " if i < nInput else "Output"), engine.get_tensor_dtype(lTensorName[i]), engine.get_tensor_shape(lTensorName[i]), context.get_tensor_shape(lTensorName[i]), lTensorName[i])
bufferH = []
bufferH.append(np.ascontiguousarray(data.reshape(-1)))
for i in range(nInput, nIO):
    bufferH.append(np.empty(context.get_tensor_shape(lTensorName[i]), dtype=trt.nptype(engine.get_tensor_dtype(lTensorName[i]))))
bufferD = []
for i in range(nIO):
    bufferD.append(cudart.cudaMalloc(bufferH[i].nbytes)[1])
for i in range(nInput):
    cudart.cudaMemcpy(bufferD[i], bufferH[i].ctypes.data, bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice)
for i in range(nIO):
    context.set_tensor_address(lTensorName[i], int(bufferD[i]))
context.execute_async_v3(0)
for i in range(nInput, nIO):
    cudart.cudaMemcpy(bufferH[i].ctypes.data, bufferD[i], bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost)
for i in range(nIO):
    print(lTensorName[i])
    print(bufferH[i])
for b in bufferD:
    cudart.cudaFree(b)
trt-samples-for-hackathon-cn-master
cookbook/02-API/GPUAllocator/main.py
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import os
from time import time

import numpy as np
import tensorrt as trt
from cuda import cudart

trtFile = "model.plan"
timingCacheFile = "model.TimingCache"
bIgnoreMismatch = False  # turn on if we allow using the timing cache file among different devices
shape = [8, 1, 28, 28]

def run(iNetwork, bUseTimeCache):
    print("iNetwork=%d, bUseTimeCache=%d" % (iNetwork, bUseTimeCache))
    logger = trt.Logger(trt.Logger.ERROR)
    timingCacheString = b""
    if bUseTimeCache and os.path.isfile(timingCacheFile):
        with open(timingCacheFile, "rb") as f:
            timingCacheString = f.read()
        if timingCacheString is None:
            print("Failed loading %s" % timingCacheFile)
            return
        print("Succeeded loading %s" % timingCacheFile)

    builder = trt.Builder(logger)
    network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
    profile = builder.create_optimization_profile()
    config = builder.create_builder_config()
    if bUseTimeCache:
        timingCache = config.create_timing_cache(timingCacheString)
        #timingCache.reset()  # clean the timing cache, not required
        config.set_timing_cache(timingCache, bIgnoreMismatch)

    inputTensor = network.add_input("inputT0", trt.float32, [-1] + shape[1:])
    profile.set_shape(inputTensor.name, [2] + shape[1:], [4] + shape[1:], [8] + shape[1:])
    config.add_optimization_profile(profile)

    # Common part
    w = np.ascontiguousarray(np.random.rand(32, 1, 5, 5).astype(np.float32))
    b = np.ascontiguousarray(np.random.rand(32).astype(np.float32))
    _0 = network.add_convolution_nd(inputTensor, 32, [5, 5], w, b)
    _0.padding_nd = [2, 2]
    _1 = network.add_activation(_0.get_output(0), trt.ActivationType.RELU)
    _2 = network.add_pooling_nd(_1.get_output(0), trt.PoolingType.MAX, [2, 2])
    _2.stride_nd = [2, 2]

    w = np.ascontiguousarray(np.random.rand(64, 32, 5, 5).astype(np.float32))
    b = np.ascontiguousarray(np.random.rand(64).astype(np.float32))
    _3 = network.add_convolution_nd(_2.get_output(0), 64, [5, 5], w, b)
    _3.padding_nd = [2, 2]
    _4 = network.add_activation(_3.get_output(0), trt.ActivationType.RELU)
    _5 = network.add_pooling_nd(_4.get_output(0), trt.PoolingType.MAX, [2, 2])
    _5.stride_nd = [2, 2]

    _6 = network.add_shuffle(_5.get_output(0))
    _6.first_transpose = (0, 2, 3, 1)
    _6.reshape_dims = (-1, 64 * 7 * 7)

    w = np.ascontiguousarray(np.random.rand(64 * 7 * 7, 1024).astype(np.float32))
    b = np.ascontiguousarray(np.random.rand(1, 1024).astype(np.float32))
    _7 = network.add_constant(w.shape, trt.Weights(w))
    _8 = network.add_matrix_multiply(_6.get_output(0), trt.MatrixOperation.NONE, _7.get_output(0), trt.MatrixOperation.NONE)
    _9 = network.add_constant(b.shape, trt.Weights(b))
    _10 = network.add_elementwise(_8.get_output(0), _9.get_output(0), trt.ElementWiseOperation.SUM)
    _11 = network.add_activation(_10.get_output(0), trt.ActivationType.RELU)

    w = np.ascontiguousarray(np.random.rand(1024, 10).astype(np.float32))
    b = np.ascontiguousarray(np.random.rand(1, 10).astype(np.float32))
    _12 = network.add_constant(w.shape, trt.Weights(w))
    _13 = network.add_matrix_multiply(_11.get_output(0), trt.MatrixOperation.NONE, _12.get_output(0), trt.MatrixOperation.NONE)
    _14 = network.add_constant(b.shape, trt.Weights(b))
    _15 = network.add_elementwise(_13.get_output(0), _14.get_output(0), trt.ElementWiseOperation.SUM)

    # Difference part
    if iNetwork == 0:
        w = np.ascontiguousarray(np.random.rand(10, 512).astype(np.float32))
        b = np.ascontiguousarray(np.random.rand(1, 512).astype(np.float32))
        layerWeight = network.add_constant(w.shape, trt.Weights(w))
        layer = network.add_matrix_multiply(_15.get_output(0), trt.MatrixOperation.NONE, layerWeight.get_output(0), trt.MatrixOperation.NONE)
        layerBias = network.add_constant(b.shape, trt.Weights(b))
        layer = network.add_elementwise(layer.get_output(0), layerBias.get_output(0), trt.ElementWiseOperation.SUM)
        layer = network.add_activation(layer.get_output(0), trt.ActivationType.RELU)

        w = np.ascontiguousarray(np.random.rand(512, 10).astype(np.float32))
        b = np.ascontiguousarray(np.random.rand(1, 10).astype(np.float32))
        layerWeight = network.add_constant(w.shape, trt.Weights(w))
        layer = network.add_matrix_multiply(layer.get_output(0), trt.MatrixOperation.NONE, layerWeight.get_output(0), trt.MatrixOperation.NONE)
        layerBias = network.add_constant(b.shape, trt.Weights(b))
        layer = network.add_elementwise(layer.get_output(0), layerBias.get_output(0), trt.ElementWiseOperation.SUM)
    else:
        w = np.ascontiguousarray(np.random.rand(10, 768).astype(np.float32))
        b = np.ascontiguousarray(np.random.rand(1, 768).astype(np.float32))
        layerWeight = network.add_constant(w.shape, trt.Weights(w))
        layer = network.add_matrix_multiply(_15.get_output(0), trt.MatrixOperation.NONE, layerWeight.get_output(0), trt.MatrixOperation.NONE)
        layerBias = network.add_constant(b.shape, trt.Weights(b))
        layer = network.add_elementwise(layer.get_output(0), layerBias.get_output(0), trt.ElementWiseOperation.SUM)
        layer = network.add_activation(layer.get_output(0), trt.ActivationType.RELU)

        w = np.ascontiguousarray(np.random.rand(768, 10).astype(np.float32))
        b = np.ascontiguousarray(np.random.rand(1, 10).astype(np.float32))
        layerWeight = network.add_constant(w.shape, trt.Weights(w))
        layer = network.add_matrix_multiply(layer.get_output(0), trt.MatrixOperation.NONE, layerWeight.get_output(0), trt.MatrixOperation.NONE)
        layerBias = network.add_constant(b.shape, trt.Weights(b))
        layer = network.add_elementwise(layer.get_output(0), layerBias.get_output(0), trt.ElementWiseOperation.SUM)
        layer = network.add_activation(layer.get_output(0), trt.ActivationType.RELU)

        w = np.ascontiguousarray(np.random.rand(10, 2048).astype(np.float32))
        b = np.ascontiguousarray(np.random.rand(1, 2048).astype(np.float32))
        layerWeight = network.add_constant(w.shape, trt.Weights(w))
        layer = network.add_matrix_multiply(layer.get_output(0), trt.MatrixOperation.NONE, layerWeight.get_output(0), trt.MatrixOperation.NONE)
        layerBias = network.add_constant(b.shape, trt.Weights(b))
        layer = network.add_elementwise(layer.get_output(0), layerBias.get_output(0), trt.ElementWiseOperation.SUM)
        layer = network.add_activation(layer.get_output(0), trt.ActivationType.RELU)

        w = np.ascontiguousarray(np.random.rand(2048, 10).astype(np.float32))
        b = np.ascontiguousarray(np.random.rand(1, 10).astype(np.float32))
        layerWeight = network.add_constant(w.shape, trt.Weights(w))
        layer = network.add_matrix_multiply(layer.get_output(0), trt.MatrixOperation.NONE, layerWeight.get_output(0), trt.MatrixOperation.NONE)
        layerBias = network.add_constant(b.shape, trt.Weights(b))
        layer = network.add_elementwise(layer.get_output(0), layerBias.get_output(0), trt.ElementWiseOperation.SUM)

    _16 = network.add_softmax(layer.get_output(0))
    _16.axes = 1 << 1
    _17 = network.add_topk(_16.get_output(0), trt.TopKOperation.MAX, 1, 1 << 1)

    network.mark_output(_17.get_output(1))

    t0 = time()
    engineString = builder.build_serialized_network(network, config)
    t1 = time()
    print("%s timing cache, %f ms" % ("With" if bUseTimeCache else "Without", (t1 - t0) * 1000))

    if bUseTimeCache:
        timingCacheNew = config.get_timing_cache()
        #res = timingCache.combine(timingCacheNew, bIgnoreMismatch)  # merge the timing cache loaded from file with the new one created by this build, not required
        timingCache = timingCacheNew
        #print("timingCache.combine:%s" % res)
        timeCacheString = timingCache.serialize()
        with open(timingCacheFile, "wb") as f:
            f.write(timeCacheString)
            print("Succeeded saving %s" % timingCacheFile)

    print("#--------------------------------------------------------------------")

if __name__ == "__main__":
    os.system("rm -rfv model.TimingCache")
    np.set_printoptions(precision=3, linewidth=200, suppress=True)
    cudart.cudaDeviceSynchronize()

    run(0, 0)
    run(0, 0)
    run(1, 0)
    run(1, 0)
    run(0, 1)
    os.system("ls -alh |grep model.TimingCache")
    run(0, 1)
    os.system("ls -alh |grep model.TimingCache")
    run(1, 1)
    os.system("ls -alh |grep model.TimingCache")
    run(1, 1)
    os.system("ls -alh |grep model.TimingCache")
    run(0, 1)
    os.system("ls -alh |grep model.TimingCache")
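# A minimal sketch (not part of the original sample): the load/save logic above can be
# factored into two helpers. The names load_timing_cache/save_timing_cache are hypothetical;
# the TensorRT calls (create_timing_cache, set_timing_cache, get_timing_cache, serialize) are real.
def load_timing_cache(config, path, bIgnoreMismatch=False):
    # Attach a timing cache to the builder config, seeding it from `path` if the file exists
    cacheString = b""
    if os.path.isfile(path):
        with open(path, "rb") as f:
            cacheString = f.read()
    cache = config.create_timing_cache(cacheString)
    config.set_timing_cache(cache, bIgnoreMismatch)

def save_timing_cache(config, path):
    # Serialize the (possibly updated) timing cache back to disk after a build
    with open(path, "wb") as f:
        f.write(config.get_timing_cache().serialize())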
trt-samples-for-hackathon-cn-master
cookbook/02-API/TimingCache/main.py
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import numpy as np
import tensorrt as trt

kernel = np.ones([32, 3, 5, 5], dtype=np.float32)
# use data type of int32 rather than float32/float16
bias = np.ones(32, dtype=np.int32)

logger = trt.Logger(trt.Logger.ERROR)
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
config = builder.create_builder_config()
inputT0 = network.add_input("inputT0", trt.float32, (1, 3, 600, 800))

convolutionLayer = network.add_convolution_nd(inputT0, 32, [5, 5], kernel, bias)

network.mark_output(convolutionLayer.get_output(0))
engineString = builder.build_serialized_network(network, config)
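# A hedged sketch of the fix (not in the original sample, which exists only to reproduce the error):
# giving the bias a floating-point data type removes the "weights are not permitted since they are
# of type Int32" build failure. The names biasFixed/convolutionLayerFixed are hypothetical.
biasFixed = np.ones(32, dtype=np.float32)
convolutionLayerFixed = network.add_convolution_nd(inputT0, 32, [5, 5], kernel, biasFixed)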
trt-samples-for-hackathon-cn-master
cookbook/10-ProblemSolving/WeightsAreNotPermittedSinceTheyAreOfTypeInt32/main.py
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import os
from collections import OrderedDict

import numpy as np
import onnx
import onnx_graphsurgeon as gs
import tensorrt as trt

onnxFile1 = "model-01.onnx"
onnxFile2 = "model-02.onnx"

# Create an ONNX graph with ONNX GraphSurgeon ----------------------------------
a0 = gs.Constant("a0", np.ascontiguousarray(np.array([0], dtype=np.int64)))
a1 = gs.Constant("a1", np.ascontiguousarray(np.array([1], dtype=np.int64)))
a2 = gs.Constant("a2", np.ascontiguousarray(np.array([2], dtype=np.int64)))
am1 = gs.Constant("am1", np.ascontiguousarray(np.array([-1], dtype=np.int64)))
am2 = gs.Constant("am2", np.ascontiguousarray(np.array([-2], dtype=np.int64)))

nodeList = []

tensor0 = gs.Variable("tensor0", np.int32, ["B", "T"])
tensor1 = gs.Variable("tensor1", np.int32, ["B", "T"])
tensor2 = gs.Variable("tensor2", bool, ["B", "T"])
node0 = gs.Node("GreaterOrEqual", "GreaterOrEqual_27", inputs=[tensor0, tensor1], outputs=[tensor2])
nodeList.append(node0)

tensor3 = gs.Variable("tensor3", bool, ["B", 1, "T"])
node1 = gs.Node("Unsqueeze", "Unsqueeze_29", inputs=[tensor2], outputs=[tensor3], attrs=OrderedDict([("axes", [1])]))
nodeList.append(node1)

tensor4 = gs.Variable("tensor4", bool, ["B", 1, "T"])
node2 = gs.Node("Not", "Not_30", inputs=[tensor3], outputs=[tensor4])
nodeList.append(node2)

tensor5 = gs.Variable("tensor5", bool, ["B", 1, "T2"])
node3 = gs.Node("Slice", "Slice_79", inputs=[tensor4, a0, am2, a2, a2], outputs=[tensor5])
nodeList.append(node3)

tensor6 = gs.Variable("tensor6", bool, ["B", 1, "T3"])
node4 = gs.Node("Slice", "Slice_84", inputs=[tensor5, a0, am2, a2, a2], outputs=[tensor6])
nodeList.append(node4)

graph = gs.Graph(nodes=nodeList, inputs=[tensor0, tensor1], outputs=[tensor6])
graph.cleanup().toposort()
onnx.save(gs.export_onnx(graph), onnxFile1)

# Edit the network with ONNX GraphSurgeon --------------------------------------
graph = gs.import_onnx(onnx.load(onnxFile1))

for node in graph.nodes:
    if node.op == "Slice" and node.name == "Slice_79":
        castV0 = gs.Variable("CastV-0", np.dtype(np.int32), None)
        castN0 = gs.Node("Cast", "CastN-0", inputs=[node.inputs[0]], outputs=[castV0], attrs=OrderedDict([("to", onnx.TensorProto.INT32)]))
        graph.nodes.append(castN0)
        node.inputs[0] = castV0

        nextSliceNode = node.o()
        castV1 = gs.Variable("CastV-1", bool, None)
        castN1 = gs.Node("Cast", "CastN-1", inputs=[nextSliceNode.outputs[0]], outputs=[castV1], attrs=OrderedDict([("to", onnx.TensorProto.BOOL)]))
        graph.nodes.append(castN1)
        for i in range(len(graph.outputs)):
            if graph.outputs[i] == nextSliceNode.outputs[0]:
                graph.outputs[i] = castV1
                break

graph.cleanup()
onnx.save(gs.export_onnx(graph), onnxFile2)

# Parse ONNX into TensorRT ------------------------------------------------------
def parseOnnxToTRT(logger, onnxFile):
    builder = trt.Builder(logger)
    network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
    profile = builder.create_optimization_profile()
    config = builder.create_builder_config()
    parser = trt.OnnxParser(network, logger)
    if not os.path.exists(onnxFile):
        print("Failed finding ONNX file!")
        return
    print("Succeeded finding ONNX file!")
    with open(onnxFile, "rb") as model:
        if not parser.parse(model.read()):
            print("Failed parsing ONNX file!")
            for error in range(parser.num_errors):
                print(parser.get_error(error))
            return
        print("Succeeded parsing ONNX file!")

    inputT0 = network.get_input(0)
    inputT0.shape = [-1, -1]
    profile.set_shape(inputT0.name, [1, 1], [4, 32], [16, 64])
    inputT1 = network.get_input(1)
    inputT1.shape = [-1, -1]
    profile.set_shape(inputT1.name, [1, 1], [4, 32], [16, 64])
    config.add_optimization_profile(profile)

    engineString = builder.build_serialized_network(network, config)
    print("%s building .plan from %s" % ("Failed" if engineString is None else "Succeeded", onnxFile))

logger = trt.Logger(trt.Logger.ERROR)
parseOnnxToTRT(logger, onnxFile1)
parseOnnxToTRT(logger, onnxFile2)
print("All tests finished!")
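# A generalized sketch of the workaround above (hypothetical helper, not in the original sample):
# wrap a Slice whose input is bool with Cast-to-INT32 before it and Cast-to-BOOL after it, leaving
# downstream consumers of the original output tensor untouched.
def wrap_bool_slice(graph, node):
    # Cast the bool input to int32 before the Slice
    castIn = gs.Variable(node.name + "-castIn", np.dtype(np.int32), None)
    graph.nodes.append(gs.Node("Cast", node.name + "-CastIn", inputs=[node.inputs[0]], outputs=[castIn], attrs=OrderedDict([("to", onnx.TensorProto.INT32)])))
    node.inputs[0] = castIn
    # Let the Slice produce int32, then cast back to bool into the original output tensor
    oldOut = node.outputs[0]
    newOut = gs.Variable(node.name + "-int32Out", np.dtype(np.int32), None)
    node.outputs[0] = newOut
    graph.nodes.append(gs.Node("Cast", node.name + "-CastOut", inputs=[newOut], outputs=[oldOut], attrs=OrderedDict([("to", onnx.TensorProto.BOOL)])))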
trt-samples-for-hackathon-cn-master
cookbook/10-ProblemSolving/SliceNodeWithBoolIO/main.py
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import numpy as np
import tensorrt as trt

kernel = np.ones([32, 3, 5, 5], dtype=np.float32)
# the count of weights is incorrect: the convolution has 32 output channels but the bias has only 7 elements
bias = np.ones(7, dtype=np.float32)

logger = trt.Logger(trt.Logger.ERROR)
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
config = builder.create_builder_config()
inputT0 = network.add_input("inputT0", trt.float32, (1, 3, 600, 800))

convolutionLayer = network.add_convolution_nd(inputT0, 32, [5, 5], kernel, bias)

network.mark_output(convolutionLayer.get_output(0))
engineString = builder.build_serialized_network(network, config)
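# A hedged sketch of the fix (not in the original sample, which exists only to reproduce the error):
# the bias needs one element per output channel, i.e. 32 here, matching the "weights has count X
# but Y was expected" message. The names biasFixed/convolutionLayerFixed are hypothetical.
biasFixed = np.ones(32, dtype=np.float32)
convolutionLayerFixed = network.add_convolution_nd(inputT0, 32, [5, 5], kernel, biasFixed)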
trt-samples-for-hackathon-cn-master
cookbook/10-ProblemSolving/WeightsHasCountXButYWasExpected/main.py
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import os
from collections import OrderedDict

import numpy as np
import onnx
import onnx_graphsurgeon as gs

onnxFile0 = "model-0.onnx"
onnxFile1 = "model-1.onnx"
onnxFile2 = "model-2.onnx"
nEmbedding = 256
np.random.seed(31193)

tensor0 = gs.Variable("tensor0", np.float32, ["B", nEmbedding, "T"])
constantM1 = gs.Constant("constantM1", np.ascontiguousarray(np.array([-1], dtype=np.int64)))
constantM2 = gs.Constant("constantM2", np.ascontiguousarray(np.array([-2], dtype=np.int64)))

tensor1 = gs.Variable("tensor1", np.float32, None)
node1 = gs.Node("ReduceSum", "ReduceSum", inputs=[tensor0, constantM2], outputs=[tensor1], attrs=OrderedDict([("keepdims", 1)]))

graph = gs.Graph(nodes=[node1], inputs=[tensor0], outputs=[tensor1], opset=13)
onnx.save(gs.export_onnx(graph.cleanup().toposort()), onnxFile0)
print("Succeeded building %s!" % (onnxFile0))

# Add a pair of Transpose nodes around the ReduceSum node; however, they will be fused away by TensorRT
graph = gs.import_onnx(onnx.load(onnxFile0))
for node in graph.nodes:
    if node.op == "ReduceSum":
        node.inputs[1] = constantM1
        tensor2 = gs.Variable("tensor2", np.float32, None)
        node2 = gs.Node("Transpose", "Transpose-0", inputs=[node.inputs[0]], outputs=[tensor2], attrs=OrderedDict([("perm", [0, 2, 1])]))
        graph.nodes.append(node2)
        tensor3 = gs.Variable("tensor3", np.float32, None)
        node3 = gs.Node("Transpose", "Transpose-1", inputs=[tensor3], outputs=[node.outputs[0]], attrs=OrderedDict([("perm", [0, 2, 1])]))
        graph.nodes.append(node3)
        node.inputs[0] = tensor2
        node.outputs[0] = tensor3

graph.cleanup().toposort()
onnx.save(gs.export_onnx(graph), onnxFile1)
print("Succeeded building %s!" % (onnxFile1))

# Add a pair of Transpose nodes around the ReduceSum node; furthermore, insert an Identity node
# after the first Transpose to block TensorRT's fusion
graph = gs.import_onnx(onnx.load(onnxFile0))
for node in graph.nodes:
    if node.op == "ReduceSum":
        node.inputs[1] = constantM1
        tensor2 = gs.Variable("tensor2", np.float32, None)
        node2 = gs.Node("Transpose", "Transpose-0", inputs=[node.inputs[0]], outputs=[tensor2], attrs=OrderedDict([("perm", [0, 2, 1])]))
        graph.nodes.append(node2)
        tensor3 = gs.Variable("tensor3", np.float32, None)
        node3 = gs.Node("Identity", "Identity-0", inputs=[tensor2], outputs=[tensor3])
        graph.nodes.append(node3)
        tensor4 = gs.Variable("tensor4", np.float32, None)
        node4 = gs.Node("Transpose", "Transpose-1", inputs=[tensor4], outputs=[node.outputs[0]], attrs=OrderedDict([("perm", [0, 2, 1])]))
        graph.nodes.append(node4)
        node.inputs[0] = tensor3
        node.outputs[0] = tensor4

graph.cleanup().toposort()
onnx.save(gs.export_onnx(graph), onnxFile2)
print("Succeeded building %s!" % (onnxFile2))

command = "trtexec --onnx=%s --verbose --useCudaGraph --noDataTransfers --minShapes=tensor0:1x256x1024 --optShapes=tensor0:1x256x1024 --maxShapes=tensor0:1x256x1024 --shapes=tensor0:1x256x1024"
os.system(command % onnxFile0)
os.system(command % onnxFile1)
os.system(command % onnxFile2)
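# A minimal spot-check sketch (assumption: onnxruntime is installed; it is not used by the
# original sample): all three graphs should be numerically equivalent before benchmarking them.
import onnxruntime as ort

x = np.random.rand(1, nEmbedding, 1024).astype(np.float32)
results = []
for f in [onnxFile0, onnxFile1, onnxFile2]:
    sess = ort.InferenceSession(f, providers=["CPUExecutionProvider"])
    results.append(sess.run(None, {"tensor0": x})[0])
print("max diff 0 vs 1: %e" % np.max(np.abs(results[0] - results[1])))
print("max diff 0 vs 2: %e" % np.max(np.abs(results[0] - results[2])))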
trt-samples-for-hackathon-cn-master
cookbook/09-BestPractice/AdjustReduceLayer/main.py
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import os
from collections import OrderedDict

import numpy as np
import onnx
import onnx_graphsurgeon as gs
import tensorrt as trt

nLoop = 10
nC = 32
onnxFile0 = "model-0.onnx"
onnxFile1 = "model-1.onnx"

tensor0 = gs.Variable("tensor-0", np.float32, ["B", 1, 16, 16])

constant32x1 = gs.Constant("constant32x1", np.ascontiguousarray(np.random.rand(nC, 1, 3, 3).reshape(nC, 1, 3, 3).astype(np.float32) * 2 - 1))
constant32x32 = gs.Constant("constant32x32", np.ascontiguousarray(np.random.rand(nC, nC, 3, 3).reshape(nC, nC, 3, 3).astype(np.float32) * 2 - 1))
constant32 = gs.Constant("constant32", np.ascontiguousarray(np.random.rand(1, nC, 1, 1).reshape(1, nC, 1, 1).astype(np.float32) * 2 - 1))
constant32t = gs.Constant("constant32t", np.ascontiguousarray(np.random.rand(1, 1, 1, nC).reshape(1, 1, 1, nC).astype(np.float32) * 2 - 1))
constant1x32 = gs.Constant("constant1x32", np.ascontiguousarray(np.random.rand(1, nC, 3, 3).reshape(1, nC, 3, 3).astype(np.float32) * 2 - 1))
constant1 = gs.Constant("constant1", np.ascontiguousarray(np.array([1], dtype=np.int64)))
constant32r = gs.Constant("constant32r", np.ascontiguousarray(np.random.rand(1, nC, 1, 1).reshape(1, nC, 1, 1).astype(np.float32) * 2 - 1))

graphNodeList = []

tensor1 = gs.Variable("tensor-1", np.float32, None)
node1 = gs.Node("Conv", "Conv0", inputs=[tensor0, constant32x1], outputs=[tensor1])
node1.attrs = OrderedDict([("kernel_shape", [3, 3]), ("pads", [1, 1, 1, 1])])
"""
node1.attrs = OrderedDict([
    ("dilations", [1, 1]),
    ("kernel_shape", [3, 3]),
    ("pads", [1, 1, 1, 1]),
    ("strides", [1, 1]),
])
"""
graphNodeList.append(node1)

tensorLoop = tensor1
for i in range(nLoop // 2):
    tensor2 = gs.Variable("tensor-%d-1" % i, np.float32, None)
    node2 = gs.Node("Conv", "Conv-" + str(i), inputs=[tensorLoop, constant32x32], outputs=[tensor2])
    node2.attrs = OrderedDict([("kernel_shape", [3, 3]), ("pads", [1, 1, 1, 1])])
    graphNodeList.append(node2)

    tensor3 = gs.Variable("tensor-%d-2" % i, np.float32, None)
    node3 = gs.Node("Unsqueeze", "Unsqueeze-%d" % i, inputs=[tensor2, constant1], outputs=[tensor3])
    graphNodeList.append(node3)

    tensor4 = gs.Variable("tensor-%d-3" % i, dtype=np.float32, shape=None)
    node4 = gs.Node("Add", "Add-" + str(i), inputs=[tensor3, constant32], outputs=[tensor4])
    graphNodeList.append(node4)

    tensor5 = gs.Variable("tensor-%d-4" % i, np.float32, None)
    node5 = gs.Node("Squeeze", "Squeeze-%d" % i, inputs=[tensor4, constant1], outputs=[tensor5])
    graphNodeList.append(node5)

    tensor6 = gs.Variable("tensor-%d-5" % i, dtype=np.float32, shape=None)
    node6 = gs.Node("Relu", "ReLU-" + str(i), inputs=[tensor5], outputs=[tensor6])
    graphNodeList.append(node6)

    tensorLoop = tensor6

for i in range(nLoop // 2, nLoop):
    tensor2 = gs.Variable("tensor-%d-1" % i, np.float32, None)
    node2 = gs.Node("Conv", "Conv-" + str(i), inputs=[tensorLoop, constant32x32], outputs=[tensor2])
    node2.attrs = OrderedDict([("kernel_shape", [3, 3]), ("pads", [1, 1, 1, 1])])
    graphNodeList.append(node2)

    tensor3 = gs.Variable("tensor-%d-2" % i, np.float32, None)
    node3 = gs.Node("Transpose", "Transpose-%d-0" % i, inputs=[tensor2], outputs=[tensor3], attrs=OrderedDict([("perm", [0, 2, 3, 1])]))
    graphNodeList.append(node3)

    tensor4 = gs.Variable("tensor-%d-3" % i, dtype=np.float32, shape=None)
    node4 = gs.Node("Add", "Add-" + str(i), inputs=[tensor3, constant32t], outputs=[tensor4])
    graphNodeList.append(node4)

    tensor5 = gs.Variable("tensor-%d-4" % i, np.float32, None)
    node5 = gs.Node("Transpose", "Transpose-%d-1" % i, inputs=[tensor4], outputs=[tensor5], attrs=OrderedDict([("perm", [0, 3, 1, 2])]))
    graphNodeList.append(node5)

    tensor6 = gs.Variable("tensor-%d-5" % i, dtype=np.float32, shape=None)
    node6 = gs.Node("Relu", "ReLU-" + str(i), inputs=[tensor5], outputs=[tensor6])
    graphNodeList.append(node6)

    tensorLoop = tensor6

tensor7 = gs.Variable("tensor-6", dtype=np.float32, shape=None)
node7 = gs.Node("Conv", "Conv1", inputs=[tensorLoop, constant1x32], outputs=[tensor7])
graphNodeList.append(node7)

graph = gs.Graph(nodes=graphNodeList, inputs=[tensor0], outputs=[tensor7], opset=13)
onnx.save(gs.export_onnx(graph.cleanup().toposort()), onnxFile0)
print("Succeeded building %s!" % (onnxFile0))

# Remove pairs of Transpose or Squeeze/Unsqueeze nodes
graph = gs.import_onnx(onnx.load(onnxFile0))
for node in graph.nodes:
    if node.op in ["Unsqueeze", "Squeeze"]:
        node.o().inputs[0] = node.inputs[0]
    if node.op == "Transpose":
        if node.o().op == "Add":
            node.o().inputs[1] = constant32r
        node.o().inputs[0] = node.inputs[0]

graph.cleanup().toposort()
onnx.save(gs.export_onnx(graph), onnxFile1)
print("Succeeded building %s!" % (onnxFile1))

def run(onnxFile):
    logger = trt.Logger(trt.Logger.VERBOSE)
    builder = trt.Builder(logger)
    network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
    profile = builder.create_optimization_profile()
    config = builder.create_builder_config()
    config.max_workspace_size = 22 << 30
    parser = trt.OnnxParser(network, logger)
    with open(onnxFile, "rb") as model:
        parser.parse(model.read())

    inputT0 = network.get_input(0)
    inputT0.shape = [-1, 1, 16, 16]
    profile.set_shape(inputT0.name, [1, 1, 16, 16], [8, 1, 16, 16], [8, 1, 16, 16])
    config.add_optimization_profile(profile)

    engineString = builder.build_serialized_network(network, config)
    trtFile = onnxFile.split(".")[0] + ".plan"
    with open(trtFile, "wb") as f:
        f.write(engineString)
    print("Succeeded building %s!" % (trtFile))
    os.system("trtexec --loadEngine=%s --verbose --useCudaGraph --noDataTransfers --shapes=tensor-0:8x1x16x16" % trtFile)

run(onnxFile0)
run(onnxFile1)
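# A small sketch (not in the original sample): compare the op histograms of the two graphs to
# confirm that the Squeeze/Unsqueeze and Transpose pairs were actually removed by the edit above.
from collections import Counter

for f in [onnxFile0, onnxFile1]:
    g = gs.import_onnx(onnx.load(f))
    print(f, dict(Counter(node.op for node in g.nodes)))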
trt-samples-for-hackathon-cn-master
cookbook/09-BestPractice/EliminateSqueezeUnsqueezeTranspose/main.py
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import os
from collections import OrderedDict

import numpy as np
import onnx
import onnx_graphsurgeon as gs
import tensorrt as trt

nLoop = 10
onnxFile = "model.onnx"
np.random.seed(31193)

# Create an ONNX graph with ONNX GraphSurgeon ----------------------------------
tensor0 = gs.Variable("tensor0", np.float32, ["B", 1])

constant1x256 = gs.Constant("constant1x256", np.ascontiguousarray(np.random.rand(1, 256).reshape(1, 256).astype(np.float32) * 2 - 1))
constant256x2048 = gs.Constant("constant256x2048", np.ascontiguousarray(np.random.rand(256, 2048).reshape(256, 2048).astype(np.float32) * 2 - 1))
constant2048 = gs.Constant("constant2048", np.ascontiguousarray(np.random.rand(2048).astype(np.float32) * 2 - 1))
constant2048x256 = gs.Constant("constant2048x256", np.ascontiguousarray(np.random.rand(2048, 256).reshape(2048, 256).astype(np.float32) * 2 - 1))
constant256 = gs.Constant("constant256", np.ascontiguousarray(np.random.rand(256).astype(np.float32) * 2 - 1))
constantM1 = gs.Constant("constantM1", np.ascontiguousarray(np.array([-1], dtype=np.int64)))

graphNodeList = []

tensor1 = gs.Variable("tensor1", np.float32, None)
node1 = gs.Node("MatMul", "MMU1", inputs=[tensor0, constant1x256], outputs=[tensor1])
graphNodeList.append(node1)

tensorLoop = tensor1
for i in range(nLoop):
    tensor2 = gs.Variable("tensor%d-1" % i, np.float32, None)
    node2 = gs.Node("MatMul", "MMU-" + str(i), inputs=[tensorLoop, constant256x2048], outputs=[tensor2])
    graphNodeList.append(node2)

    tensor3 = gs.Variable("tensor%d-2" % i, dtype=np.float32, shape=None)
    node3 = gs.Node("Add", "AddU-" + str(i), inputs=[tensor2, constant2048], outputs=[tensor3])
    graphNodeList.append(node3)

    tensor4 = gs.Variable("tensor%d-3" % i, dtype=np.float32, shape=None)
    node4 = gs.Node("Relu", "ReLUU-" + str(i), inputs=[tensor3], outputs=[tensor4])
    graphNodeList.append(node4)

    tensor5 = gs.Variable("tensor%d-4" % i, dtype=np.float32, shape=None)
    node5 = gs.Node("MatMul", "MMD-" + str(i), inputs=[tensor4, constant2048x256], outputs=[tensor5])
    graphNodeList.append(node5)

    tensor6 = gs.Variable("tensor%d-5" % i, dtype=np.float32, shape=None)
    node6 = gs.Node("Add", "AddD-" + str(i), inputs=[tensor5, constant256], outputs=[tensor6])
    graphNodeList.append(node6)

    tensor7 = gs.Variable("tensor%d-6" % i, dtype=np.float32, shape=None)
    node7 = gs.Node("Relu", "ReLUD-" + str(i), inputs=[tensor6], outputs=[tensor7])
    graphNodeList.append(node7)

    tensorLoop = tensor7

tensor8 = gs.Variable("tensor8", dtype=np.float32, shape=None)
node8 = gs.Node("ReduceSum", "Reduce", inputs=[tensorLoop, constantM1], outputs=[tensor8], attrs=OrderedDict([("keepdims", 0)]))
graphNodeList.append(node8)

graph = gs.Graph(nodes=graphNodeList, inputs=[tensor0], outputs=[tensor8], opset=13)
onnx.save(gs.export_onnx(graph.cleanup().toposort()), onnxFile)
print("Succeeded building %s!" % (onnxFile))

def run(nBS):
    logger = trt.Logger(trt.Logger.VERBOSE)
    builder = trt.Builder(logger)
    network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
    config = builder.create_builder_config()
    config.max_workspace_size = 22 << 30
    parser = trt.OnnxParser(network, logger)
    with open(onnxFile, "rb") as model:
        parser.parse(model.read())

    inputT0 = network.get_input(0)
    inputT0.shape = [nBS, 1]

    engineString = builder.build_serialized_network(network, config)
    trtFile = onnxFile.split(".")[0] + ".plan"
    with open(trtFile, "wb") as f:
        f.write(engineString)
    print("Succeeded building %s!" % trtFile)
    os.system("trtexec --loadEngine=%s --verbose --useCudaGraph --noDataTransfers" % trtFile)

run(1)
run(2)
run(4)
run(8)
run(16)
run(32)
run(64)
run(128)
run(256)
run(512)
run(1024)
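# A rough back-of-envelope sketch (an assumption added here, not from the original sample):
# the per-sample FLOPs of this MLP are fixed, so larger batches amortize launch and
# memory-bound overheads, and throughput should rise until the GPU is compute-saturated.
nK, nN = 256, 2048
flopsPerSample = nLoop * 2 * (2 * nK * nN)  # two GEMMs (up and down projection) per loop iteration
print("~%.1f MFLOPs per sample" % (flopsPerSample / 1e6))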
trt-samples-for-hackathon-cn-master
cookbook/09-BestPractice/IncreaseBatchSize/main.py
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import os
from collections import OrderedDict
from time import time_ns

import numpy as np
import onnx
import onnx_graphsurgeon as gs
import tensorrt as trt
from cuda import cudart

nLoop = 10
nWarm = 10
nTest = 100
onnxFile = "model.onnx"
np.random.seed(31193)

# Create an ONNX graph with ONNX GraphSurgeon ----------------------------------
tensor0 = gs.Variable("tensor0", np.float32, ["B", 1])

constant1x256 = gs.Constant("constant1x256", np.ascontiguousarray(np.random.rand(1, 256).reshape(1, 256).astype(np.float32) * 2 - 1))
constant256x2048 = gs.Constant("constant256x2048", np.ascontiguousarray(np.random.rand(256, 2048).reshape(256, 2048).astype(np.float32) * 2 - 1))
constant2048 = gs.Constant("constant2048", np.ascontiguousarray(np.random.rand(2048).astype(np.float32) * 2 - 1))
constant2048x256 = gs.Constant("constant2048x256", np.ascontiguousarray(np.random.rand(2048, 256).reshape(2048, 256).astype(np.float32) * 2 - 1))
constant256 = gs.Constant("constant256", np.ascontiguousarray(np.random.rand(256).astype(np.float32) * 2 - 1))
constantM1 = gs.Constant("constantM1", np.ascontiguousarray(np.array([-1], dtype=np.int64)))

graphNodeList = []

tensor1 = gs.Variable("tensor1", np.float32, None)
node1 = gs.Node("MatMul", "MMU1", inputs=[tensor0, constant1x256], outputs=[tensor1])
graphNodeList.append(node1)

tensorLoop = tensor1
for i in range(nLoop):
    tensor2 = gs.Variable("tensor%d-1" % i, np.float32, None)
    node2 = gs.Node("MatMul", "MMU-" + str(i), inputs=[tensorLoop, constant256x2048], outputs=[tensor2])
    graphNodeList.append(node2)

    tensor3 = gs.Variable("tensor%d-2" % i, dtype=np.float32, shape=None)
    node3 = gs.Node("Add", "AddU-" + str(i), inputs=[tensor2, constant2048], outputs=[tensor3])
    graphNodeList.append(node3)

    tensor4 = gs.Variable("tensor%d-3" % i, dtype=np.float32, shape=None)
    node4 = gs.Node("Relu", "ReLUU-" + str(i), inputs=[tensor3], outputs=[tensor4])
    graphNodeList.append(node4)

    tensor5 = gs.Variable("tensor%d-4" % i, dtype=np.float32, shape=None)
    node5 = gs.Node("MatMul", "MMD-" + str(i), inputs=[tensor4, constant2048x256], outputs=[tensor5])
    graphNodeList.append(node5)

    tensor6 = gs.Variable("tensor%d-5" % i, dtype=np.float32, shape=None)
    node6 = gs.Node("Add", "AddD-" + str(i), inputs=[tensor5, constant256], outputs=[tensor6])
    graphNodeList.append(node6)

    tensor7 = gs.Variable("tensor%d-6" % i, dtype=np.float32, shape=None)
    node7 = gs.Node("Relu", "ReLUD-" + str(i), inputs=[tensor6], outputs=[tensor7])
    graphNodeList.append(node7)

    tensorLoop = tensor7

tensor8 = gs.Variable("tensor8", dtype=np.float32, shape=None)
node8 = gs.Node("ReduceSum", "Reduce", inputs=[tensorLoop, constantM1], outputs=[tensor8], attrs=OrderedDict([("keepdims", 0)]))
graphNodeList.append(node8)

graph = gs.Graph(nodes=graphNodeList, inputs=[tensor0], outputs=[tensor8], opset=13)
onnx.save(gs.export_onnx(graph.cleanup().toposort()), onnxFile)
print("Succeeded building %s!" % (onnxFile))

def test(engine, context, nBatchSize):
    nProfile = engine.num_optimization_profiles
    if nProfile == 1:
        bindingBias = 0
    else:
        if nBatchSize <= 4:
            bindingBias = 0
            context.set_optimization_profile_async(0, 0)
            cudart.cudaStreamSynchronize(0)
        else:
            bindingBias = 2
            context.set_optimization_profile_async(1, 0)
            cudart.cudaStreamSynchronize(0)

    context.set_binding_shape(bindingBias, [nBatchSize, 1])

    nInput = np.sum([engine.binding_is_input(i) for i in range(engine.num_bindings)])
    nOutput = engine.num_bindings - nInput
    for i in range(nInput):
        print("Bind[%2d]:i[%2d]->" % (i, i), engine.get_binding_dtype(i), engine.get_binding_shape(i), context.get_binding_shape(i), engine.get_binding_name(i))
    for i in range(nInput, nInput + nOutput):
        print("Bind[%2d]:o[%2d]->" % (i, i - nInput), engine.get_binding_dtype(i), engine.get_binding_shape(i), context.get_binding_shape(i), engine.get_binding_name(i))
    nInput = nInput // nProfile
    nOutput = nOutput // nProfile

    data = np.random.rand(nBatchSize).reshape(nBatchSize, 1).astype(np.float32)
    bufferH = []
    bufferH.append(np.ascontiguousarray(data.reshape(-1)))
    for i in range(nInput, nInput + nOutput):
        bufferH.append(np.empty(context.get_binding_shape(bindingBias + i), dtype=trt.nptype(engine.get_binding_dtype(bindingBias + i))))
    bufferD = []
    for i in range(nInput + nOutput):
        bufferD.append(cudart.cudaMalloc(bufferH[i].nbytes)[1])
    # pad the binding list with null pointers for the bindings of the unused profile
    if nProfile == 1 or nBatchSize <= 4:
        bufferD = bufferD + [int(0), int(0)]
    else:
        bufferD = [int(0), int(0)] + bufferD

    for i in range(nInput):
        cudart.cudaMemcpy(bufferD[bindingBias + i], bufferH[i].ctypes.data, bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyHostToDevice)

    context.execute_v2(bufferD)

    for i in range(nInput, nInput + nOutput):
        cudart.cudaMemcpy(bufferH[i].ctypes.data, bufferD[bindingBias + i], bufferH[i].nbytes, cudart.cudaMemcpyKind.cudaMemcpyDeviceToHost)

    for i in range(nWarm):
        context.execute_v2(bufferD)
    t0 = time_ns()
    for i in range(nTest):
        context.execute_v2(bufferD)
    t1 = time_ns()
    print("+---- BatchSize=%2d: %.4fms\n" % (nBatchSize, (t1 - t0) / 1e6 / nTest))

    if nProfile == 1 or nBatchSize <= 4:
        bufferD = bufferD[:2]
    else:
        bufferD = bufferD[-2:]
    for b in bufferD:
        cudart.cudaFree(b)

def run(nProfile):
    logger = trt.Logger(trt.Logger.ERROR)
    builder = trt.Builder(logger)
    network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
    config = builder.create_builder_config()
    parser = trt.OnnxParser(network, logger)
    with open(onnxFile, "rb") as model:
        parser.parse(model.read())

    if nProfile == 1:
        profile = builder.create_optimization_profile()
        inputT0 = network.get_input(0)
        inputT0.shape = [-1, 1]
        profile.set_shape(inputT0.name, [1, 1], [510, 1], [512, 1])
        config.add_optimization_profile(profile)
    else:
        profile0 = builder.create_optimization_profile()
        inputT0 = network.get_input(0)
        inputT0.shape = [-1, 1]
        profile0.set_shape(inputT0.name, (1, 1), (4, 1), (4, 1))
        config.add_optimization_profile(profile0)
        profile1 = builder.create_optimization_profile()
        inputT0 = network.get_input(0)
        inputT0.shape = [-1, 1]
        profile1.set_shape(inputT0.name, (510, 1), (510, 1), (512, 1))
        config.add_optimization_profile(profile1)

    engineString = builder.build_serialized_network(network, config)
    trtFile = onnxFile.split(".")[0] + "-%d.plan" % nProfile
    with open(trtFile, "wb") as f:
        f.write(engineString)
    print("Succeeded building %s!" % (trtFile))

    engine = trt.Runtime(logger).deserialize_cuda_engine(engineString)
    context = engine.create_execution_context()

    # Multiple optimization profiles are not supported by trtexec in TensorRT 8.5, so we test with this script instead
    test(engine, context, 1)
    test(engine, context, 4)
    test(engine, context, 510)
    test(engine, context, 512)

run(1)
run(2)
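# A hedged sketch (assumption: the name-based tensor API introduced in TensorRT 8.5; the test()
# above uses the older binding API): selecting a profile by batch size would look roughly like this.
# The helper name select_profile is hypothetical.
def select_profile(engine, context, nBatchSize, stream=0):
    iProfile = 0 if nBatchSize <= 4 else 1  # small batches go to profile 0, large ones to profile 1
    context.set_optimization_profile_async(iProfile, stream)
    cudart.cudaStreamSynchronize(stream)
    name = engine.get_tensor_name(0)  # assumes tensor 0 is the input
    context.set_input_shape(name, [nBatchSize, 1])
    return iProfile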
trt-samples-for-hackathon-cn-master
cookbook/09-BestPractice/UsingMultiOptimizationProfile/main.py
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import os
from collections import OrderedDict

import numpy as np
import onnx
import onnx_graphsurgeon as gs
import tensorrt as trt

nLoop = 10
np.random.seed(31193)

def run(nM, nK, nN, bUseScriptToBuild):
    tensor0 = gs.Variable("tensor0", np.float32, [nM, 1])

    constant1xK = gs.Constant("constant1xK", np.ascontiguousarray(np.random.rand(1, nK).reshape(1, nK).astype(np.float32) * 2 - 1))
    constantKxN = gs.Constant("constantKxN", np.ascontiguousarray(np.random.rand(nK, nN).reshape(nK, nN).astype(np.float32) * 2 - 1))
    constantN = gs.Constant("constantN", np.ascontiguousarray(np.random.rand(nN).astype(np.float32) * 2 - 1))
    constantNxK = gs.Constant("constantNxK", np.ascontiguousarray(np.random.rand(nN, nK).reshape(nN, nK).astype(np.float32) * 2 - 1))
    constantK = gs.Constant("constantK", np.ascontiguousarray(np.random.rand(nK).astype(np.float32) * 2 - 1))
    constantM1 = gs.Constant("constantM1", np.ascontiguousarray(np.array([-1], dtype=np.int64)))

    graphNodeList = []

    tensor1 = gs.Variable("tensor1", np.float32, None)
    node1 = gs.Node("MatMul", "MMU1", inputs=[tensor0, constant1xK], outputs=[tensor1])
    graphNodeList.append(node1)

    tensorLoop = tensor1
    for i in range(nLoop):
        tensor2 = gs.Variable("tensor%d-1" % i, np.float32, None)
        node2 = gs.Node("MatMul", "MMU-" + str(i), inputs=[tensorLoop, constantKxN], outputs=[tensor2])
        graphNodeList.append(node2)

        tensor3 = gs.Variable("tensor%d-2" % i, dtype=np.float32, shape=None)
        node3 = gs.Node("Add", "AddU-" + str(i), inputs=[tensor2, constantN], outputs=[tensor3])
        graphNodeList.append(node3)

        tensor4 = gs.Variable("tensor%d-3" % i, dtype=np.float32, shape=None)
        node4 = gs.Node("Relu", "ReLUU-" + str(i), inputs=[tensor3], outputs=[tensor4])
        graphNodeList.append(node4)

        tensor5 = gs.Variable("tensor%d-4" % i, dtype=np.float32, shape=None)
        node5 = gs.Node("MatMul", "MMD-" + str(i), inputs=[tensor4, constantNxK], outputs=[tensor5])
        graphNodeList.append(node5)

        tensor6 = gs.Variable("tensor%d-5" % i, dtype=np.float32, shape=None)
        node6 = gs.Node("Add", "AddD-" + str(i), inputs=[tensor5, constantK], outputs=[tensor6])
        graphNodeList.append(node6)

        tensor7 = gs.Variable("tensor%d-6" % i, dtype=np.float32, shape=None)
        node7 = gs.Node("Relu", "ReLUD-" + str(i), inputs=[tensor6], outputs=[tensor7])
        graphNodeList.append(node7)

        tensorLoop = tensor7

    tensor8 = gs.Variable("tensor8", dtype=np.float32, shape=None)
    node8 = gs.Node("ReduceSum", "Reduce", inputs=[tensorLoop, constantM1], outputs=[tensor8], attrs=OrderedDict([("keepdims", 0)]))
    graphNodeList.append(node8)

    graph = gs.Graph(nodes=graphNodeList, inputs=[tensor0], outputs=[tensor8], opset=13)

    onnxFile = "model-%d-%d-%d.onnx" % (nM, nK, nN)
    onnx.save(gs.export_onnx(graph.cleanup().toposort()), onnxFile)
    print("Succeeded building %s!" % (onnxFile))

    if bUseScriptToBuild:
        logger = trt.Logger(trt.Logger.VERBOSE)
        builder = trt.Builder(logger)
        network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
        config = builder.create_builder_config()
        config.max_workspace_size = 22 << 30
        parser = trt.OnnxParser(network, logger)
        with open(onnxFile, "rb") as model:
            parser.parse(model.read())

        engineString = builder.build_serialized_network(network, config)
        trtFile = onnxFile.split(".")[0] + ".plan"
        with open(trtFile, "wb") as f:
            f.write(engineString)
        print("Succeeded building %s!" % (trtFile))
        os.system("trtexec --loadEngine=%s --useCudaGraph --noDataTransfers --fp16" % trtFile)
    else:
        os.system("trtexec --onnx=%s --useCudaGraph --noDataTransfers --fp16" % onnxFile)

run(32, 256, 2048, True)
run(31, 256, 2048, True)  # nM -> nM-1
run(32, 255, 2048, True)  # nK -> nK-1
run(32, 256, 2047, True)  # nN -> nN-1
run(32, 256, 2048, False)
run(31, 256, 2048, False)  # nM -> nM-1
run(32, 255, 2048, False)  # nK -> nK-1
run(32, 256, 2047, False)  # nN -> nN-1
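# A tiny sketch (hypothetical helper, not in the original sample): padding GEMM dimensions up to a
# friendly multiple often recovers the performance lost by the off-by-one sizes tested above.
def align_up(n, multiple=32):
    # round n up to the next multiple, e.g. align_up(31) == 32, align_up(2047) == 2048
    return (n + multiple - 1) // multiple * multiple

assert align_up(31) == 32 and align_up(255, 8) == 256 and align_up(2047) == 2048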
trt-samples-for-hackathon-cn-master
cookbook/09-BestPractice/AlignSize/main.py
#
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import os
from collections import OrderedDict

import numpy as np
import onnx
import onnx_graphsurgeon as gs
import tensorrt as trt

onnxFile0 = "model-0.onnx-backup"
onnxFile1 = "model-1.onnx"

if True:  # model binding parameters, do not change
    t0 = 19
    t1 = 256
    t2 = 256
    nBS = 16
    nSL = 64

"""
# Extract the subgraph from the WeNet encoder; it should not be run in this example. TODO: rewrite this part with ONNX
onnxFileS = "./encoder.onnx"
graph = gs.import_onnx(onnx.load(onnxFileS))
graph.outputs = []
for node in graph.nodes:
    if node.op == "Relu" and node.name == "Relu_38":
        node.outputs[0].name = "inputT0"
        node.outputs[0].shape = ["B", t1, "t4", t0]
        graph.inputs = [node.outputs[0]]
    if node.op == "Add" and node.name == "Add_62":
        graph.outputs = [node.outputs[0]]
graph.cleanup()
onnx.save(gs.export_onnx(graph), onnxFile0)
"""

graph = gs.import_onnx(onnx.load(onnxFile0))

for node in graph.nodes:
    if node.op == "MatMul" and node.name == "MatMul_61":
        convKernel = node.inputs[1].values.transpose(1, 0).reshape(256, t1, 1, t0).astype(np.float32)
        convKernelV = gs.Constant("ConvKernelV", np.ascontiguousarray(convKernel))
        continue
    if node.op == "Add" and node.name == "Add_62":
        convBias = node.inputs[0].values
        convBiasV = gs.Constant("ConvBiasV", np.ascontiguousarray(convBias))
        continue

convV = gs.Variable("ConvV", np.dtype(np.float32), ["B", t1, "t4", 1])
convN = gs.Node("Conv", "ConvN", inputs=[graph.inputs[0], convKernelV, convBiasV], outputs=[convV])
convN.attrs = OrderedDict([
    ("dilations", [1, 1]),
    ("kernel_shape", [1, t0]),
    ("pads", [0, 0, 0, 0]),
    ("strides", [1, 1]),
])
graph.nodes.append(convN)

constant3 = gs.Constant("constant3", np.ascontiguousarray(np.array([3], dtype=np.int64)))

squeezeV = gs.Variable("SqueezeV", np.dtype(np.float32), ["B", t2, "t4"])
squeezeN = gs.Node("Squeeze", "SqueezeN", inputs=[convV, constant3], outputs=[squeezeV])
graph.nodes.append(squeezeN)

transposeV = gs.Variable("TransposeV", np.dtype(np.float32), ["B", "t4", t2])
transposeN = gs.Node("Transpose", "TransposeN", inputs=[squeezeV], outputs=[transposeV], attrs=OrderedDict([("perm", [0, 2, 1])]))
graph.nodes.append(transposeN)

graph.outputs = [transposeV]

graph.cleanup()
onnx.save(gs.export_onnx(graph), onnxFile1)

def run(onnxFile):
    logger = trt.Logger(trt.Logger.VERBOSE)
    builder = trt.Builder(logger)
    network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
    profile = builder.create_optimization_profile()
    config = builder.create_builder_config()
    parser = trt.OnnxParser(network, logger)
    with open(onnxFile, "rb") as model:
        parser.parse(model.read())

    inputT0 = network.get_input(0)
    inputT0.shape = [-1, t1, -1, t0]
    profile.set_shape(inputT0.name, [1, t1, 1, t0], [nBS, t1, nSL, t0], [nBS, t1, nSL, t0])
    config.add_optimization_profile(profile)

    engineString = builder.build_serialized_network(network, config)
    trtFile = onnxFile.split(".")[0] + ".plan"
    with open(trtFile, "wb") as f:
        f.write(engineString)
    print("Succeeded building %s!" % (trtFile))
    os.system("trtexec --loadEngine=%s --verbose --useCudaGraph --noDataTransfers --shapes=inputTensor:%dx%dx%dx%d" % (trtFile, nBS, t1, nSL, t0))

run(onnxFile0)
run(onnxFile1)
trt-samples-for-hackathon-cn-master
cookbook/09-BestPractice/ConvertTranposeMultiplicationToConvolution/main.py