| python_code | repo_name | file_path |
---|---|---|
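Each record below pairs a file's full Python source (python_code) with its repo_name and file_path, mirroring the three columns in the header above. As a minimal sketch (not part of the original dump), this is how such a dump could be iterated with the Hugging Face datasets library; the dataset identifier used here is hypothetical and the column names are assumed to match the header:

# Minimal sketch: iterate over a code-dump dataset with columns
# python_code / repo_name / file_path. The dataset identifier below
# is hypothetical; substitute the actual dump you are working with.
from datasets import load_dataset

ds = load_dataset("example-org/cutlass-python-tests", split="train")  # hypothetical ID

for row in ds:
    source = row["python_code"]  # full file contents as one string
    print(f'{row["repo_name"]}:{row["file_path"]} -> {len(source)} chars')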
#################################################################################################
#
# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
# test/unit/conv/device/conv2d_dgrad_implicit_gemm_f32nhwc_f32nhwc_f32nhwc_simt_f32_sm80.cu
import cutlass.backend
from cutlass.backend.conv2d_operation import *
from cutlass.backend import *
from cutlass.backend.test import *
from cutlass.backend.utils.device import device_cc
import unittest
@unittest.skipIf(device_cc() < 80, "Device compute capability is insufficient for SM80 tests.")
class Conv2dDgradImplicitGemmF32nhwcF32nhwcF32nhwcSimtF32SM80(unittest.TestCase):
def test_SM80_Device_Conv2d_Dgrad_Analytic_ImplicitGemm_f32nhwc_f32nhwc_f32nhwc_simt_f32(self):
math_inst = MathInstruction(
instruction_shape=[1, 1, 1],
element_a=cutlass_bindings.float32, element_b=cutlass_bindings.float32,
element_accumulator=cutlass_bindings.float32, opcode_class=cutlass_bindings.OpClass.Simt,
math_operation=MathOperation.multiply_add
)
A = TensorDescription(
element=math_inst.element_a,
layout=cutlass_bindings.TensorNHWC,
alignment=4)
B = TensorDescription(
element=math_inst.element_b,
layout=cutlass_bindings.TensorNHWC,
alignment=4)
C = TensorDescription(
element=cutlass_bindings.float32,
layout=cutlass_bindings.TensorNHWC,
alignment=1)
tile_description = TileDescription(
threadblock_shape=[128, 128, 8], stages=4,
warp_count=[4, 2, 1],
math_instruction=math_inst
)
epilogue_functor = LinearCombination(
C.element, C.alignment,
math_inst.element_accumulator, cutlass_bindings.float32)
operation = Conv2dOperation(
conv_kind=cutlass_bindings.conv.Operator.dgrad, iterator_algorithm=cutlass_bindings.conv.IteratorAlgorithm.analytic,
arch=80, tile_description=tile_description, A=A, B=B, C=C,
stride_support=StrideSupport.Unity,
epilogue_functor=epilogue_functor,
swizzling_functor=cutlass_bindings.IdentitySwizzle1
)
self.assertTrue(test_all_conv2d(operation))
def test_SM80_Device_Conv2d_Dgrad_Optimized_ImplicitGemm_f32nhwc_f32nhwc_f32nhwc_simt_f32(self):
math_inst = MathInstruction(
instruction_shape=[1, 1, 1],
element_a=cutlass_bindings.float32, element_b=cutlass_bindings.float32,
element_accumulator=cutlass_bindings.float32, opcode_class=cutlass_bindings.OpClass.Simt,
math_operation=MathOperation.multiply_add
)
A = TensorDescription(
element=math_inst.element_a,
layout=cutlass_bindings.TensorNHWC,
alignment=4)
B = TensorDescription(
element=math_inst.element_b,
layout=cutlass_bindings.TensorNHWC,
alignment=4)
C = TensorDescription(
element=cutlass_bindings.float32,
layout=cutlass_bindings.TensorNHWC,
alignment=1)
tile_description = TileDescription(
threadblock_shape=[128, 128, 8], stages=4,
warp_count=[2, 4, 1],
math_instruction=math_inst
)
epilogue_functor = LinearCombination(
C.element, C.alignment,
math_inst.element_accumulator, cutlass_bindings.float32)
operation = Conv2dOperation(
conv_kind=cutlass_bindings.conv.Operator.dgrad, iterator_algorithm=cutlass_bindings.conv.IteratorAlgorithm.optimized,
arch=80, tile_description=tile_description, A=A, B=B, C=C,
stride_support=StrideSupport.Unity,
epilogue_functor=epilogue_functor,
swizzling_functor=cutlass_bindings.IdentitySwizzle1
)
self.assertTrue(test_all_conv2d(operation))
if __name__ == '__main__':
cutlass.backend.get_memory_pool(2**26, 2**26)
unittest.main()
| cutlass-main | test/python/backend/conv/conv2d_dgrad_implicit_gemm_f32nhwc_f32nhwc_f32nhwc_simt_f32_sm80.py |
#################################################################################################
#
# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
# test/unit/conv/device/conv2d_fprop_implicit_gemm_f32nhwc_f32nhwc_f32nhwc_simt_f32_sm80.cu
import cutlass.backend
from cutlass.backend.conv2d_operation import *
from cutlass.backend import *
from cutlass.backend.test import *
from cutlass.backend.utils.device import device_cc
import unittest
@unittest.skipIf(device_cc() < 80, "Device compute capability is insufficient for SM80 tests.")
class Conv2dFpropImplicitGemmF32nhwcF32nhwcF32nhwcSimtF32SM80(unittest.TestCase):
def test_SM80_Device_Conv2d_Fprop_Analytic_ImplicitGemm_f32nhwc_f32nhwc_f32nhwc_simt_f32(self):
math_inst = MathInstruction(
instruction_shape=[1, 1, 1],
element_a=cutlass_bindings.float32, element_b=cutlass_bindings.float32,
element_accumulator=cutlass_bindings.float32, opcode_class=cutlass_bindings.OpClass.Simt,
math_operation=MathOperation.multiply_add
)
A = TensorDescription(
element=math_inst.element_a,
layout=cutlass_bindings.TensorNHWC,
alignment=4)
B = TensorDescription(
element=math_inst.element_b,
layout=cutlass_bindings.TensorNHWC,
alignment=4)
C = TensorDescription(
element=cutlass_bindings.float32,
layout=cutlass_bindings.TensorNHWC,
alignment=1)
tile_description = TileDescription(
threadblock_shape=[128, 128, 8], stages=4,
warp_count=[4, 2, 1],
math_instruction=math_inst
)
epilogue_functor = LinearCombination(
C.element, C.alignment,
math_inst.element_accumulator, cutlass_bindings.float32)
operation = Conv2dOperation(
conv_kind=cutlass_bindings.conv.Operator.fprop, iterator_algorithm=cutlass_bindings.conv.IteratorAlgorithm.analytic,
arch=80, tile_description=tile_description, A=A, B=B, C=C,
stride_support=StrideSupport.Strided,
epilogue_functor=epilogue_functor,
swizzling_functor=cutlass_bindings.IdentitySwizzle2
)
self.assertTrue(test_all_conv2d(operation))
def test_SM80_Device_Conv2d_Fprop_Optimized_ImplicitGemm_f32nhwc_f32nhwc_f32nhwc_simt_f32(self):
math_inst = MathInstruction(
instruction_shape=[1, 1, 1],
element_a=cutlass_bindings.float32, element_b=cutlass_bindings.float32,
element_accumulator=cutlass_bindings.float32, opcode_class=cutlass_bindings.OpClass.Simt,
math_operation=MathOperation.multiply_add
)
A = TensorDescription(
element=math_inst.element_a,
layout=cutlass_bindings.TensorNHWC,
alignment=4)
B = TensorDescription(
element=math_inst.element_b,
layout=cutlass_bindings.TensorNHWC,
alignment=4)
C = TensorDescription(
element=cutlass_bindings.float32,
layout=cutlass_bindings.TensorNHWC,
alignment=1)
tile_description = TileDescription(
threadblock_shape=[128, 128, 8], stages=4,
warp_count=[2, 4, 1],
math_instruction=math_inst
)
epilogue_functor = LinearCombination(
C.element, C.alignment,
math_inst.element_accumulator, cutlass_bindings.float32)
operation = Conv2dOperation(
conv_kind=cutlass_bindings.conv.Operator.fprop, iterator_algorithm=cutlass_bindings.conv.IteratorAlgorithm.optimized,
arch=80, tile_description=tile_description, A=A, B=B, C=C,
stride_support=StrideSupport.Strided,
epilogue_functor=epilogue_functor,
swizzling_functor=cutlass_bindings.IdentitySwizzle1
)
self.assertTrue(test_all_conv2d(operation))
if __name__ == '__main__':
cutlass.backend.get_memory_pool(2**26, 2**26)
unittest.main()
| cutlass-main | test/python/backend/conv/conv2d_fprop_implicit_gemm_f32nhwc_f32nhwc_f32nhwc_simt_f32_sm80.py |
| cutlass-main | test/python/backend/conv/__init__.py |
#################################################################################################
#
# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
# test/unit/conv/device/conv2d_wgrad_implicit_gemm_f16nhwc_f16nhwc_f16nhwc_tensor_op_f16_sm80.cu
import cutlass.backend
from cutlass.backend import *
from cutlass.backend.test import *
from cutlass.backend.utils.device import device_cc
import unittest
@unittest.skipIf(device_cc() < 80, "Device compute capability is insufficient for SM80 tests.")
class Conv2dWgradImplicitGemmF16nhwcF16nhwcF16nhwcTensorOpF16SM80(unittest.TestCase):
def test_Device_Conv2d_Wgrad_Analytic_ImplicitGemm_f16nhwc_f16nhwc_f16nhwc_tensor_op_f16(self):
math_inst = MathInstruction(
instruction_shape=[16, 8, 16],
element_a=cutlass_bindings.float16, element_b=cutlass_bindings.float16,
element_accumulator=cutlass_bindings.float16, opcode_class=cutlass_bindings.OpClass.TensorOp,
math_operation=MathOperation.multiply_add
)
A = TensorDescription(
element=math_inst.element_a,
layout=cutlass_bindings.TensorNHWC,
alignment=8)
B = TensorDescription(
element=math_inst.element_b,
layout=cutlass_bindings.TensorNHWC,
alignment=8)
C = TensorDescription(
element=cutlass_bindings.float16,
layout=cutlass_bindings.TensorNHWC,
alignment=8)
tile_description = TileDescription(
threadblock_shape=[128, 128, 64], stages=3,
warp_count=[2, 2, 1],
math_instruction=math_inst
)
epilogue_functor = LinearCombination(
C.element, C.alignment, math_inst.element_accumulator,
cutlass_bindings.float16
)
operation = Conv2dOperation(
conv_kind=cutlass_bindings.conv.Operator.wgrad, iterator_algorithm=cutlass_bindings.conv.IteratorAlgorithm.analytic,
arch=80, tile_description=tile_description, A=A, B=B, C=C,
stride_support=StrideSupport.Strided,
epilogue_functor=epilogue_functor,
swizzling_functor=cutlass_bindings.IdentitySwizzle1
)
self.assertTrue(test_all_conv2d(operation))
def test_Device_Conv2d_Wgrad_Optimized_ImplicitGemm_f16nhwc_f16nhwc_f16nhwc_tensor_op_f16(self):
math_inst = MathInstruction(
instruction_shape=[16, 8, 16],
element_a=cutlass_bindings.float16, element_b=cutlass_bindings.float16,
element_accumulator=cutlass_bindings.float16, opcode_class=cutlass_bindings.OpClass.TensorOp,
math_operation=MathOperation.multiply_add
)
A = TensorDescription(
element=math_inst.element_a,
layout=cutlass_bindings.TensorNHWC,
alignment=8)
B = TensorDescription(
element=math_inst.element_b,
layout=cutlass_bindings.TensorNHWC,
alignment=8)
C = TensorDescription(
element=cutlass_bindings.float16,
layout=cutlass_bindings.TensorNHWC,
alignment=8)
tile_description = TileDescription(
threadblock_shape=[128, 128, 64], stages=3,
warp_count=[2, 2, 1],
math_instruction=math_inst
)
epilogue_functor = LinearCombination(
C.element, C.alignment, math_inst.element_accumulator,
cutlass_bindings.float16
)
operation = Conv2dOperation(
conv_kind=cutlass_bindings.conv.Operator.wgrad, iterator_algorithm=cutlass_bindings.conv.IteratorAlgorithm.optimized,
arch=80, tile_description=tile_description, A=A, B=B, C=C,
stride_support=StrideSupport.Strided,
epilogue_functor=epilogue_functor,
swizzling_functor=cutlass_bindings.IdentitySwizzle1
)
self.assertTrue(test_all_conv2d(operation))
if __name__ == '__main__':
cutlass.backend.get_memory_pool(2**26, 2**26)
unittest.main()
| cutlass-main | test/python/backend/conv/conv2d_wgrad_implicit_gemm_f16nhwc_f16nhwc_f16nhwc_tensor_op_f16_sm80.py |
#################################################################################################
#
# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
# test/unit/conv/device/conv2d_fprop_fixed_channels_f16nhwc_f16nhwc_f16nhwc_tensor_op_f32_sm80.cu
import cutlass.backend
from cutlass.backend import *
from cutlass.backend.test import *
from cutlass.backend.utils.device import device_cc
import unittest
def conv2d_fixed_channel_problemsizes(channels):
problem_sizes = [
cutlass_bindings.conv.Conv2dProblemSize(
cutlass_bindings.Tensor4DCoord(1, 8, 8, channels),
cutlass_bindings.Tensor4DCoord(16, 3, 3, channels),
cutlass_bindings.Tensor4DCoord(1, 1, 1, 1),
cutlass_bindings.MatrixCoord(2, 2),
cutlass_bindings.MatrixCoord(1, 1),
cutlass_bindings.conv.Mode.cross_correlation,
1, 1
),
cutlass_bindings.conv.Conv2dProblemSize(
cutlass_bindings.Tensor4DCoord(1, 224, 224, channels),
cutlass_bindings.Tensor4DCoord(32, 7, 7, channels),
cutlass_bindings.Tensor4DCoord(1, 1, 1, 1),
cutlass_bindings.MatrixCoord(1, 1),
cutlass_bindings.MatrixCoord(1, 1),
cutlass_bindings.conv.Mode.cross_correlation,
1, 1
),
cutlass_bindings.conv.Conv2dProblemSize(
cutlass_bindings.Tensor4DCoord(1, 224, 224, channels),
cutlass_bindings.Tensor4DCoord(64, 7, 7, channels),
cutlass_bindings.Tensor4DCoord(1, 1, 1, 1),
cutlass_bindings.MatrixCoord(2, 2),
cutlass_bindings.MatrixCoord(1, 1),
cutlass_bindings.conv.Mode.cross_correlation,
1, 1
),
cutlass_bindings.conv.Conv2dProblemSize(
cutlass_bindings.Tensor4DCoord(1, 224, 224, channels),
cutlass_bindings.Tensor4DCoord(64, 5, 5, channels),
cutlass_bindings.Tensor4DCoord(1, 1, 1, 1),
cutlass_bindings.MatrixCoord(1, 1),
cutlass_bindings.MatrixCoord(1, 1),
cutlass_bindings.conv.Mode.cross_correlation,
1, 1
),
cutlass_bindings.conv.Conv2dProblemSize(
cutlass_bindings.Tensor4DCoord(1, 224, 224, channels),
cutlass_bindings.Tensor4DCoord(64, 5, 5, channels),
cutlass_bindings.Tensor4DCoord(1, 1, 1, 1),
cutlass_bindings.MatrixCoord(2, 2),
cutlass_bindings.MatrixCoord(1, 1),
cutlass_bindings.conv.Mode.cross_correlation,
1, 1
),
]
return problem_sizes
@unittest.skipIf(device_cc() < 80, "Device compute capability is insufficient for SM80 tests.")
class Conv2dFpropFixedChannelsF16nhwcF16nhwcF16nhwcTensorOpF32SM80(unittest.TestCase):
def test_SM80_Device_Conv2d_Fprop_Fixed_Channels_ImplicitGemm_f16nhwc_f16nhwc_f16nhwc_tensor_op_f32_channels_8(self):
math_inst = MathInstruction(
instruction_shape=[16, 8, 16],
element_a=cutlass_bindings.float16, element_b=cutlass_bindings.float16,
element_accumulator=cutlass_bindings.float32, opcode_class=cutlass_bindings.OpClass.TensorOp,
math_operation=MathOperation.multiply_add
)
A = TensorDescription(
element=math_inst.element_a,
layout=cutlass_bindings.TensorNHWC,
alignment=8)
B = TensorDescription(
element=math_inst.element_b,
layout=cutlass_bindings.TensorNHWC,
alignment=8)
C = TensorDescription(
element=cutlass_bindings.float16,
layout=cutlass_bindings.TensorNHWC,
alignment=8)
tile_description = TileDescription(
threadblock_shape=[128, 128, 64], stages=3,
warp_count=[2, 2, 1],
math_instruction=math_inst
)
epilogue_functor = LinearCombination(
C.element, C.alignment,
math_inst.element_accumulator, cutlass_bindings.float32)
operation = Conv2dOperation(
conv_kind=cutlass_bindings.conv.Operator.fprop, iterator_algorithm=cutlass_bindings.conv.IteratorAlgorithm.fixed_channels,
arch=80, tile_description=tile_description, A=A, B=B, C=C,
stride_support=StrideSupport.Strided,
epilogue_functor=epilogue_functor,
swizzling_functor=cutlass_bindings.IdentitySwizzle1
)
self.assertTrue(test_all_conv2d(operation, conv2d_fixed_channel_problemsizes(8)))
def test_SM80_Device_Conv2d_Fprop_Fixed_Channels_ImplicitGemm_f16nhwc_f16nhwc_f16nhwc_tensor_op_f32_channels_4(self):
math_inst = MathInstruction(
instruction_shape=[16, 8, 16],
element_a=cutlass_bindings.float16, element_b=cutlass_bindings.float16,
element_accumulator=cutlass_bindings.float32, opcode_class=cutlass_bindings.OpClass.TensorOp,
math_operation=MathOperation.multiply_add
)
A = TensorDescription(
element=math_inst.element_a,
layout=cutlass_bindings.TensorNHWC,
alignment=4)
B = TensorDescription(
element=math_inst.element_b,
layout=cutlass_bindings.TensorNHWC,
alignment=4)
C = TensorDescription(
element=cutlass_bindings.float16,
layout=cutlass_bindings.TensorNHWC,
alignment=8)
tile_description = TileDescription(
threadblock_shape=[128, 128, 64], stages=3,
warp_count=[2, 2, 1],
math_instruction=math_inst
)
epilogue_functor = LinearCombination(
C.element, C.alignment,
math_inst.element_accumulator, cutlass_bindings.float32)
operation = Conv2dOperation(
conv_kind=cutlass_bindings.conv.Operator.fprop, iterator_algorithm=cutlass_bindings.conv.IteratorAlgorithm.fixed_channels,
arch=80, tile_description=tile_description, A=A, B=B, C=C,
stride_support=StrideSupport.Strided,
epilogue_functor=epilogue_functor,
swizzling_functor=cutlass_bindings.IdentitySwizzle1
)
self.assertTrue(test_all_conv2d(operation, conv2d_fixed_channel_problemsizes(4)))
def test_SM80_Device_Conv2d_Fprop_Fixed_Channels_ImplicitGemm_f16nhwc_f16nhwc_f16nhwc_tensor_op_f32_channels_2(self):
math_inst = MathInstruction(
instruction_shape=[16, 8, 16],
element_a=cutlass_bindings.float16, element_b=cutlass_bindings.float16,
element_accumulator=cutlass_bindings.float32, opcode_class=cutlass_bindings.OpClass.TensorOp,
math_operation=MathOperation.multiply_add
)
A = TensorDescription(
element=math_inst.element_a,
layout=cutlass_bindings.TensorNHWC,
alignment=2)
B = TensorDescription(
element=math_inst.element_b,
layout=cutlass_bindings.TensorNHWC,
alignment=2)
C = TensorDescription(
element=cutlass_bindings.float16,
layout=cutlass_bindings.TensorNHWC,
alignment=8)
tile_description = TileDescription(
threadblock_shape=[128, 128, 64], stages=3,
warp_count=[2, 2, 1],
math_instruction=math_inst
)
epilogue_functor = LinearCombination(
C.element, C.alignment,
math_inst.element_accumulator, cutlass_bindings.float32)
operation = Conv2dOperation(
conv_kind=cutlass_bindings.conv.Operator.fprop, iterator_algorithm=cutlass_bindings.conv.IteratorAlgorithm.fixed_channels,
arch=80, tile_description=tile_description, A=A, B=B, C=C,
stride_support=StrideSupport.Strided,
epilogue_functor=epilogue_functor,
swizzling_functor=cutlass_bindings.IdentitySwizzle1
)
self.assertTrue(test_all_conv2d(operation, conv2d_fixed_channel_problemsizes(2)))
if __name__ == '__main__':
cutlass.backend.get_memory_pool(2**26, 2**26)
unittest.main()
| cutlass-main | test/python/backend/conv/conv2d_fprop_fixed_channels_f16nhwc_f16nhwc_f16nhwc_tensor_op_f32_sm80.py |
#################################################################################################
#
# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
# test/unit/conv/device/conv2d_fprop_implicit_gemm_tf32nhwc_tf32nhwc_f32nhwc_tensor_op_f32_sm80.cu
import cutlass.backend
from cutlass.backend import *
from cutlass.backend.test import *
from cutlass.backend.utils.device import device_cc
import unittest
@unittest.skipIf(device_cc() < 80, "Device compute capability is insufficient for SM80 tests.")
class Conv2dFpropImplicitGemmTF32nhwcTF32nhwcF32nhwcTensorOpF32SM80(unittest.TestCase):
def test_SM80_Device_Conv2d_Fprop_Analytic_ImplicitGemm_tf32nhwc_tf32nhwc_f32nhwc_tensor_op_f32(self):
math_inst = MathInstruction(
instruction_shape=[16, 8, 8],
element_a=cutlass_bindings.float32, element_b=cutlass_bindings.float32,
element_accumulator=cutlass_bindings.float32, opcode_class=cutlass_bindings.OpClass.TensorOp,
math_operation=MathOperation.multiply_add
)
A = TensorDescription(
element=math_inst.element_a,
layout=cutlass_bindings.TensorNHWC,
alignment=4)
B = TensorDescription(
element=math_inst.element_b,
layout=cutlass_bindings.TensorNHWC,
alignment=4)
C = TensorDescription(
element=cutlass_bindings.float32,
layout=cutlass_bindings.TensorNHWC,
alignment=8)
tile_description = TileDescription(
threadblock_shape=[128, 128, 16], stages=3,
warp_count=[2, 2, 1],
math_instruction=math_inst
)
epilogue_functor = LinearCombination(
C.element, C.alignment,
math_inst.element_accumulator, cutlass_bindings.float32)
operation = Conv2dOperation(
conv_kind=cutlass_bindings.conv.Operator.fprop, iterator_algorithm=cutlass_bindings.conv.IteratorAlgorithm.analytic,
arch=80, tile_description=tile_description, A=A, B=B, C=C,
stride_support=StrideSupport.Strided,
epilogue_functor=epilogue_functor,
swizzling_functor=cutlass_bindings.IdentitySwizzle1
)
self.assertTrue(test_all_conv2d(operation))
def test_SM80_Device_Conv2d_Fprop_Optimized_ImplicitGemm_tf32nhwc_tf32nhwc_f32nhwc_tensor_op_f32_align2(self):
math_inst = MathInstruction(
instruction_shape=[16, 8, 8],
element_a=cutlass_bindings.float32, element_b=cutlass_bindings.float32,
element_accumulator=cutlass_bindings.float32, opcode_class=cutlass_bindings.OpClass.TensorOp,
math_operation=MathOperation.multiply_add
)
A = TensorDescription(
element=math_inst.element_a,
layout=cutlass_bindings.TensorNHWC,
alignment=2)
B = TensorDescription(
element=math_inst.element_b,
layout=cutlass_bindings.TensorNHWC,
alignment=2)
C = TensorDescription(
element=cutlass_bindings.float32,
layout=cutlass_bindings.TensorNHWC,
alignment=8)
tile_description = TileDescription(
threadblock_shape=[128, 128, 16], stages=3,
warp_count=[2, 2, 1],
math_instruction=math_inst
)
epilogue_functor = LinearCombination(
C.element, C.alignment,
math_inst.element_accumulator, cutlass_bindings.float32)
operation = Conv2dOperation(
conv_kind=cutlass_bindings.conv.Operator.fprop, iterator_algorithm=cutlass_bindings.conv.IteratorAlgorithm.optimized,
arch=80, tile_description=tile_description, A=A, B=B, C=C,
stride_support=StrideSupport.Strided,
epilogue_functor=epilogue_functor,
swizzling_functor=cutlass_bindings.IdentitySwizzle1
)
problem_sizes = [
cutlass_bindings.conv.Conv2dProblemSize(
cutlass_bindings.Tensor4DCoord(1, 4, 4, 12),
cutlass_bindings.Tensor4DCoord(8, 3, 3, 12),
cutlass_bindings.Tensor4DCoord(0, 0, 0, 0),
cutlass_bindings.MatrixCoord(3, 3),
cutlass_bindings.MatrixCoord(1, 1),
cutlass_bindings.conv.Mode.cross_correlation,
1, 1
)
]
self.assertTrue(test_all_conv2d(operation, problem_sizes))
if __name__ == '__main__':
cutlass.backend.get_memory_pool(2**26, 2**26)
unittest.main()
| cutlass-main | test/python/backend/conv/conv2d_fprop_implicit_gemm_tf32nhwc_tf32nhwc_f32nhwc_tensor_op_f32_sm80.py |
#################################################################################################
#
# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
# test/unit/conv/device/conv2d_fprop_implicit_gemm_f16nhwc_f16nhwc_f32nhwc_tensor_op_f32_sm80.cu
import cutlass.backend
from cutlass.backend import *
from cutlass.backend.test import *
from cutlass.backend.utils.device import device_cc
import unittest
@unittest.skipIf(device_cc() < 80, "Device compute capability is insufficient for SM80 tests.")
class Conv2dFpropImplicitGemmF16nhwcF16nhwcF32nhwcTensorOpF32SM80(unittest.TestCase):
def test_SM80_Device_Conv2d_Fprop_Analytic_ImplicitGemm_f16nhwc_f16nhwc_f32nhwc_tensor_op_f32(self):
math_inst = MathInstruction(
instruction_shape=[16, 8, 16],
element_a=cutlass_bindings.float16, element_b=cutlass_bindings.float16,
element_accumulator=cutlass_bindings.float32, opcode_class=cutlass_bindings.OpClass.TensorOp,
math_operation=MathOperation.multiply_add
)
A = TensorDescription(
element=math_inst.element_a,
layout=cutlass_bindings.TensorNHWC,
alignment=8)
B = TensorDescription(
element=math_inst.element_b,
layout=cutlass_bindings.TensorNHWC,
alignment=8)
C = TensorDescription(
element=cutlass_bindings.float32,
layout=cutlass_bindings.TensorNHWC,
alignment=4)
tile_description = TileDescription(
threadblock_shape=[128, 128, 64], stages=3,
warp_count=[2, 2, 1],
math_instruction=math_inst
)
epilogue_functor = LinearCombination(
C.element, C.alignment,
math_inst.element_accumulator, cutlass_bindings.float32)
operation = Conv2dOperation(
conv_kind=cutlass_bindings.conv.Operator.fprop, iterator_algorithm=cutlass_bindings.conv.IteratorAlgorithm.analytic,
arch=80, tile_description=tile_description, A=A, B=B, C=C,
stride_support=StrideSupport.Strided,
epilogue_functor=epilogue_functor,
swizzling_functor=cutlass_bindings.IdentitySwizzle1
)
self.assertTrue(test_all_conv2d(operation))
if __name__ == '__main__':
cutlass.backend.get_memory_pool(2**26, 2**26)
unittest.main()
| cutlass-main | test/python/backend/conv/conv2d_fprop_implicit_gemm_f16nhwc_f16nhwc_f32nhwc_tensor_op_f32_sm80.py |
#################################################################################################
#
# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
# test/unit/conv/device/conv2d_wgrad_implicit_gemm_f32nhwc_f32nhwc_f32nhwc_simt_f32_sm80.cu
import cutlass.backend
from cutlass.backend.conv2d_operation import *
from cutlass.backend import *
from cutlass.backend.test import *
from cutlass.backend.utils.device import device_cc
import unittest
@unittest.skipIf(device_cc() < 80, "Device compute capability is insufficient for SM80 tests.")
class Conv2dWgradImplicitGemmF32nhwcF32nhwcF32nhwcSimtF32SM80(unittest.TestCase):
def test_SM80_Device_Conv2d_Wgrad_Analytic_ImplicitGemm_f32nhwc_f32nhwc_f32nhwc_simt_f32(self):
math_inst = MathInstruction(
instruction_shape=[1, 1, 1],
element_a=cutlass_bindings.float32, element_b=cutlass_bindings.float32,
element_accumulator=cutlass_bindings.float32, opcode_class=cutlass_bindings.OpClass.Simt,
math_operation=MathOperation.multiply_add
)
A = TensorDescription(
element=math_inst.element_a,
layout=cutlass_bindings.TensorNHWC,
alignment=4)
B = TensorDescription(
element=math_inst.element_b,
layout=cutlass_bindings.TensorNHWC,
alignment=4)
C = TensorDescription(
element=cutlass_bindings.float32,
layout=cutlass_bindings.TensorNHWC,
alignment=1)
tile_description = TileDescription(
threadblock_shape=[128, 128, 8], stages=4,
warp_count=[2, 4, 1],
math_instruction=math_inst
)
epilogue_functor = LinearCombination(
C.element, C.alignment,
math_inst.element_accumulator, cutlass_bindings.float32)
operation = Conv2dOperation(
conv_kind=cutlass_bindings.conv.Operator.wgrad, iterator_algorithm=cutlass_bindings.conv.IteratorAlgorithm.analytic,
arch=80, tile_description=tile_description, A=A, B=B, C=C,
stride_support=StrideSupport.Strided,
epilogue_functor=epilogue_functor,
swizzling_functor=cutlass_bindings.IdentitySwizzle1
)
self.assertTrue(test_all_conv2d(operation))
def test_SM80_Device_Conv2d_Wgrad_Optimized_ImplicitGemm_f32nhwc_f32nhwc_f32nhwc_simt_f32(self):
math_inst = MathInstruction(
instruction_shape=[1, 1, 1],
element_a=cutlass_bindings.float32, element_b=cutlass_bindings.float32,
element_accumulator=cutlass_bindings.float32, opcode_class=cutlass_bindings.OpClass.Simt,
math_operation=MathOperation.multiply_add
)
A = TensorDescription(
element=math_inst.element_a,
layout=cutlass_bindings.TensorNHWC,
alignment=4)
B = TensorDescription(
element=math_inst.element_b,
layout=cutlass_bindings.TensorNHWC,
alignment=4)
C = TensorDescription(
element=cutlass_bindings.float32,
layout=cutlass_bindings.TensorNHWC,
alignment=1)
tile_description = TileDescription(
threadblock_shape=[128, 128, 8], stages=4,
warp_count=[2, 4, 1],
math_instruction=math_inst
)
epilogue_functor = LinearCombination(
C.element, C.alignment,
math_inst.element_accumulator, cutlass_bindings.float32)
operation = Conv2dOperation(
conv_kind=cutlass_bindings.conv.Operator.wgrad, iterator_algorithm=cutlass_bindings.conv.IteratorAlgorithm.optimized,
arch=80, tile_description=tile_description, A=A, B=B, C=C,
stride_support=StrideSupport.Strided,
epilogue_functor=epilogue_functor,
swizzling_functor=cutlass_bindings.IdentitySwizzle1
)
self.assertTrue(test_all_conv2d(operation))
if __name__ == '__main__':
cutlass.backend.get_memory_pool(2**26, 2**26)
unittest.main()
| cutlass-main | test/python/backend/conv/conv2d_wgrad_implicit_gemm_f32nhwc_f32nhwc_f32nhwc_simt_f32_sm80.py |
#################################################################################################
#
# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
import cutlass.backend
import unittest
from cutlass.backend.memory_manager import *
if __name__ == '__main__':
cutlass.backend.get_memory_pool(2**32, 2**32)
loader = unittest.TestLoader()
tests = loader.discover('./', 'conv2d_*.py')
testRunner = unittest.runner.TextTestRunner()
testRunner.run(tests)
| cutlass-main | test/python/backend/conv/run_all_tests.py |
#################################################################################################
#
# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
# test/unit/conv/device/conv2d_wgrad_implicit_gemm_tf32nhwc_tf32nhwc_f32nhwc_tensor_op_f32_sm80.cu
import cutlass.backend
from cutlass.backend import *
from cutlass.backend.test import *
from cutlass.backend.utils.device import device_cc
import unittest
@unittest.skipIf(device_cc() < 80, "Device compute capability is insufficient for SM80 tests.")
class Conv2dWgradImplicitGemmTF32nhwcTF32nhwcF32nhwcTensorOpF32SM80(unittest.TestCase):
def test_SM80_Device_Conv2d_Wgrad_Optimized_ImplicitGemm_tf32nhwc_tf32nhwc_f32nhwc_tensor_op_f32(self):
math_inst = MathInstruction(
instruction_shape=[16, 8, 8],
element_a=cutlass_bindings.float32, element_b=cutlass_bindings.float32,
element_accumulator=cutlass_bindings.float32, opcode_class=cutlass_bindings.OpClass.TensorOp,
math_operation=MathOperation.multiply_add
)
A = TensorDescription(
element=math_inst.element_a,
layout=cutlass_bindings.TensorNHWC,
alignment=4)
B = TensorDescription(
element=math_inst.element_b,
layout=cutlass_bindings.TensorNHWC,
alignment=4)
C = TensorDescription(
element=cutlass_bindings.float32,
layout=cutlass_bindings.TensorNHWC,
alignment=8)
tile_description = TileDescription(
threadblock_shape=[128, 128, 16], stages=3,
warp_count=[2, 2, 1],
math_instruction=math_inst
)
epilogue_functor = LinearCombination(
C.element, C.alignment,
math_inst.element_accumulator, cutlass_bindings.float32)
operation = Conv2dOperation(
conv_kind=cutlass_bindings.conv.Operator.wgrad, iterator_algorithm=cutlass_bindings.conv.IteratorAlgorithm.optimized,
arch=80, tile_description=tile_description, A=A, B=B, C=C,
stride_support=StrideSupport.Strided,
epilogue_functor=epilogue_functor,
swizzling_functor=cutlass_bindings.IdentitySwizzle1
)
self.assertTrue(test_all_conv2d(operation))
def test_SM80_Device_Conv2d_Wgrad_Optimized_ImplicitGemm_tf32nhwc_tf32nhwc_f32nhwc_tensor_op_f32_align1(self):
math_inst = MathInstruction(
instruction_shape=[16, 8, 8],
element_a=cutlass_bindings.float32, element_b=cutlass_bindings.float32,
element_accumulator=cutlass_bindings.float32, opcode_class=cutlass_bindings.OpClass.TensorOp,
math_operation=MathOperation.multiply_add
)
A = TensorDescription(
element=math_inst.element_a,
layout=cutlass_bindings.TensorNHWC,
alignment=1)
B = TensorDescription(
element=math_inst.element_b,
layout=cutlass_bindings.TensorNHWC,
alignment=1)
C = TensorDescription(
element=cutlass_bindings.float32,
layout=cutlass_bindings.TensorNHWC,
alignment=4)
tile_description = TileDescription(
threadblock_shape=[128, 128, 32], stages=3,
warp_count=[2, 2, 1],
math_instruction=math_inst
)
epilogue_functor = LinearCombination(
C.element, C.alignment,
math_inst.element_accumulator, cutlass_bindings.float32)
operation = Conv2dOperation(
conv_kind=cutlass_bindings.conv.Operator.wgrad, iterator_algorithm=cutlass_bindings.conv.IteratorAlgorithm.optimized,
arch=80, tile_description=tile_description, A=A, B=B, C=C,
stride_support=StrideSupport.Strided,
epilogue_functor=epilogue_functor,
swizzling_functor=cutlass_bindings.IdentitySwizzle1
)
problem_sizes = [
cutlass_bindings.conv.Conv2dProblemSize(
cutlass_bindings.Tensor4DCoord(1, 8, 8, 1),
cutlass_bindings.Tensor4DCoord(1, 3, 3, 1),
cutlass_bindings.Tensor4DCoord(1, 1, 1, 1),
cutlass_bindings.MatrixCoord(1, 1),
cutlass_bindings.MatrixCoord(1, 1),
cutlass_bindings.conv.Mode.cross_correlation,
1, 1
),
]
self.assertTrue(test_all_conv2d(operation, problem_sizes))
if __name__ == '__main__':
cutlass.backend.get_memory_pool(2**26, 2**26)
unittest.main()
| cutlass-main | test/python/backend/conv/conv2d_wgrad_implicit_gemm_tf32nhwc_tf32nhwc_f32nhwc_tensor_op_f32_sm80.py |
#################################################################################################
#
# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
# test/unit/conv/device/conv2d_fprop_implicit_gemm_f16nhwc_f16nhwc_f16nhwc_tensor_op_f16_sm80.cu
import cutlass.backend
from cutlass.backend import *
from cutlass.backend.test import *
from cutlass.backend.utils.device import device_cc
import unittest
@unittest.skipIf(device_cc() < 80, "Device compute capability is insufficient for SM80 tests.")
class Conv2dFpropImplicitGemmF16nhwcF16nhwcF16nhwcTensorOpF16SM80(unittest.TestCase):
def test_SM80_Device_Conv2d_Fprop_Analytic_ImplicitGemm_f16nhwc_f16nhwc_f16nhwc_tensor_op_f16(self):
math_inst = MathInstruction(
instruction_shape=[16, 8, 16],
element_a=cutlass_bindings.float16, element_b=cutlass_bindings.float16,
element_accumulator=cutlass_bindings.float16, opcode_class=cutlass_bindings.OpClass.TensorOp,
math_operation=MathOperation.multiply_add
)
A = TensorDescription(
element=math_inst.element_a,
layout=cutlass_bindings.TensorNHWC,
alignment=8)
B = TensorDescription(
element=math_inst.element_b,
layout=cutlass_bindings.TensorNHWC,
alignment=8)
C = TensorDescription(
element=cutlass_bindings.float16,
layout=cutlass_bindings.TensorNHWC,
alignment=8)
tile_description = TileDescription(
threadblock_shape=[128, 128, 64], stages=3,
warp_count=[2, 2, 1],
math_instruction=math_inst
)
epilogue_functor = LinearCombination(
C.element, C.alignment,
math_inst.element_accumulator, cutlass_bindings.float16)
operation = Conv2dOperation(
conv_kind=cutlass_bindings.conv.Operator.fprop, iterator_algorithm=cutlass_bindings.conv.IteratorAlgorithm.analytic,
arch=80, tile_description=tile_description, A=A, B=B, C=C,
stride_support=StrideSupport.Strided,
epilogue_functor=epilogue_functor,
swizzling_functor=cutlass_bindings.IdentitySwizzle1
)
self.assertTrue(test_all_conv2d(operation))
def test_SM80_Device_Conv2d_Fprop_Optimized_ImplicitGemm_f16nhwc_f16nhwc_f16nhwc_tensor_op_f16(self):
math_inst = MathInstruction(
instruction_shape=[16, 8, 16],
element_a=cutlass_bindings.float16, element_b=cutlass_bindings.float16,
element_accumulator=cutlass_bindings.float16, opcode_class=cutlass_bindings.OpClass.TensorOp,
math_operation=MathOperation.multiply_add
)
A = TensorDescription(
element=math_inst.element_a,
layout=cutlass_bindings.TensorNHWC,
alignment=8)
B = TensorDescription(
element=math_inst.element_b,
layout=cutlass_bindings.TensorNHWC,
alignment=8)
C = TensorDescription(
element=cutlass_bindings.float16,
layout=cutlass_bindings.TensorNHWC,
alignment=8)
tile_description = TileDescription(
threadblock_shape=[128, 128, 64], stages=3,
warp_count=[2, 2, 1],
math_instruction=math_inst
)
epilogue_functor = LinearCombination(
C.element, C.alignment,
math_inst.element_accumulator, cutlass_bindings.float16)
operation = Conv2dOperation(
conv_kind=cutlass_bindings.conv.Operator.fprop, iterator_algorithm=cutlass_bindings.conv.IteratorAlgorithm.optimized,
arch=80, tile_description=tile_description, A=A, B=B, C=C,
stride_support=StrideSupport.Strided,
epilogue_functor=epilogue_functor,
swizzling_functor=cutlass_bindings.IdentitySwizzle1
)
self.assertTrue(test_all_conv2d(operation))
def test_SM80_Device_Conv2d_Fprop_Analytic_ImplicitGemm_f16nhwc_f16nhwc_f16nhwc_tensor_op_f16_align2(self):
math_inst = MathInstruction(
instruction_shape=[16, 8, 16],
element_a=cutlass_bindings.float16, element_b=cutlass_bindings.float16,
element_accumulator=cutlass_bindings.float16, opcode_class=cutlass_bindings.OpClass.TensorOp,
math_operation=MathOperation.multiply_add
)
A = TensorDescription(
element=math_inst.element_a,
layout=cutlass_bindings.TensorNHWC,
alignment=2)
B = TensorDescription(
element=math_inst.element_b,
layout=cutlass_bindings.TensorNHWC,
alignment=2)
C = TensorDescription(
element=cutlass_bindings.float16,
layout=cutlass_bindings.TensorNHWC,
alignment=8)
tile_description = TileDescription(
threadblock_shape=[128, 128, 64], stages=3,
warp_count=[2, 2, 1],
math_instruction=math_inst
)
epilogue_functor = LinearCombination(
C.element, C.alignment,
math_inst.element_accumulator, cutlass_bindings.float16)
operation = Conv2dOperation(
conv_kind=cutlass_bindings.conv.Operator.fprop, iterator_algorithm=cutlass_bindings.conv.IteratorAlgorithm.analytic,
arch=80, tile_description=tile_description, A=A, B=B, C=C,
stride_support=StrideSupport.Strided,
epilogue_functor=epilogue_functor,
swizzling_functor=cutlass_bindings.IdentitySwizzle1
)
problem_sizes = [
cutlass_bindings.conv.Conv2dProblemSize(
cutlass_bindings.Tensor4DCoord(1, 4, 4, 12),
cutlass_bindings.Tensor4DCoord(8, 3, 3, 12),
cutlass_bindings.Tensor4DCoord(0, 0, 0, 0),
cutlass_bindings.MatrixCoord(3, 3),
cutlass_bindings.MatrixCoord(1, 1),
cutlass_bindings.conv.Mode.cross_correlation,
1, 1
),
cutlass_bindings.conv.Conv2dProblemSize(
cutlass_bindings.Tensor4DCoord(1, 4, 4, 14),
cutlass_bindings.Tensor4DCoord(8, 3, 3, 14),
cutlass_bindings.Tensor4DCoord(0, 0, 0, 0),
cutlass_bindings.MatrixCoord(3, 3),
cutlass_bindings.MatrixCoord(1, 1),
cutlass_bindings.conv.Mode.cross_correlation,
1, 1
),
cutlass_bindings.conv.Conv2dProblemSize(
cutlass_bindings.Tensor4DCoord(1, 23, 56, 98),
cutlass_bindings.Tensor4DCoord(128, 3, 3, 98),
cutlass_bindings.Tensor4DCoord(4, 0, 5, 0),
cutlass_bindings.MatrixCoord(3, 3),
cutlass_bindings.MatrixCoord(1, 1),
cutlass_bindings.conv.Mode.cross_correlation,
1, 1
),
]
self.assertTrue(test_all_conv2d(operation, problem_sizes))
def test_SM80_Device_Conv2d_Fprop_Optimized_ImplicitGemm_f16nhwc_f16nhwc_f16nhwc_tensor_op_f16_align2(self):
math_inst = MathInstruction(
instruction_shape=[16, 8, 16],
element_a=cutlass_bindings.float16, element_b=cutlass_bindings.float16,
element_accumulator=cutlass_bindings.float16, opcode_class=cutlass_bindings.OpClass.TensorOp,
math_operation=MathOperation.multiply_add
)
A = TensorDescription(
element=math_inst.element_a,
layout=cutlass_bindings.TensorNHWC,
alignment=2)
B = TensorDescription(
element=math_inst.element_b,
layout=cutlass_bindings.TensorNHWC,
alignment=2)
C = TensorDescription(
element=cutlass_bindings.float16,
layout=cutlass_bindings.TensorNHWC,
alignment=8)
tile_description = TileDescription(
threadblock_shape=[128, 128, 64], stages=3,
warp_count=[2, 2, 1],
math_instruction=math_inst
)
epilogue_functor = LinearCombination(
C.element, C.alignment,
math_inst.element_accumulator, cutlass_bindings.float16)
operation = Conv2dOperation(
conv_kind=cutlass_bindings.conv.Operator.fprop, iterator_algorithm=cutlass_bindings.conv.IteratorAlgorithm.optimized,
arch=80, tile_description=tile_description, A=A, B=B, C=C,
stride_support=StrideSupport.Strided,
epilogue_functor=epilogue_functor,
swizzling_functor=cutlass_bindings.IdentitySwizzle1
)
problem_sizes = [
cutlass_bindings.conv.Conv2dProblemSize(
cutlass_bindings.Tensor4DCoord(1, 4, 4, 12),
cutlass_bindings.Tensor4DCoord(8, 3, 3, 12),
cutlass_bindings.Tensor4DCoord(0, 0, 0, 0),
cutlass_bindings.MatrixCoord(3, 3),
cutlass_bindings.MatrixCoord(1, 1),
cutlass_bindings.conv.Mode.cross_correlation,
1, 1
),
cutlass_bindings.conv.Conv2dProblemSize(
cutlass_bindings.Tensor4DCoord(1, 4, 4, 14),
cutlass_bindings.Tensor4DCoord(8, 3, 3, 14),
cutlass_bindings.Tensor4DCoord(0, 0, 0, 0),
cutlass_bindings.MatrixCoord(3, 3),
cutlass_bindings.MatrixCoord(1, 1),
cutlass_bindings.conv.Mode.cross_correlation,
1, 1
),
cutlass_bindings.conv.Conv2dProblemSize(
cutlass_bindings.Tensor4DCoord(1, 23, 56, 98),
cutlass_bindings.Tensor4DCoord(128, 3, 3, 98),
cutlass_bindings.Tensor4DCoord(4, 0, 5, 0),
cutlass_bindings.MatrixCoord(3, 3),
cutlass_bindings.MatrixCoord(1, 1),
cutlass_bindings.conv.Mode.cross_correlation,
1, 1
),
]
self.assertTrue(test_all_conv2d(operation, problem_sizes))
def test_SM80_Device_Conv2d_Fprop_Optimized_ImplicitGemm_f16nhwc_f16nhwc_f16nhwc_tensor_op_f16_align4(self):
math_inst = MathInstruction(
instruction_shape=[16, 8, 16],
element_a=cutlass_bindings.float16, element_b=cutlass_bindings.float16,
element_accumulator=cutlass_bindings.float16, opcode_class=cutlass_bindings.OpClass.TensorOp,
math_operation=MathOperation.multiply_add
)
A = TensorDescription(
element=math_inst.element_a,
layout=cutlass_bindings.TensorNHWC,
alignment=4)
B = TensorDescription(
element=math_inst.element_b,
layout=cutlass_bindings.TensorNHWC,
alignment=4)
C = TensorDescription(
element=cutlass_bindings.float16,
layout=cutlass_bindings.TensorNHWC,
alignment=8)
tile_description = TileDescription(
threadblock_shape=[128, 128, 64], stages=3,
warp_count=[2, 2, 1],
math_instruction=math_inst
)
epilogue_functor = LinearCombination(
C.element, C.alignment,
math_inst.element_accumulator, cutlass_bindings.float16)
operation = Conv2dOperation(
conv_kind=cutlass_bindings.conv.Operator.fprop, iterator_algorithm=cutlass_bindings.conv.IteratorAlgorithm.optimized,
arch=80, tile_description=tile_description, A=A, B=B, C=C,
stride_support=StrideSupport.Strided,
epilogue_functor=epilogue_functor,
swizzling_functor=cutlass_bindings.IdentitySwizzle1
)
problem_sizes = [
cutlass_bindings.conv.Conv2dProblemSize(
cutlass_bindings.Tensor4DCoord(1, 4, 4, 12),
cutlass_bindings.Tensor4DCoord(8, 3, 3, 12),
cutlass_bindings.Tensor4DCoord(0, 0, 0, 0),
cutlass_bindings.MatrixCoord(3, 3),
cutlass_bindings.MatrixCoord(1, 1),
cutlass_bindings.conv.Mode.cross_correlation,
1, 1
),
cutlass_bindings.conv.Conv2dProblemSize(
cutlass_bindings.Tensor4DCoord(1, 4, 4, 28),
cutlass_bindings.Tensor4DCoord(8, 3, 3, 28),
cutlass_bindings.Tensor4DCoord(0, 0, 0, 0),
cutlass_bindings.MatrixCoord(3, 3),
cutlass_bindings.MatrixCoord(1, 1),
cutlass_bindings.conv.Mode.cross_correlation,
1, 1
),
cutlass_bindings.conv.Conv2dProblemSize(
cutlass_bindings.Tensor4DCoord(1, 23, 56, 100),
cutlass_bindings.Tensor4DCoord(128, 3, 3, 100),
cutlass_bindings.Tensor4DCoord(4, 0, 5, 0),
cutlass_bindings.MatrixCoord(3, 3),
cutlass_bindings.MatrixCoord(1, 1),
cutlass_bindings.conv.Mode.cross_correlation,
1, 1
),
]
self.assertTrue(test_all_conv2d(operation, problem_sizes))
if __name__ == '__main__':
cutlass.backend.get_memory_pool(2**26, 2**26)
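    # Added note: the two arguments appear to set the initial and maximum sizes of
    # the backend's device memory pool in bytes (64 MiB here); the GEMM test
    # modules in this collection reserve 2**30 (1 GiB) pools instead.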
unittest.main()
| cutlass-main | test/python/backend/conv/conv2d_fprop_implicit_gemm_f16nhwc_f16nhwc_f16nhwc_tensor_op_f16_sm80.py |
| cutlass-main | test/python/backend/gemm/__init__.py
#################################################################################################
#
# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
from functools import partial
import cutlass.backend
from cutlass.backend import *
from cutlass.backend import library
from cutlass.backend.test import *
import unittest
from cutlass.backend.test.utils import LayoutCombination, get_name
from cutlass.backend.test.gemm_testbed import test_all_gemm
from cutlass.backend.utils.device import device_cc
# Partial specialization for naming tests
name_fn = partial(get_name, element_a=cutlass_bindings.float16, element_b=cutlass_bindings.float16, arch=90)
def add_test(cls, layouts, alignments, element_output, element_accumulator, element_epilogue,
cluster_shape, threadblock_shape, stages, opclass, persistent=False):
"""
Create a test-running function with the given specification and set it as a method of `cls`.
:param cls: class to which the generated method will be added
:type cls: type
:param layouts: indexable container of layouts of A, B, and C operands
:param alignments: indexable container of alignments of A, B, and C operands
:param element_output: data type of the output element
:param element_accumulator: data type used in accumulation
:param element_epilogue: data type used in computing the epilogue
:param cluster_shape: indexable container of dimensions of threadblock cluster to be launched
:param threadblock_shape: indexable container of dimensions of threadblock tiles
:param stages: number of pipeline stages to use in the kernel
:type stages: int
:param opclass: class of operation being performed (e.g., SIMT, Tensor Core)
:type opclass: cutlass_bindings.OpClass
:param persistent: whether this is a persistent warp-specialized kernel
:type persistent: bool
"""
def run(self):
"""
Dynamically-generated function that constructs a GEMM operation and verifies it against
multiple test cases.
"""
element_A = cutlass_bindings.float16
element_B = cutlass_bindings.float16
inst_shape = [1, 1, 1] if opclass == cutlass_bindings.OpClass.Simt else None
warp_count = [2, 2, 1] if opclass == cutlass_bindings.OpClass.Simt else None
math_inst = MathInstruction(
instruction_shape=inst_shape,
element_a=element_A, element_b=element_B, element_accumulator=element_accumulator,
opcode_class=opclass, math_operation=MathOperation.multiply_add
)
tile_description = TileDescription(
threadblock_shape=threadblock_shape,
cluster_shape=cluster_shape,
stages=stages, warp_count=warp_count,
math_instruction=math_inst,
persistent=persistent
)
A = TensorDescription(element=element_A, layout=layouts[0], alignment=alignments[0])
B = TensorDescription(element=element_B, layout=layouts[1], alignment=alignments[1])
C = TensorDescription(element=element_output, layout=layouts[2], alignment=alignments[2])
epilogue_functor = LinearCombination(C.element, C.alignment, math_inst.element_accumulator, element_epilogue)
swizzling_functor = cutlass_bindings.IdentitySwizzle1
operation = GemmOperationUniversal(
arch=90, tile_description=tile_description, A=A, B=B, C=C,
epilogue_functor=epilogue_functor, swizzling_functor=swizzling_functor)
self.assertTrue(test_all_gemm(operation, "universal"))
if persistent:
suffix = "_persistent"
else:
suffix = ""
name = name_fn(layouts, alignments, element_output, element_accumulator,
element_epilogue, cluster_shape, threadblock_shape, stages, opclass=opclass, suffix=suffix)
setattr(cls, name, run)
return run
@unittest.skipIf(device_cc() < 90, "Device compute capability is insufficient for SM90 tests.")
class GemmF16Sm90(unittest.TestCase):
"""
Wrapper class to which tests will be added dynamically in __main__
"""
pass
add_test_tensorop = partial(add_test, opclass=cutlass_bindings.OpClass.TensorOp)
add_test_simt = partial(add_test, opclass=cutlass_bindings.OpClass.Simt)
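# Added commentary: the two partials above pre-bind `opclass`, so every
# registration below only supplies layouts, alignments, dtypes, cluster shape,
# threadblock shape, and stage count; add_test then names the generated method
# via name_fn and attaches it to GemmF16Sm90 with setattr. A hypothetical extra
# registration (tile sizes chosen purely for illustration) would look like:
# add_test_tensorop(GemmF16Sm90, LayoutCombination.TNT, [8, 8, 8],
#                   cutlass_bindings.float16, cutlass_bindings.float32,
#                   cutlass_bindings.float32, [1, 1, 1], [64, 256, 32], None)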
# Tests with 1x1x1 clusters
add_test_tensorop(GemmF16Sm90, LayoutCombination.NNN, [8, 8, 8], cutlass_bindings.float16, cutlass_bindings.float32, cutlass_bindings.float32, [1, 1, 1], [128, 128, 32], 3)
add_test_tensorop(GemmF16Sm90, LayoutCombination.NNT, [8, 8, 8], cutlass_bindings.float16, cutlass_bindings.float32, cutlass_bindings.float32, [1, 1, 1], [128, 128, 32], None)
add_test_tensorop(GemmF16Sm90, LayoutCombination.NTN, [8, 8, 8], cutlass_bindings.float16, cutlass_bindings.float32, cutlass_bindings.float32, [1, 1, 1], [128, 128, 32], None)
add_test_tensorop(GemmF16Sm90, LayoutCombination.NTT, [8, 8, 8], cutlass_bindings.float16, cutlass_bindings.float32, cutlass_bindings.float32, [1, 1, 1], [128, 128, 32], None)
add_test_tensorop(GemmF16Sm90, LayoutCombination.TNN, [8, 8, 8], cutlass_bindings.float16, cutlass_bindings.float32, cutlass_bindings.float32, [1, 1, 1], [128, 128, 32], None)
add_test_tensorop(GemmF16Sm90, LayoutCombination.TNT, [8, 8, 8], cutlass_bindings.float16, cutlass_bindings.float32, cutlass_bindings.float32, [1, 1, 1], [128, 128, 32], None)
add_test_tensorop(GemmF16Sm90, LayoutCombination.TTN, [8, 8, 8], cutlass_bindings.float16, cutlass_bindings.float32, cutlass_bindings.float32, [1, 1, 1], [128, 128, 32], None)
add_test_tensorop(GemmF16Sm90, LayoutCombination.TTT, [8, 8, 8], cutlass_bindings.float16, cutlass_bindings.float32, cutlass_bindings.float32, [1, 1, 1], [128, 128, 32], None)
add_test_tensorop(GemmF16Sm90, LayoutCombination.TNT, [8, 8, 8], cutlass_bindings.float16, cutlass_bindings.float32, cutlass_bindings.float32, [1, 1, 1], [64, 128, 32], None)
add_test_tensorop(GemmF16Sm90, LayoutCombination.TNT, [8, 8, 8], cutlass_bindings.float16, cutlass_bindings.float32, cutlass_bindings.float32, [1, 1, 1], [128, 64, 32], None)
add_test_tensorop(GemmF16Sm90, LayoutCombination.TNT, [8, 8, 8], cutlass_bindings.float16, cutlass_bindings.float32, cutlass_bindings.float32, [1, 1, 1], [64, 64, 64], None)
add_test_tensorop(GemmF16Sm90, LayoutCombination.TNT, [4, 4, 8], cutlass_bindings.float16, cutlass_bindings.float32, cutlass_bindings.float32, [1, 1, 1], [128, 128, 32], None)
add_test_tensorop(GemmF16Sm90, LayoutCombination.TNT, [4, 4, 8], cutlass_bindings.float16, cutlass_bindings.float16, cutlass_bindings.float16, [1, 1, 1], [128, 128, 32], None)
add_test_tensorop(GemmF16Sm90, LayoutCombination.TNT, [8, 8, 8], cutlass_bindings.float16, cutlass_bindings.float16, cutlass_bindings.float16, [1, 1, 1], [128, 128, 32], None)
add_test_tensorop(GemmF16Sm90, LayoutCombination.TNT, [8, 8, 8], cutlass_bindings.float16, cutlass_bindings.float32, cutlass_bindings.float32, [1, 1, 1], [64, 64, 64], 5)
add_test_tensorop(GemmF16Sm90, LayoutCombination.TNT, [2, 2, 2], cutlass_bindings.float16, cutlass_bindings.float16, cutlass_bindings.float16, [1, 1, 1], [128, 128, 32], None)
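# Added note: a stages value of None above leaves the pipeline depth for the
# backend to choose, while explicit values (3 or 5 here) pin the stage count;
# this reading is inferred from the registrations in this file rather than from
# the TileDescription implementation.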
# Tests with different cluster shapes
add_test_tensorop(GemmF16Sm90, LayoutCombination.TTN, [8, 8, 8], cutlass_bindings.float32, cutlass_bindings.float32, cutlass_bindings.float32, [2, 2, 1], [64, 128, 64], None)
add_test_tensorop(GemmF16Sm90, LayoutCombination.TNN, [8, 8, 8], cutlass_bindings.float32, cutlass_bindings.float32, cutlass_bindings.float32, [2, 2, 1], [64, 128, 64], None)
add_test_tensorop(GemmF16Sm90, LayoutCombination.NTN, [8, 8, 8], cutlass_bindings.float32, cutlass_bindings.float32, cutlass_bindings.float32, [2, 2, 1], [64, 128, 64], None)
add_test_tensorop(GemmF16Sm90, LayoutCombination.NNN, [8, 8, 8], cutlass_bindings.float32, cutlass_bindings.float32, cutlass_bindings.float32, [2, 2, 1], [64, 128, 64], None)
add_test_tensorop(GemmF16Sm90, LayoutCombination.TTN, [8, 8, 8], cutlass_bindings.float32, cutlass_bindings.float32, cutlass_bindings.float32, [1, 4, 1], [64, 128, 64], None)
add_test_tensorop(GemmF16Sm90, LayoutCombination.TTN, [8, 8, 8], cutlass_bindings.float32, cutlass_bindings.float32, cutlass_bindings.float32, [2, 4, 1], [64, 128, 64], None)
add_test_tensorop(GemmF16Sm90, LayoutCombination.TTN, [8, 8, 8], cutlass_bindings.float32, cutlass_bindings.float32, cutlass_bindings.float32, [4, 1, 1], [64, 128, 64], None)
add_test_tensorop(GemmF16Sm90, LayoutCombination.TTN, [8, 8, 8], cutlass_bindings.float32, cutlass_bindings.float32, cutlass_bindings.float32, [4, 2, 1], [64, 128, 64], None)
# Tests for persistent warp-specialized threadblocks
add_test_tensorop(GemmF16Sm90, LayoutCombination.TTN, [8, 8, 8], cutlass_bindings.float32, cutlass_bindings.float32, cutlass_bindings.float32, [1, 1, 1], [64, 128, 64], None, persistent=True)
add_test_tensorop(GemmF16Sm90, LayoutCombination.TTN, [8, 8, 8], cutlass_bindings.float32, cutlass_bindings.float32, cutlass_bindings.float32, [2, 1, 1], [64, 128, 64], None, persistent=True)
add_test_tensorop(GemmF16Sm90, LayoutCombination.TTN, [8, 8, 8], cutlass_bindings.float32, cutlass_bindings.float32, cutlass_bindings.float32, [1, 1, 1], [128, 128, 64], None, persistent=True)
add_test_tensorop(GemmF16Sm90, LayoutCombination.TTN, [8, 8, 8], cutlass_bindings.float32, cutlass_bindings.float32, cutlass_bindings.float32, [2, 1, 1], [128, 128, 64], None, persistent=True)
add_test_tensorop(GemmF16Sm90, LayoutCombination.TTN, [8, 8, 8], cutlass_bindings.float32, cutlass_bindings.float32, cutlass_bindings.float32, [1, 2, 1], [64, 128, 64], None, persistent=True)
add_test_tensorop(GemmF16Sm90, LayoutCombination.TTN, [8, 8, 8], cutlass_bindings.float32, cutlass_bindings.float32, cutlass_bindings.float32, [2, 2, 1], [64, 128, 64], None, persistent=True)
add_test_tensorop(GemmF16Sm90, LayoutCombination.TTN, [8, 8, 8], cutlass_bindings.float32, cutlass_bindings.float32, cutlass_bindings.float32, [1, 4, 1], [64, 128, 64], None, persistent=True)
add_test_tensorop(GemmF16Sm90, LayoutCombination.TTN, [8, 8, 8], cutlass_bindings.float32, cutlass_bindings.float32, cutlass_bindings.float32, [2, 4, 1], [64, 128, 64], None, persistent=True)
add_test_tensorop(GemmF16Sm90, LayoutCombination.TTN, [8, 8, 8], cutlass_bindings.float32, cutlass_bindings.float32, cutlass_bindings.float32, [4, 1, 1], [64, 128, 64], None, persistent=True)
add_test_tensorop(GemmF16Sm90, LayoutCombination.TTN, [8, 8, 8], cutlass_bindings.float32, cutlass_bindings.float32, cutlass_bindings.float32, [4, 4, 1], [64, 128, 64], None, persistent=True)
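# Added note: the persistent=True registrations above reuse the same tile and
# cluster shapes as the non-persistent cases; per the suffix logic in add_test,
# they only switch the kernel to the persistent warp-specialized schedule and
# append "_persistent" to the generated test name.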
# Tests using SIMT
add_test_simt(GemmF16Sm90, LayoutCombination.NNN, [1, 1, 1], cutlass_bindings.float16, cutlass_bindings.float32, cutlass_bindings.float32, [1, 1, 1], [128, 128, 8], 2)
add_test_simt(GemmF16Sm90, LayoutCombination.TNN, [1, 1, 1], cutlass_bindings.float16, cutlass_bindings.float32, cutlass_bindings.float32, [1, 1, 1], [64, 128, 8], 2)
add_test_simt(GemmF16Sm90, LayoutCombination.NTN, [1, 1, 1], cutlass_bindings.float16, cutlass_bindings.float32, cutlass_bindings.float32, [1, 1, 1], [128, 64, 8], 2)
add_test_simt(GemmF16Sm90, LayoutCombination.TTN, [1, 1, 1], cutlass_bindings.float16, cutlass_bindings.float32, cutlass_bindings.float32, [1, 1, 1], [64, 64, 8], 2)
add_test_simt(GemmF16Sm90, LayoutCombination.NNT, [1, 1, 1], cutlass_bindings.float16, cutlass_bindings.float16, cutlass_bindings.float16, [1, 1, 1], [128, 128, 8], 2)
if __name__ == '__main__':
cutlass.backend.get_memory_pool(2**30, 2**30)
unittest.main()
| cutlass-main | test/python/backend/gemm/gemm_f16_sm90.py |
#################################################################################################
#
# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
from functools import partial
import cutlass.backend
from cutlass.backend import *
from cutlass.backend import library
from cutlass.backend.test import *
import unittest
from cutlass.backend.test.utils import LayoutCombination, get_name
from cutlass.backend.test.gemm_testbed import test_all_gemm
from cutlass.backend.utils.device import device_cc
name_fn = partial(get_name, element_a=cutlass_bindings.bfloat16, element_b=cutlass_bindings.bfloat16, arch=90)
def add_test(cls, layouts, alignments, element_output, element_accumulator, element_epilogue,
cluster_shape, threadblock_shape, stages, opclass, persistent=False):
"""
Create a test-running function with the given specification and set it as a method of `cls`.
:param cls: class to which the generated method will be added
:type cls: type
:param layouts: indexable container of layouts of A, B, and C operands
:param alignments: indexable container of alignments of A, B, and C operands
:param element_output: data type of the output element
:param element_accumulator: data type used in accumulation
:param element_epilogue: data type used in computing the epilogue
:param cluster_shape: indexable container of dimensions of threadblock cluster to be launched
:param threadblock_shape: indexable container of dimensions of threadblock tiles
:param stages: number of pipeline stages to use in the kernel
:type stages: int
:param opclass: class of operation being performed (e.g., SIMT, Tensor Core)
:type opclass: cutlass_bindings.OpClass
:param persistent: whether this is a persistent warp-specialized kernel
:type persistent: bool
"""
def run(self):
"""
Dynamically-generated function that constructs a GEMM operation and verifies it against
multiple test cases.
"""
element_A = cutlass_bindings.bfloat16
element_B = cutlass_bindings.bfloat16
inst_shape = [1, 1, 1] if opclass == cutlass_bindings.OpClass.Simt else None
warp_count = [2, 2, 1] if opclass == cutlass_bindings.OpClass.Simt else None
math_inst = MathInstruction(
instruction_shape=inst_shape,
element_a=element_A, element_b=element_B, element_accumulator=element_accumulator,
opcode_class=opclass, math_operation=MathOperation.multiply_add
)
tile_description = TileDescription(
threadblock_shape=threadblock_shape,
cluster_shape=cluster_shape,
stages=stages, warp_count=warp_count,
math_instruction=math_inst,
persistent=persistent
)
A = TensorDescription(element=element_A, layout=layouts[0], alignment=alignments[0])
B = TensorDescription(element=element_B, layout=layouts[1], alignment=alignments[1])
C = TensorDescription(element=element_output, layout=layouts[2], alignment=alignments[2])
epilogue_functor = LinearCombination(C.element, C.alignment, math_inst.element_accumulator, element_epilogue)
swizzling_functor = cutlass_bindings.IdentitySwizzle1
operation = GemmOperationUniversal(
arch=90, tile_description=tile_description, A=A, B=B, C=C,
epilogue_functor=epilogue_functor, swizzling_functor=swizzling_functor)
self.assertTrue(test_all_gemm(operation, "universal"))
if persistent:
suffix = "_persistent"
else:
suffix = ""
name = name_fn(layouts, alignments, element_output, element_accumulator,
element_epilogue, cluster_shape, threadblock_shape, stages, opclass=opclass, suffix=suffix)
setattr(cls, name, run)
return run
@unittest.skipIf(device_cc() < 90, "Device compute capability is insufficient for SM90 tests.")
class GemmBF16Sm90(unittest.TestCase):
"""
Wrapper class to which tests will be added dynamically in __main__
"""
pass
add_test_tensorop = partial(add_test, opclass=cutlass_bindings.OpClass.TensorOp)
add_test_simt = partial(add_test, opclass=cutlass_bindings.OpClass.Simt)
add_test_tensorop(GemmBF16Sm90, LayoutCombination.NNN, [8, 8, 8], cutlass_bindings.bfloat16, cutlass_bindings.float32, cutlass_bindings.float32, [1, 1, 1], [128, 128, 32], 3)
add_test_tensorop(GemmBF16Sm90, LayoutCombination.NNN, [4, 4, 8], cutlass_bindings.bfloat16, cutlass_bindings.float32, cutlass_bindings.float32, [1, 1, 1], [128, 128, 32], 5)
add_test_tensorop(GemmBF16Sm90, LayoutCombination.TNN, [8, 8, 8], cutlass_bindings.bfloat16, cutlass_bindings.float32, cutlass_bindings.float32, [2, 1, 1], [128, 128, 32], None)
add_test_tensorop(GemmBF16Sm90, LayoutCombination.TNN, [8, 8, 8], cutlass_bindings.bfloat16, cutlass_bindings.float32, cutlass_bindings.float32, [2, 1, 1], [128, 128, 32], None, persistent=True)
add_test_simt(GemmBF16Sm90, LayoutCombination.NNN, [1, 1, 1], cutlass_bindings.bfloat16, cutlass_bindings.float32, cutlass_bindings.float32, [1, 1, 1], [128, 128, 8], 2)
if __name__ == '__main__':
cutlass.backend.get_memory_pool(2**30, 2**30)
unittest.main()
| cutlass-main | test/python/backend/gemm/gemm_bf16_sm90.py |
#################################################################################################
#
# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
from functools import partial
import cutlass.backend
from cutlass.backend import *
from cutlass.backend import library
from cutlass.backend.test import *
import unittest
from cutlass.backend.test.utils import LayoutCombination, get_name
from cutlass.backend.test.gemm_testbed import test_all_gemm
from cutlass.backend.utils.device import device_cc
name_fn = partial(get_name, element_a=cutlass_bindings.float64, element_b=cutlass_bindings.float64, arch=90)
def add_test(cls, layouts, alignments, element_output, element_accumulator, element_epilogue,
cluster_shape, threadblock_shape, stages, opclass):
"""
Create a test-running function with the given specification and set it as a method of `cls`.
:param cls: class to which the generated method will be added
:type cls: type
:param layouts: indexable container of layouts of A, B, and C operands
:param alignments: indexable container of alignments of A, B, and C operands
:param element_output: data type of the output element
:param element_accumulator: data type used in accumulation
:param element_epilogue: data type used in computing the epilogue
:param cluster_shape: indexable container of dimensions of threadblock cluster to be launched
:param threadblock_shape: indexable container of dimensions of threadblock tiles
:param stages: number of pipeline stages to use in the kernel
:type stages: int
:param opclass: class of operation being performed (e.g., SIMT, Tensor Core)
:type opclass: cutlass_bindings.OpClass
"""
def run(self):
"""
Dynamically-generated function that constructs a GEMM operation and verifies it against
multiple test cases.
"""
element_A = cutlass_bindings.float64
element_B = cutlass_bindings.float64
inst_shape = [1, 1, 1] if opclass == cutlass_bindings.OpClass.Simt else None
warp_count = [2, 2, 1] if opclass == cutlass_bindings.OpClass.Simt else None
math_inst = MathInstruction(
instruction_shape=inst_shape,
element_a=element_A, element_b=element_B, element_accumulator=element_accumulator,
opcode_class=opclass, math_operation=MathOperation.multiply_add
)
tile_description = TileDescription(
threadblock_shape=threadblock_shape,
cluster_shape=cluster_shape,
stages=stages, warp_count=warp_count,
math_instruction=math_inst
)
A = TensorDescription(element=element_A, layout=layouts[0], alignment=alignments[0])
B = TensorDescription(element=element_B, layout=layouts[1], alignment=alignments[1])
C = TensorDescription(element=element_output, layout=layouts[2], alignment=alignments[2])
epilogue_functor = LinearCombination(C.element, C.alignment, math_inst.element_accumulator, element_epilogue)
swizzling_functor = cutlass_bindings.IdentitySwizzle1
operation = GemmOperationUniversal(
arch=90, tile_description=tile_description, A=A, B=B, C=C,
epilogue_functor=epilogue_functor, swizzling_functor=swizzling_functor)
self.assertTrue(test_all_gemm(operation, "universal"))
name = name_fn(layouts, alignments, element_output, element_accumulator,
element_epilogue, cluster_shape, threadblock_shape, stages, opclass=opclass)
setattr(cls, name, run)
return run
@unittest.skipIf(device_cc() < 90, "Device compute capability is insufficient for SM90 tests.")
class GemmF64Sm90(unittest.TestCase):
"""
Wrapper class to which tests will be added dynamically in __main__
"""
pass
add_test_simt = partial(add_test, opclass=cutlass_bindings.OpClass.Simt)
add_test_simt(GemmF64Sm90, LayoutCombination.NNN, [1, 1, 1], cutlass_bindings.float64, cutlass_bindings.float64, cutlass_bindings.float64, [1, 1, 1], [64, 64, 32], 2)
if __name__ == '__main__':
cutlass.backend.get_memory_pool(2**30, 2**30)
unittest.main()
| cutlass-main | test/python/backend/gemm/gemm_f64_sm90.py |
#################################################################################################
#
# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
import cutlass.backend
from cutlass.backend import *
from cutlass.backend.test import *
import unittest
from cutlass.backend.test.gemm_testbed import test_all_gemm
from cutlass.backend.utils.device import device_cc
@unittest.skipIf(device_cc() < 80, "Device compute capability is insufficient for SM80 tests.")
class GemmF64TensorOpSm80(unittest.TestCase):
def test_SM80_Device_Gemm_f64n_f64t_f64t_tensor_op_f64_32x32x16_16x16x16(self):
math_inst = MathInstruction(
instruction_shape=[8, 8, 4],
element_a=cutlass_bindings.float64, element_b=cutlass_bindings.float64,
element_accumulator=cutlass_bindings.float64, opcode_class=cutlass_bindings.OpClass.TensorOp,
math_operation=MathOperation.multiply_add
)
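        # Added note: instruction_shape selects the per-instruction MMA tile
        # (M, N, K); [8, 8, 4] corresponds to the double-precision Tensor Core
        # MMA shape available on SM80-class devices.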
tile_description = TileDescription(
threadblock_shape=[32, 32, 16],
stages=4, warp_count=[2, 2, 1],
math_instruction=math_inst
)
# alignment 1 restricted for double
A = TensorDescription(
element=cutlass_bindings.float64, layout=cutlass_bindings.ColumnMajor,
alignment=1
)
B = TensorDescription(
element=cutlass_bindings.float64, layout=cutlass_bindings.RowMajor,
alignment=1
)
C = TensorDescription(
element=cutlass_bindings.float64, layout=cutlass_bindings.RowMajor,
alignment=1
)
element_epilogue = cutlass_bindings.float64
epilogue_functor = LinearCombination(
C.element, C.alignment,
math_inst.element_accumulator, element_epilogue)
swizzling_functor = cutlass_bindings.IdentitySwizzle1
operation = GemmOperationUniversal(
arch=80, tile_description=tile_description,
A=A, B=B, C=C,
epilogue_functor=epilogue_functor, swizzling_functor=swizzling_functor
)
self.assertTrue(test_all_gemm(operation, "universal"))
def test_SM80_Device_Gemm_f64t_f64n_f64t_tensor_op_f64_64x64x16_32x32x16(self):
math_inst = MathInstruction(
instruction_shape=[8, 8, 4],
element_a=cutlass_bindings.float64, element_b=cutlass_bindings.float64,
element_accumulator=cutlass_bindings.float64, opcode_class=cutlass_bindings.OpClass.TensorOp,
math_operation=MathOperation.multiply_add
)
tile_description = TileDescription(
threadblock_shape=[64, 64, 16],
stages=4, warp_count=[2, 2, 1],
math_instruction=math_inst
)
# alignment 1 restricted for double
A = TensorDescription(
element=cutlass_bindings.float64, layout=cutlass_bindings.RowMajor,
alignment=1
)
B = TensorDescription(
element=cutlass_bindings.float64, layout=cutlass_bindings.ColumnMajor,
alignment=1
)
C = TensorDescription(
element=cutlass_bindings.float64, layout=cutlass_bindings.RowMajor,
alignment=1
)
element_epilogue = cutlass_bindings.float64
epilogue_functor = LinearCombination(
C.element, C.alignment,
math_inst.element_accumulator, element_epilogue)
swizzling_functor = cutlass_bindings.IdentitySwizzle1
operation = GemmOperationUniversal(
arch=80, tile_description=tile_description,
A=A, B=B, C=C,
epilogue_functor=epilogue_functor, swizzling_functor=swizzling_functor
)
self.assertTrue(test_all_gemm(operation, "universal"))
if __name__ == '__main__':
cutlass.backend.get_memory_pool(2**30, 2**30)
unittest.main()
| cutlass-main | test/python/backend/gemm/gemm_f64_sm80.py |
#################################################################################################
#
# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
import cutlass.backend
from cutlass.backend import *
from cutlass.backend.test import *
import unittest
from cutlass.backend.test.gemm_testbed import test_all_gemm
from cutlass.backend.utils.device import device_cc
@unittest.skipIf(device_cc() < 80, "Device compute capability is insufficient for SM80 tests.")
class GemmBF16TensorOpSm80(unittest.TestCase):
    def test_SM80_Device_Gemm_bf16n_bf16n_f32t_tensor_op_f32_64x128x64_32x64x64(self):
math_inst = MathInstruction(
instruction_shape=[16, 8, 16],
element_a=cutlass_bindings.bfloat16, element_b=cutlass_bindings.bfloat16,
element_accumulator=cutlass_bindings.float32, opcode_class=cutlass_bindings.OpClass.TensorOp,
math_operation=MathOperation.multiply_add
)
tile_description = TileDescription(
threadblock_shape=[64, 128, 64],
stages=4, warp_count=[2, 2, 1],
math_instruction=math_inst
)
A = TensorDescription(
element=cutlass_bindings.bfloat16, layout=cutlass_bindings.ColumnMajor,
alignment=8
)
B = TensorDescription(
element=cutlass_bindings.bfloat16, layout=cutlass_bindings.ColumnMajor,
alignment=8
)
C = TensorDescription(
element=cutlass_bindings.float32, layout=cutlass_bindings.RowMajor,
alignment=4
)
epilogue_functor = LinearCombination(
C.element, C.alignment,
math_inst.element_accumulator, cutlass_bindings.float32)
swizzling_functor = cutlass_bindings.IdentitySwizzle1
operation = GemmOperationUniversal(
arch=80, tile_description=tile_description,
A=A, B=B, C=C,
epilogue_functor=epilogue_functor, swizzling_functor=swizzling_functor
)
self.assertTrue(test_all_gemm(operation, "universal"))
def test_SM80_Device_Gemm_bf16t_bf16t_bf16t_tensor_op_f32_128x256x64_64x64x64(self):
math_inst = MathInstruction(
instruction_shape=[16, 8, 16],
element_a=cutlass_bindings.bfloat16, element_b=cutlass_bindings.bfloat16,
element_accumulator=cutlass_bindings.float32, opcode_class=cutlass_bindings.OpClass.TensorOp,
math_operation=MathOperation.multiply_add
)
tile_description = TileDescription(
threadblock_shape=[64, 128, 32],
stages=6, warp_count=[2, 2, 1],
math_instruction=math_inst
)
A = TensorDescription(
element=cutlass_bindings.bfloat16, layout=cutlass_bindings.RowMajor,
alignment=8
)
B = TensorDescription(
element=cutlass_bindings.bfloat16, layout=cutlass_bindings.RowMajor,
alignment=8
)
C = TensorDescription(
element=cutlass_bindings.bfloat16, layout=cutlass_bindings.RowMajor,
alignment=8
)
epilogue_functor = LinearCombination(
C.element, C.alignment,
math_inst.element_accumulator, cutlass_bindings.float32)
swizzling_functor = cutlass_bindings.IdentitySwizzle1
operation = GemmOperationUniversal(
arch=80, tile_description=tile_description,
A=A, B=B, C=C,
epilogue_functor=epilogue_functor, swizzling_functor=swizzling_functor
)
self.assertTrue(test_all_gemm(operation, "multistage"))
if __name__ == '__main__':
cutlass.backend.get_memory_pool(2**30, 2**30)
unittest.main()
| cutlass-main | test/python/backend/gemm/gemm_bf16_sm80.py |
#################################################################################################
#
# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
import cutlass.backend
from cutlass.backend import *
from cutlass.backend.test import *
import unittest
from cutlass.backend.test.gemm_testbed import test_all_gemm
from cutlass.backend.utils.device import device_cc
@unittest.skipIf(device_cc() < 80, "Device compute capability is insufficient for SM80 tests.")
class GemmF16Sm80(unittest.TestCase):
def test_SM80_Device_Gemm_f32t_f32n_f32t_tensor_op_bf16_f32_128x128x32_64x64x32(self):
math_inst = MathInstruction(
instruction_shape=[16, 8, 16],
element_a=cutlass_bindings.float16, element_b=cutlass_bindings.float16,
element_accumulator=cutlass_bindings.float32, opcode_class=cutlass_bindings.OpClass.TensorOp,
math_operation=MathOperation.multiply_add
)
tile_description = TileDescription(
threadblock_shape=[128, 128, 32],
stages=3, warp_count=[2, 2, 1],
math_instruction=math_inst
)
A = TensorDescription(
element=cutlass_bindings.float16, layout=cutlass_bindings.ColumnMajor,
alignment=8
)
B = TensorDescription(
element=cutlass_bindings.float16, layout=cutlass_bindings.RowMajor,
alignment=8
)
C = TensorDescription(
element=cutlass_bindings.float32, layout=cutlass_bindings.ColumnMajor,
alignment=4
)
element_epilogue = cutlass_bindings.float32
epilogue_functor = LinearCombination(
C.element, C.alignment,
math_inst.element_accumulator, element_epilogue)
swizzling_functor = cutlass_bindings.BatchedIdentitySwizzle
operation = GemmOperationUniversal(
arch=80, tile_description=tile_description,
A=A, B=B, C=C,
epilogue_functor=epilogue_functor, swizzling_functor=swizzling_functor,
direct_store=True
)
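        # Added note: this configuration pairs the batched identity swizzle with
        # direct_store=True, which (as I read the backend) selects an epilogue
        # that writes accumulators straight to global memory rather than staging
        # them through shared memory first.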
self.assertTrue(test_all_gemm(operation, "universal"))
def test_SM80_Device_Gemm_f16n_f16n_f16t_tensor_op_f32_128x128x64_64x64x64(self):
math_inst = MathInstruction(
instruction_shape=[16, 8, 16],
element_a=cutlass_bindings.float16, element_b=cutlass_bindings.float16,
element_accumulator=cutlass_bindings.float32, opcode_class=cutlass_bindings.OpClass.TensorOp,
math_operation=MathOperation.multiply_add
)
tile_description = TileDescription(
threadblock_shape=[128, 128, 64],
stages=3, warp_count=[2, 2, 1],
math_instruction=math_inst
)
A = TensorDescription(
element=cutlass_bindings.float16, layout=cutlass_bindings.ColumnMajor,
alignment=8
)
B = TensorDescription(
element=cutlass_bindings.float16, layout=cutlass_bindings.ColumnMajor,
alignment=8
)
C = TensorDescription(
element=cutlass_bindings.float16, layout=cutlass_bindings.RowMajor,
alignment=8
)
element_epilogue = cutlass_bindings.float32
epilogue_functor = LinearCombination(
C.element, C.alignment,
math_inst.element_accumulator, element_epilogue)
swizzling_functor = cutlass_bindings.IdentitySwizzle1
operation = GemmOperationUniversal(
arch=80, tile_description=tile_description,
A=A, B=B, C=C,
epilogue_functor=epilogue_functor, swizzling_functor=swizzling_functor
)
self.assertTrue(test_all_gemm(operation, "universal"))
def test_SM80_Device_Gemm_f16n_f16n_f32n_tensor_op_f32_128x256x64_64x64x64(self):
math_inst = MathInstruction(
instruction_shape=[16, 8, 16],
element_a=cutlass_bindings.float16, element_b=cutlass_bindings.float16,
element_accumulator=cutlass_bindings.float32, opcode_class=cutlass_bindings.OpClass.TensorOp,
math_operation=MathOperation.multiply_add
)
tile_description = TileDescription(
threadblock_shape=[128, 256, 64],
stages=3, warp_count=[2, 4, 1],
math_instruction=math_inst
)
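        # Added note: with a 128x256x64 threadblock tile and warp_count [2, 4, 1],
        # each warp covers a 64x64x64 sub-tile, which is the second shape encoded
        # in this test's name.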
A = TensorDescription(
element=cutlass_bindings.float16, layout=cutlass_bindings.ColumnMajor,
alignment=8
)
B = TensorDescription(
element=cutlass_bindings.float16, layout=cutlass_bindings.ColumnMajor,
alignment=8
)
C = TensorDescription(
element=cutlass_bindings.float32, layout=cutlass_bindings.ColumnMajor,
alignment=4
)
element_epilogue = cutlass_bindings.float32
epilogue_functor = LinearCombination(
C.element, C.alignment,
math_inst.element_accumulator, element_epilogue)
swizzling_functor = cutlass_bindings.IdentitySwizzle1
operation = GemmOperationUniversal(
arch=80, tile_description=tile_description,
A=A, B=B, C=C,
epilogue_functor=epilogue_functor, swizzling_functor=swizzling_functor
)
self.assertTrue(test_all_gemm(operation, "universal"))
def test_SM80_Device_Gemm_f16n_f16n_f32t_tensor_op_f32_256x128x64_64x64x64(self):
math_inst = MathInstruction(
instruction_shape=[16, 8, 16],
element_a=cutlass_bindings.float16, element_b=cutlass_bindings.float16,
element_accumulator=cutlass_bindings.float32, opcode_class=cutlass_bindings.OpClass.TensorOp,
math_operation=MathOperation.multiply_add
)
tile_description = TileDescription(
threadblock_shape=[256, 128, 64],
stages=3, warp_count=[4, 2, 1],
math_instruction=math_inst
)
A = TensorDescription(
element=cutlass_bindings.float16, layout=cutlass_bindings.ColumnMajor,
alignment=8
)
B = TensorDescription(
element=cutlass_bindings.float16, layout=cutlass_bindings.ColumnMajor,
alignment=8
)
C = TensorDescription(
element=cutlass_bindings.float32, layout=cutlass_bindings.RowMajor,
alignment=4
)
element_epilogue = cutlass_bindings.float32
epilogue_functor = LinearCombination(
C.element, C.alignment,
math_inst.element_accumulator, element_epilogue)
swizzling_functor = cutlass_bindings.IdentitySwizzle1
operation = GemmOperationUniversal(
arch=80, tile_description=tile_description,
A=A, B=B, C=C,
epilogue_functor=epilogue_functor, swizzling_functor=swizzling_functor
)
self.assertTrue(test_all_gemm(operation, "universal"))
def test_SM80_Device_Gemm_f16n_f16t_f16t_tensor_op_f16_sliced_k_128x64x64_64x64x32(self):
math_inst = MathInstruction(
instruction_shape=[16, 8, 16],
element_a=cutlass_bindings.float16, element_b=cutlass_bindings.float16,
element_accumulator=cutlass_bindings.float16, opcode_class=cutlass_bindings.OpClass.TensorOp,
math_operation=MathOperation.multiply_add
)
tile_description = TileDescription(
threadblock_shape=[128, 64, 64],
stages=3, warp_count=[2, 1, 1],
math_instruction=math_inst
)
A = TensorDescription(
element=cutlass_bindings.float16, layout=cutlass_bindings.ColumnMajor,
alignment=8
)
B = TensorDescription(
element=cutlass_bindings.float16, layout=cutlass_bindings.RowMajor,
alignment=8
)
C = TensorDescription(
element=cutlass_bindings.float16, layout=cutlass_bindings.RowMajor,
alignment=4
)
element_epilogue = cutlass_bindings.float16
epilogue_functor = LinearCombination(
C.element, C.alignment,
math_inst.element_accumulator, element_epilogue)
swizzling_functor = cutlass_bindings.IdentitySwizzle1
operation = GemmOperationUniversal(
arch=80, tile_description=tile_description,
A=A, B=B, C=C,
epilogue_functor=epilogue_functor, swizzling_functor=swizzling_functor
)
self.assertTrue(test_all_gemm(operation, "universal"))
def test_SM80_Device_GemmUniversal_f16n_f16t_f32t_tensor_op_f32_64x64x32_32x32x32(self):
math_inst = MathInstruction(
instruction_shape=[16, 8, 16],
element_a=cutlass_bindings.float16, element_b=cutlass_bindings.float16,
element_accumulator=cutlass_bindings.float16, opcode_class=cutlass_bindings.OpClass.TensorOp,
math_operation=MathOperation.multiply_add
)
tile_description = TileDescription(
threadblock_shape=[64, 64, 32],
stages=10, warp_count=[2, 2, 1],
math_instruction=math_inst
)
A = TensorDescription(
element=cutlass_bindings.float16, layout=cutlass_bindings.ColumnMajor,
alignment=8
)
B = TensorDescription(
element=cutlass_bindings.float16, layout=cutlass_bindings.RowMajor,
alignment=8
)
C = TensorDescription(
element=cutlass_bindings.float16, layout=cutlass_bindings.RowMajor,
alignment=4
)
element_epilogue = cutlass_bindings.float16
epilogue_functor = LinearCombination(
C.element, C.alignment,
math_inst.element_accumulator, element_epilogue)
swizzling_functor = cutlass_bindings.IdentitySwizzle1
operation = GemmOperationUniversal(
arch=80, tile_description=tile_description,
A=A, B=B, C=C,
epilogue_functor=epilogue_functor, swizzling_functor=swizzling_functor
)
self.assertTrue(test_all_gemm(operation, "universal"))
def test_SM80_Device_Gemm_f16n_f16t_f32t_tensor_op_f32_256x128x64_64x64x64(self):
math_inst = MathInstruction(
instruction_shape=[16, 8, 16],
element_a=cutlass_bindings.float16, element_b=cutlass_bindings.float16,
element_accumulator=cutlass_bindings.float32, opcode_class=cutlass_bindings.OpClass.TensorOp,
math_operation=MathOperation.multiply_add
)
tile_description = TileDescription(
threadblock_shape=[256, 128, 64],
stages=3, warp_count=[4, 2, 1],
math_instruction=math_inst
)
A = TensorDescription(
element=cutlass_bindings.float16, layout=cutlass_bindings.ColumnMajor,
alignment=8
)
B = TensorDescription(
element=cutlass_bindings.float16, layout=cutlass_bindings.RowMajor,
alignment=8
)
C = TensorDescription(
element=cutlass_bindings.float16, layout=cutlass_bindings.RowMajor,
alignment=8
)
element_epilogue = cutlass_bindings.float32
epilogue_functor = LinearCombination(
C.element, C.alignment,
math_inst.element_accumulator, element_epilogue)
swizzling_functor = cutlass_bindings.IdentitySwizzle1
operation = GemmOperationUniversal(
arch=80, tile_description=tile_description,
A=A, B=B, C=C,
epilogue_functor=epilogue_functor, swizzling_functor=swizzling_functor
)
self.assertTrue(test_all_gemm(operation, "universal"))
    def test_SM80_Device_Gemm_f16t_f16n_f16t_tensor_op_f16_sliced_k_128x64x64_64x64x32(self):
math_inst = MathInstruction(
instruction_shape=[16, 8, 16],
element_a=cutlass_bindings.float16, element_b=cutlass_bindings.float16,
element_accumulator=cutlass_bindings.float32, opcode_class=cutlass_bindings.OpClass.TensorOp,
math_operation=MathOperation.multiply_add
)
tile_description = TileDescription(
threadblock_shape=[128, 64, 64],
stages=3, warp_count=[2, 1, 1],
math_instruction=math_inst
)
A = TensorDescription(
element=cutlass_bindings.float16, layout=cutlass_bindings.RowMajor,
alignment=8
)
B = TensorDescription(
element=cutlass_bindings.float16, layout=cutlass_bindings.ColumnMajor,
alignment=8
)
C = TensorDescription(
element=cutlass_bindings.float16, layout=cutlass_bindings.RowMajor,
alignment=4
)
element_epilogue = cutlass_bindings.float32
epilogue_functor = LinearCombination(
C.element, C.alignment,
math_inst.element_accumulator, element_epilogue)
swizzling_functor = cutlass_bindings.IdentitySwizzle1
operation = GemmOperationUniversal(
arch=80, tile_description=tile_description,
A=A, B=B, C=C,
epilogue_functor=epilogue_functor, swizzling_functor=swizzling_functor
)
self.assertTrue(test_all_gemm(operation, "universal"))
def test_SM80_Device_Gemm_f16t_f16t_f32n_tensor_op_f32_128x256x64_64x64x64(self):
math_inst = MathInstruction(
instruction_shape=[16, 8, 16],
element_a=cutlass_bindings.float16, element_b=cutlass_bindings.float16,
element_accumulator=cutlass_bindings.float32, opcode_class=cutlass_bindings.OpClass.TensorOp,
math_operation=MathOperation.multiply_add
)
tile_description = TileDescription(
threadblock_shape=[128, 256, 64],
stages=3, warp_count=[2, 4, 1],
math_instruction=math_inst
)
A = TensorDescription(
element=cutlass_bindings.float16, layout=cutlass_bindings.RowMajor,
alignment=8
)
B = TensorDescription(
element=cutlass_bindings.float16, layout=cutlass_bindings.RowMajor,
alignment=8
)
C = TensorDescription(
element=cutlass_bindings.float16, layout=cutlass_bindings.ColumnMajor,
alignment=8
)
element_epilogue = cutlass_bindings.float32
epilogue_functor = LinearCombination(
C.element, C.alignment,
math_inst.element_accumulator, element_epilogue)
swizzling_functor = cutlass_bindings.IdentitySwizzle1
operation = GemmOperationUniversal(
arch=80, tile_description=tile_description,
A=A, B=B, C=C,
epilogue_functor=epilogue_functor, swizzling_functor=swizzling_functor
)
self.assertTrue(test_all_gemm(operation, "universal"))
def test_SM80_Device_Gemm_f16t_f16t_f32t_tensor_op_f32_128x256x64_64x64x64(self):
math_inst = MathInstruction(
instruction_shape=[16, 8, 16],
element_a=cutlass_bindings.float16, element_b=cutlass_bindings.float16,
element_accumulator=cutlass_bindings.float32, opcode_class=cutlass_bindings.OpClass.TensorOp,
math_operation=MathOperation.multiply_add
)
tile_description = TileDescription(
threadblock_shape=[128, 256, 64],
stages=3, warp_count=[2, 4, 1],
math_instruction=math_inst
)
A = TensorDescription(
element=cutlass_bindings.float16, layout=cutlass_bindings.ColumnMajor,
alignment=8
)
B = TensorDescription(
element=cutlass_bindings.float16, layout=cutlass_bindings.ColumnMajor,
alignment=8
)
C = TensorDescription(
element=cutlass_bindings.float32, layout=cutlass_bindings.ColumnMajor,
alignment=4
)
element_epilogue = cutlass_bindings.float32
epilogue_functor = LinearCombination(
C.element, C.alignment,
math_inst.element_accumulator, element_epilogue)
swizzling_functor = cutlass_bindings.IdentitySwizzle1
operation = GemmOperationUniversal(
arch=80, tile_description=tile_description,
A=A, B=B, C=C,
epilogue_functor=epilogue_functor, swizzling_functor=swizzling_functor
)
self.assertTrue(test_all_gemm(operation, "universal"))
if __name__ == '__main__':
cutlass.backend.get_memory_pool(2**30, 2**30)
unittest.main()
| cutlass-main | test/python/backend/gemm/gemm_f16_sm80.py |
#################################################################################################
#
# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
from functools import partial
import cutlass.backend
from cutlass.backend import *
from cutlass.backend import library
from cutlass.backend.test import *
import unittest
from cutlass.backend.test.utils import LayoutCombination, get_name
from cutlass.backend.test.gemm_testbed import test_all_gemm
from cutlass.backend.utils.device import device_cc
name_fn = partial(get_name, element_a=cutlass_bindings.int8, element_b=cutlass_bindings.int8, arch=90)
def add_test(cls, layouts, alignments, element_output, element_accumulator, element_epilogue,
cluster_shape, threadblock_shape, stages, opclass, persistent=False):
"""
Create a test-running function with the given specification and set it as a method of `cls`.
:param cls: class to which the generated method will be added
:type cls: type
:param layouts: indexable container of layouts of A, B, and C operands
:param alignments: indexable container of alignments of A, B, and C operands
:param element_output: data type of the output element
:param element_accumulator: data type used in accumulation
:param element_epilogue: data type used in computing the epilogue
:param cluster_shape: indexable container of dimensions of threadblock cluster to be launched
:param threadblock_shape: indexable container of dimensions of threadblock tiles
:param stages: number of pipeline stages to use in the kernel
:type stages: int
:param opclass: class of operation being performed (e.g., SIMT, Tensor Core)
:type opclass: cutlass_bindings.OpClass
:param persistent: whether this is a persistent warp-specialized kernel
:type persistent: bool
"""
def run(self):
"""
Dynamically-generated function that constructs a GEMM operation and verifies it against
multiple test cases.
"""
element_A = cutlass_bindings.int8
element_B = cutlass_bindings.int8
inst_shape = [1, 1, 1] if opclass == cutlass_bindings.OpClass.Simt else None
warp_count = [2, 2, 1] if opclass == cutlass_bindings.OpClass.Simt else None
math_inst = MathInstruction(
instruction_shape=inst_shape,
element_a=element_A, element_b=element_B, element_accumulator=element_accumulator,
opcode_class=opclass, math_operation=MathOperation.multiply_add
)
tile_description = TileDescription(
threadblock_shape=threadblock_shape,
cluster_shape=cluster_shape,
stages=stages, warp_count=warp_count,
math_instruction=math_inst,
persistent=persistent
)
A = TensorDescription(element=element_A, layout=layouts[0], alignment=alignments[0])
B = TensorDescription(element=element_B, layout=layouts[1], alignment=alignments[1])
C = TensorDescription(element=element_output, layout=layouts[2], alignment=alignments[2])
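        # Added note: the int8 SIMT path below switches to LinearCombinationClamp,
        # which saturates the int32 accumulator when converting to the narrower
        # output type; the Tensor Core path keeps the plain LinearCombination
        # epilogue.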
if opclass == cutlass_bindings.OpClass.Simt:
epilogue_functor_cls = LinearCombinationClamp
else:
epilogue_functor_cls = LinearCombination
epilogue_functor = epilogue_functor_cls(C.element, C.alignment, math_inst.element_accumulator, element_epilogue)
swizzling_functor = cutlass_bindings.IdentitySwizzle1
operation = GemmOperationUniversal(
arch=90, tile_description=tile_description, A=A, B=B, C=C,
epilogue_functor=epilogue_functor, swizzling_functor=swizzling_functor)
self.assertTrue(test_all_gemm(operation, "universal"))
if persistent:
suffix = "_persistent"
else:
suffix = ""
name = name_fn(layouts, alignments, element_output, element_accumulator,
element_epilogue, cluster_shape, threadblock_shape, stages, opclass=opclass, suffix=suffix)
setattr(cls, name, run)
return run
@unittest.skipIf(device_cc() < 90, "Device compute capability is insufficient for SM90 tests.")
class GemmS8Sm90(unittest.TestCase):
"""
Wrapper class to which tests will be added dynamically in __main__
"""
pass
add_test_tensorop = partial(add_test, opclass=cutlass_bindings.OpClass.TensorOp)
add_test_simt = partial(add_test, opclass=cutlass_bindings.OpClass.Simt)
# Tests with 1x1x1 clusters
add_test_tensorop(GemmS8Sm90, LayoutCombination.TNN, [16, 16, 16], cutlass_bindings.int8, cutlass_bindings.int32, cutlass_bindings.int32, [1, 1, 1], [128, 128, 128], 3)
add_test_tensorop(GemmS8Sm90, LayoutCombination.TNT, [16, 16, 16], cutlass_bindings.int8, cutlass_bindings.int32, cutlass_bindings.int32, [1, 1, 1], [128, 128, 128], None)
add_test_tensorop(GemmS8Sm90, LayoutCombination.TNT, [16, 16, 8], cutlass_bindings.int8, cutlass_bindings.int32, cutlass_bindings.int32, [1, 1, 1], [128, 128, 128], None)
add_test_tensorop(GemmS8Sm90, LayoutCombination.TNT, [16, 16, 16], cutlass_bindings.int8, cutlass_bindings.int32, cutlass_bindings.int32, [1, 1, 1], [64, 128, 128], None)
add_test_tensorop(GemmS8Sm90, LayoutCombination.TNT, [16, 16, 16], cutlass_bindings.int8, cutlass_bindings.int32, cutlass_bindings.int32, [1, 1, 1], [128, 64, 32], None)
add_test_tensorop(GemmS8Sm90, LayoutCombination.TNT, [4, 4, 16], cutlass_bindings.int8, cutlass_bindings.int32, cutlass_bindings.int32, [1, 1, 1], [128, 128, 128], None)
# Tests with different cluster shapes
add_test_tensorop(GemmS8Sm90, LayoutCombination.TNT, [16, 16, 16], cutlass_bindings.int8, cutlass_bindings.int32, cutlass_bindings.int32, [2, 2, 1], [128, 128, 128], None)
add_test_tensorop(GemmS8Sm90, LayoutCombination.TNT, [16, 16, 16], cutlass_bindings.int8, cutlass_bindings.int32, cutlass_bindings.int32, [1, 4, 1], [128, 128, 128], None)
add_test_tensorop(GemmS8Sm90, LayoutCombination.TNT, [16, 16, 16], cutlass_bindings.int8, cutlass_bindings.int32, cutlass_bindings.int32, [4, 4, 1], [128, 128, 128], None)
# Tests with persistent warp-specialized threadblocks
add_test_tensorop(GemmS8Sm90, LayoutCombination.TNT, [16, 16, 16], cutlass_bindings.int8, cutlass_bindings.int32, cutlass_bindings.int32, [2, 1, 1], [128, 128, 128], None, persistent=True)
# Tests for SIMT
add_test_simt(GemmS8Sm90, LayoutCombination.TNN, [1, 1, 1], cutlass_bindings.int8, cutlass_bindings.int32, cutlass_bindings.int32, [1, 1, 1], [64, 32, 8], 2)
if __name__ == '__main__':
cutlass.backend.get_memory_pool(2**30, 2**30)
unittest.main()
| cutlass-main | test/python/backend/gemm/gemm_s8_sm90.py |
#################################################################################################
#
# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
import cutlass.backend
from cutlass.backend import *
from cutlass.backend.memory_manager import get_allocated_size
from cutlass.backend.test import *
import unittest
from cutlass.backend.test.gemm_testbed import test_all_gemm
from cutlass.backend.utils.device import device_cc
@unittest.skipIf(device_cc() < 80, "Device compute capability is insufficient for SM80 tests.")
class GemmF32nF32nF32nTensorOpF32Sm80(unittest.TestCase):
def test_SM80_Device_Gemm_f32t_f32n_f32t_tensor_op_bf16_f32_128x128x32_64x64x32(self):
math_inst = MathInstruction(
instruction_shape=[16, 8, 8],
element_a=cutlass_bindings.float32, element_b=cutlass_bindings.float32,
element_accumulator=cutlass_bindings.float32, opcode_class=cutlass_bindings.OpClass.TensorOp,
math_operation=MathOperation.multiply_add_fast_bf16
)
tile_description = TileDescription(
threadblock_shape=[128, 128, 32],
stages=3, warp_count=[2, 2, 1],
math_instruction=math_inst
)
A = TensorDescription(
element=cutlass_bindings.float32, layout=cutlass_bindings.RowMajor,
alignment=4
)
B = TensorDescription(
element=cutlass_bindings.float32, layout=cutlass_bindings.ColumnMajor,
alignment=4
)
C = TensorDescription(
element=cutlass_bindings.float32, layout=cutlass_bindings.RowMajor,
alignment=4
)
element_epilogue = cutlass_bindings.float32
epilogue_functor = LinearCombination(
C.element, C.alignment,
math_inst.element_accumulator, element_epilogue)
swizzling_functor = cutlass_bindings.IdentitySwizzle1
operation = GemmOperationUniversal(
arch=80, tile_description=tile_description,
A=A, B=B, C=C,
epilogue_functor=epilogue_functor, swizzling_functor=swizzling_functor
)
self.assertTrue(test_all_gemm(operation, "universal"))
def test_SM80_Device_Gemm_f32n_f32n_f32t_tensor_op_f32_128x128x32_64x64x32(self):
math_inst = MathInstruction(
instruction_shape=[16, 8, 8],
element_a=cutlass_bindings.float32, element_b=cutlass_bindings.float32,
element_accumulator=cutlass_bindings.float32, opcode_class=cutlass_bindings.OpClass.TensorOp,
math_operation=MathOperation.multiply_add
)
tile_description = TileDescription(
threadblock_shape=[128, 128, 32],
stages=3, warp_count=[2, 2, 1],
math_instruction=math_inst
)
A = TensorDescription(
element=cutlass_bindings.float32, layout=cutlass_bindings.ColumnMajor,
alignment=4
)
B = TensorDescription(
element=cutlass_bindings.float32, layout=cutlass_bindings.ColumnMajor,
alignment=4
)
C = TensorDescription(
element=cutlass_bindings.float32, layout=cutlass_bindings.RowMajor,
alignment=4
)
element_epilogue = cutlass_bindings.float32
epilogue_functor = LinearCombination(
C.element, C.alignment,
math_inst.element_accumulator, element_epilogue)
swizzling_functor = cutlass_bindings.IdentitySwizzle1
operation = GemmOperationUniversal(
arch=80, tile_description=tile_description,
A=A, B=B, C=C,
epilogue_functor=epilogue_functor, swizzling_functor=swizzling_functor
)
self.assertTrue(test_all_gemm(operation, "universal"))
def test_SM80_Device_Gemm_f32n_f32n_f32t_tensor_op_fast_accurate_f32_64x64x32_32x32x32(self):
math_inst = MathInstruction(
instruction_shape=[16, 8, 8],
element_a=cutlass_bindings.float32, element_b=cutlass_bindings.float32,
element_accumulator=cutlass_bindings.float32, opcode_class=cutlass_bindings.OpClass.TensorOp,
math_operation=MathOperation.multiply_add_fast_f32
)
tile_description = TileDescription(
threadblock_shape=[64, 64, 32],
stages=3, warp_count=[2, 2, 1],
math_instruction=math_inst
)
A = TensorDescription(
element=cutlass_bindings.float32, layout=cutlass_bindings.ColumnMajor,
alignment=4
)
B = TensorDescription(
element=cutlass_bindings.float32, layout=cutlass_bindings.ColumnMajor,
alignment=4
)
C = TensorDescription(
element=cutlass_bindings.float32, layout=cutlass_bindings.RowMajor,
alignment=4
)
element_epilogue = cutlass_bindings.float32
epilogue_functor = LinearCombination(
C.element, C.alignment,
math_inst.element_accumulator, element_epilogue)
swizzling_functor = cutlass_bindings.IdentitySwizzle1
operation = GemmOperationUniversal(
arch=80, tile_description=tile_description,
A=A, B=B, C=C,
epilogue_functor=epilogue_functor, swizzling_functor=swizzling_functor
)
self.assertTrue(test_all_gemm(operation, "universal"))
if __name__ == '__main__':
cutlass.backend.get_memory_pool(2**24, 2**24)
cutlass.backend.compiler.load_from_cache()
unittest.main()
| cutlass-main | test/python/backend/gemm/gemm_f32_sm80.py |
#################################################################################################
#
# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
import cutlass.backend
from cutlass.backend import *
from cutlass.backend.epilogue import LinearCombinationClamp
from cutlass.backend.test import *
import unittest
from cutlass.backend.test.gemm_testbed import test_all_gemm
from cutlass.backend.utils.device import device_cc
@unittest.skipIf(device_cc() < 80, "Device compute capability is insufficient for SM80 tests.")
class GemmS8TensorOpF32Sm80(unittest.TestCase):
def test_SM80_Device_Gemm_s8t_s8n_s8t_tensor_op_s32_64x64x64_32x32x64(self):
math_inst = MathInstruction(
instruction_shape=[16, 8, 32],
element_a=cutlass_bindings.int8, element_b=cutlass_bindings.int8,
element_accumulator=cutlass_bindings.int32, opcode_class=cutlass_bindings.OpClass.TensorOp,
math_operation=MathOperation.multiply_add_saturate
)
tile_description = TileDescription(
threadblock_shape=[64, 64, 64],
stages=6, warp_count=[2, 2, 1],
math_instruction=math_inst
)
A = TensorDescription(
element=cutlass_bindings.int8, layout=cutlass_bindings.ColumnMajorInterleaved32,
alignment=16
)
B = TensorDescription(
element=cutlass_bindings.int8, layout=cutlass_bindings.RowMajorInterleaved32,
alignment=16
)
C = TensorDescription(
element=cutlass_bindings.int8, layout=cutlass_bindings.ColumnMajorInterleaved32,
alignment=8
)
epilogue_functor = FastLinearCombinationClamp(
C.element, C.alignment
)
swizzling_functor = cutlass_bindings.IdentitySwizzle1
operation = GemmOperationUniversal(
arch=80, tile_description=tile_description,
A=A, B=B, C=C,
epilogue_functor=epilogue_functor, swizzling_functor=swizzling_functor
)
self.assertTrue(test_all_gemm(operation, "interleaved"))
def test_SM80_Device_Gemm_s8t_s8n_s8t_tensor_op_s32_256x128x128_64x64x128(self):
math_inst = MathInstruction(
instruction_shape=[16, 8, 32],
element_a=cutlass_bindings.int8, element_b=cutlass_bindings.int8,
element_accumulator=cutlass_bindings.int32, opcode_class=cutlass_bindings.OpClass.TensorOp,
math_operation=MathOperation.multiply_add
)
tile_description = TileDescription(
threadblock_shape=[128, 128, 128],
stages=3, warp_count=[2, 2, 1],
math_instruction=math_inst
)
A = TensorDescription(
element=cutlass_bindings.int8, layout=cutlass_bindings.RowMajor,
alignment=16
)
B = TensorDescription(
element=cutlass_bindings.int8, layout=cutlass_bindings.ColumnMajor,
alignment=16
)
C = TensorDescription(
element=cutlass_bindings.int8, layout=cutlass_bindings.RowMajor,
alignment=16
)
epilogue_functor = FastLinearCombinationClamp(
C.element, C.alignment
)
swizzling_functor = cutlass_bindings.IdentitySwizzle1
operation = GemmOperationUniversal(
arch=80, tile_description=tile_description,
A=A, B=B, C=C,
epilogue_functor=epilogue_functor, swizzling_functor=swizzling_functor
)
self.assertTrue(test_all_gemm(operation, "multistage"))
def test_SM80_Device_Gemm_s8t_s8n_s8n_tensor_op_s32_128x128x128_64x64x128(self):
math_inst = MathInstruction(
instruction_shape=[16, 8, 32],
element_a=cutlass_bindings.int8, element_b=cutlass_bindings.int8,
element_accumulator=cutlass_bindings.int32, opcode_class=cutlass_bindings.OpClass.TensorOp,
math_operation=MathOperation.multiply_add
)
tile_description = TileDescription(
threadblock_shape=[128, 128, 128],
stages=3, warp_count=[2, 2, 1],
math_instruction=math_inst
)
A = TensorDescription(
element=cutlass_bindings.int8, layout=cutlass_bindings.RowMajor,
alignment=16
)
B = TensorDescription(
element=cutlass_bindings.int8, layout=cutlass_bindings.ColumnMajor,
alignment=16
)
C = TensorDescription(
element=cutlass_bindings.int8, layout=cutlass_bindings.ColumnMajor,
alignment=16
)
epilogue_functor = FastLinearCombinationClamp(
C.element, C.alignment
)
swizzling_functor = cutlass_bindings.IdentitySwizzle1
operation = GemmOperationUniversal(
arch=80, tile_description=tile_description,
A=A, B=B, C=C,
epilogue_functor=epilogue_functor, swizzling_functor=swizzling_functor
)
self.assertTrue(test_all_gemm(operation, "multistage"))
def test_SM80_Device_Gemm_s8t_s8n_s32n_tensor_op_s32_128x128x128_64x64x128(self):
math_inst = MathInstruction(
instruction_shape=[16, 8, 32],
element_a=cutlass_bindings.int8, element_b=cutlass_bindings.int8,
element_accumulator=cutlass_bindings.int32, opcode_class=cutlass_bindings.OpClass.TensorOp,
math_operation=MathOperation.multiply_add
)
tile_description = TileDescription(
threadblock_shape=[128, 128, 128],
stages=3, warp_count=[2, 2, 1],
math_instruction=math_inst
)
A = TensorDescription(
element=cutlass_bindings.int8, layout=cutlass_bindings.RowMajor,
alignment=16
)
B = TensorDescription(
element=cutlass_bindings.int8, layout=cutlass_bindings.ColumnMajor,
alignment=16
)
C = TensorDescription(
element=cutlass_bindings.int32, layout=cutlass_bindings.ColumnMajor,
alignment=4
)
element_epilogue = cutlass_bindings.int32
epilogue_functor = LinearCombinationClamp(
C.element, C.alignment, math_inst.element_accumulator,
element_epilogue
)
swizzling_functor = cutlass_bindings.IdentitySwizzle1
operation = GemmOperationUniversal(
arch=80, tile_description=tile_description,
A=A, B=B, C=C,
epilogue_functor=epilogue_functor, swizzling_functor=swizzling_functor
)
self.assertTrue(test_all_gemm(operation, "multistage"))
def test_SM80_Device_Gemm_s8t_s8n_s32t_tensor_op_s32_128x128x128_64x64x128(self):
math_inst = MathInstruction(
instruction_shape=[16, 8, 32],
element_a=cutlass_bindings.int8, element_b=cutlass_bindings.int8,
element_accumulator=cutlass_bindings.int32, opcode_class=cutlass_bindings.OpClass.TensorOp,
math_operation=MathOperation.multiply_add
)
tile_description = TileDescription(
threadblock_shape=[128, 128, 128],
stages=3, warp_count=[2, 2, 1],
math_instruction=math_inst
)
A = TensorDescription(
element=cutlass_bindings.int8, layout=cutlass_bindings.RowMajor,
alignment=16
)
B = TensorDescription(
element=cutlass_bindings.int8, layout=cutlass_bindings.ColumnMajor,
alignment=16
)
C = TensorDescription(
element=cutlass_bindings.int32, layout=cutlass_bindings.RowMajor,
alignment=4
)
element_epilogue = cutlass_bindings.int32
epilogue_functor = LinearCombinationClamp(
C.element, C.alignment, math_inst.element_accumulator,
element_epilogue
)
swizzling_functor = cutlass_bindings.IdentitySwizzle1
operation = GemmOperationUniversal(
arch=80, tile_description=tile_description,
A=A, B=B, C=C,
epilogue_functor=epilogue_functor, swizzling_functor=swizzling_functor
)
self.assertTrue(test_all_gemm(operation, "multistage"))
if __name__ == '__main__':
cutlass.backend.get_memory_pool(2**30, 2**30)
unittest.main()
| cutlass-main | test/python/backend/gemm/gemm_s8_sm80.py |
#################################################################################################
#
# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
import cutlass.backend
from cutlass.backend import *
from cutlass.backend.test import *
import unittest
from cutlass.backend.test.gemm_grouped_testbed import TestbedGrouped
from cutlass.backend.utils.device import device_cc
@unittest.skipIf(device_cc() < 80, "Device compute capability is insufficient for SM80 tests.")
class GemmGroupedSm80(unittest.TestCase):
def test_SM80_Device_GemmGrouped_f16n_f16t_f32n_tensor_op_f32_128x128x32_64x64x32(self):
math_inst = MathInstruction(
instruction_shape=[16, 8, 16], element_a=cutlass_bindings.float16,
element_b=cutlass_bindings.float16, element_accumulator=cutlass_bindings.float32,
opcode_class=cutlass_bindings.OpClass.TensorOp,
math_operation=MathOperation.multiply_add
)
tile_description = TileDescription(
threadblock_shape=[128, 128, 32],
stages=3, warp_count=[2, 2, 1],
math_instruction=math_inst
)
A = TensorDescription(
element=cutlass_bindings.float16, layout=cutlass_bindings.ColumnMajor,
alignment=8
)
B = TensorDescription(
element=cutlass_bindings.float16, layout=cutlass_bindings.ColumnMajor,
alignment=8
)
C = TensorDescription(
element=cutlass_bindings.float32, layout=cutlass_bindings.ColumnMajor,
alignment=4
)
element_epilogue = cutlass_bindings.float32
epilogue_functor = LinearCombination(
C.element, C.alignment,
math_inst.element_accumulator, element_epilogue)
swizzling_functor = cutlass_bindings.BatchedIdentitySwizzle
for precompute_mode in [SchedulerMode.Device, SchedulerMode.Host]:
operation = GemmOperationGrouped(
80,
tile_description, A, B, C,
epilogue_functor, swizzling_functor,
precompute_mode=precompute_mode
)
testbed = TestbedGrouped(operation=operation)
self.assertTrue(testbed.run(24))
def test_SM80_Device_GemmGrouped_f64t_f64t_f64n_tensor_op_f64_64x64x16_32x32x16(self):
math_inst = MathInstruction(
instruction_shape=[8, 8, 4], element_a=cutlass_bindings.float64,
element_b=cutlass_bindings.float64, element_accumulator=cutlass_bindings.float64,
opcode_class=cutlass_bindings.OpClass.TensorOp,
math_operation=MathOperation.multiply_add
)
tile_description = TileDescription(
threadblock_shape=[64, 64, 16],
stages=4, warp_count=[2, 2, 1],
math_instruction=math_inst
)
A = TensorDescription(
element=cutlass_bindings.float64, layout=cutlass_bindings.RowMajor,
alignment=1
)
B = TensorDescription(
element=cutlass_bindings.float64, layout=cutlass_bindings.RowMajor,
alignment=1
)
C = TensorDescription(
element=cutlass_bindings.float64, layout=cutlass_bindings.ColumnMajor,
alignment=1
)
element_epilogue = cutlass_bindings.float64
epilogue_functor = LinearCombination(
C.element, C.alignment,
math_inst.element_accumulator, element_epilogue)
swizzling_functor = cutlass_bindings.BatchedIdentitySwizzle
for precompute_mode in [SchedulerMode.Device, SchedulerMode.Host]:
operation = GemmOperationGrouped(
80,
tile_description, A, B, C,
epilogue_functor, swizzling_functor,
precompute_mode=precompute_mode
)
testbed = TestbedGrouped(operation=operation)
self.assertTrue(testbed.run(24))
def test_SM80_Device_GemmGrouped_f32t_f32t_f32t_simt_f32_128x64x8_64x32x1(self):
math_inst = MathInstruction(
instruction_shape=[1, 1, 1], element_a=cutlass_bindings.float32,
element_b=cutlass_bindings.float32, element_accumulator=cutlass_bindings.float32,
opcode_class=cutlass_bindings.OpClass.Simt,
math_operation=MathOperation.multiply_add
)
tile_description = TileDescription(
threadblock_shape=[128, 64, 8],
stages=4, warp_count=[2, 2, 1],
math_instruction=math_inst
)
A = TensorDescription(
element=cutlass_bindings.float32, layout=cutlass_bindings.RowMajor,
alignment=1
)
B = TensorDescription(
element=cutlass_bindings.float32, layout=cutlass_bindings.RowMajor,
alignment=1
)
C = TensorDescription(
element=cutlass_bindings.float32, layout=cutlass_bindings.RowMajor,
alignment=1
)
element_epilogue = cutlass_bindings.float32
epilogue_functor = LinearCombination(
C.element, C.alignment,
math_inst.element_accumulator, element_epilogue)
swizzling_functor = cutlass_bindings.BatchedIdentitySwizzle
for precompute_mode in [SchedulerMode.Device, SchedulerMode.Host]:
operation = GemmOperationGrouped(
80,
tile_description, A, B, C,
epilogue_functor, swizzling_functor,
precompute_mode=precompute_mode
)
testbed = TestbedGrouped(operation=operation)
self.assertTrue(testbed.run(27))
def test_SM80_Device_GemmGrouped_f16n_f16t_f32n_tensor_op_f32_128x128x32_64x64x32_cache(self):
math_inst = MathInstruction(
instruction_shape=[16, 8, 16], element_a=cutlass_bindings.float16,
element_b=cutlass_bindings.float16, element_accumulator=cutlass_bindings.float32,
opcode_class=cutlass_bindings.OpClass.TensorOp,
math_operation=MathOperation.multiply_add
)
tile_description = TileDescription(
threadblock_shape=[128, 128, 32],
stages=3, warp_count=[2, 2, 1],
math_instruction=math_inst
)
A = TensorDescription(
element=cutlass_bindings.float16, layout=cutlass_bindings.ColumnMajor,
alignment=8
)
B = TensorDescription(
element=cutlass_bindings.float16, layout=cutlass_bindings.ColumnMajor,
alignment=8
)
C = TensorDescription(
element=cutlass_bindings.float32, layout=cutlass_bindings.ColumnMajor,
alignment=4
)
element_epilogue = cutlass_bindings.float32
epilogue_functor = LinearCombination(
C.element, C.alignment,
math_inst.element_accumulator, element_epilogue)
swizzling_functor = cutlass_bindings.BatchedIdentitySwizzle
for precompute_mode in [SchedulerMode.Device, SchedulerMode.Host]:
operation = GemmOperationGrouped(
80,
tile_description, A, B, C,
epilogue_functor, swizzling_functor,
precompute_mode=precompute_mode
)
testbed = TestbedGrouped(operation=operation)
self.assertTrue(testbed.run(5))
if __name__ == '__main__':
cutlass.backend.get_memory_pool(2**30, 2**30)
unittest.main()
| cutlass-main | test/python/backend/gemm/gemm_grouped_sm80.py |
#################################################################################################
#
# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
import cutlass.backend
import unittest
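# `TestLoader.discover` collects every module matching `gemm_*.py` in the current directory,
# so all of the individual GEMM test files here are run as a single suite.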
if __name__ == '__main__':
cutlass.backend.get_memory_pool(2**30, 2**30)
loader = unittest.TestLoader()
tests = loader.discover('./', 'gemm_*.py')
testRunner = unittest.runner.TextTestRunner()
testRunner.run(tests)
| cutlass-main | test/python/backend/gemm/run_all_tests.py |
#################################################################################################
#
# Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
"""
High-level tests for running batched GEMMs
"""
from functools import partial
from math import prod
import cutlass
import logging
import torch
import unittest
from cutlass.backend.test.utils import LayoutCombination, add_test_gemm
from cutlass.backend.utils.device import device_cc
cutlass.set_log_level(logging.WARNING)
torch.manual_seed(2023)
def pytorch_reference(A, B, C, alpha, beta):
# Get the batch count. Assume that any of A, B, and C
# with a batch dimension have a matching batch count. Thus,
# we break out of the loop once we have found the first
# tensor containing a batch dimension.
batch_count = (1,)
for tensor in [A, B, C]:
if len(tensor.shape) > 2:
batch_count = tensor.shape[:-2]
break
int_batch_count = prod(batch_count)
def add_batch(tensor):
if len(tensor.shape) == 2:
return tensor.unsqueeze(0).repeat(int_batch_count, 1, 1)
else:
return tensor.reshape(-1, tensor.size(-2), tensor.size(-1))
# Reshape tensors to have batch dimension
A = add_batch(A)
B = add_batch(B)
C = add_batch(C)
ret = (torch.bmm(A, B) * alpha) + (C * beta)
reshape_vals = batch_count + C.shape[-2:]
return ret.reshape(*reshape_vals)
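# Shape example for the reference above (illustrative values): with A of shape (2, 3, 512, 128),
# B of shape (128, 256), and C of shape (512, 256), batch_count is (2, 3), the 2-D operands are
# broadcast to (6, 128, 256) and (6, 512, 256) while A is flattened to (6, 512, 128),
# torch.bmm produces (6, 512, 256), and the final reshape returns (2, 3, 512, 256).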
def initialize(rows, cols, batch):
tensor = torch.randint(-3, 3, size=(rows*cols*prod(batch),), device='cuda').half()
if len(batch) > 0 and prod(batch) > 1:
reshape_vals = batch + (rows, cols)
return tensor.reshape(*reshape_vals)
else:
return tensor.reshape(rows, cols)
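# Condensed sketch of the call pattern exercised by the tests below. `_batched_gemm_sketch` is a
# hypothetical helper added purely for illustration; it mirrors the calls made in `run_batched`
# and is never invoked by the test suite.
def _batched_gemm_sketch():
    # Batch only A: B and C stay 2-D, matching the batched-A cases covered by the tests.
    A = initialize(512, 128, (2, 3))
    B = initialize(128, 256, (1,))
    C = initialize(512, 256, (1,))
    D = initialize(512, 256, (2, 3))
    plan = cutlass.op.Gemm(A=A, B=B, C=C, D=D, element_accumulator=cutlass.DataType.f32)
    plan.run(A, B, C, D, 1., 2.)
    return D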
class GemmF16Batched(unittest.TestCase):
def run_batched(self, batch_count: tuple, batch_A: bool, batch_B: bool, batch_C: bool):
M = 512
N = 256
K = 128
alpha = 1.
beta = 2.
A = initialize(M, K, batch_count if batch_A else (1,))
B = initialize(K, N, batch_count if batch_B else (1,))
C = initialize(M, N, batch_count if batch_C else (1,))
D = initialize(M, N, batch_count)
plan = cutlass.op.Gemm(A=A, B=B, C=C, D=D, element_accumulator=cutlass.DataType.f32)
plan.run(A, B, C, D, alpha, beta)
reference = pytorch_reference(A, B, C, alpha, beta)
assert reference.equal(D)
def test_batched_ABC(self):
self.run_batched((3,), True, True, True)
self.run_batched((2, 3), True, True, True)
def test_batched_AB(self):
self.run_batched((3,), True, True, False)
self.run_batched((2, 3), True, True, False)
def test_batched_AC(self):
self.run_batched((3,), True, False, True)
self.run_batched((2, 3), True, False, True)
def test_batched_BC(self):
self.run_batched((3,), False, True, True)
self.run_batched((2, 3), False, True, True)
def test_batched_A(self):
self.run_batched((3,), True, False, False)
self.run_batched((2, 3), True, False, False)
def test_batched_B(self):
self.run_batched((3,), False, True, False)
self.run_batched((2, 3), False, True, False)
def test_batched_C(self):
self.run_batched((3,), False, False, True)
self.run_batched((2, 3), False, False, True)
if __name__ == '__main__':
unittest.main()
| cutlass-main | test/python/gemm/gemm_batched.py |
#################################################################################################
#
# Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
"""
Low-level functionality tests for GEMM with F16 operands on SM90
"""
from functools import partial
import cutlass
import logging
import unittest
from cutlass.backend.test.utils import LayoutCombination, add_test_gemm
from cutlass.backend.utils.device import device_cc
cutlass.set_log_level(logging.WARNING)
cc = 90
@unittest.skipIf(device_cc() < cc, 'Device compute capability is insufficient for SM90 tests.')
class GemmF16Sm90(unittest.TestCase):
"""
Wrapper class to which tests will be added dynamically in __main__
"""
pass
add_test_specialized = partial(add_test_gemm, cls=GemmF16Sm90, element=cutlass.DataType.f16,
warp_count=None, compilation_modes=['nvcc'])
add_test_tensorop = partial(add_test_specialized, opclass=cutlass.OpcodeClass.TensorOp)
# Tests with 1x1x1 clusters
add_test_unit_cluster = partial(add_test_tensorop, cluster_shape=[1, 1, 1])
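# With the partial bindings above, a call such as
#   add_test_unit_cluster(layouts=LayoutCombination.NNN, alignments=[8, 8, 8], ...)
# expands to
#   add_test_gemm(cls=GemmF16Sm90, element=cutlass.DataType.f16, warp_count=None,
#                 compilation_modes=['nvcc'], opclass=cutlass.OpcodeClass.TensorOp,
#                 cluster_shape=[1, 1, 1], layouts=LayoutCombination.NNN, alignments=[8, 8, 8], ...)
# so each line below only states the layouts, alignments, output/accumulator types, tile shape,
# and stage count that distinguish the generated test case.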
add_test_unit_cluster(layouts=LayoutCombination.NNN, alignments=[8, 8, 8], element_output=cutlass.DataType.f16,
element_accumulator=cutlass.DataType.f32, threadblock_shape=[128, 128, 32], stages=3)
add_test_unit_cluster(layouts=LayoutCombination.NNT, alignments=[8, 8, 8], element_output=cutlass.DataType.f16,
element_accumulator=cutlass.DataType.f32, threadblock_shape=[128, 128, 32], stages=None)
add_test_unit_cluster(layouts=LayoutCombination.NTN, alignments=[8, 8, 8], element_output=cutlass.DataType.f16,
element_accumulator=cutlass.DataType.f32, threadblock_shape=[128, 128, 32], stages=None)
add_test_unit_cluster(layouts=LayoutCombination.NTT, alignments=[8, 8, 8], element_output=cutlass.DataType.f16,
element_accumulator=cutlass.DataType.f32, threadblock_shape=[128, 128, 32], stages=None)
add_test_unit_cluster(layouts=LayoutCombination.TNN, alignments=[8, 8, 8], element_output=cutlass.DataType.f16,
element_accumulator=cutlass.DataType.f32, threadblock_shape=[128, 128, 32], stages=None)
add_test_unit_cluster(layouts=LayoutCombination.TNT, alignments=[4, 4, 8], element_output=cutlass.DataType.f16,
element_accumulator=cutlass.DataType.f32, threadblock_shape=[128, 128, 32], stages=None)
add_test_unit_cluster(layouts=LayoutCombination.TNT, alignments=[4, 4, 8], element_output=cutlass.DataType.f16,
element_accumulator=cutlass.DataType.f16, threadblock_shape=[128, 128, 32], stages=None)
add_test_unit_cluster(layouts=LayoutCombination.TNT, alignments=[8, 8, 8], element_output=cutlass.DataType.f16,
element_accumulator=cutlass.DataType.f16, threadblock_shape=[128, 128, 32], stages=None)
add_test_unit_cluster(layouts=LayoutCombination.TNT, alignments=[8, 8, 8], element_output=cutlass.DataType.f16,
element_accumulator=cutlass.DataType.f32, threadblock_shape=[ 64, 64, 64], stages=5)
add_test_unit_cluster(layouts=LayoutCombination.TNT, alignments=[2, 2, 2], element_output=cutlass.DataType.f16,
element_accumulator=cutlass.DataType.f16, threadblock_shape=[128, 128, 32], stages=None)
# Tests with different cluster shapes
add_test_cluster_shape = partial(add_test_tensorop, threadblock_shape=[64, 128, 64], stages=None)
add_test_cluster_shape(layouts=LayoutCombination.TTN, alignments=[8, 8, 8], element_output=cutlass.DataType.f16,
element_accumulator=cutlass.DataType.f16, cluster_shape=[2, 2, 1])
add_test_cluster_shape(layouts=LayoutCombination.TNN, alignments=[8, 8, 4], element_output=cutlass.DataType.f32,
element_accumulator=cutlass.DataType.f32, cluster_shape=[2, 2, 1])
add_test_cluster_shape(layouts=LayoutCombination.NTN, alignments=[8, 8, 4], element_output=cutlass.DataType.f32,
element_accumulator=cutlass.DataType.f32, cluster_shape=[2, 2, 1])
add_test_cluster_shape(layouts=LayoutCombination.NNN, alignments=[8, 8, 4], element_output=cutlass.DataType.f32,
element_accumulator=cutlass.DataType.f32, cluster_shape=[2, 2, 1])
add_test_cluster_shape(layouts=LayoutCombination.TTN, alignments=[8, 8, 4], element_output=cutlass.DataType.f32,
element_accumulator=cutlass.DataType.f32, cluster_shape=[1, 4, 1])
add_test_cluster_shape(layouts=LayoutCombination.TTN, alignments=[8, 8, 4], element_output=cutlass.DataType.f32,
element_accumulator=cutlass.DataType.f32, cluster_shape=[2, 4, 1])
add_test_cluster_shape(layouts=LayoutCombination.TTN, alignments=[8, 8, 4], element_output=cutlass.DataType.f32,
element_accumulator=cutlass.DataType.f32, cluster_shape=[4, 1, 1])
add_test_cluster_shape(layouts=LayoutCombination.TTN, alignments=[8, 8, 4], element_output=cutlass.DataType.f32,
element_accumulator=cutlass.DataType.f32, cluster_shape=[4, 2, 1])
# Tests for different schedule modes
add_test_schedule = partial(add_test_specialized, layouts=LayoutCombination.TTN, alignments=[8, 8, 4],
element_output=cutlass.DataType.f32, element_accumulator=cutlass.DataType.f32,
opclass=cutlass.OpcodeClass.TensorOp, threadblock_shape=[128, 128, 64], stages=None)
add_test_schedule(
cluster_shape=[1, 1, 1],
kernel_schedule=cutlass.KernelScheduleType.TmaWarpSpecializedPingpong,
epilogue_schedule=cutlass.EpilogueScheduleType.TmaWarpSpecialized
)
add_test_schedule(
cluster_shape=[1, 1, 1],
kernel_schedule=cutlass.KernelScheduleType.TmaWarpSpecializedCooperative,
epilogue_schedule=cutlass.EpilogueScheduleType.TmaWarpSpecializedCooperative
)
add_test_schedule(
cluster_shape=[2, 1, 1],
kernel_schedule=cutlass.KernelScheduleType.TmaWarpSpecializedPingpong,
epilogue_schedule=cutlass.EpilogueScheduleType.TmaWarpSpecialized
)
add_test_schedule(
cluster_shape=[2, 1, 1],
kernel_schedule=cutlass.KernelScheduleType.TmaWarpSpecializedCooperative,
epilogue_schedule=cutlass.EpilogueScheduleType.TmaWarpSpecializedCooperative
)
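# Each schedule test above pairs a kernel schedule with its matching epilogue schedule variant:
# TmaWarpSpecializedPingpong with TmaWarpSpecialized, and TmaWarpSpecializedCooperative with
# TmaWarpSpecializedCooperative.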
# Tests using SIMT
add_test_simt = partial(add_test_specialized, opclass=cutlass.OpcodeClass.Simt, alignments=[1, 1, 1], cluster_shape=[1, 1, 1], stages=2)
add_test_simt(layouts=LayoutCombination.NNN, element_output=cutlass.DataType.f16, element_accumulator=cutlass.DataType.f32, threadblock_shape=[128, 128, 8])
add_test_simt(layouts=LayoutCombination.TNN, element_output=cutlass.DataType.f16, element_accumulator=cutlass.DataType.f32, threadblock_shape=[ 64, 128, 8])
add_test_simt(layouts=LayoutCombination.NTN, element_output=cutlass.DataType.f16, element_accumulator=cutlass.DataType.f32, threadblock_shape=[128, 64, 8])
add_test_simt(layouts=LayoutCombination.TTN, element_output=cutlass.DataType.f16, element_accumulator=cutlass.DataType.f32, threadblock_shape=[ 64, 64, 8])
add_test_simt(layouts=LayoutCombination.NNT, element_output=cutlass.DataType.f16, element_accumulator=cutlass.DataType.f16, threadblock_shape=[128, 128, 8])
if __name__ == '__main__':
unittest.main()
| cutlass-main | test/python/gemm/gemm_f16_sm90.py |
#################################################################################################
#
# Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
"""
Low-level functionality tests for GEMM with F64 operands on SM90
"""
from functools import partial
import cutlass
import logging
import unittest
from cutlass.backend.test.utils import LayoutCombination, add_test_gemm
from cutlass.backend.utils.device import device_cc
cutlass.set_log_level(logging.WARNING)
cc = 90
@unittest.skipIf(device_cc() < cc, 'Device compute capability is insufficient for SM90 tests.')
class GemmF64Sm90(unittest.TestCase):
"""
Wrapper class to which tests will be added dynamically in __main__
"""
pass
add_test_specialized = partial(add_test_gemm, cls=GemmF64Sm90, alignments=[1, 1, 1], cluster_shape=[1, 1, 1],
element=cutlass.DataType.f64, element_output=cutlass.DataType.f64,
element_accumulator=cutlass.DataType.f64, compilation_modes=['nvcc'])
add_test_specialized(opclass=cutlass.OpcodeClass.TensorOp, layouts=LayoutCombination.NNT, threadblock_shape=[128, 128, 32], stages=3)
add_test_specialized(opclass=cutlass.OpcodeClass.TensorOp, layouts=LayoutCombination.TNN, threadblock_shape=[128, 128, 32], stages=3)
add_test_specialized( opclass=cutlass.OpcodeClass.Simt, layouts=LayoutCombination.NNN, threadblock_shape=[128, 128, 8], stages=2)
add_test_specialized( opclass=cutlass.OpcodeClass.Simt, layouts=LayoutCombination.TTT, threadblock_shape=[ 64, 128, 8], stages=2)
if __name__ == '__main__':
unittest.main()
| cutlass-main | test/python/gemm/gemm_f64_sm90.py |
#################################################################################################
#
# Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
"""
Low-level functionality tests for GEMM with F64 operands on SM80
"""
from functools import partial
import cutlass
import logging
import unittest
from cutlass.backend.test.utils import LayoutCombination, add_test_gemm
from cutlass.backend.utils.device import device_cc
cutlass.set_log_level(logging.WARNING)
cc = 80
@unittest.skipIf(device_cc() < cc, 'Device compute capability is insufficient for SM80 tests.')
class GemmF64Sm80(unittest.TestCase):
"""
Wrapper class to which tests will be added dynamically in __main__
"""
pass
@unittest.skipIf(device_cc() < cc, 'Device compute capability is insufficient for SM80 tests.')
class GemmF64Sm80StreamK(unittest.TestCase):
"""
Wrapper class to which tests will be added dynamically in __main__
"""
pass
add_test_specialized = partial(add_test_gemm, element=cutlass.DataType.f64, cc=cc, cluster_shape=[1, 1, 1])
# Tests using TensorOp
add_test_tensorop = partial(add_test_specialized, opclass=cutlass.OpcodeClass.TensorOp)
add_test_tensorop(cls=GemmF64Sm80, layouts=LayoutCombination.NNN, alignments=[1, 1, 1], element_output=cutlass.DataType.f64,
element_accumulator=cutlass.DataType.f64, threadblock_shape=[128, 128, 16], warp_count=[4, 2, 1], stages=3)
add_test_tensorop(cls=GemmF64Sm80, layouts=LayoutCombination.NTN, alignments=[1, 1, 1], element_output=cutlass.DataType.f64,
element_accumulator=cutlass.DataType.f64, threadblock_shape=[ 64, 64, 16], warp_count=[2, 2, 1], stages=4)
add_test_tensorop(cls=GemmF64Sm80, layouts=LayoutCombination.TTN, alignments=[1, 1, 1], element_output=cutlass.DataType.f64,
element_accumulator=cutlass.DataType.f64, threadblock_shape=[ 32, 32, 16], warp_count=[2, 1, 1], stages=5)
# Tests using SIMT
add_test_simt = partial(add_test_specialized, opclass=cutlass.OpcodeClass.Simt)
add_test_simt(cls=GemmF64Sm80, layouts=LayoutCombination.NNN, alignments=[1, 1, 1], element_output=cutlass.DataType.f64,
element_accumulator=cutlass.DataType.f64, threadblock_shape=[128, 128, 8], warp_count=[2, 2, 1], stages=2)
add_test_simt(cls=GemmF64Sm80, layouts=LayoutCombination.TNN, alignments=[1, 1, 1], element_output=cutlass.DataType.f64,
element_accumulator=cutlass.DataType.f64, threadblock_shape=[ 64, 128, 8], warp_count=[1, 2, 1], stages=2)
add_test_simt(cls=GemmF64Sm80, layouts=LayoutCombination.NTN, alignments=[1, 1, 1], element_output=cutlass.DataType.f64,
element_accumulator=cutlass.DataType.f64, threadblock_shape=[128, 64, 8], warp_count=[2, 1, 1], stages=2)
add_test_simt(cls=GemmF64Sm80, layouts=LayoutCombination.TTN, alignments=[1, 1, 1], element_output=cutlass.DataType.f64,
element_accumulator=cutlass.DataType.f64, threadblock_shape=[ 64, 64, 8], warp_count=[1, 1, 1], stages=2)
add_test_simt(cls=GemmF64Sm80, layouts=LayoutCombination.NNT, alignments=[1, 1, 1], element_output=cutlass.DataType.f64,
element_accumulator=cutlass.DataType.f64, threadblock_shape=[128, 128, 8], warp_count=[2, 2, 1], stages=2)
# Stream K tests
add_test_streamk = partial(add_test_specialized, opclass=cutlass.OpcodeClass.TensorOp, swizzle=cutlass.swizzle.ThreadblockSwizzleStreamK)
add_test_streamk(cls=GemmF64Sm80StreamK, layouts=LayoutCombination.NTT, alignments=[1, 1, 1], element_output=cutlass.DataType.f64,
element_accumulator=cutlass.DataType.f64, threadblock_shape=[128, 128, 16], warp_count=[4, 2, 1], stages=3)
if __name__ == '__main__':
unittest.main()
| cutlass-main | test/python/gemm/gemm_f64_sm80.py |
#################################################################################################
#
# Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
"""
Low-level functionality tests for GEMM with F16 operands on SM80
"""
from functools import partial
import cutlass
import logging
import unittest
from cutlass.backend.test.utils import LayoutCombination, add_test_gemm
from cutlass.backend.utils.device import device_cc
cutlass.set_log_level(logging.WARNING)
cc = 80
@unittest.skipIf(device_cc() < cc, 'Device compute capability is insufficient for SM80 tests.')
class GemmF16Sm80(unittest.TestCase):
"""
Wrapper class to which tests will be added dynamically in __main__
"""
pass
@unittest.skipIf(device_cc() < cc, 'Device compute capability is insufficient for SM80 tests.')
class GemmF16Sm80StreamK(unittest.TestCase):
"""
Wrapper class to which tests will be added dynamically in __main__
"""
pass
add_test_specialized = partial(add_test_gemm, element=cutlass.DataType.f16, cc=cc, cluster_shape=[1, 1, 1])
# Tests using TensorOp
add_test_tensorop = partial(add_test_specialized, opclass=cutlass.OpcodeClass.TensorOp)
add_test_tensorop(cls=GemmF16Sm80, layouts=LayoutCombination.NNN, alignments=[8, 8, 8], element_output=cutlass.DataType.f16,
element_accumulator=cutlass.DataType.f32, threadblock_shape=[128, 128, 32], warp_count=[2, 2, 1], stages=3)
add_test_tensorop(cls=GemmF16Sm80, layouts=LayoutCombination.NNT, alignments=[8, 8, 8], element_output=cutlass.DataType.f16,
element_accumulator=cutlass.DataType.f32, threadblock_shape=[128, 128, 32], warp_count=[2, 2, 1], stages=3)
add_test_tensorop(cls=GemmF16Sm80, layouts=LayoutCombination.NTN, alignments=[8, 8, 8], element_output=cutlass.DataType.f16,
element_accumulator=cutlass.DataType.f32, threadblock_shape=[128, 128, 32], warp_count=[2, 2, 1], stages=3)
add_test_tensorop(cls=GemmF16Sm80, layouts=LayoutCombination.NTT, alignments=[8, 8, 8], element_output=cutlass.DataType.f16,
element_accumulator=cutlass.DataType.f32, threadblock_shape=[128, 128, 32], warp_count=[2, 2, 1], stages=3)
add_test_tensorop(cls=GemmF16Sm80, layouts=LayoutCombination.TNN, alignments=[8, 8, 8], element_output=cutlass.DataType.f16,
element_accumulator=cutlass.DataType.f32, threadblock_shape=[128, 128, 32], warp_count=[2, 2, 1], stages=3)
add_test_tensorop(cls=GemmF16Sm80, layouts=LayoutCombination.TNT, alignments=[8, 8, 8], element_output=cutlass.DataType.f16,
element_accumulator=cutlass.DataType.f32, threadblock_shape=[128, 128, 32], warp_count=[2, 2, 1], stages=3)
add_test_tensorop(cls=GemmF16Sm80, layouts=LayoutCombination.TTN, alignments=[8, 8, 8], element_output=cutlass.DataType.f16,
element_accumulator=cutlass.DataType.f32, threadblock_shape=[128, 128, 32], warp_count=[2, 2, 1], stages=3)
add_test_tensorop(cls=GemmF16Sm80, layouts=LayoutCombination.TTT, alignments=[8, 8, 8], element_output=cutlass.DataType.f16,
element_accumulator=cutlass.DataType.f32, threadblock_shape=[128, 128, 32], warp_count=[2, 2, 1], stages=3)
add_test_tensorop(cls=GemmF16Sm80, layouts=LayoutCombination.TNT, alignments=[8, 8, 8], element_output=cutlass.DataType.f16,
element_accumulator=cutlass.DataType.f32, threadblock_shape=[ 64, 128, 32], warp_count=[1, 2, 1], stages=3)
add_test_tensorop(cls=GemmF16Sm80, layouts=LayoutCombination.TNT, alignments=[8, 8, 8], element_output=cutlass.DataType.f16,
element_accumulator=cutlass.DataType.f32, threadblock_shape=[128, 64, 32], warp_count=[2, 1, 1], stages=3)
add_test_tensorop(cls=GemmF16Sm80, layouts=LayoutCombination.TNT, alignments=[8, 8, 8], element_output=cutlass.DataType.f16,
element_accumulator=cutlass.DataType.f32, threadblock_shape=[ 64, 64, 64], warp_count=[1, 1, 1], stages=3)
add_test_tensorop(cls=GemmF16Sm80, layouts=LayoutCombination.TNT, alignments=[4, 4, 8], element_output=cutlass.DataType.f16,
element_accumulator=cutlass.DataType.f32, threadblock_shape=[128, 128, 32], warp_count=[2, 2, 1], stages=3)
add_test_tensorop(cls=GemmF16Sm80, layouts=LayoutCombination.TNT, alignments=[4, 4, 8], element_output=cutlass.DataType.f16,
element_accumulator=cutlass.DataType.f16, threadblock_shape=[128, 128, 32], warp_count=[2, 2, 1], stages=3)
add_test_tensorop(cls=GemmF16Sm80, layouts=LayoutCombination.TNT, alignments=[8, 8, 8], element_output=cutlass.DataType.f16,
element_accumulator=cutlass.DataType.f16, threadblock_shape=[128, 128, 32], warp_count=[2, 2, 1], stages=3)
add_test_tensorop(cls=GemmF16Sm80, layouts=LayoutCombination.TNT, alignments=[8, 8, 8], element_output=cutlass.DataType.f16,
element_accumulator=cutlass.DataType.f32, threadblock_shape=[ 64, 64, 64], warp_count=[1, 1, 1], stages=5)
add_test_tensorop(cls=GemmF16Sm80, layouts=LayoutCombination.TNT, alignments=[2, 2, 2], element_output=cutlass.DataType.f16,
element_accumulator=cutlass.DataType.f16, threadblock_shape=[128, 128, 32], warp_count=[2, 2, 1], stages=3)
# Tests using SIMT
add_test_simt = partial(add_test_specialized, opclass=cutlass.OpcodeClass.Simt)
add_test_simt(cls=GemmF16Sm80, layouts=LayoutCombination.NNN, alignments=[1, 1, 1], element_output=cutlass.DataType.f16,
element_accumulator=cutlass.DataType.f32, threadblock_shape=[128, 128, 8], warp_count=[2, 2, 1], stages=2)
add_test_simt(cls=GemmF16Sm80, layouts=LayoutCombination.TNN, alignments=[1, 1, 1], element_output=cutlass.DataType.f16,
element_accumulator=cutlass.DataType.f32, threadblock_shape=[ 64, 128, 8], warp_count=[1, 2, 1], stages=2)
add_test_simt(cls=GemmF16Sm80, layouts=LayoutCombination.NTN, alignments=[1, 1, 1], element_output=cutlass.DataType.f16,
element_accumulator=cutlass.DataType.f32, threadblock_shape=[128, 64, 8], warp_count=[2, 1, 1], stages=2)
add_test_simt(cls=GemmF16Sm80, layouts=LayoutCombination.TTN, alignments=[1, 1, 1], element_output=cutlass.DataType.f16,
element_accumulator=cutlass.DataType.f32, threadblock_shape=[ 64, 64, 8], warp_count=[1, 1, 1], stages=2)
add_test_simt(cls=GemmF16Sm80, layouts=LayoutCombination.NNT, alignments=[1, 1, 1], element_output=cutlass.DataType.f16,
element_accumulator=cutlass.DataType.f16, threadblock_shape=[128, 128, 8], warp_count=[2, 2, 1], stages=2)
# Stream K tests
add_test_streamk = partial(add_test_specialized, opclass=cutlass.OpcodeClass.TensorOp, swizzle=cutlass.swizzle.ThreadblockSwizzleStreamK)
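# Setting swizzle=cutlass.swizzle.ThreadblockSwizzleStreamK selects the stream-K threadblock
# decomposition for these cases, in which work along the K dimension is split across
# threadblocks and partial results are reduced, rather than the default data-parallel tiling
# used by the tests above.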
add_test_streamk(cls=GemmF16Sm80StreamK, layouts=LayoutCombination.NNN, alignments=[8, 8, 8], element_output=cutlass.DataType.f16,
element_accumulator=cutlass.DataType.f32, threadblock_shape=[128, 128, 32], warp_count=[2, 2, 1], stages=3)
add_test_streamk(cls=GemmF16Sm80StreamK, layouts=LayoutCombination.TNT, alignments=[8, 8, 8], element_output=cutlass.DataType.f16,
element_accumulator=cutlass.DataType.f32, threadblock_shape=[ 64, 64, 64], warp_count=[1, 1, 1], stages=5)
if __name__ == '__main__':
unittest.main()
| cutlass-main | test/python/gemm/gemm_f16_sm80.py |
#################################################################################################
#
# Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
"""
Low-level functionality tests for GEMM with S8 operands on SM90
"""
from functools import partial
import cutlass
import logging
import unittest
from cutlass.backend.test.utils import LayoutCombination, add_test_gemm
from cutlass.backend.utils.device import device_cc
cutlass.set_log_level(logging.WARNING)
cc = 90
@unittest.skipIf(device_cc() < cc, 'Device compute capability is insufficient for SM90 tests.')
class GemmS8Sm90(unittest.TestCase):
"""
Wrapper class to which tests will be added dynamically in __main__
"""
pass
add_test_specialized = partial(add_test_gemm, cls=GemmS8Sm90, element=cutlass.DataType.s8, compilation_modes=['nvcc'])
add_test_tensorop = partial(add_test_specialized, opclass=cutlass.OpcodeClass.TensorOp)
# Tests with 1x1x1 clusters
add_test_tensorop(layouts=LayoutCombination.TNN, alignments=[16, 16, 16], element_output=cutlass.DataType.s8,
element_accumulator=cutlass.DataType.s32, cluster_shape=[1, 1, 1], threadblock_shape=[128, 128, 128], stages=3)
add_test_tensorop(layouts=LayoutCombination.TNT, alignments=[16, 16, 16], element_output=cutlass.DataType.s8,
element_accumulator=cutlass.DataType.s32, cluster_shape=[1, 1, 1], threadblock_shape=[128, 128, 128], stages=None)
add_test_tensorop(layouts=LayoutCombination.TNT, alignments=[16, 16, 8], element_output=cutlass.DataType.s8,
element_accumulator=cutlass.DataType.s32, cluster_shape=[1, 1, 1], threadblock_shape=[128, 128, 128], stages=None)
add_test_tensorop(layouts=LayoutCombination.TNT, alignments=[16, 16, 16], element_output=cutlass.DataType.s8,
element_accumulator=cutlass.DataType.s32, cluster_shape=[1, 1, 1], threadblock_shape=[64, 128, 128], stages=None)
add_test_tensorop(layouts=LayoutCombination.TNT, alignments=[16, 16, 16], element_output=cutlass.DataType.s8,
element_accumulator=cutlass.DataType.s32, cluster_shape=[1, 1, 1], threadblock_shape=[128, 64, 32], stages=None)
add_test_tensorop(layouts=LayoutCombination.TNT, alignments=[ 4, 4, 16], element_output=cutlass.DataType.s8,
element_accumulator=cutlass.DataType.s32, cluster_shape=[1, 1, 1], threadblock_shape=[128, 128, 128], stages=None)
# Tests with different cluster shapes
add_test_tensorop(layouts=LayoutCombination.TNT, alignments=[16, 16, 16], element_output=cutlass.DataType.s8,
element_accumulator=cutlass.DataType.s32, cluster_shape=[2, 2, 1], threadblock_shape=[128, 128, 128], stages=None)
add_test_tensorop(layouts=LayoutCombination.TNT, alignments=[16, 16, 16], element_output=cutlass.DataType.s8,
element_accumulator=cutlass.DataType.s32, cluster_shape=[1, 4, 1], threadblock_shape=[128, 128, 128], stages=None)
# Tests with warp-specialized ping-pong schedule
add_test_tensorop(layouts=LayoutCombination.TNT, alignments=[16, 16, 16], element_output=cutlass.DataType.s8,
element_accumulator=cutlass.DataType.s32, cluster_shape=[2, 1, 1], threadblock_shape=[128, 128, 128], stages=None,
kernel_schedule=cutlass.KernelScheduleType.TmaWarpSpecializedPingpong,
epilogue_schedule=cutlass.EpilogueScheduleType.TmaWarpSpecialized)
# Tests for SIMT
add_test_simt = partial(add_test_specialized, opclass=cutlass.OpcodeClass.Simt)
add_test_simt(layouts=LayoutCombination.TNN, alignments=[1, 1, 1], element_output=cutlass.DataType.s8,
element_accumulator=cutlass.DataType.s32, cluster_shape=[1, 1, 1], threadblock_shape=[64, 32, 8], stages=2)
if __name__ == '__main__':
unittest.main()
| cutlass-main | test/python/gemm/gemm_s8_sm90.py |
#################################################################################################
#
# Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
"""
Low-level functionality tests for GEMM with F32 operands on SM80
"""
from functools import partial
import cutlass
import logging
import unittest
from cutlass.backend.test.utils import LayoutCombination, add_test_gemm
from cutlass.backend.utils.device import device_cc
cutlass.set_log_level(logging.WARNING)
cc = 80
@unittest.skipIf(device_cc() < cc, 'Device compute capability is insufficient for SM80 tests.')
class GemmF32Sm80(unittest.TestCase):
"""
Wrapper class to which tests will be added dynamically in __main__
"""
pass
@unittest.skipIf(device_cc() < cc, 'Device compute capability is insufficient for SM80 tests.')
class GemmF32Sm80StreamK(unittest.TestCase):
"""
Wrapper class to which tests will be added dynamically in __main__
"""
pass
add_test_specialized = partial(add_test_gemm, element=cutlass.DataType.f32, cc=cc, cluster_shape=[1, 1, 1])
# Tests using TensorOp
add_test_tensorop = partial(add_test_specialized, opclass=cutlass.OpcodeClass.TensorOp)
add_test_tensorop(cls=GemmF32Sm80, layouts=LayoutCombination.NNN, alignments=[4, 4, 4], element_output=cutlass.DataType.f32,
element_accumulator=cutlass.DataType.f32, threadblock_shape=[128, 128, 32], warp_count=[2, 2, 1], stages=3)
add_test_tensorop(cls=GemmF32Sm80, layouts=LayoutCombination.NNT, alignments=[4, 4, 4], element_output=cutlass.DataType.f32,
element_accumulator=cutlass.DataType.f32, threadblock_shape=[128, 128, 32], warp_count=[2, 2, 1], stages=3)
add_test_tensorop(cls=GemmF32Sm80, layouts=LayoutCombination.NTN, alignments=[4, 4, 4], element_output=cutlass.DataType.f32,
element_accumulator=cutlass.DataType.f32, threadblock_shape=[ 64, 128, 32], warp_count=[1, 2, 1], stages=3)
add_test_tensorop(cls=GemmF32Sm80, layouts=LayoutCombination.NTN, alignments=[4, 4, 4], element_output=cutlass.DataType.f32,
element_accumulator=cutlass.DataType.f32, threadblock_shape=[ 64, 64, 32], warp_count=[1, 1, 1], stages=4)
# Tests using SIMT
add_test_simt = partial(add_test_specialized, opclass=cutlass.OpcodeClass.Simt)
add_test_simt(cls=GemmF32Sm80, layouts=LayoutCombination.NNN, alignments=[1, 1, 1], element_output=cutlass.DataType.f32,
element_accumulator=cutlass.DataType.f32, threadblock_shape=[128, 128, 8], warp_count=[2, 2, 1], stages=2)
add_test_simt(cls=GemmF32Sm80, layouts=LayoutCombination.TNN, alignments=[1, 1, 1], element_output=cutlass.DataType.f32,
element_accumulator=cutlass.DataType.f32, threadblock_shape=[ 64, 128, 8], warp_count=[1, 2, 1], stages=2)
add_test_simt(cls=GemmF32Sm80, layouts=LayoutCombination.NTN, alignments=[1, 1, 1], element_output=cutlass.DataType.f32,
element_accumulator=cutlass.DataType.f32, threadblock_shape=[128, 64, 8], warp_count=[2, 1, 1], stages=2)
add_test_simt(cls=GemmF32Sm80, layouts=LayoutCombination.TTN, alignments=[1, 1, 1], element_output=cutlass.DataType.f32,
element_accumulator=cutlass.DataType.f32, threadblock_shape=[ 64, 64, 8], warp_count=[1, 1, 1], stages=2)
add_test_simt(cls=GemmF32Sm80, layouts=LayoutCombination.NNT, alignments=[1, 1, 1], element_output=cutlass.DataType.f32,
element_accumulator=cutlass.DataType.f32, threadblock_shape=[128, 128, 8], warp_count=[2, 2, 1], stages=2)
# Stream K tests
add_test_streamk = partial(add_test_specialized, opclass=cutlass.OpcodeClass.TensorOp, swizzle=cutlass.swizzle.ThreadblockSwizzleStreamK)
add_test_streamk(cls=GemmF32Sm80StreamK, layouts=LayoutCombination.TTN, alignments=[4, 4, 4], element_output=cutlass.DataType.f32,
element_accumulator=cutlass.DataType.f32, threadblock_shape=[128, 128, 32], warp_count=[2, 2, 1], stages=3)
if __name__ == '__main__':
unittest.main()
| cutlass-main | test/python/gemm/gemm_f32_sm80.py |
#################################################################################################
#
# Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
"""
Low-level functionality tests for GEMM with S8 operands on SM80
"""
from functools import partial
import cutlass
import logging
import unittest
from cutlass.backend.test.utils import LayoutCombination, add_test_gemm
from cutlass.backend.utils.device import device_cc
cutlass.set_log_level(logging.WARNING)
cc = 80
@unittest.skipIf(device_cc() < cc, 'Device compute capability is insufficient for SM80 tests.')
class GemmS8Sm80(unittest.TestCase):
"""
Wrapper class to which tests will be added dynamically in __main__
"""
pass
@unittest.skipIf(device_cc() < cc, 'Device compute capability is insufficient for SM80 tests.')
class GemmS8Sm80StreamK(unittest.TestCase):
"""
Wrapper class to which tests will be added dynamically in __main__
"""
pass
add_test_specialized = partial(add_test_gemm, element=cutlass.DataType.s8, cc=cc, cluster_shape=[1, 1, 1])
# Tests using TensorOp
add_test_tensorop = partial(add_test_specialized, opclass=cutlass.OpcodeClass.TensorOp)
add_test_tensorop(cls=GemmS8Sm80, layouts=LayoutCombination.TNN, alignments=[16, 16, 16], element_output=cutlass.DataType.s8,
element_accumulator=cutlass.DataType.s32, threadblock_shape=[256, 128, 64], warp_count=[4, 2, 1], stages=3)
add_test_tensorop(cls=GemmS8Sm80, layouts=LayoutCombination.TNT, alignments=[16, 16, 16], element_output=cutlass.DataType.s8,
element_accumulator=cutlass.DataType.s32, threadblock_shape=[128, 256, 64], warp_count=[2, 4, 1], stages=3)
add_test_tensorop(cls=GemmS8Sm80, layouts=LayoutCombination.TNN, alignments=[16, 16, 4], element_output=cutlass.DataType.s32,
element_accumulator=cutlass.DataType.s32, threadblock_shape=[ 64, 64, 64], warp_count=[1, 1, 1], stages=4)
# Tests using SIMT
add_test_simt = partial(add_test_specialized, opclass=cutlass.OpcodeClass.Simt)
add_test_simt(cls=GemmS8Sm80, layouts=LayoutCombination.NNN, alignments=[1, 1, 1], element_output=cutlass.DataType.s8,
element_accumulator=cutlass.DataType.s32, threadblock_shape=[128, 128, 8], warp_count=[2, 2, 1], stages=2)
add_test_simt(cls=GemmS8Sm80, layouts=LayoutCombination.TNN, alignments=[1, 1, 1], element_output=cutlass.DataType.s8,
element_accumulator=cutlass.DataType.s32, threadblock_shape=[ 64, 128, 8], warp_count=[1, 2, 1], stages=2)
add_test_simt(cls=GemmS8Sm80, layouts=LayoutCombination.NTN, alignments=[1, 1, 1], element_output=cutlass.DataType.s8,
element_accumulator=cutlass.DataType.s32, threadblock_shape=[128, 64, 8], warp_count=[2, 1, 1], stages=2)
add_test_simt(cls=GemmS8Sm80, layouts=LayoutCombination.TTN, alignments=[1, 1, 1], element_output=cutlass.DataType.s32,
element_accumulator=cutlass.DataType.s32, threadblock_shape=[ 64, 64, 8], warp_count=[1, 1, 1], stages=2)
add_test_simt(cls=GemmS8Sm80, layouts=LayoutCombination.NNT, alignments=[1, 1, 1], element_output=cutlass.DataType.s32,
element_accumulator=cutlass.DataType.s32, threadblock_shape=[128, 128, 8], warp_count=[2, 2, 1], stages=2)
# Stream K tests
add_test_streamk = partial(add_test_specialized, opclass=cutlass.OpcodeClass.TensorOp, swizzle=cutlass.swizzle.ThreadblockSwizzleStreamK)
add_test_streamk(cls=GemmS8Sm80StreamK, layouts=LayoutCombination.TNT, alignments=[16, 16, 16], element_output=cutlass.DataType.s8,
element_accumulator=cutlass.DataType.s32, threadblock_shape=[128, 256, 64], warp_count=[2, 4, 1], stages=3)
if __name__ == '__main__':
unittest.main()
| cutlass-main | test/python/gemm/gemm_s8_sm80.py |
#################################################################################################
#
# Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
import unittest
if __name__ == '__main__':
loader = unittest.TestLoader()
tests = loader.discover('./', 'gemm_*.py')
testRunner = unittest.runner.TextTestRunner()
results = testRunner.run(tests)
if not results.wasSuccessful():
raise Exception('Test cases failed')
| cutlass-main | test/python/gemm/run_all_tests.py |
#################################################################################################
#
# Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
"""
Tests emitting a CUTLASS kernel to a PyTorch CUDA extension
"""
import random
import tempfile
import unittest
import cutlass
import cutlass_bindings
if cutlass.utils.datatypes.torch_available:
import torch
def _initialize(dtype, M: int, N: int, K: int):
"""
Utility function to initialize A, B, C, and D matrices corresponding to dimensions M, N, and K
:param dtype: data type of tensors
:param M: M dimension of GEMM problem
:type M: int
:param N: N dimension of GEMM problem
:type N: int
:param K: K dimension of GEMM problem
:type K: int
:return: initialized tensors A, B, C, and D
:rtype: list
"""
sizes = [(M, K), (K, N), (M, N), (M, N)]
return [torch.randint(-3, 3, size, device='cuda').to(dtype) for size in sizes]
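# Illustrative usage (a hedged sketch, not part of the test suite): with dtype=torch.float16
# and (M, N, K) = (128, 256, 64), the helper above returns CUDA tensors of shapes
# A: (128, 64), B: (64, 256), C: (128, 256), D: (128, 256), e.g.
#   A, B, C, D = _initialize(torch.float16, 128, 256, 64)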
def _generate_problems(dtype, num):
"""
Utility function to generate `num` GEMMs of random sizes
:param dtype: data type of tensors
:param num: number of GEMMs to generate
:type num: int
:return: lists of A, B, C, and D tensors
:rtype: list
"""
valid_sizes = [128, 256, 512, 1024]
As, Bs, Cs, Ds = [], [], [], []
for _ in range(num):
M, N, K = [random.choice(valid_sizes) for _ in range(3)]
A, B, C, D = _initialize(dtype, M, N, K)
As.append(A)
Bs.append(B)
Cs.append(C)
Ds.append(D)
return As, Bs, Cs, Ds
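# Illustrative usage (hedged sketch): generate 50 random GEMM problems with M, N, and K drawn
# from {128, 256, 512, 1024}, as done in the grouped GEMM test below.
#   As, Bs, Cs, Ds = _generate_problems(torch.float16, 50)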
def _generate_conv2d_problem(conv_kind, dtype, ps):
"""
Utility function to generate conv2d inputs
:param conv_kind: kind of convolution
:type conv_kind: str
:param dtype: data type of tensors
:param ps: the conv2d problem size
:type ps: cutlass_bindings.conv.Conv2dProblemSize
:return: initialized tensors A, B, C, and D
:rtype: list
"""
if conv_kind == "fprop":
tensor_A_size = (ps.N, ps.C, ps.H, ps.W)
tensor_B_size = (ps.K, ps.C, ps.R, ps.S)
tensor_C_size = (ps.N, ps.K, ps.P, ps.Q)
elif conv_kind == "dgrad":
tensor_A_size = (ps.N, ps.K, ps.P, ps.Q)
tensor_B_size = (ps.K, ps.C, ps.R, ps.S)
tensor_C_size = (ps.N, ps.C, ps.H, ps.W)
else:
tensor_A_size = (ps.N, ps.K, ps.P, ps.Q)
tensor_B_size = (ps.N, ps.C, ps.H, ps.W)
tensor_C_size = (ps.K, ps.C, ps.R, ps.S)
sizes = [tensor_A_size, tensor_B_size, tensor_C_size]
return [torch.ceil(torch.empty(size, dtype=dtype, device='cuda').uniform_(-4.5, 3.5)).to(memory_format=torch.channels_last) for size in sizes]
@unittest.skipIf(not cutlass.utils.datatypes.torch_available, 'PyTorch must be available to run PyTorch extension tests')
class PyTorchExtensionTest(unittest.TestCase):
def test_gemm(self):
random.seed(2023)
dtype = torch.float16
plan = cutlass.op.Gemm(element=dtype, layout=cutlass.LayoutType.RowMajor)
plan.activation = cutlass.epilogue.relu
op = plan.construct()
with tempfile.TemporaryDirectory() as tmpdir:
mod = cutlass.emit.pytorch(op, name='gemm_mod', cc=plan.cc, sourcedir=tmpdir, jit=True)
A, B, C, _ = _initialize(dtype, 1024, 256, 512)
D_ref = torch.nn.functional.relu(A @ B)
D = mod.run(A, B)
assert torch.allclose(D, D_ref)
D = mod.run(A, B, C)
assert torch.allclose(D, D_ref)
D = mod.run(A, B, C, 1.0)
assert torch.allclose(D, D_ref)
D = mod.run(A, B, C, 1.0, 0.0)
assert torch.allclose(D, D_ref)
alpha = 2.0
beta = -1.0
D_ref = torch.nn.functional.relu((A @ B) * alpha + (beta * C))
D = mod.run(A, B, C, alpha, beta)
assert torch.allclose(D, D_ref)
def test_grouped_gemm(self):
random.seed(2023)
dtype = torch.float16
plan = cutlass.op.GroupedGemm(element=dtype, layout=cutlass.LayoutType.RowMajor)
op = plan.construct()
with tempfile.TemporaryDirectory() as tmpdir:
mod = cutlass.emit.pytorch(op, name='grouped_gemm_mod', cc=plan.cc, sourcedir=tmpdir, jit=True)
As, Bs, Cs, _ = _generate_problems(dtype, 50)
def check_all(X, Y):
for x, y in zip(X, Y):
assert torch.allclose(x, y)
Ds_ref = [a @ b for a, b in zip(As, Bs)]
Ds = mod.run(As, Bs)
check_all(Ds, Ds_ref)
Ds = mod.run(As, Bs, Cs)
check_all(Ds, Ds_ref)
Ds = mod.run(As, Bs, Cs, 1.0)
check_all(Ds, Ds_ref)
Ds = mod.run(As, Bs, Cs, 1.0, 0.0)
check_all(Ds, Ds_ref)
alpha = 2.0
beta = -1.0
Ds_ref = [(a @ b) * alpha + (beta * c) for a, b, c in zip(As, Bs, Cs)]
Ds = mod.run(As, Bs, Cs, alpha, beta)
check_all(Ds, Ds_ref)
def test_conv2d_fprop(self):
torch.manual_seed(2023)
dtype = torch.float16
plan = cutlass.op.Conv2d(kind="fprop", element=dtype, element_accumulator=torch.float32)
plan.activation = "relu"
op = plan.construct()
with tempfile.TemporaryDirectory() as tmpdir:
mod = cutlass.emit.pytorch(op, name="conv2d_mod", cc=plan.cc, sourcedir=tmpdir, jit=True)
problem_size = cutlass_bindings.conv.Conv2dProblemSize(
cutlass_bindings.Tensor4DCoord(1, 4, 4, 16),
cutlass_bindings.Tensor4DCoord(8, 3, 3, 16),
cutlass_bindings.Tensor4DCoord(0, 0, 0, 0),
cutlass_bindings.MatrixCoord(3, 3),
cutlass_bindings.MatrixCoord(1, 1),
cutlass_bindings.conv.Mode.cross_correlation,
1, 1
)
A, B, C = _generate_conv2d_problem("fprop", dtype, problem_size)
stride = (problem_size.stride_h, problem_size.stride_w)
padding = (problem_size.pad_h, problem_size.pad_w)
alpha = 1.0
beta = 0.5
D_ref = alpha * torch.ops.aten.conv2d(
A, B, stride=stride, padding=padding
) + beta * C
D_ref = torch.nn.functional.relu(D_ref)
D = mod.run(A, B, C, stride, padding, alpha=alpha, beta=beta)
assert torch.allclose(D, D_ref)
# Test serial split-K
D_serial_split_k = mod.run(A, B, C, stride, padding, alpha=alpha, beta=beta, split_k_mode="serial", split_k_slices=3)
assert torch.allclose(D, D_serial_split_k)
# Test parallel split-K
D_parallel_split_k = mod.run(A, B, C, stride, padding, alpha=alpha, beta=beta, split_k_mode="parallel", split_k_slices=7)
assert torch.allclose(D, D_parallel_split_k)
def test_conv2d_dgrad(self):
torch.manual_seed(2023)
dtype = torch.float16
plan = cutlass.op.Conv2d(kind="dgrad", element=dtype, element_accumulator=torch.float32)
op = plan.construct()
with tempfile.TemporaryDirectory() as tmpdir:
mod = cutlass.emit.pytorch(op, name="conv2d_dgrad_mod", cc=plan.cc, sourcedir=tmpdir, jit=True)
problem_size = cutlass_bindings.conv.Conv2dProblemSize(
cutlass_bindings.Tensor4DCoord(1, 4, 4, 16),
cutlass_bindings.Tensor4DCoord(8, 3, 3, 16),
cutlass_bindings.Tensor4DCoord(0, 0, 0, 0),
cutlass_bindings.MatrixCoord(3, 3),
cutlass_bindings.MatrixCoord(1, 1),
cutlass_bindings.conv.Mode.cross_correlation,
1, 1
)
A, B, C = _generate_conv2d_problem("dgrad", dtype, problem_size)
stride = (problem_size.stride_h, problem_size.stride_w)
padding = (problem_size.pad_h, problem_size.pad_w)
alpha = 1.0
beta = 0.5
input_size = (problem_size.N, problem_size.C, problem_size.H, problem_size.W)
D_ref = alpha * torch.nn.grad.conv2d_input(
input_size, B, A,
stride=stride, padding=padding
) + beta * C
D = mod.run(input_size, A, B, C, stride, padding, alpha=alpha, beta=beta)
assert torch.allclose(D, D_ref)
def test_conv2d_wgrad(self):
torch.manual_seed(2023)
dtype = torch.float16
plan = cutlass.op.Conv2d(kind="wgrad", element=dtype, element_accumulator=torch.float32)
op = plan.construct()
with tempfile.TemporaryDirectory() as tmpdir:
mod = cutlass.emit.pytorch(op, name="conv2d_wgrad_mod", cc=plan.cc, sourcedir=tmpdir, jit=True)
problem_size = cutlass_bindings.conv.Conv2dProblemSize(
cutlass_bindings.Tensor4DCoord(1, 4, 4, 16),
cutlass_bindings.Tensor4DCoord(8, 3, 3, 16),
cutlass_bindings.Tensor4DCoord(0, 0, 0, 0),
cutlass_bindings.MatrixCoord(3, 3),
cutlass_bindings.MatrixCoord(1, 1),
cutlass_bindings.conv.Mode.cross_correlation,
1, 1
)
A, B, C = _generate_conv2d_problem("wgrad", dtype, problem_size)
stride = (problem_size.stride_h, problem_size.stride_w)
padding = (problem_size.pad_h, problem_size.pad_w)
alpha = 1.0
beta = 0.5
weight_size = (problem_size.K, problem_size.C, problem_size.R, problem_size.S)
D_ref = alpha * torch.nn.grad.conv2d_weight(
B, weight_size, A,
stride=stride, padding=padding
) + beta * C
D = mod.run(weight_size, A, B, C, stride, padding, alpha=alpha, beta=beta)
assert torch.allclose(D, D_ref)
# Test serial split-K
D_serial_split_k = mod.run(weight_size, A, B, C, stride, padding, alpha=alpha, beta=beta, split_k_mode="serial", split_k_slices=3)
assert torch.allclose(D, D_serial_split_k)
# Test parallel split-K
D_parallel_split_k = mod.run(weight_size, A, B, C, stride, padding, alpha=alpha, beta=beta, split_k_mode="parallel", split_k_slices=7)
assert torch.allclose(D, D_parallel_split_k)
if __name__ == '__main__':
unittest.main()
| cutlass-main | test/python/emit/pytorch.py |
#################################################################################################
#
# Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
import copy
import os
from pybind11.setup_helpers import Pybind11Extension
import setuptools
from setuptools import setup
from setuptools.command.build_ext import build_ext
def _cutlass_path_from_dir() -> str:
cutlass_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../')
if not os.path.isdir(cutlass_path):
raise Exception(f'Environment variable "CUTLASS_PATH" is not defined, and default path of {cutlass_path} does not exist.')
return cutlass_path
def _cuda_install_path_from_nvcc() -> str:
import subprocess
# Attempt to detect CUDA_INSTALL_PATH based on location of NVCC
result = subprocess.run(['which', 'nvcc'], capture_output=True)
if result.returncode != 0:
raise Exception('Unable to find nvcc via `which` utility.')
cuda_install_path = result.stdout.decode('utf-8').split('/bin/nvcc')[0]
if not os.path.isdir(cuda_install_path):
raise Exception(f'Environment variable "CUDA_INSTALL_PATH" is not defined, and default path of {cuda_install_path} does not exist.')
return cuda_install_path
cutlass_path = (
os.getenv('CUTLASS_PATH')
if os.getenv('CUTLASS_PATH') is not None
else _cutlass_path_from_dir()
)
cuda_install_path = (
os.getenv('CUDA_INSTALL_PATH')
if os.getenv('CUDA_INSTALL_PATH') is not None
else _cuda_install_path_from_nvcc()
)
class BuildExtension(build_ext):
"""
Wrapper around `build_ext` to use NVCC when compiling the CUTLASS Python-C++ bindings.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def build_extensions(self):
original_compile = self.compiler._compile
def custom_compile(obj, src, ext, cc_args, extra_postargs, pp_opts):
"""
Wrapper around build_ext.compiler._compile method
"""
postargs = copy.deepcopy(extra_postargs)
postargs = [f for f in postargs if f not in ['-g0', '-fvisibility=hidden']]
postargs.extend(["-Xcompiler='-fPIC'", "-Xcompiler='-g0'", "-Xcompiler='-O3'", '-x', 'cu'])
try:
original_compiler = self.compiler.compiler_so
self.compiler.set_executable('compiler_so', [f'{cuda_install_path}/bin/nvcc'])
original_compile(obj, src, ext, cc_args, postargs, pp_opts)
finally:
self.compiler.set_executable('compiler_so', original_compiler)
self.compiler._compile = custom_compile
super().build_extensions()
include_dirs = [
cutlass_path + '/include',
cuda_install_path + '/include',
cutlass_path + '/tools/util/include',
cutlass_path + '/test',
]
library_dirs = [
cuda_install_path + '/lib64',
]
ext_modules = [
Pybind11Extension('cutlass_bindings',
['cutlass/cpp/cutlass_bindings.cpp'],
include_dirs=include_dirs,
library_dirs=library_dirs,
extra_compile_args=['-Xcompiler="-fpermissive"', '-w', '-std=c++17'],
libraries=['cudart'])
]
setup(
name='cutlass',
version='3.1.0',
description='CUTLASS Pythonic Interface',
package_dir={'': '.'},
packages=['cutlass', 'cutlass.emit', 'cutlass.op', 'cutlass.utils', 'cutlass.backend', 'cutlass.backend.utils'],
setup_requires=['pybind11'],
install_requires=[
'bfloat16',
'cuda-python>=11.8.0',
'pybind11',
'scikit-build',
'treelib'
],
ext_modules=ext_modules,
cmdclass={
'build_ext': BuildExtension
}
)
| cutlass-main | python/setup.py |
# Configuration file for the Sphinx documentation builder.
#
# For the full list of built-in configuration values, see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
sys.path.insert(0, os.path.abspath('../..'))
sys.path.insert(0, os.path.abspath('../../media/docs'))
# -- Project information -----------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information
project = 'CUTLASS Python interface'
copyright = '2023, NVIDIA'
author = 'NVIDIA'
release = '3.1.0'
# -- General configuration ---------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'myst_parser',
'nbsphinx',
'nbsphinx_link',
'sphinx_copybutton',
'sphinx.ext.autodoc',
'sphinx.ext.autosectionlabel',
'sphinx.ext.autosummary',
'sphinx.ext.coverage',
'sphinx.ext.extlinks',
'sphinx.ext.ifconfig',
'sphinx.ext.intersphinx',
'sphinx.ext.mathjax',
'sphinx.ext.napoleon',
'sphinx.ext.viewcode',
'sphinx_inline_tabs',
]
source_suffix = {
'.rst': 'restructuredtext',
'.md': 'markdown',
}
autodoc_typehints = 'description'
pygments_style = "sphinx"
pygments_dark_style = "monokai"
templates_path = ['_templates']
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# Ignore errors when converting notebooks
nbsphinx_allow_errors = True
language = 'en'
# -- Options for HTML output -------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output
html_static_path = ['_static']
html_title = "CUTLASS Python"
html_baseurl = 'docs'
html_theme = 'furo'
html_theme_options = {
"light_logo": "cutlass-logo-small.png",
"dark_logo": "cutlass-logo-small.png",
"light_css_variables": {
"color-brand-primary": "#76B900",
"color-brand-content": "#76B900",
},
"dark_css_variables": {
"color-brand-primary": "#76B900",
"color-brand-content": "#76B900",
},
"footer_icons": [
{
"name": "GitHub",
"url": "https://github.com/NVIDIA/cutlass",
"html": """
<svg stroke="currentColor" fill="currentColor" stroke-width="0" viewBox="0 0 16 16">
<path fill-rule="evenodd" d="M8 0C3.58 0 0 3.58 0 8c0 3.54 2.29 6.53 5.47 7.59.4.07.55-.17.55-.38 0-.19-.01-.82-.01-1.49-2.01.37-2.53-.49-2.69-.94-.09-.23-.48-.94-.82-1.13-.28-.15-.68-.52-.01-.53.63-.01 1.08.58 1.23.82.72 1.21 1.87.87 2.33.66.07-.52.28-.87.51-1.07-1.78-.2-3.64-.89-3.64-3.95 0-.87.31-1.59.82-2.15-.08-.2-.36-1.02.08-2.12 0 0 .67-.21 2.2.82.64-.18 1.32-.27 2-.27.68 0 1.36.09 2 .27 1.53-1.04 2.2-.82 2.2-.82.44 1.1.16 1.92.08 2.12.51.56.82 1.27.82 2.15 0 3.07-1.87 3.75-3.65 3.95.29.25.54.73.54 1.48 0 1.07-.01 1.93-.01 2.2 0 .21.15.46.55.38A8.013 8.013 0 0 0 16 8c0-4.42-3.58-8-8-8z"></path>
</svg>
""",
"class": "",
},
],
}
| cutlass-main | python/docs_src/source/conf.py |
#################################################################################################
#
# Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
import logging
import os
import sys
def _cutlass_path_from_dir() -> str:
cutlass_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../')
if not os.path.isdir(cutlass_path):
raise Exception(f'Environment variable "CUTLASS_PATH" is not defined, '
f'and default path of {cutlass_path} does not exist.')
return cutlass_path
def _cuda_install_path_from_nvcc() -> str:
import subprocess
# Attempt to detect CUDA_INSTALL_PATH based on location of NVCC
result = subprocess.run(['which', 'nvcc'], capture_output=True)
if result.returncode != 0:
raise Exception('Unable to find nvcc via `which` utility.')
cuda_install_path = result.stdout.decode('utf-8').split('/bin/nvcc')[0]
if not os.path.isdir(cuda_install_path):
raise Exception(f'Environment variable "CUDA_INSTALL_PATH" is not defined, '
f'and default path of {cuda_install_path} does not exist.')
return cuda_install_path
CUTLASS_PATH = os.getenv("CUTLASS_PATH", _cutlass_path_from_dir())
CUDA_INSTALL_PATH = os.getenv("CUDA_INSTALL_PATH", _cuda_install_path_from_nvcc())
CACHE_FILE = "compiled_cache.db"
# Add the path to the CUTLASS profiler generation/manifest scripts to PYTHONPATH
sys.path.insert(0, os.path.join(CUTLASS_PATH, "tools/library/scripts/"))
# Import types/methods from the CUTLASS utility libraries for profiler generation/emission under tools/library/scripts
from library import (
ArchitectureNames,
DataType,
DataTypeSize,
EpilogueFunctor,
EpilogueScheduleSuffixes,
EpilogueScheduleTag,
EpilogueScheduleType,
GemmKind,
LayoutTag,
LayoutType,
KernelScheduleSuffixes,
KernelScheduleTag,
KernelScheduleType,
MathInstruction,
MathOperation,
OpcodeClass,
OperationKind,
SharedMemPerCC,
SwizzlingFunctor,
TensorDescription,
TileDescription,
TileSchedulerSuffixes,
TileSchedulerTag,
TileSchedulerType
)
this = sys.modules[__name__]
this.logger = logging.getLogger(__name__)
def set_log_level(level: int):
"""
Sets the log level
:param log_level: severity of logging level to use. See https://docs.python.org/3/library/logging.html#logging-levels for options
:type log_level: int
"""
this.logger.setLevel(level)
set_log_level(logging.ERROR)
from cutlass.library_defaults import OptionRegistry
from cutlass.backend.utils.device import device_cc
this.option_registry = OptionRegistry(device_cc())
this.__version__ = '3.2.0'
from cutlass.backend import get_memory_pool
from cutlass.emit.pytorch import pytorch
from cutlass.op.gemm import Gemm
from cutlass.op.conv import Conv2d, Conv2dFprop, Conv2dDgrad, Conv2dWgrad
from cutlass.op.gemm_grouped import GroupedGemm
from cutlass.op.op import OperationBase
get_memory_pool(init_pool_size=2 ** 30, max_pool_size=2 ** 32)
| cutlass-main | python/cutlass/__init__.py |
#################################################################################################
#
# Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
"""
Registry of swizzling functions
"""
import cutlass_bindings
IdentitySwizzle1 = cutlass_bindings.IdentitySwizzle1
IdentitySwizzle2 = cutlass_bindings.IdentitySwizzle2
IdentitySwizzle4 = cutlass_bindings.IdentitySwizzle4
IdentitySwizzle8 = cutlass_bindings.IdentitySwizzle8
HorizontalSwizzle = cutlass_bindings.HorizontalSwizzle
BatchedIdentitySwizzle = cutlass_bindings.BatchedIdentitySwizzle
ThreadblockSwizzleStreamK = cutlass_bindings.ThreadblockSwizzleStreamK
StridedDgradIdentitySwizzle1 = cutlass_bindings.StridedDgradIdentitySwizzle1
StridedDgradIdentitySwizzle4 = cutlass_bindings.StridedDgradIdentitySwizzle4
StridedDgradHorizontalSwizzle = cutlass_bindings.StridedDgradHorizontalSwizzle
_swizzling_functors = [
IdentitySwizzle1,
IdentitySwizzle2,
IdentitySwizzle4,
IdentitySwizzle8,
HorizontalSwizzle,
BatchedIdentitySwizzle,
ThreadblockSwizzleStreamK,
StridedDgradIdentitySwizzle1,
StridedDgradIdentitySwizzle4,
StridedDgradHorizontalSwizzle,
]
def get_swizzling_functors():
return _swizzling_functors
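# Illustrative usage (a hedged sketch; the module is imported as `cutlass.swizzle` by the
# Python-interface tests, which pass e.g. cutlass.swizzle.ThreadblockSwizzleStreamK as a
# `swizzle=` argument):
#   import cutlass.swizzle as swizzle
#   for functor in swizzle.get_swizzling_functors():
#       print(functor)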
| cutlass-main | python/cutlass/swizzle.py |
#################################################################################################
#
# Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
"""
Classes containing valid operations for a given compute capability and data types.
"""
import logging
from cuda import __version__
# Strip any additional information from the CUDA version
_cuda_version = __version__.split("rc")[0]
# Imports from CUTLASS profiler generator and manifest scripts
import generator as prof_generator
import manifest as prof_manifest
from library import (
ConvKind, IteratorAlgorithm, StrideSupport, GroupMode
)
import cutlass
from cutlass.utils.check import valid_stage_count
from cutlass.utils.datatypes import td_from_profiler_td, td_from_profiler_op, has_binding_type
_generator_ccs = [50, 60, 61, 70, 75, 80, 90]
class KernelsForDataType:
"""
Container class for keeping track of kernels that correspond to a particular combination
of data types for operands A, B, and accumulator
"""
def __init__(self, datatype_comb: tuple, layout_comb: tuple):
self.datatype_comb = datatype_comb
self.layout_comb = layout_comb
# Dictionary mapping from alignment (int) to a list of kernels that fit the alignment
# constraint for the data type combination
self.kernels_by_alignment = {}
def add(self, operation):
"""
Add an operation to the list of supported kernels
"""
alignment = operation.A.alignment
if alignment not in self.kernels_by_alignment:
self.kernels_by_alignment[alignment] = []
self.kernels_by_alignment[alignment].append(operation)
@property
def alignments(self):
"""
Returns an unsorted list of alignments supported by this data type combination
:return: unsorted list of alignments supported by this data type combination
:rtype: list
"""
return list(self.kernels_by_alignment.keys())
@property
def all_operations(self):
"""
Returns a list of all operations supported by this data type combination
:return: list of all operations supported by this data type combination
:rtype: list
"""
ops = []
for _, alignment_ops in self.kernels_by_alignment.items():
ops.extend(alignment_ops)
return ops
def operations(self, alignment: int):
"""
Returns operations satisfying the alignment constraint indicated by `alignment`
:param alignment: alignment constraint of operations to return
:type alignment: int
:return: list of operations
:rtype: list
"""
if alignment not in self.kernels_by_alignment:
raise Exception(
f"No operations of alignment {alignment} found for data type and layout "
f"combination {self.datatype_comb} {self.layout_comb}"
)
return self.kernels_by_alignment[alignment]
def find_alignment(self, shape: tuple, layout: cutlass.LayoutType) -> int:
"""
Returns the largest alignment supported for a given shape and layout
:param shape: extent of each dimension of the tensor
:type shape: tuple
:param layout: layout of the tensor
:type layout: cutlass.LayoutType
:return: maximum alignment supported by the data type combination and tensor size
:rtype: int
"""
# Determine the leading dimension of the shape
if layout == cutlass.LayoutType.ColumnMajor:
ld = shape[-2]
elif layout == cutlass.LayoutType.RowMajor:
ld = shape[-1]
elif layout == cutlass.LayoutType.TensorNHWC:
ld = shape[-1]
else:
raise Exception(f"Unexpected or unsupported layout {layout}")
for alignment in sorted(list(self.kernels_by_alignment.keys()), reverse=True):
if ld % alignment == 0:
return alignment
# Default to alignment of 1 if no others match
return 1
def sort(self):
"""
Sorts each list of kernels in `kernels_by_alignment` in descending order of threadblock shape
"""
key = lambda op: (
op.tile_description.threadblock_shape[0]
* op.tile_description.threadblock_shape[1]
* op.tile_description.threadblock_shape[2]
)
for alignment in self.kernels_by_alignment.keys():
self.kernels_by_alignment[alignment].sort(key=key, reverse=True)
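# Illustrative usage of KernelsForDataType (a hedged sketch assuming an already-populated
# instance named `kernels`): pick the widest alignment that divides the leading dimension of
# a row-major 1024x512 tensor, then fetch the kernels registered for that alignment.
#   alignment = kernels.find_alignment((1024, 512), cutlass.LayoutType.RowMajor)
#   ops = kernels.operations(alignment)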
class ArchOptions:
"""
Structure for keeping track of kernels available on a given compute capability
:param target_cc: compute capability of the device on which kernels will be run
:type target_cc: int
:param kernel_cc: compute capability of the kernels to generate
:type kernel_cc: int
:param operation_kind: type of operation to register
:type operation_kind: cutlass.OperationKind
:param gemm_kinds: types of GEMM operations that can be included
:type gemm_kinds: list
:param allowed_math_operations: types of primitive math operations allowed
:type allowed_math_operations: list
"""
def __init__(
self,
target_cc: int,
kernel_cc: int,
operation_kind: cutlass.OperationKind,
gemm_kinds: list,
allowed_math_operations: list = [
cutlass.MathOperation.multiply_add,
cutlass.MathOperation.multiply_add_saturate,
]
):
self.cc = kernel_cc
# Dictionary with following structure:
# Key: OpcodeClass
# Value: Dictionary with the following structure:
# Key: tuple of ((DataType, DataType, DataType), (LayoutType, LayoutType)),
# representing ((element_a, element_b, element_accumulator), (layout_a, layout_b))
# Value: KernelsForDataType
self.operations_by_opclass = {}
self.op_class = None
self.allowed_math_operations = allowed_math_operations
# Identify the method within CUTLASS generator script that generates kernel
# descriptions for the target CC
generate_function_name = "GenerateSM" + str(kernel_cc)
if not hasattr(prof_generator, generate_function_name):
cutlass.logger.warning(f"No generator found for architecture {kernel_cc}")
return
generate_function = getattr(prof_generator, generate_function_name)
# Initialize a default manifest and populate it with valid kernel descriptions
# for the target CC
args = [
"--kernels=all",
f"--log-level={logging.getLevelName(cutlass.logger.level)}"
]
manifest_args = prof_generator.define_parser().parse_args(args)
manifest = prof_manifest.Manifest(manifest_args)
generate_function(manifest, _cuda_version)
if operation_kind not in manifest.operations:
# No kernels generated for this architecture, this could be because the CUDA
# toolkit is insufficient to support operations in this CC
cutlass.logger.warning(f"No operations of type {operation_kind} found for CC {kernel_cc}")
return
# Iterate through the available operations for this operation kind and
# find available opclasses and data types
for name, op_list in manifest.operations[operation_kind].items():
for op in op_list:
if operation_kind == cutlass.OperationKind.Gemm:
if op.gemm_kind not in gemm_kinds:
continue
mi = op.tile_description.math_instruction
if mi.math_operation not in self.allowed_math_operations:
continue
datatype_comb = (mi.element_a, mi.element_b, mi.element_accumulator)
# Skip any data types that do not currently have conversions via cutlass_bindings
if False in [has_binding_type(elt) for elt in datatype_comb]:
continue
# Prune operations that don't fit in shared memory
td = td_from_profiler_op(op)
if not valid_stage_count(target_cc, td)[0]:
continue
if mi.opcode_class not in self.operations_by_opclass:
self.operations_by_opclass[mi.opcode_class] = {}
datatype_comb = (mi.element_a, mi.element_b, mi.element_accumulator)
layout_comb = (op.A.layout, op.B.layout)
# Register TF32 kernels as F32 to enable F32 -> TF32 conversion + TF32 Tensor Core operations
if datatype_comb == (cutlass.DataType.tf32, cutlass.DataType.tf32, cutlass.DataType.f32):
# TF32 kernels only supported on SM80 and beyond
if self.cc < 80:
continue
elif self.cc == 90:
if (op.A.element != cutlass.DataType.f32
or op.B.element != cutlass.DataType.f32
or op.C.element != cutlass.DataType.f32):
continue
datatype_comb = (cutlass.DataType.f32, cutlass.DataType.f32, cutlass.DataType.f32)
opclass_dict = self.operations_by_opclass[mi.opcode_class]
key = (datatype_comb, layout_comb)
if key not in opclass_dict:
opclass_dict[key] = KernelsForDataType(datatype_comb, layout_comb)
opclass_dict[key].add(op)
# Set the default opclass to TensorOp, if available. Otherwise default to SIMT
if cutlass.OpcodeClass.TensorOp in self.operations_by_opclass:
self.op_class = cutlass.OpcodeClass.TensorOp
else:
self.op_class = cutlass.OpcodeClass.Simt
# The profiler's generator may generate only a limited set of combinations of operands for SIMT kernels.
# Here, we generate additional versions via a generic TileDescription.
if cutlass.OpcodeClass.Simt not in self.operations_by_opclass:
self.operations_by_opclass[cutlass.OpcodeClass.Simt] = {}
if operation_kind == cutlass.OperationKind.Gemm:
types = [
(cutlass.DataType.s8, cutlass.DataType.s8, cutlass.DataType.s8),
(cutlass.DataType.s8, cutlass.DataType.s8, cutlass.DataType.s32),
(cutlass.DataType.f16, cutlass.DataType.f16, cutlass.DataType.f16),
(cutlass.DataType.f16, cutlass.DataType.f16, cutlass.DataType.f32),
(cutlass.DataType.f32, cutlass.DataType.f32, cutlass.DataType.f32),
(cutlass.DataType.f64, cutlass.DataType.f64, cutlass.DataType.f64),
]
layouts = [
(cutlass.LayoutType.RowMajor, cutlass.LayoutType.RowMajor),
(cutlass.LayoutType.RowMajor, cutlass.LayoutType.ColumnMajor),
(cutlass.LayoutType.ColumnMajor, cutlass.LayoutType.RowMajor),
(cutlass.LayoutType.ColumnMajor, cutlass.LayoutType.ColumnMajor),
]
elif operation_kind == cutlass.OperationKind.Conv2d:
types = [
(cutlass.DataType.f16, cutlass.DataType.f16, cutlass.DataType.f16),
(cutlass.DataType.f16, cutlass.DataType.f16, cutlass.DataType.f32),
(cutlass.DataType.f32, cutlass.DataType.f32, cutlass.DataType.f32),
(cutlass.DataType.f64, cutlass.DataType.f64, cutlass.DataType.f64),
]
layouts = [
(cutlass.LayoutType.TensorNHWC, cutlass.LayoutType.TensorNHWC),
]
else:
raise NotImplementedError(f"Operation kind {operation_kind} is currently unsupported.")
alignment = 1
epilogue_functor = cutlass.EpilogueFunctor.LinearCombination
swizzling_functor = cutlass.SwizzlingFunctor.Identity8
for type_comb in types:
for layout_comb in layouts:
comb = (type_comb, layout_comb)
if comb in self.operations_by_opclass[cutlass.OpcodeClass.Simt]:
continue
A = cutlass.TensorDescription(type_comb[0], layout_comb[0], alignment)
B = cutlass.TensorDescription(type_comb[1], layout_comb[1], alignment)
C = cutlass.TensorDescription(type_comb[2], cutlass.LayoutType.ColumnMajor, alignment)
math_inst = cutlass.MathInstruction(
[1, 1, 1],
type_comb[0],
type_comb[1],
type_comb[2],
cutlass.OpcodeClass.Simt,
cutlass.MathOperation.multiply_add
)
td = cutlass.TileDescription(
[128, 128, 8], 2, [4, 2, 1], math_inst, 50, 1024)
# Prune operations that don't fit in shared memory
if not valid_stage_count(target_cc, td_from_profiler_td(td))[0]:
continue
new_kernels = KernelsForDataType(type_comb, layout_comb)
if operation_kind == cutlass.OperationKind.Gemm:
new_operation = prof_manifest.GemmOperation(
cutlass.GemmKind.Universal, td.minimum_compute_capability,
td, A, B, C, type_comb[2], epilogue_functor, swizzling_functor)
new_kernels.add(new_operation)
elif operation_kind == cutlass.OperationKind.Conv2d:
for conv_kind in [ConvKind.Fprop, ConvKind.Dgrad, ConvKind.Wgrad]:
new_operation = prof_manifest.Conv2dOperation(
conv_kind, IteratorAlgorithm.Analytic, td.minimum_compute_capability, td,
A, B, C, type_comb[2], StrideSupport.Strided, epilogue_functor, swizzling_functor,
group_mode=GroupMode.SingleGroup
)
new_kernels.add(new_operation)
self.operations_by_opclass[cutlass.OpcodeClass.Simt][comb] = new_kernels
# Sort all operations
for oc in self.operations_by_opclass.keys():
for comb in self.operations_by_opclass[oc].keys():
self.operations_by_opclass[oc][comb].sort()
def opclass_supports_combination(
self, op_class: cutlass.OpcodeClass, datatype_comb: tuple, layout_comb: tuple
) -> bool:
"""
Returns whether the provided operation class supports the provided data type and layout combination
:param op_class: operation class to consider
:type op_class: cutlass.OpcodeClass
:param datatype_comb: tuple of data types for (element_A, element_B, element_accumulator)
:type datatype_comb: tuple[cutlass.DataType]
:param layout_comb: tuple of data types for (layout_A, layout_B)
:type layout_comb: tuple[cutlass.LayoutType]
:return: whether the provided operation class supports the provided data type and layout combination
:rtype: bool
"""
if op_class not in self.operations_by_opclass:
raise Exception(f"Unexpected or unsupported operation class {op_class}")
return (datatype_comb, layout_comb) in self.operations_by_opclass[op_class]
def supporting_opclasses(
self,
element_a: cutlass.DataType,
element_b: cutlass.DataType,
element_accumulator: cutlass.DataType,
layout_a: cutlass.LayoutType,
layout_b: cutlass.LayoutType,
) -> set:
"""
Returns a set of operation classes that support the provided data type combination
:param element_a: data type of operand A
:type element_a: cutlass.DataType
:param element_b: data type of operand B
:type element_b: cutlass.DataType
:param element_accumulator: data type of accumulator
:type element_accumulator: cutlass.DataType
:param layout_a: layout of operand A
:type layout_a: cutlass.LayoutType
:param layout_b: layout of operand B
:type layout_b: cutlass.LayoutType
:return: set of operation classes that support the provided data type combination
:rtype: set
"""
supporting_op_classes = set()
datatype_comb = (element_a, element_b, element_accumulator)
layout_comb = (layout_a, layout_b)
for op_class in self.operations_by_opclass.keys():
if self.opclass_supports_combination(op_class, datatype_comb, layout_comb):
supporting_op_classes.add(op_class)
return supporting_op_classes
def operations(
self,
op_class: cutlass.OpcodeClass,
element_a: cutlass.DataType,
element_b: cutlass.DataType,
element_accumulator: cutlass.DataType,
layout_a: cutlass.LayoutType,
layout_b: cutlass.LayoutType,
) -> KernelsForDataType:
"""
Returns the kernels supported by the provided operation class for the provided data type and layout combination
:param op_class: operation class to consider
:type op_class: cutlass.OpcodeClass
:param element_a: data type of operand A
:type element_a: cutlass.DataType
:param element_b: data type of operand B
:type element_b: cutlass.DataType
:param element_accumulator: data type of accumulator
:type element_accumulator: cutlass.DataType
:param layout_a: layout of operand A
:type layout_a: cutlass.LayoutType
:param layout_b: layout of operand B
:type layout_b: cutlass.LayoutType
:return: container of kernels by alignment supported by the provided combination of parameters
:rtype: KernelsForDataType
"""
datatype_comb = (element_a, element_b, element_accumulator)
layout_comb = (layout_a, layout_b)
if not self.opclass_supports_combination(op_class, datatype_comb, layout_comb):
raise Exception(
f"Data type layout combination {datatype_comb}, {layout_comb} "
f"is not supported by opcode class {op_class} on CC {self.cc}."
)
return self.operations_by_opclass[op_class][(datatype_comb, layout_comb)]
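# Illustrative query against a populated ArchOptions instance `opts` (a hedged sketch):
# find the opcode classes that support an F16 x F16 -> F32 GEMM in TN layouts, then pull
# the matching kernels for Tensor Cores if available.
#   opclasses = opts.supporting_opclasses(cutlass.DataType.f16, cutlass.DataType.f16,
#                                         cutlass.DataType.f32,
#                                         cutlass.LayoutType.RowMajor, cutlass.LayoutType.ColumnMajor)
#   if cutlass.OpcodeClass.TensorOp in opclasses:
#       kernels = opts.operations(cutlass.OpcodeClass.TensorOp,
#                                 cutlass.DataType.f16, cutlass.DataType.f16,
#                                 cutlass.DataType.f32,
#                                 cutlass.LayoutType.RowMajor, cutlass.LayoutType.ColumnMajor)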
class OptionRegistry:
"""
Container of all architecture-specific options
:param target_cc: compute capability of the device on which operations will be run
:type target_cc: int
"""
def __init__(self, target_cc: int):
self.registry = {}
gemm_kinds = [cutlass.GemmKind.Universal, cutlass.GemmKind.Universal3x]
operation_kinds = [cutlass.OperationKind.Gemm, cutlass.OperationKind.Conv2d]
# Construct options for each CC
for kernel_cc in _generator_ccs:
self.registry[kernel_cc] = {}
for opkind in operation_kinds:
self.registry[kernel_cc][opkind] = ArchOptions(target_cc, kernel_cc, opkind, gemm_kinds)
def options_for_cc(self, cc: int, op_kind=cutlass.OperationKind.Gemm) -> ArchOptions:
options = self.registry.get(cc, None)
return options[op_kind] if options is not None else None
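# Illustrative sketch (not executed on import): construct the registry for the current device
# and look up GEMM options for SM80. Assumes a CUDA device is visible and that SM80 is among
# the generated compute capabilities.
def _example_option_registry():
    from cutlass.backend.utils.device import device_cc
    registry = OptionRegistry(device_cc())
    return registry.options_for_cc(80, cutlass.OperationKind.Gemm)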
| cutlass-main | python/cutlass/library_defaults.py |
#################################################################################################
#
# Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
"""
Registry of elementwise epilogues
Elementwise epilogues can be added to many CUTLASS kernels in the CUTLASS Python interface via
code like the following for GEMM:
.. highlight:: python
.. code-block:: python
plan = cutlass.op.Gemm(element=cutlass.DataType.f32, layout=cutlass.LayoutType.RowMajor)
plan.activation = cutlass.epilogue.relu
"""
from cutlass.backend import epilogue
gelu = epilogue.gelu
hardswish = epilogue.hardswish
identity = epilogue.identity
leaky_relu = epilogue.leaky_relu
relu = epilogue.relu
sigmoid = epilogue.sigmoid
silu = epilogue.silu
tanh = epilogue.tanh
_activations = [gelu, hardswish, identity, leaky_relu, relu, sigmoid, silu, tanh]
def get_activations() -> list:
"""
Returns a list of available activation functions
:return: list of available activation functions
:rtype: list
"""
return _activations
def get_activation_epilogue(
activation,
element_output,
elements_per_access,
element_accumulator,
element_compute,
):
"""
Return an epilogue corresponding to the activation function, data types, and alignment
used in the kernel
:param activation: elementwise activation function to use
:param element_output: data type of the output
:param elements_per_access: alignment of operand C of the kernel
:type elements_per_access: int
:param element_accumulator: data type of the accumulated output C
:param element_compute: data type in which compute operations should be performed
:return: epilogue functor
"""
if activation not in _activations:
raise Exception(
f"Unsupported activation type {activation}. Available activations are: {_activations}"
)
if activation == identity:
return epilogue.LinearCombination(
element_output, elements_per_access, element_accumulator, element_compute
)
else:
return epilogue.LinearCombinationGeneric(
activation(element_compute),
element_output,
elements_per_access,
element_accumulator,
element_compute,
)
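# A small, illustrative sketch of calling get_activation_epilogue directly (not executed on
# import). The element types and alignment below are arbitrary example values; the high-level
# interfaces (e.g., `plan.activation = cutlass.epilogue.relu`) normally select these for you.
def _example_relu_epilogue():
    import cutlass_bindings
    return get_activation_epilogue(
        relu,
        cutlass_bindings.float32,  # element_output
        4,                         # elements_per_access (alignment of operand C)
        cutlass_bindings.float32,  # element_accumulator
        cutlass_bindings.float32,  # element_compute
    )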
| cutlass-main | python/cutlass/epilogue.py |
#################################################################################################
#
# Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
"""
Ease-of-use interface for constructing, compiling, and running GEMMs.
The ``Gemm`` interface is meant to allow one to easily instantiate, compile, and run
GEMM operations in CUTLASS via Python, without specifying many configuration parameters.
Under the hood, the interface will select sensible default parameters for the many template
parameters for CUTLASS GEMMs.
Note: optimal performance is not to be expected from this interface. To achieve optimal
performance, one should specify and tune each configuration parameter.
The simplest example of using this interface is the following:
.. highlight:: python
.. code-block:: python
# A, B, C, and D are torch/numpy/cupy tensor objects
plan = cutlass.op.Gemm(A, B, C, D)
plan.run()
One can also use the interface by specifying data types of operands at construction
and using different tensor objects with these data types at runtime:
.. highlight:: python
.. code-block:: python
# The following is shorthand for:
# cutlass.op.Gemm(element_A=torch.float32, element_B=torch.float32,
# element_C=torch.float32, element_D=torch.float32,
# element_accumulator=torch.float32,
# layout=cutlass.LayoutType.RowMajor)
plan = cutlass.op.Gemm(element=torch.float32, layout=cutlass.LayoutType.RowMajor)
A0 = torch.rand((128, 256), device='cuda')
B0 = torch.rand((256, 64), device='cuda')
C0 = torch.zeros((128, 64), device='cuda')
D0 = torch.zeros((128, 64), device='cuda')
plan.run(A0, B0, C0, D0)
A1 = torch.rand((32, 128), device='cuda')
B1 = torch.rand((128, 256), device='cuda')
C1 = torch.zeros((32, 256), device='cuda')
D1 = torch.zeros((32, 256), device='cuda')
plan.run(A1, B1, C1, D1)
The interface additionally enables one to decouple the compilation of the underlying CUTLASS
kernel from its execution:
.. highlight:: python
.. code-block:: python
plan = cutlass.op.Gemm(element=np.float32, layout=cutlass.LayoutType.RowMajor)
plan.compile()
# Do other work...
plan.run(A0, B0, C0, D0)
# Do other work...
plan.run(A1, B1, C1, D1)
Elementwise activation functions are easily fused to the GEMM via the interface:
.. highlight:: python
.. code-block:: python
plan = cutlass.op.Gemm(element=np.float32, layout=cutlass.LayoutType.RowMajor)
plan.activation = cutlass.epilogue.relu
Operations can also be run asynchronously:
.. highlight:: python
.. code-block:: python
plan = cutlass.op.Gemm(element=np.float32, layout=cutlass.LayoutType.RowMajor)
args = plan.run()
# Do other work...
args.sync()
"""
from math import prod
import cutlass_bindings
import cutlass
from cutlass import epilogue, swizzle
from cutlass.backend import compiler
from cutlass.backend.gemm_operation import GemmArguments, GemmOperationUniversal
from cutlass.backend.library import TensorDescription, TileDescription
from cutlass.op.op import OperationBase
from cutlass.utils import check, datatypes
class Gemm(OperationBase):
"""
Constructs a ``Gemm`` object.
The data types and layouts of operands A, B, and C, along with the data type of output D
and that used for accumulation, are bound to the ``Gemm`` object throughout its lifetime --
these are not to be changed after a ``Gemm`` has been constructed.
The constructor has optional parameters for flexibly setting these parameters. The following
constructors are equivalent:
.. highlight:: python
.. code-block:: python
# Use F32 for A, B, C, D, and accumulation. All operands are row major.
# Use the generic ``element`` and ``layout`` parameters to concisely set all data types and layouts
# for operands to the same values.
Gemm(element=cutlass.DataType.f32, layout=cutlass.LayoutType.RowMajor)
# Explicitly specify the data types to use for A, B, C, and D. Use the generic ``layout``.
Gemm(element_A=cutlass.DataType.f32, element_B=cutlass.DataType.f32, element_C=cutlass.DataType.f32,
element_D=cutlass.DataType.f32, layout=cutlass.LayoutType.RowMajor)
# Set the data types and elements from existing tensors. Note that one can use different tensors when
# executing GEMM via the ``run()`` method than passed in here (though those passed in to ``run()`` must
# have the same data type and layout as those passed in here).
# A, B, C, and D are row-major torch.Tensor objects of type torch.float32
Gemm(A=A, B=B, C=C, D=D)
# Use the generic ``element`` and explicitly specify the layouts to use for A, B, and C (layout of D is
# the same as that for C, at present)
Gemm(element=cutlass.DataType.f32, layout_A=cutlass.LayoutType.RowMajor,
layout_B=cutlass.LayoutType.RowMajor, layout_C=cutlass.LayoutType.RowMajor)
# Explicitly specify the data type and layout for only some of A, B, C, and D. Unspecified data types
# and layouts will inherit those passed in via the generic ``element`` and ``layout``
Gemm(element_A=cutlass.DataType.f32, layout_B=cutlass.LayoutType.RowMajor,
element=cutlass.DataType.f32, layout=cutlass.LayoutType.RowMajor)
The order of precedence for the setting of the data type and layout for a given operand/output is as follows:
1) If the tensor type is specified (e.g., ``A``), use the data type and layout inferred from this tensor
2) Otherwise, if the data type/layout (e.g., ``element_A``, ``layout_A``) is specified, use those
3) Otherwise, use the generic values (e.g., ``element``, ``layout``)
:param cc: compute capability of device for which kernels should be compiled. For example, if running on H100, this should be set to 90
:type cc: int
:param kernel_cc: compute capability of kernels to generate. For example, if running on SM90, but desiring to use a CUTLASS 2.x-style Ampere kernel, this should be set to 80
:type kernel_cc: int
:param A: tensor representing data type and layout of operand A
:param B: tensor representing data type and layout of operand B
:param C: tensor representing data type and layout of operand C
:param D: tensor representing data type and layout of operand D
:param alpha: scalar parameter alpha from GEMM computation that scales the product of operands A and B
:param beta: scalar parameter beta from GEMM operation that scales operand C
:param element_accumulator: data type to be used in accumulation of the product of operands A and B
:type element_accumulator: cutlass.DataType
:param element: generic data type to be used for operands A, B, C, D, as well as the accumulation data type
:type element: cutlass.DataType
:param layout: generic layout type to be used for operands A, B, C, and D
:type layout: cutlass.LayoutType
:param element_A: data type to be used for operand A
:type element_A: cutlass.DataType
:param element_B: data type to be used for operand B
:type element_B: cutlass.DataType
:param element_C: data type to be used for operand C
:type element_C: cutlass.DataType
:param element_D: data type to be used for operand D
:type element_D: cutlass.DataType
:param layout_A: layout of operand A
:type layout_A: cutlass.LayoutType
:param layout_B: layout of operand B
:type layout_B: cutlass.LayoutType
:param layout_C: layout of operand C
:type layout_C: cutlass.LayoutType
:param layout_D: layout of operand D
:type layout_D: cutlass.LayoutType
"""
def __init__(
self, A=None, B=None, C=None, D=None,
alpha=1.0, beta=0.0, element_accumulator=None,
element=None, layout=None,
element_A=None, element_B=None, element_C=None, element_D=None,
layout_A=None, layout_B=None, layout_C=None,
cc: int = None, kernel_cc: int = None
):
super().__init__(cc=cc, kernel_cc=kernel_cc)
self.name = "gemm"
self.compiled = False
elements = []
layouts = []
# Check that at least one of the following is set for each tensor (illustrated assuming tensor A):
# ``A``, ``element_A``, ``element`` and ``A``, ``layout_A``, ``layout``
for elt, lay, tens, name in zip([element_A, element_B, element_C, element_D],
[layout_A, layout_B, layout_C, layout_C],
[A, B, C, D],
["A", "B", "C", "D"]):
if elt is not None and tens is not None:
raise Exception(f'Must not specify both element_{name} and tensor {name}')
if lay is not None and tens is not None:
raise Exception(f'Must not specify both layout_{name} and tensor {name}')
if elt is None and tens is None and element is None:
raise Exception(f'Must specify one of element_{name}, tensor {name}, or generic element.')
if lay is None and tens is None and layout is None:
raise Exception(f'Must specify one of layout_{name}, tensor {name}, or generic layout.')
elt_to_set = None
lay_to_set = None
if tens is not None:
elt_to_set, lay_to_set = datatypes.get_datatype_and_layout(tens)
else:
elt_to_set = elt if elt is not None else element
lay_to_set = lay if lay is not None else layout
elements.append(datatypes.library_type(elt_to_set))
layouts.append(datatypes.library_layout(lay_to_set))
self._element_a, self._element_b, self._element_c, self._element_d = elements
self._layout_a, self._layout_b, self._layout_c, self._layout_d = layouts
if element_accumulator is None:
self._element_accumulator = self._element_c
else:
self._element_accumulator = datatypes.library_type(element_accumulator)
self.A = A
self.B = B
self.C = C
self.D = D
self.alpha = alpha
self.beta = beta
self.epilogue_functor = None
self.op_class = None
self._reset_operations()
self._swizzling_functor = cutlass.swizzle.IdentitySwizzle1
def _reset_operations(self, reset_epilogue: bool = True):
# Set the default op class
datatype_comb = (self._element_a, self._element_b, self._element_accumulator)
layout_comb = (self._layout_a, self._layout_b)
self.possible_op_classes = self.options.supporting_opclasses(
self._element_a, self._element_b, self._element_accumulator,
self._layout_a, self._layout_b)
if cutlass.OpcodeClass.TensorOp in self.possible_op_classes:
self.opclass = cutlass.OpcodeClass.TensorOp
elif cutlass.OpcodeClass.Simt in self.possible_op_classes:
self.opclass = cutlass.OpcodeClass.Simt
else:
raise Exception(f'No kernel configuration found for supported data type and layout '
f'combination {datatype_comb}x{layout_comb}')
if reset_epilogue:
self._reset_epilogue_functor_activation(epilogue.identity)
@property
def swizzling_functor(self):
"""
Returns the type of the swizzling functor currently being used by the GEMM
:return: swizzling functor type
"""
return self._swizzling_functor
@swizzling_functor.setter
def swizzling_functor(self, swizzling_functor):
"""
Sets the swizzling functor to the type specified by `swizzling_functor`
"""
if swizzling_functor == swizzle.ThreadblockSwizzleStreamK:
if self.op_class == cutlass.OpcodeClass.Simt:
raise Exception('ThreadblockSwizzleStreamK is currently only supported with opcode class TensorOp')
if self.current_cc == 90:
raise Exception('ThreadblockSwizzleStreamK is currently unsupported on SM90')
self._swizzling_functor = swizzling_functor
def _valid_tile_description(self, td: TileDescription) -> tuple:
"""
Checks whether the provided tile description is valid for the given compute capability. At present,
this checks the following:
- Does the tile description use a number of stages supported by the compute capability in question?
- Does the tile size requested fit within shared memory?
- Are the requested cluster dimensions within the valid range for the given architecture (e.g.,
no non-unit cluster dimensions for pre-SM90 architectures)?
- Is the kernel schedule being used supported on the architecture in question?
:param td: tile description to validate
:type td: cutlass.backend.TileDescription
:return: tuple in which the first element is a bool indicating that the tile description is valid
and the second element is a string providing an optional error message.
:rtype: tuple
"""
# Check stage count based on the CC to which we are compiling (self.cc), rather
# than the CC from which we find kernels (self.current_cc)
valid, msg = check.valid_stage_count(self.cc, td, self._element_c, self._element_d)
if not valid:
return (valid, msg)
valid, msg = check.valid_cluster_shape(self.current_cc, td.cluster_shape)
if not valid:
return (valid, msg)
valid, msg = check.valid_schedule(self.current_cc, td.kernel_schedule, td.epilogue_schedule, td.tile_scheduler)
return valid, msg
def tile_descriptions(self) -> list:
"""
Returns a list of valid tile descriptions for the operations
:returns: list of valid tile descriptions for the operations
:rtype: list
"""
return [datatypes.td_from_profiler_op(op) for op in self.possible_operations.all_operations]
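# Illustrative usage sketch (comments only): one might inspect the valid tile descriptions and
# compile against a specific one, e.g.
#   tds = plan.tile_descriptions()
#   plan.compile(tile_description=tds[0], print_module=True)
# where `plan` is a previously constructed Gemm object (a hypothetical name).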
def construct(
self, tile_description: TileDescription = None,
alignment_A: int = None, alignment_B: int = None, alignment_C: int = None) -> GemmOperationUniversal:
"""
Constructs a ``cutlass.backend.GemmOperationUniversal`` based on the input parameters and current
kernel specification of the ``Gemm`` object.
:param tile_description: tile description specifying shapes and operand types to use in the kernel
:type tile_description: cutlass.backend.TileDescription
:param alignment_A: alignment of operand A
:type alignment_A: int
:param alignment_B: alignment of operand B
:type alignment_B: int
:param alignment_C: alignment of operand C
:type alignment_C: int
:return: operation that was constructed
:rtype: cutlass.backend.GemmOperationUniversal
"""
alignment_pref_A = min(128 // cutlass.DataTypeSize[self._element_a], max(self.possible_operations.alignments))
alignment_pref_B = min(128 // cutlass.DataTypeSize[self._element_b], max(self.possible_operations.alignments))
alignment_pref_C = min(128 // cutlass.DataTypeSize[self._element_c], max(self.possible_operations.alignments))
alignment_A = check.alignment_or_default(alignment_A, alignment_pref_A)
alignment_B = check.alignment_or_default(alignment_B, alignment_pref_B)
alignment_C = check.alignment_or_default(alignment_C, alignment_pref_C)
self.epilogue_functor = self._reset_epilogue_functor_alignment(alignment_C, self.epilogue_functor)
tensor_A = TensorDescription(
datatypes.binding_type(self._element_a),
datatypes.binding_layout(self._layout_a),
alignment_A
)
tensor_B = TensorDescription(
datatypes.binding_type(self._element_b),
datatypes.binding_layout(self._layout_b),
alignment_B
)
tensor_C = TensorDescription(
datatypes.binding_type(self._element_c),
datatypes.binding_layout(self._layout_c),
alignment_C
)
if tile_description is None:
op = self.possible_operations.operations(alignment_A)[0]
tile_description = datatypes.td_from_profiler_op(op)
else:
valid, err_str = self._valid_tile_description(tile_description)
if not valid:
raise Exception(f"Invalid tile description. {err_str}")
self.tile_description = tile_description
operation = GemmOperationUniversal(
arch=self.current_cc,
tile_description=tile_description,
A=tensor_A, B=tensor_B, C=tensor_C,
epilogue_functor=self.epilogue_functor,
swizzling_functor=self._swizzling_functor,
)
return operation
def compile(self, tile_description: TileDescription = None,
alignment_A: int = None, alignment_B: int = None, alignment_C: int = None,
print_module: bool = False) -> cutlass.backend.GemmOperationUniversal:
"""
Emits and compiles the kernel currently specified. If ``tile_description`` or any
of the ``alignment`` parameters are set, the kernel will be chosen using the specified
tile description and alignments. Otherwise, a default tile description and alignment
will be used.
:param tile_description: tile description specifying shapes and operand types to use in the kernel
:type tile_description: cutlass.backend.TileDescription
:param alignment_A: alignment of operand A
:type alignment_A: int
:param alignment_B: alignment of operand B
:type alignment_B: int
:param alignment_C: alignment of operand C
:type alignment_C: int
:param print_module: whether to print the emitted C++ code
:type print_module: bool
:return: operation that was compiled
:rtype: cutlass.backend.GemmOperationUniversal
"""
self.operation = self.construct(tile_description, alignment_A, alignment_B, alignment_C)
if print_module:
print(self.operation.rt_module.emit())
compiler.add_module([self.operation,])
return self.operation
def _verify_rank(self, tensor):
"""
Verifies that ``tensor`` has rank greater than 1
:param tensor: object representing a tensor passed in to verify, or ``None`` if no tensor was passed in
:type tensor: numpy/cupy/torch array/tensor object
"""
if len(tensor.shape) < 2:
raise Exception(f"Tensors must be of rank greater than 1. Received tensor of shape: {tensor.shape}")
def _get_batch_count(self, A, B, C, D) -> int:
"""
Returns the batch count specified by the tensors A, B, C, and D and verifies that these
tensors match in batch size. Presence of a batch dimension is detected by one of the
tensors having rank greater than 2. If a batch dimension is present, it must be present in one of
operands A, B, or C (but need not be in all), and must be present in D.
:param A: tensor A
:type A: numpy/cupy/torch array/tensor object
:param B: tensor B
:type B: numpy/cupy/torch array/tensor object
:param C: tensor C
:type C: numpy/cupy/torch array/tensor object
:param D: tensor D
:type D: numpy/cupy/torch array/tensor object
:return: tuple of batch count dimensions
:rtype: tuple
"""
A_batch = A.shape[:-2] if len(A.shape) > 2 else tuple()
B_batch = B.shape[:-2] if len(B.shape) > 2 else tuple()
C_batch = C.shape[:-2] if len(C.shape) > 2 else tuple()
D_batch = D.shape[:-2] if len(D.shape) > 2 else tuple()
if len(D_batch) > 0 and D_batch not in [A_batch, B_batch, C_batch]:
raise Exception(f"Batch count in D must be present in one of operands A, B, and C. "
f"Batch counts are: A={A_batch}, B={B_batch}, C={C_batch}, D={D_batch}")
for batch_shape in [A_batch, B_batch, C_batch]:
if len(batch_shape) > 0 and batch_shape != D_batch:
raise Exception(f"Batch count for all other operands must either match that of D or be zero."
f"Received batch shape of {batch_shape}, which does not match that of D of {D_batch}.")
return D_batch
def _get_batch_stride(self, tensor) -> int:
"""
Returns the batch stride of ``tensor``. If ``tensor`` is only rank-2, batch stride is 0.
:param tensor: tensor object to process
:type tensor: numpy/cupy/torch array/tensor object
:return: stride between each matrix in the batch
:rtype: int
"""
if len(tensor.shape) > 2:
return tensor.shape[-2] * tensor.shape[-1]
else:
return 0
def _get_problem_args(self, A, B, C, D) -> tuple:
"""
Returns the problem size and GEMM universal mode to use for the
given operands.
:param A: tensor A
:type A: numpy/cupy/torch array/tensor object
:param B: tensor B
:type B: numpy/cupy/torch array/tensor object
:param C: tensor C
:type C: numpy/cupy/torch array/tensor object
:param D: tensor D
:type D: numpy/cupy/torch array/tensor object
:return: tuple containing the problem size (cutlass_bindings.gemm.GemmCoord), the GEMM mode (cutlass_bindings.gemm.Mode), and the batch count (int)
:rtype: tuple
"""
M, K = A.shape[-2:]
N = B.shape[-1]
mode = cutlass_bindings.gemm.Mode.Gemm
batch_count = self._get_batch_count(A, B, C, D)
returned_batch_count = prod(batch_count) if len(batch_count) > 0 else 1
# If we are running a batched GEMM in which there is a nonzero batch stride
# only for A, then we can fold the batched dimension of A into the M dimension
# (i.e., (b, m, k) x (k, n) -> (m*b, k) x (k, n)). This works only if both A
# and C are row major. A similar operation can be performed if only B has a nonzero
# batch dimension
if len(batch_count) > 0:
A_row = self._layout_a == cutlass.LayoutType.RowMajor
B_row = self._layout_b == cutlass.LayoutType.RowMajor
C_row = self._layout_c == cutlass.LayoutType.RowMajor
batched = lambda x : len(x.shape) == 2 + len(batch_count)
if batched(A) and not batched(B) and batched(C) and A_row and C_row:
M *= prod(batch_count)
returned_batch_count = 1
elif not batched(A) and batched(B) and batched(C) and not B_row and not C_row:
N *= prod(batch_count)
returned_batch_count = 1
else:
mode = cutlass_bindings.gemm.Mode.Batched
return cutlass_bindings.gemm.GemmCoord(M, N, K), mode, returned_batch_count
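# Worked shape example for the folding above (illustrative, comments only):
#   A: (8, 128, 64) row-major, B: (64, 256), C/D: (8, 128, 256) row-major
#   -> A is folded into (8*128, 64), a single GEMM of size (1024, 256, 64) is run, and the
#      returned batch count is 1. If B were also batched, mode would instead be Batched with
#      a batch count of 8.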
def _verify_type_and_layout(self, tensor, ref_type, ref_layout, name):
"""
Verifies that ``tensor`` has data type ``ref_type`` and layout ``ref_layout``. An exception
is raised if it does not.
:param tensor: object representing a tensor passed in to verify, or ``None`` if no tensor was passed in
:type tensor: numpy/cupy/torch array/tensor object
:param ref_dtype: data type for the tensor that this object was initialized to
:param ref_layout: layout for the tensor that this object was initialized to
:param name: identifier of the tensor to verify. Used in raising exceptions
:type name: str
"""
dtype, layout = datatypes.get_datatype_and_layout(tensor)
if dtype != ref_type or layout != ref_layout:
raise Exception(f'Tensor {name} with type and layout ({dtype}, {layout}) '
f'does not match the expected type and '
f'layout of ({ref_type}, {ref_layout}).')
def run(self, A=None, B=None, C=None, D=None,
alpha=None, beta=None, sync: bool = True, print_module: bool = False) -> GemmArguments:
"""
Runs the kernel currently specified. If it has not already been, the kernel is emitted and
compiled. Tensors holding operands and outputs of the kernel are sourced either from the
``A``, ``B``, ``C``, ``D``, ``alpha``, and ``beta``
parameters provided in this call, or from those
passed in on the construction of this object -- one of the two must be specified.
By default, this call returns only once the kernel has completed. To launch the kernel
and immediately return, set ``sync=False``. In this case, it is the responsibility of the
caller to synchronize the results of the kernel before attempting to access outputs
by calling ``sync()`` on the arguments returned from this call.
:param A: tensor representing data type and layout of operand A
:param B: tensor representing data type and layout of operand B
:param C: tensor representing data type and layout of operand C
:param D: tensor representing data type and layout of operand D
:param alpha: scalar parameter alpha from GEMM computation that scales the product of operands A and B
:param beta: scalar parameter beta from GEMM operation that scales operand C
:param sync: whether the call should wait for the kernel to complete before returning
:type sync: bool
:param print_module: whether to print the emitted C++ code
:type print_module: bool
:return: arguments passed in to the kernel
:rtype: cutlass.backend.GemmArguments
"""
A = self._verify_tensor(A, self.A, self._element_a, self._layout_a, "A")
B = self._verify_tensor(B, self.B, self._element_b, self._layout_b, "B")
C = self._verify_tensor(C, self.C, self._element_c, self._layout_c, "C")
D = self._verify_tensor(D, self.D, self._element_d, self._layout_d, "D")
alpha = self._verify_scalar(alpha, self.alpha, self._element_c, "alpha")
beta = self._verify_scalar(beta, self.beta, self._element_c, "beta")
self._verify_rank(A)
self._verify_rank(B)
self._verify_rank(C)
self._verify_rank(D)
alignment_a = self.possible_operations.find_alignment(A.shape, self._layout_a)
alignment_b = self.possible_operations.find_alignment(B.shape, self._layout_b)
alignment_c = self.possible_operations.find_alignment(C.shape, self._layout_c)
self.compile(self.tile_description, alignment_A=alignment_a, alignment_B=alignment_b,
alignment_C=alignment_c, print_module=print_module)
problem_size, mode, batch_count = self._get_problem_args(A, B, C, D)
if mode == cutlass_bindings.gemm.Mode.Gemm or batch_count == 1:
kwargs = {'split_k_slices': 1}
else:
kwargs = {
'batch': batch_count,
'batch_strides': {
'A': self._get_batch_stride(A),
'B': self._get_batch_stride(B),
'C': self._get_batch_stride(C),
'D': self._get_batch_stride(D)
}
}
arguments = GemmArguments(
operation=self.operation, problem_size=problem_size,
A=A, B=B, C=C, D=D,
output_op=self.operation.epilogue_type(alpha, beta),
gemm_mode=mode,
**kwargs
)
self.operation.run(arguments)
if sync:
arguments.sync()
return arguments
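# End-to-end sketch of a batched GEMM through this interface (illustrative only; not executed
# on import). Assumes PyTorch and a CUDA device are available; the shapes are arbitrary
# example values.
def _example_batched_gemm():
    import torch
    A = torch.rand((8, 128, 64), device='cuda')
    B = torch.rand((8, 64, 256), device='cuda')
    C = torch.zeros((8, 128, 256), device='cuda')
    D = torch.zeros((8, 128, 256), device='cuda')
    plan = Gemm(element=torch.float32, layout=cutlass.LayoutType.RowMajor)
    return plan.run(A, B, C, D)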
| cutlass-main | python/cutlass/op/gemm.py |
#################################################################################################
#
# Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
from cutlass.op.gemm import Gemm
from cutlass.op.conv import Conv2d, Conv2dFprop, Conv2dDgrad, Conv2dWgrad
from cutlass.op.gemm_grouped import GroupedGemm
from cutlass.op.op import OperationBase
| cutlass-main | python/cutlass/op/__init__.py |
#################################################################################################
#
# Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
"""
Base operation used for defining high-level CUTLASS operations (e.g., GEMM, Conv2d)
"""
from bisect import bisect_left
import cutlass
from cutlass import option_registry, epilogue
from cutlass.backend.utils.device import device_cc
from cutlass.epilogue import get_activations
from cutlass.library_defaults import _generator_ccs
from cutlass.swizzle import get_swizzling_functors
from cutlass.utils import datatypes
class OperationBase:
"""
Base operation used for defining high-level CUTLASS operations (e.g., GEMM, Conv2d)
"""
def __init__(self, cc: int = None, kernel_cc: int = None, operation_kind = cutlass.OperationKind.Gemm):
"""
:param cc: compute capability of device for which kernels should be compiled. For example, if running on H100, this should be set to 90
:type cc: int
:param kernel_cc: compute capability of kernels to generate. For example, if running on SM90, but desiring to use a CUTLASS 2.x-style Ampere kernel, this should be set to 80
:type kernel_cc: int
"""
self.operation_kind = operation_kind
self.cc = cc if cc is not None else device_cc()
self.specified_kernel_cc = kernel_cc is not None
self.current_cc = kernel_cc if kernel_cc is not None else self._find_closest_cc(self.cc)
self.tile_description = None
self.options = option_registry.options_for_cc(self.current_cc, operation_kind)
if self.options is None:
raise Exception(f"Invalid or unsupported compute capability: {self.current_cc}")
# Default activation function: identity
self._activation = epilogue.identity
def _find_closest_cc(self, cc: int) -> int:
"""
Returns the closest CC in _generator_ccs less than or equal to `cc`
:param cc: compute capability to query
:type cc: int
:returns: closest CC in _generator_ccs less than or equal to `cc`
:rtype: int
"""
if cc in _generator_ccs:
return cc
# Find closest CC lower than this CC
idx = bisect_left(_generator_ccs, cc)
if idx == 0:
raise Exception(f'No valid CC to fall back to for {cc}')
return _generator_ccs[idx-1]
def activations(self) -> list:
"""
Returns possible activation functions that can be used
:return: list of activation functions that can be used
:rtype: list
"""
return get_activations()
def swizzling_functors(self) -> list:
"""
Returns possible swizzling functions that can be used
:return: list of swizzling functions that can be used
:rtype: list
"""
return get_swizzling_functors()
def _reset_options(self, cc: int):
"""
Resets the kernel options based on cc
:param cc: compute capability to reset to
:type cc: int
"""
if cc != self.current_cc:
if cc not in _generator_ccs:
raise Exception(f'Invalid CC for CUTLASS kernels: {cc}.')
self.current_cc = cc
self.options = option_registry.options_for_cc(self.current_cc, self.operation_kind)
def _verify_scalar(self, scalar, ref_scalar, ref_dtype, name):
"""
Verifies the following properties:
1) Either ``scalar`` or ``ref_scalar`` must be set (i.e., not ``None``)
2) If ``scalar`` is not ``None``, its data type must match the current version
set by the plan (i.e., that in ``ref_dtype``)
If either of these properties does not hold, an exception is raised. If these properties hold and
``scalar`` is not ``None``, ``scalar`` is returned. Otherwise, ``ref_scalar`` is returned.
:param scalar: object representing a tensor passed in to verify, or ``None`` if no tensor was passed in
:type scalar: numpy/cupy/torch scalar
:param ref_scalar: object representing a tensor passed in on construction of this object, or ``None`` if no tensor was passed in
:type ref_scalar: numpy/cupy/torch scalar
:param ref_dtype: data type for the scalar that this object was initialized to
:param name: identifier of the scalar to verify. Used in raising exceptions
:type name: str
:return: valid scalar to use
:rtype: numpy/cupy/torch scalar
"""
if scalar is None:
if ref_scalar is None:
raise Exception(f"Scalar {name} must be set.")
return ref_scalar
if hasattr(scalar, "dtype"):
dtype = datatypes.library_type(scalar.dtype)
if dtype != ref_dtype:
raise Exception(
f"Tensor {name} with type {dtype} does not match expected type {ref_dtype}."
)
return scalar
def _verify_tensor(self, tensor, ref_tensor, ref_dtype, ref_layout, name):
"""
Verifies the following properties:
1) Either ``tensor`` or ``ref_tensor`` must be set (i.e., not ``None``)
2) If ``tensor`` is not ``None``, its data type and layout must match the current versions
set by the plan (i.e., those in ``ref_dtype`` and ``ref_layout``)
If either of these properties does not hold, an exception is raised. If these properties hold and
``tensor`` is not ``None``, ``tensor`` is returned. Otherwise, ``ref_tensor`` is returned.
:param tensor: object representing a tensor passed in to verify, or ``None`` if no tensor was passed in
:type tensor: numpy/cupy/torch array/tensor object
:param ref_tensor: object representing a tensor passed in on construction of this object, or ``None`` if no tensor was passed in
:type ref_tensor: numpy/cupy/torch array/tensor object
:param ref_dtype: data type for the tensor that this object was initialized to
:param ref_layout: layout for the tensor that this object was initialized to
:param name: identifier of the tensor to verify. Used in raising exceptions
:type name: str
:return: valid tensor object to use
:rtype: numpy/cupy/torch array/tensor object
"""
if tensor is None:
if ref_tensor is None:
raise Exception(f"Tensor {name} must be set.")
return ref_tensor
self._verify_type_and_layout(tensor, ref_dtype, ref_layout, name)
return tensor
#
# Opcode Related
#
@property
def opclass(self) -> cutlass.OpcodeClass:
"""
Returns the opcode class currently in use by the GEMM
:return: opcode class currently in use
:rtype: cutlass.OpcodeClass
"""
return self.op_class
@opclass.setter
def opclass(self, oc: cutlass.OpcodeClass):
if isinstance(oc, str):
oc = datatypes.getattr_enum(cutlass.OpcodeClass, oc)
if oc in self.possible_op_classes:
self.op_class = oc
else:
raise Exception(
f'Unsupported operation class {oc} for CC {self.cc} and data type combination '
f'({self._element_a}, {self._element_b}, {self._element_accumulator}) and '
f'layout combination ({self._layout_a}, {self._layout_b}).')
# Changing the op class changes the elements per access in the epilogue. Reset this.
if self.op_class == cutlass.OpcodeClass.Simt:
elements_per_access = 1
else:
elements_per_access = 128 // cutlass.DataTypeSize[self._element_c]
if self.epilogue_functor is not None:
self.epilogue_functor = self._reset_epilogue_functor_alignment(elements_per_access, self.epilogue_functor)
# Changing the op class also changes the possible operations available. Reset these.
self.possible_operations = self.options.operations(
self.op_class, self._element_a, self._element_b,
self._element_accumulator, self._layout_a, self._layout_b)
#
# Epilogue
#
def _create_epilogue_functor_activation(self, activation):
"""
Returns the epilogue functor with given activation function
"""
if self.epilogue_functor is None:
if self.op_class == cutlass.OpcodeClass.Simt:
elements_per_access = 1
else:
elements_per_access = 128 // cutlass.DataTypeSize[self._element_c]
else:
elements_per_access = self.epilogue_functor.epilogue_vector_length
if not self.specified_kernel_cc:
if self.current_cc == 90 and activation != epilogue.identity:
# CUTLASS 3.0 kernels currently only support identity activation. If one requests a non-identity activation,
# revert to using a CUTLASS 2.x kernel by using SM80-tagged kernels.
cutlass.logger.warning("Reverting to using SM80-tagged kernel. Opclass may change.")
self._reset_options(80)
self._reset_operations(reset_epilogue=False)
elif (self.cc == 90 and self.current_cc != 90 and activation == epilogue.identity):
# SM80 fallback kernels are currently used. Since an identity activation is requested,
# we can switch back to using SM90 kernels.
self._reset_options(90)
self._reset_operations(reset_epilogue=False)
else:
if self.current_cc == 90 and activation != epilogue.identity:
raise Exception("Epilogues with elementwise fusion are not currently supported "
"in the Python interface for 3.x kernels. To use 2.x kernels "
"with fused elementwise epilogues, do not set the `kernel_cc` "
"parameter when constructing the Gemm object.")
return epilogue.get_activation_epilogue(
activation,
datatypes.binding_type(self._element_c),
elements_per_access,
datatypes.binding_type(self._element_accumulator),
datatypes.binding_type(self._element_accumulator),
)
def _reset_epilogue_functor_activation(self, activation):
"""
Set the epilogue functor based on the provided activation function
"""
self.epilogue_functor = self._create_epilogue_functor_activation(activation)
def _reset_epilogue_functor_alignment(self, alignment, epilogue_functor):
"""
Reset the alignment of the current epilogue functor based on alignment C
"""
if epilogue_functor is None or not hasattr(epilogue_functor, 'activation_functor'):
# Identity epilogue does not have 'activation_functor'
activation = epilogue.identity
else:
activation = type(epilogue_functor.activation_functor)
epilogue_functor = epilogue.get_activation_epilogue(
activation,
datatypes.binding_type(self._element_c),
alignment,
datatypes.binding_type(self._element_accumulator),
datatypes.binding_type(self._element_accumulator),
)
return epilogue_functor
@property
def activation(self):
"""
Returns the type of the current activation function used
"""
if hasattr(self.epilogue_functor, "activation_functor"):
return type(self.epilogue_functor.activation_functor)
else:
return epilogue.identity
@activation.setter
def activation(self, act):
"""
Sets the type of the activation function to use
Activation can come with a set of arguments
:param act: type of activation function to use
:type act: str or tuple. e.g. "relu", ("leaky_relu", 0.01)
"""
if isinstance(act, tuple):
if isinstance(act[0], str):
act_fn = getattr(cutlass.backend.epilogue, act[0])
else:
act_fn = act[0]
self._reset_epilogue_functor_activation(act_fn)
self._activation_args = act[1]
self._activation = act[0]
else:
if isinstance(act, str):
act = getattr(cutlass.backend.epilogue, act)
self._reset_epilogue_functor_activation(act)
self._activation = act
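# Illustrative sketch of the activation setter above (not executed on import). It assumes a
# concrete subclass such as cutlass.op.Gemm has already been constructed as `plan`; activations
# may be set by name, by type, or as a (name, args) tuple for parameterized activations.
def _example_set_activation(plan):
    plan.activation = "relu"                # by name
    plan.activation = ("leaky_relu", 0.01)  # parameterized activation with its argument
    return plan.activation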
| cutlass-main | python/cutlass/op/op.py |
#################################################################################################
#
# Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
"""
Ease-of-use interface for constructing, compiling, and running GEMMs.
The ``GroupedGemm`` interface is meant to allow one to easily instantiate, compile, and run
grouped GEMM operations in CUTLASS via Python, without specifying many configuration parameters.
Under the hood, the interface will select sensible default parameters for the many template
parameters for CUTLASS grouped GEMMs.
Note: optimal performance is not to be expected from this interface. To achieve optimal
performance, one should specify and tune each configuration parameter.
The simplest example of using this interface is the following:
.. highlight:: python
.. code-block:: python
# As, Bs, Cs, and Ds are torch/numpy/cupy tensor objects
plan = cutlass.op.GroupedGemm(element=cutlass.DataType.f16, layout=cutlass.LayoutType.RowMajor)
plan.run([A0, A1], [B0, B1], [C0, C1], [D0, D1])
"""
import cutlass_bindings
from cutlass.backend.gemm_operation import (
GemmGroupedArguments,
GemmOperationGrouped,
)
from cutlass.backend.library import (
DataTypeSize,
SchedulerMode,
TensorDescription,
TileDescription,
)
from cutlass.op.gemm import Gemm
from cutlass.utils import check, datatypes
class GroupedGemm(Gemm):
"""
Constructs a ``GroupedGemm`` object.
The data types and layouts of operands A, B, and C, along with the data type of output D
and that used for accumulation, are bound to the ``GroupedGemm`` object throughout its lifetime --
these are not to be changed after a ``GroupedGemm`` has been constructed.
The constructor has optional parameters for flexibly setting these parameters. Please see the constructor
for ``Gemm`` for examples of these.
:param cc: compute capability of device to generate kernels for
:type cc: int
:param A: tensor representing data type and layout of operands A
:param B: tensor representing data type and layout of operands B
:param C: tensor representing data type and layout of operands C
:param D: tensor representing data type and layout of operands D
:param alpha: scalar parameter alpha from GEMM computation that scales the product of operands A and B
:param beta: scalar parameter beta from GEMM operation that scales operand C
:param element_accumulator: data type to be used in accumulation of the product of operands A and B
:type element_accumulator: cutlass.DataType
:param element: generic data type to be used for operands A, B, C, D, as well as the accumulation data type
:type element: cutlass.DataType
:param layout: generic layout type to be used for operands A, B, C, and D
:type layout: cutlass.LayoutType
:param element_A: data type to be used for operand A
:type element_A: cutlass.DataType
:param element_B: data type to be used for operand B
:type element_B: cutlass.DataType
:param element_C: data type to be used for operand C
:type element_C: cutlass.DataType
:param element_D: data type to be used for operand D
:type element_D: cutlass.DataType
:param layout_A: layout of operand A
:type layout_A: cutlass.LayoutType
:param layout_B: layout of operand B
:type layout_B: cutlass.LayoutType
:param layout_C: layout of operand C
:type layout_C: cutlass.LayoutType
:param layout_D: layout of operand D
:type layout_D: cutlass.LayoutType
"""
def __init__(
self, A=None, B=None, C=None, D=None,
alpha=1.0, beta=0.0, element_accumulator=None,
element=None, layout=None,
element_A=None, element_B=None, element_C=None, element_D=None,
layout_A=None, layout_B=None, layout_C=None,
cc: int = None,
):
super().__init__(
A=A, B=B, C=C, D=D,
alpha=alpha, beta=beta,
element_accumulator=element_accumulator,
element=element, layout=layout,
element_A=element_A, element_B=element_B,
element_C=element_C, element_D=element_D,
layout_A=layout_A, layout_B=layout_B, layout_C=layout_C,
cc=cc
)
# Grouped GEMM specializations for SM90 are currently unavailable. Revert to using SM80
if self.current_cc == 90:
self._reset_options(80)
self._reset_operations(reset_epilogue=False)
self.name = "grouped_gemm"
@Gemm.swizzling_functor.setter
def swizzling_functor(self, swizzling_functor):
"""
Sets the swizzling functor to the type specified by `swizzling_functor`
"""
raise Exception('Grouped GEMM does not currently support different swizzling functors')
def construct(self, tile_description: TileDescription = None,
alignment_A: int = None,
alignment_B: int = None,
alignment_C: int = None) -> GemmOperationGrouped:
"""
Constructs a ``cutlass.backend.GemmOperationGrouped`` based on the input parameters and current
kernel specification of the ``Gemm`` object.
:param tile_description: tile description specifying shapes and operand types to use in the kernel
:type tile_description: cutlass.backend.TileDescription
:param alignment_A: alignment of operand A
:type alignment_A: int
:param alignment_B: alignment of operand B
:type alignment_B: int
:param alignment_C: alignment of operand C
:type alignment_C: int
:return: operation that was constructed
:rtype: cutlass.backend.GemmOperationGrouped
"""
alignment_preference = max(self.possible_operations.alignments)
alignment_A = check.alignment_or_default(alignment_A, alignment_preference)
alignment_B = check.alignment_or_default(alignment_B, alignment_preference)
alignment_C = check.alignment_or_default(alignment_C, alignment_preference)
self.epilogue_functor = self._reset_epilogue_functor_alignment(alignment_C, self.epilogue_functor)
tensor_A = TensorDescription(
datatypes.binding_type(self._element_a),
datatypes.binding_layout(self._layout_a),
alignment_A
)
tensor_B = TensorDescription(
datatypes.binding_type(self._element_b),
datatypes.binding_layout(self._layout_b),
alignment_B
)
tensor_C = TensorDescription(
datatypes.binding_type(self._element_c),
datatypes.binding_layout(self._layout_c),
alignment_C
)
if tile_description is None:
op = self.possible_operations.operations(alignment_A)[0]
tile_description = datatypes.td_from_profiler_op(op)
else:
valid, err_str = self._valid_tile_description(tile_description)
if not valid:
raise Exception(f"Invalid tile description. {err_str}")
self.tile_description = tile_description
operation = GemmOperationGrouped(
arch=self.current_cc,
tile_description=tile_description,
A=tensor_A, B=tensor_B, C=tensor_C,
epilogue_functor=self.epilogue_functor,
swizzling_functor=self._swizzling_functor,
precompute_mode=SchedulerMode.Device)
return operation
def run(self, A, B, C, D,
alpha=None, beta=None, sync: bool = True,
print_module: bool = False) -> GemmGroupedArguments:
"""
Runs the kernel currently specified.
By default, this call returns only once the kernel has completed. To launch the kernel
and immediately return, set ``sync=False``. In this case, it is the responsibility of the
caller to synchronize the results of the kernel before attempting to access outputs
by calling ``sync()`` on the arguments returned from this call.
:param A: list of tensors representing data type and layout of operand A
:type A: list
:param B: list of tensors representing data type and layout of operand B
:type B: list
:param C: list of tensors representing data type and layout of operand C
:type C: list
:param D: list of tensors representing data type and layout of operand D
:type D: list
:param alpha: scalar parameter alpha from GEMM computation that scales the product of operands A and B
:param beta: scalar parameter beta from GEMM operation that scales operand C
:param sync: whether the call should wait for the kernel to complete before returning
:type sync: bool
:param print_module: whether to print the emitted C++ code
:type print_module: bool
:return: arguments passed in to the kernel
:rtype: cutlass.backend.GemmGroupedArguments
"""
if len(A) != len(B) or len(A) != len(C) or len(A) != len(D):
raise Exception("Lengths of A, B, C, and D lists must be equal")
problem_sizes = []
As, Bs, Cs, Ds = ([None] * len(A) for _ in range(4))
for i in range(len(A)):
As[i] = self._verify_tensor(A[i], self.A, self._element_a, self._layout_a, "A")
Bs[i] = self._verify_tensor(B[i], self.B, self._element_b, self._layout_b, "B")
Cs[i] = self._verify_tensor(C[i], self.C, self._element_c, self._layout_c, "C")
Ds[i] = self._verify_tensor(D[i], self.D, self._element_d, self._layout_d, "D")
problem_sizes.append(cutlass_bindings.gemm.GemmCoord(A[i].shape[0], B[i].shape[1], A[i].shape[1]))
alpha = self._verify_scalar(alpha, self.alpha, self._element_c, "alpha")
beta = self._verify_scalar(beta, self.beta, self._element_c, "beta")
alignment_a = min((self.possible_operations.find_alignment(A.shape, self._layout_a) for A in As))
alignment_b = min((self.possible_operations.find_alignment(B.shape, self._layout_b) for B in Bs))
alignment_c = min((self.possible_operations.find_alignment(C.shape, self._layout_c) for C in Cs))
self.compile(self.tile_description, alignment_A=alignment_a, alignment_B=alignment_b,
alignment_C=alignment_c, print_module=print_module)
arguments = GemmGroupedArguments(
operation=self.operation,
problem_sizes=problem_sizes,
A=As, B=Bs, C=Cs, D=Ds,
output_op=self.operation.epilogue_type(alpha, beta)
)
self.operation.run(arguments)
if sync:
arguments.sync()
return arguments
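# Illustrative sketch of running a grouped GEMM with differently sized problems per group
# (not executed on import). Assumes PyTorch and a CUDA device are available; the sizes are
# arbitrary example values.
def _example_grouped_gemm():
    import torch
    import cutlass
    plan = GroupedGemm(element=torch.float32, layout=cutlass.LayoutType.RowMajor)
    sizes = [(128, 256, 64), (32, 128, 16)]  # (M, N, K) per group
    As = [torch.rand((m, k), device='cuda') for m, _, k in sizes]
    Bs = [torch.rand((k, n), device='cuda') for _, n, k in sizes]
    Cs = [torch.zeros((m, n), device='cuda') for m, n, _ in sizes]
    Ds = [torch.zeros((m, n), device='cuda') for m, n, _ in sizes]
    return plan.run(As, Bs, Cs, Ds)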
| cutlass-main | python/cutlass/op/gemm_grouped.py |
#################################################################################################
#
# Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
"""
Ease-of-use interface for constructing, compiling, and running CONVs
The ``Conv2d`` interface is meant to allow one to easily instantiate, compile, and run
CONV2D operations in CUTLASS via Python, without specifying many configuration parameters.
Under the hood, the interface will select sensible default parameters for the many template
parameters for CUTLASS CONVs.
Note: optimal performance is not to be expected from this interface. To achieve optimal
performance, one should specify and tune each configuration parameter.
The simplest example of using this interface is the following:
.. highlight:: python
.. code-block:: python
# A, B, C, and D are torch/numpy/cupy tensor objects
plan = cutlass.op.Conv2d(kind="fprop", A=A, B=B, C=C, D=D)
plan.run(stride=(1, 1), padding=(0, 0), dilation=(1, 1))
One can also use the interface by specifying data types of operands at construction
and using different tensor objects with these data types at runtime:
.. highlight:: python
.. code-block:: python
# The following is shorthand for:
# cutlass.op.Conv2d(kind="fprop",
# element_A=torch.float32, element_B=torch.float32,
# element_C=torch.float32, element_D=torch.float32,
# element_accumulator=torch.float32)
plan = cutlass.op.Conv2d(kind="fprop", element=torch.float32)
    # Tensors are interpreted as NCHW for torch (NHWC for numpy/cupy)
    A0 = torch.rand((16, 32, 24, 24), dtype=torch.float32, device='cuda')
    B0 = torch.rand((64, 32, 3, 3), dtype=torch.float32, device='cuda')
    C0 = torch.zeros((16, 64, 22, 22), dtype=torch.float32, device='cuda')
    D0 = torch.zeros((16, 64, 22, 22), dtype=torch.float32, device='cuda')
    plan.run(A0, B0, C0, D0, stride=(1, 1), padding=(0, 0), dilation=(1, 1))
    A1 = torch.rand((32, 32, 16, 16), dtype=torch.float32, device='cuda')
    B1 = torch.rand((64, 32, 3, 3), dtype=torch.float32, device='cuda')
    C1 = torch.zeros((32, 64, 14, 14), dtype=torch.float32, device='cuda')
    D1 = torch.zeros((32, 64, 14, 14), dtype=torch.float32, device='cuda')
    plan.run(A1, B1, C1, D1, stride=(1, 1), padding=(0, 0), dilation=(1, 1))
The interface additionally enables one to decouple the compilation of the underlying CUTLASS
kernel from its execution:
.. highlight:: python
.. code-block:: python
    plan = cutlass.op.Conv2d(kind="fprop", element=np.float32)
    plan.compile()
    # Do other work...
    plan.run(A0, B0, C0, D0, stride=(1, 1), padding=(0, 0), dilation=(1, 1))
# Do other work...
plan.run(A1, B1, C1, D1, stride=(1, 1), padding=(0, 0), dilation=(1, 1))
Elementwise activation functions are easily fused to the convolution via the interface:
.. highlight:: python
.. code-block:: python
plan = cutlass.op.Conv2d(kind="fprop", element=np.float32)
plan.activation = cutlass.epilogue.relu
Operations can also be run asynchronously:
.. highlight:: python
.. code-block:: python
plan = cutlass.op.Conv2d(kind="fprop", element=np.float32)
args = plan.run()
# Do other work...
args.sync()
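This module also provides the thin convenience wrappers ``Conv2dFprop``, ``Conv2dDgrad``, and
``Conv2dWgrad``, which name their operands according to the convolution kind. The following is an
illustrative sketch only; it assumes these wrappers are exported from ``cutlass.op`` and that the
tensors have compatible shapes and data types:
.. highlight:: python
.. code-block:: python
    plan = cutlass.op.Conv2dFprop(element=torch.float32)
    plan.run(input=A0, weight=B0, C=C0, output=D0, stride=(1, 1), padding=(0, 0), dilation=(1, 1))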
"""
import cutlass_bindings
import cutlass
from cutlass import epilogue
from cutlass.backend import compiler
from cutlass.backend.conv2d_operation import Conv2dArguments, Conv2dOperation
from cutlass.backend.reduction_operation import ReductionOperation, ReductionArguments
from cutlass.backend.library import TensorDescription, TileDescription
from cutlass.op.op import OperationBase
from cutlass.utils import check, datatypes
class Conv2d(OperationBase):
"""
Constructs a ``Conv2d`` object.
    The convolution kind (fprop, wgrad, dgrad), the data types of operands A, B, and C,
    along with the data type of output D and that used for accumulation, are bound to the ``Conv2d``
object throughout its lifetime -- these are not to be changed after a ``Conv2d`` has been constructed.
The constructor has optional parameters for flexibly setting these parameters. The following
constructors are equivalent:
.. highlight:: python
.. code-block:: python
# Use F32 for A, B, C, D, and accumulation in fprop
# Use the generic ``element`` parameter to concisely set all data types for operands to the same values.
Conv2d(kind="fprop", element=cutlass.DataType.f32)
# Explicitly specify the data types to use for A, B, C, and D.
Conv2d(kind="fprop", element_A=cutlass.DataType.f32, element_B=cutlass.DataType.f32,
element_C=cutlass.DataType.f32, element_D=cutlass.DataType.f32)
# Set the data types and elements from existing tensors. Note that one can use different tensors when
        # executing the convolution via the ``run()`` method than passed in here (though those passed in to ``run()`` must
# have the same data type as those passed in here).
# A, B, C, and D are torch.Tensor objects of type torch.float32 under the channel-last layout
Conv2d(kind="fprop", A=A, B=B, C=C, D=D)
# Explicitly specify the data type for only some of A, B, C, and D. Unspecified data types will inherit
# those passed in via the generic ``element``
Conv2d(kind="fprop", element_A=cutlass.DataType.f32, element_accumulator=cutlass.DataType.f32,
element=cutlass.DataType.f32)
The order of precedence for the setting of the data type for a given operand/output is as follows:
1) If the tensor type is specified (e.g., ``A``), use the data type inferred from this tensor
2) Otherwise, if the data type (e.g., ``element_A``) is specified, use those
3) Otherwise, use the generic values (e.g., ``element``)
:param kind: the convolution kind (i.e. fprop, wgrad, and dgrad)
:type kind: str
:param A: tensor representing data type of operand A
:param B: tensor representing data type of operand B
:param C: tensor representing data type of operand C
:param D: tensor representing data type of operand D
    :param alpha: scalar parameter alpha that scales the product of operands A and B
    :param beta: scalar parameter beta that scales operand C
:param element: generic data type to be used for operands A, B, C, D, as well as the accumulation data type
:type element: cutlass.DataType
:param element_A: data type to be used for operand A
:type element_A: cutlass.DataType
:param element_B: data type to be used for operand B
:type element_B: cutlass.DataType
:param element_C: data type to be used for operand C
:type element_C: cutlass.DataType
:param element_D: data type to be used for operand D
:type element_D: cutlass.DataType
:param element_accumulator: data type to be used in accumulation of the product of operands A and B
:type element_accumulator: cutlass.DataType
:param cc: compute capability of device for which kernels should be compiled. For example, if running on H100, this should be set to 90
:type cc: int
:param kernel_cc: compute capability of kernels to generate. For example, if running on SM90, but desiring to use a CUTLASS 2.x-style Ampere kernel, this should be set to 80
:type kernel_cc: int
"""
def __init__(
self, kind="fprop",
A=None, B=None, C=None, D=None, alpha=1.0, beta=0.0,
element=None,
element_A=None, element_B=None, element_C=None, element_D=None,
element_accumulator=None,
cc: int = None, kernel_cc: int = None
):
super().__init__(cc=cc, kernel_cc=kernel_cc, operation_kind=cutlass.OperationKind.Conv2d)
# Verify the kernel cc
if self.current_cc == 90:
# The Conv2d kernel on Hopper (SM90) is currently unsupported
# Revert to use SM80-tagged kernels
cutlass.logger.warning("Reverting to using SM80-tagged kernel. Opclass may change.")
self.specified_kernel_cc = 80
self._reset_options(80)
# The arch is used in testing
self.arch = self.current_cc
self.name = "conv2d" + kind
# The convolution kind. (concept: cutlass_bindings.conv.Operator)
self.conv_kind = getattr(cutlass_bindings.conv.Operator, kind)
# The element types (concept: cutlass library types) of A, B, C, and D
elements = []
layouts = []
# Complete the data types based on user-provided arguments
for elt, tens, name in zip([element_A, element_B, element_C, element_D],
[A, B, C, D],
["A", "B", "C", "D"]):
if elt is not None and tens is not None:
raise Exception(f'Must not specify both element_{name} and tensor {name}')
if elt is None and tens is None and element is None:
raise Exception(f'Must specify one of element_{name}, tensor {name}, or generic element.')
elt_to_set = None
lay_to_set = None
if tens is not None:
elt_to_set, _ = datatypes.get_datatype_and_layout(tens)
else:
elt_to_set = elt if elt is not None else element
assert elt_to_set is not None
# Currently we only support layout TensorNHWC
lay_to_set = cutlass.LayoutType.TensorNHWC
elements.append(datatypes.library_type(elt_to_set))
layouts.append(lay_to_set)
self._element_a, self._element_b, self._element_c, self._element_d = elements
self._layout_a, self._layout_b, self._layout_c, self._layout_d = layouts
        # Default inputs if none is supplied in run()
        self.A, self.B, self.C, self.D, self.alpha, self.beta = A, B, C, D, alpha, beta
        if element_accumulator is None:
            self._element_accumulator = self._element_c
        else:
            self._element_accumulator = datatypes.library_type(element_accumulator)
# We only specify the stride of the swizzling functor here
# The actual swizzling functor is determined in run based on conv_kind and stride
self._swizzling_stride = 1
# Arguments that will be set to default value in _reset_operations
# The default tile_description and op_class are fetched from manifest of cutlass library
self._tile_description = None
self.op_class = None
# The default identity epilogue will be created
self.epilogue_functor = None
self._reset_operations()
# Arguments that will be determined online based on arguments of "run"
# based on stride, input/output channels, alignment, and conv_kind
self._iterator_algorithm = None
self._stride_support = None
def _reset_operations(self, reset_epilogue: bool = True):
# Set the default op class
datatype_comb = (self._element_a, self._element_b, self._element_accumulator)
layout_comb = (self._layout_a, self._layout_b)
self.possible_op_classes = self.options.supporting_opclasses(
self._element_a, self._element_b, self._element_accumulator,
self._layout_a, self._layout_b
)
if cutlass.OpcodeClass.TensorOp in self.possible_op_classes:
self.opclass = cutlass.OpcodeClass.TensorOp
elif cutlass.OpcodeClass.Simt in self.possible_op_classes:
self.opclass = cutlass.OpcodeClass.Simt
else:
raise Exception(f'No kernel configuration found for supported data type and layout '
f'combination {datatype_comb}x{layout_comb}')
if reset_epilogue:
self._reset_epilogue_functor_activation(epilogue.identity)
self.alignment_pref_A = min(
128 // cutlass.DataTypeSize[self._element_a], max(self.possible_operations.alignments))
self.alignment_pref_B = min(
128 // cutlass.DataTypeSize[self._element_b], max(self.possible_operations.alignments))
self.alignment_pref_C = min(
128 // cutlass.DataTypeSize[self._element_c], max(self.possible_operations.alignments))
#
# Tile description Related
#
@property
def tile_description(self) -> TileDescription:
"""
Returns the tile description
"""
return self._tile_description
@tile_description.setter
def tile_description(
self, td=None):
"""
Set the tile description
:param td: tile description
:type td: cutlass.backend.TileDescription, or a dict with keys
{
"threadblock_shape": [int, int, int],
"warp_count": [int, int, int],
"stages": int,
"instruction_shape": [int, int, int] (optional),
"cluster_shape": [int, int, int] (optional)
}
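        An illustrative example of setting the tile description from a dictionary follows. The values
        shown are placeholders and must correspond to a configuration supported by the kernels
        available for the current data types and compute capability:
        .. code-block:: python
            plan.tile_description = {
                "threadblock_shape": [128, 128, 8],
                "warp_count": [4, 2, 1],
                "stages": 4
            }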
"""
if td is None:
return
if isinstance(td, dict):
if self._tile_description is None:
alignment = list(self.possible_operations.kernels_by_alignment.keys())[0]
op = self.possible_operations.operations(alignment)[0]
self._tile_description = datatypes.td_from_profiler_op(op)
if "cluster_shape" in td.keys():
if td["cluster_shape"] != [1, 1, 1]:
cutlass.logger.warning("Conv2d currently only support 'cluster_shape'=[1, 1, 1]'.")
td["cluster_shape"] = [1, 1, 1]
td = self._tile_description.clone_and_update(td)
valid, msg = self._valid_tile_description(td)
if valid:
self._tile_description = td
else:
raise Exception(msg)
def _valid_tile_description(self, td: TileDescription) -> tuple:
"""
Checks whether the provided tile description is valid for the given compute capability. At present,
this checks the following:
- Does the tile description use a number of stages supported by the compute capability in question?
- Does the tile size requested fit within shared memory?
        - Are the requested cluster dimensions within the valid range for the given architecture
          (e.g., no non-unit cluster dimensions for pre-SM90 architectures)?
- Is the kernel schedule being used supported on the architecture in question?
:param td: tile description to validate
:type td: cutlass.backend.TileDescription
:return: tuple in which the first element is a bool indicating that the tile description is valid
and the second element is a string providing an optional error message.
:rtype: tuple
"""
# Check stage count based on the CC to which we are compiling (self.cc), rather
# than the CC from which we find kernels (self.current_cc)
valid, msg = check.valid_stage_count(self.cc, td)
if not valid:
return (valid, msg)
valid, msg = check.valid_cluster_shape(self.current_cc, td.cluster_shape)
if not valid:
return (valid, msg)
return valid, msg
def tile_descriptions(self) -> list:
"""
Returns a list of valid tile descriptions for the operations
:returns: list of valid tile descriptions for the operations
:rtype: list
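        For example (illustrative):
        .. code-block:: python
            for td in plan.tile_descriptions():
                print(td)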
"""
descriptions = []
description_str = []
for op in self.possible_operations.all_operations:
td = datatypes.td_from_profiler_op(op)
if str(td) not in description_str:
description_str.append(str(td))
descriptions.append(td)
return descriptions
#
# Swizzling functor Related
#
@property
def swizzling_stride(self):
"""
Returns the stride of swizzling currently being used by the Conv2d
        :return: swizzling stride
"""
return self._swizzling_stride
@swizzling_stride.setter
def swizzling_stride(self, stride: int):
"""
        Sets the stride used by the swizzling functor to ``stride``
"""
if not isinstance(stride, int):
raise Exception(f"Expect integer (1, 2, 4, 8), got {stride}")
self._swizzling_stride = stride
def _propose_swizzling_functor(self, stride):
"""
Automatically propose the swizzling functor based on the stride
"""
if self.conv_kind == cutlass_bindings.conv.Operator.dgrad:
if stride[0] != 1 or stride[1] != 1:
return getattr(cutlass.swizzle, f"StridedDgradIdentitySwizzle{self._swizzling_stride}")
return getattr(cutlass.swizzle, f"IdentitySwizzle{self._swizzling_stride}")
#
# Iterator Algorithm Related
#
@property
def iterator_algorithm(self) -> cutlass_bindings.conv.IteratorAlgorithm:
"""
Returns the iterator algorithm
"""
return self._iterator_algorithm
@iterator_algorithm.setter
def iterator_algorithm(self, alg: str):
"""
Sets the iterator algorithm
:param alg: The iterator algorithm
        :type alg: str, options: "analytic", "optimized", "few_channels", and "fixed_channels"
"""
# Check if the iterator algorithm is valid
if alg in ["few_channels", "fixed_channels"] and self.conv_kind != cutlass_bindings.conv.Operator.fprop:
raise Exception(f"{self.conv_kind} does not support iterator algorithm {alg}.")
self._iterator_algorithm = getattr(cutlass_bindings.conv.IteratorAlgorithm, alg)
def _propose_iterator_algorithm(self, problem_size, alignment_a, alignment_b) -> cutlass_bindings.conv.IteratorAlgorithm:
"""
Propose a valid iterator algorithm based on problem size and alignment
"""
if self.conv_kind == cutlass_bindings.conv.Operator.fprop:
# Check whether the fixed channel is applicable
if problem_size.C == alignment_a:
return cutlass_bindings.conv.IteratorAlgorithm.fixed_channels
elif (problem_size.C % alignment_a == 0 and
problem_size.R <= 32 and problem_size.S <= 32):
return cutlass_bindings.conv.IteratorAlgorithm.optimized
else:
return cutlass_bindings.conv.IteratorAlgorithm.analytic
elif self.conv_kind == cutlass_bindings.conv.Operator.dgrad:
if (problem_size.K % alignment_a == 0 and
problem_size.R <= 32 and problem_size.S <= 32 and
problem_size.C % alignment_b == 0):
return cutlass_bindings.conv.IteratorAlgorithm.optimized
else:
return cutlass_bindings.conv.IteratorAlgorithm.analytic
elif self.conv_kind == cutlass_bindings.conv.Operator.wgrad:
if (problem_size.K % alignment_a == 0 and
problem_size.C % alignment_b == 0):
return cutlass_bindings.conv.IteratorAlgorithm.optimized
else:
return cutlass_bindings.conv.IteratorAlgorithm.analytic
def _validate_iterator_algorithm(self, iterator_algorithm, problem_size, alignment_a, alignment_b) -> bool:
"""
        Validates whether the user-provided iterator algorithm works for the given problem size
"""
if self.conv_kind == cutlass_bindings.conv.Operator.fprop:
if iterator_algorithm == cutlass_bindings.conv.IteratorAlgorithm.fixed_channels:
return problem_size.C == alignment_a
elif iterator_algorithm == cutlass_bindings.conv.IteratorAlgorithm.optimized:
return (problem_size.C % alignment_a == 0 and
problem_size.R <= 32 and problem_size.S <= 32)
elif iterator_algorithm == cutlass_bindings.conv.IteratorAlgorithm.few_channels:
return problem_size.C % alignment_a == 0
elif self.conv_kind == cutlass_bindings.conv.Operator.dgrad:
if iterator_algorithm == cutlass_bindings.conv.IteratorAlgorithm.optimized:
return (problem_size.K % alignment_a == 0 and
problem_size.R <= 32 and problem_size.S <= 32 and
problem_size.C % alignment_b == 0)
elif self.conv_kind == cutlass_bindings.conv.Operator.wgrad:
if iterator_algorithm == cutlass_bindings.conv.IteratorAlgorithm.optimized:
return (problem_size.K % alignment_a == 0 and
problem_size.C % alignment_b == 0)
return True
#
# Stride Support Related
#
def _propose_stride_support(self, stride):
if self.conv_kind == cutlass_bindings.conv.Operator.dgrad:
if stride[0] == 1 and stride[1] == 1:
return cutlass.backend.library.StrideSupport.Unity
return cutlass.backend.library.StrideSupport.Strided
#
# Construct and Compilation
#
def construct(
self, tile_description: TileDescription = None,
alignment_A: int = None, alignment_B: int = None, alignment_C: int = None,
iterator_algorithm: cutlass_bindings.conv.IteratorAlgorithm = None,
stride_support = None, swizzling_functor: cutlass.swizzle = None,
epilogue_functor=None) -> cutlass.backend.Conv2dOperation:
"""
Constructs a ``cutlass.backend.Conv2dOperation`` based on the input parameters and current
kernel specification of the ``Conv2d`` object.
:param tile_description: tile description specifying shapes and operand types to use in the kernel
:type tile_description: cutlass.backend.TileDescription
:param alignment_A: alignment of operand A
:type alignment_A: int
:param alignment_B: alignment of operand B
:type alignment_B: int
:param alignment_C: alignment of operand C
:type alignment_C: int
:param iterator_algorithm: the iterator algorithm used
:type iterator_algorithm: cutlass_bindings.conv.IteratorAlgorithm
:param stride_support: the stride support of dgrad
:type stride_support: cutlass.backend.library.StrideSupport
:param swizzling_functor: the swizzling functor
:type swizzling_functor: cutlass.swizzle
:param epilogue_functor: the epilogue functor
:return: operation that was constructed
:rtype: cutlass.backend.Conv2dOperation
"""
# Get alignment
alignment_A = check.alignment_or_default(alignment_A, self.alignment_pref_A)
alignment_B = check.alignment_or_default(alignment_B, self.alignment_pref_B)
alignment_C = check.alignment_or_default(alignment_C, self.alignment_pref_C)
tensor_A = TensorDescription(
datatypes.binding_type(self._element_a),
            datatypes.binding_layout(self._layout_a),
alignment_A
)
tensor_B = TensorDescription(
datatypes.binding_type(self._element_b),
datatypes.binding_layout(self._layout_b),
alignment_B
)
tensor_C = TensorDescription(
datatypes.binding_type(self._element_c),
datatypes.binding_layout(self._layout_c),
alignment_C
)
if tile_description is None:
if self.tile_description is not None:
tile_description = self.tile_description
else:
op = self.possible_operations.operations(alignment_A)[0]
tile_description = datatypes.td_from_profiler_op(op)
else:
valid, err_str = self._valid_tile_description(tile_description)
if not valid:
raise Exception(f"Invalid tile description. {err_str}")
self.tile_description = tile_description
if iterator_algorithm is None:
# If the iterator algorithm is already set
if self.iterator_algorithm is not None:
iterator_algorithm = self.iterator_algorithm
else:
# Otherwise, we conservatively use the analytic iterator for correctness
iterator_algorithm = cutlass_bindings.conv.IteratorAlgorithm.analytic
if stride_support is None:
# If the stride support is already set
if self._stride_support is not None:
stride_support = self._stride_support
else:
# Otherwise, we assume strided
stride_support = cutlass.backend.library.StrideSupport.Strided
if swizzling_functor is None:
            # Otherwise, default to the functor proposed for a strided convolution (run() selects one based on the actual stride)
swizzling_functor = self._propose_swizzling_functor(stride=(2, 2))
if epilogue_functor is None:
if self.epilogue_functor is not None:
epilogue_functor = self.epilogue_functor
else:
epilogue_functor = self._create_epilogue_functor_activation(self._activation)
# Reset the alignment of the epilogue functor
epilogue_functor = self._reset_epilogue_functor_alignment(alignment_C, epilogue_functor)
operation = Conv2dOperation(
conv_kind=self.conv_kind,
iterator_algorithm=iterator_algorithm,
arch=self.current_cc,
tile_description=tile_description,
A=tensor_A, B=tensor_B, C=tensor_C,
stride_support=stride_support,
epilogue_functor=epilogue_functor,
swizzling_functor=swizzling_functor,
)
return operation
def compile(self, tile_description: TileDescription = None,
alignment_A: int = None, alignment_B: int = None, alignment_C: int = None,
iterator_algorithm: cutlass_bindings.conv.IteratorAlgorithm = None,
stride_support = None, swizzling_functor: cutlass.swizzle = None,
epilogue_functor = None, print_module: bool = False) -> cutlass.backend.Conv2dOperation:
"""
Emits and compiles the kernel currently specified. If ``tile_description`` and any
of the ``alignment`` parameters are set, the kernel will be chosen using this
tile description and alignments. Otherwise, a default tile description and alignment
will be used.
        :param tile_description: tile description specifying shapes and operand types to use in the kernel
:type tile_description: cutlass.backend.TileDescription
:param alignment_A: alignment of operand A
:type alignment_A: int
:param alignment_B: alignment of operand B
:type alignment_B: int
:param alignment_C: alignment of operand C
:type alignment_C: int
:param iterator_algorithm: the iterator algorithm used
:type iterator_algorithm: cutlass_bindings.conv.IteratorAlgorithm
:param stride_support: the stride support of dgrad
:type stride_support: cutlass.backend.library.StrideSupport
:param swizzling_functor: the swizzling functor
:type swizzling_functor: cutlass.swizzle
:param epilogue_functor: the epilogue functor
:return: operation that was compiled
:rtype: cutlass.backend.Conv2dOperation
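        As an illustrative sketch, one might compile a kernel with default parameters and print the
        emitted C++ as follows:
        .. code-block:: python
            plan = cutlass.op.Conv2d(kind="fprop", element=np.float32)
            plan.compile(print_module=True)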
"""
self.operation = self.construct(
tile_description, alignment_A, alignment_B, alignment_C,
iterator_algorithm, stride_support, swizzling_functor, epilogue_functor)
if print_module:
print(self.operation.rt_module.emit())
compiler.add_module([self.operation,])
return self.operation
#
# Run Related
#
def _verify_type_and_layout(self, tensor, ref_type, ref_layout, name):
"""
        Verifies that ``tensor`` has data type ``ref_type``. An exception is raised if it does not.
        ``ref_layout`` is accepted for interface consistency but is not currently checked.
        :param tensor: object representing a tensor passed in to verify, or ``None`` if no tensor was passed in
        :type tensor: numpy/cupy/torch array/tensor object
        :param ref_type: data type that this object was initialized to expect for the tensor
        :param ref_layout: layout that this object was initialized to expect for the tensor
        :param name: identifier of the tensor to verify. Used in raising exceptions
:type name: str
"""
dtype, _ = datatypes.get_datatype_and_layout(tensor)
if dtype != ref_type:
            raise Exception(f'Tensor {name} with data type {dtype} '
                            f'does not match the expected type {ref_type}.')
def _get_and_verify_conv_problem_size(self, A, B, C, stride, padding, dilation):
if self.conv_kind == cutlass_bindings.conv.Operator.fprop:
input = A
weight = B
output = C
output_tensor = "C"
elif self.conv_kind == cutlass_bindings.conv.Operator.dgrad:
output = A
weight = B
input = C
output_tensor = "A"
elif self.conv_kind == cutlass_bindings.conv.Operator.wgrad:
output = A
input = B
weight = C
output_tensor = "A"
else:
raise Exception(f"Convolution kind {self.conv_kind} is not supported")
N_, H_, W_, C_ = datatypes.get_tensor_shape(input)
K_, R_, S_, _ = datatypes.get_tensor_shape(weight)
_, P_, Q_, _ = datatypes.get_tensor_shape(output)
problem_size = cutlass_bindings.conv.Conv2dProblemSize(
cutlass_bindings.Tensor4DCoord(N_, H_, W_, C_),
cutlass_bindings.Tensor4DCoord(K_, R_, S_, C_),
cutlass_bindings.Tensor4DCoord(padding[0], padding[0], padding[1], padding[1]),
cutlass_bindings.MatrixCoord(stride[0], stride[1]),
cutlass_bindings.MatrixCoord(dilation[0], dilation[1]),
cutlass_bindings.conv.Mode.cross_correlation,
1, 1
)
if P_ != problem_size.P or Q_ != problem_size.Q:
raise Exception(
f"Tensor {output_tensor} size should be ({N_}, {problem_size.P}, {problem_size.Q}, {K_}), got ({N_}, {P_}, {Q_}, {K_})")
return problem_size
def run(self, A=None, B=None, C=None, D=None,
stride=(1, 1), padding=(0, 0), dilation=(1, 1),
alpha=None, beta=None,
split_k=("serial", 1), sync: bool = True,
print_module: bool = False) -> Conv2dArguments:
"""
Runs the kernel currently specified. If it has not already been, the kernel is emitted and
compiled. Tensors holding operands and outputs of the kernel are sourced either from the
``A``, ``B``, ``C``, ``D``, ``alpha``, and ``beta``
parameters provided in the call, or from those
passed in on the construction of this object -- one of the two must be specified.
By default, this call returns only once the kernel has completed. To launch the kernel
and immediately return, set ``sync=False``. In this case, it is the responsibility of the
        caller to synchronize the results of the kernel before attempting to access outputs
by calling ``sync()`` on the arguments returned from this call.
:param A: tensor representing data type and layout of operand A
:param B: tensor representing data type and layout of operand B
:param C: tensor representing data type and layout of operand C
:param D: tensor representing data type and layout of operand D
:param stride: (stride_h, stride_w) describing the convolution stride. Default: (1, 1)
:param padding: (pad_h, pad_w) describing the convolution padding. Default: (0, 0)
:param dilation: (dilation_h, dilation_w) describing the dilation of convolution. Default: (1, 1)
        :param alpha: scalar parameter alpha that scales the product of operands A and B
        :param beta: scalar parameter beta that scales operand C
:param split_k: a tuple (split_k_mode, split_k_slices)
:param sync: whether the call should wait for the kernel to complete before returning
:type sync: bool
:param print_module: whether to print the emitted C++ code
:type print_module: bool
:return: arguments passed in to the kernel
:rtype: cutlass.backend.Conv2dArguments
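        An illustrative sketch of running with parallel split-K (the tensors and slice count are
        placeholders):
        .. code-block:: python
            args = plan.run(A, B, C, D, stride=(1, 1), padding=(0, 0), split_k=("parallel", 3), sync=True)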
"""
A = self._verify_tensor(A, self.A, self._element_a, self._layout_a, "A")
B = self._verify_tensor(B, self.B, self._element_b, self._layout_b, "B")
C = self._verify_tensor(C, self.C, self._element_c, self._layout_c, "C")
D = self._verify_tensor(D, self.D, self._element_d, self._layout_d, "D")
alpha = self._verify_scalar(alpha, self.alpha, self._element_c, "alpha")
beta = self._verify_scalar(beta, self.beta, self._element_c, "beta")
# handle the case when there is no C
if C is None:
if beta != 0:
raise Exception(f"With beta {beta} != 0, C has to be provided.")
else:
C = D
# Construct problem size based on input
# It also verifies whether the A, B, C, D, stride, padding, and dilation are matching
problem_size = self._get_and_verify_conv_problem_size(A, B, C, stride, padding, dilation)
# Propose stride support based on input
stride_support = self._propose_stride_support(stride)
# Propose swizzling functor
swizzling_functor = self._propose_swizzling_functor(stride)
# Get the alignment
alignment_a = self.possible_operations.find_alignment(datatypes.get_tensor_shape(A), self._layout_a)
alignment_b = self.possible_operations.find_alignment(datatypes.get_tensor_shape(B), self._layout_b)
alignment_c = self.possible_operations.find_alignment(datatypes.get_tensor_shape(C), self._layout_c)
alignment_a = check.update_alignment(alignment_a, self.alignment_pref_A)
alignment_b = check.update_alignment(alignment_b, self.alignment_pref_B)
alignment_c = check.update_alignment(alignment_c, self.alignment_pref_C)
# Propose iterator algorithm based on input
if self._iterator_algorithm is None:
            # Propose a default iterator algorithm based on the problem size
iterator_algorithm = self._propose_iterator_algorithm(problem_size, alignment_a, alignment_b)
else:
if (self._validate_iterator_algorithm(self._iterator_algorithm, problem_size, alignment_a, alignment_b)):
iterator_algorithm = self._iterator_algorithm
else:
raise Exception(f"Iterator algorithm {self._iterator_algorithm} is invalid for current problem.")
epilogue_args = [alpha, beta]
if hasattr(self, "_activation_args"):
if isinstance(self._activation_args, list):
epilogue_args += self._activation_args
else:
epilogue_args.append(self._activation_args)
if split_k[0] == "parallel" and split_k[1] > 1:
epilogue_functor = self._create_epilogue_functor_activation(epilogue.identity)
else:
epilogue_functor = self.epilogue_functor
        # The alignments used here are those determined above from the operand tensors and the iterator algorithm
self.compile(tile_description=self.tile_description, alignment_A=alignment_a, alignment_B=alignment_b,
alignment_C=alignment_c, iterator_algorithm=iterator_algorithm, stride_support=stride_support,
swizzling_functor=swizzling_functor, epilogue_functor=epilogue_functor, print_module=print_module)
# Create reduction operation for parallel split-k
if split_k[0] == "parallel" and split_k[1] > 1:
epilogue_functor_reduction = self._reset_epilogue_functor_alignment(alignment_c, self.epilogue_functor)
self.reduction_operation = ReductionOperation(
shape=cutlass_bindings.MatrixCoord(4, 32 * alignment_c), C=self.operation.C,
element_accumulator=datatypes.binding_type(self._element_accumulator),
element_compute=datatypes.binding_type(self._element_accumulator),
epilogue_functor=epilogue_functor_reduction,
count=alignment_c
)
if print_module:
print(self.reduction_operation.rt_module.emit())
compiler.add_module([self.reduction_operation,])
arguments = Conv2dArguments(
operation=self.operation, problem_size=problem_size,
A=A, B=B, C=C, D=D,
output_op=self.operation.epilogue_type(*epilogue_args),
split_k_mode=datatypes.getattr_enum(cutlass_bindings.conv.SplitKMode, split_k[0]),
split_k_slices=split_k[1]
)
self.operation.run(arguments)
if split_k[0] == "parallel" and split_k[1] > 1:
implicit_gemm_size = cutlass_bindings.conv.implicit_gemm_problem_size(
self.conv_kind, arguments.problem_size
)
reduction_arguments = ReductionArguments(
self.reduction_operation,
problem_size=[implicit_gemm_size.m(), implicit_gemm_size.n()],
partitions=split_k[1],
workspace=arguments.ptr_D,
destination=D,
source=C,
output_op=self.reduction_operation.epilogue_type(*epilogue_args)
)
self.reduction_operation.run(reduction_arguments)
if sync:
if split_k[0] == "parallel" and split_k[1] > 1:
reduction_arguments.sync()
else:
arguments.sync()
return arguments
#
# Helper functions
#
@staticmethod
def output_size(input_size, weight_size, padding, stride, dilation):
problem_size = cutlass_bindings.conv.Conv2dProblemSize(
cutlass_bindings.Tensor4DCoord(*input_size),
cutlass_bindings.Tensor4DCoord(*weight_size),
cutlass_bindings.Tensor4DCoord(padding[0], padding[0], padding[1], padding[1]),
cutlass_bindings.MatrixCoord(stride[0], stride[1]),
cutlass_bindings.MatrixCoord(dilation[0], dilation[1]),
cutlass_bindings.conv.Mode.cross_correlation,
1, 1
)
return (problem_size.N, problem_size.P, problem_size.Q, problem_size.K)
#
# Easy to use interfaces for fprop, wgrad, and dgrad
#
class Conv2dFprop(Conv2d):
def __init__(
self,
input=None, weight=None, C=None, output=None, alpha=1, beta=0,
element=None,
element_input=None, element_weight=None, element_C=None, element_output=None,
element_accumulator=None,
cc: int = None, kernel_cc: int = None):
A, B, D = input, weight, output
element_A, element_B, element_D = element_input, element_weight, element_output
super().__init__(
"fprop", A, B, C, D, alpha, beta, element,
element_A, element_B, element_C, element_D,
element_accumulator, cc, kernel_cc)
def run(
self, input=None, weight=None, C=None, output=None, alpha=None, beta=None,
stride=(1, 1), padding=(0, 0), dilation=(1, 1), split_k=("serial", 1),
sync: bool = True, print_module: bool = False) -> Conv2dArguments:
A, B, D = input, weight, output
return super().run(
A, B, C, D, alpha, beta, stride, padding, dilation, split_k, sync, print_module)
class Conv2dDgrad(Conv2d):
def __init__(
self,
grad_output=None, weight=None, C=None, grad_input=None, alpha=1, beta=0,
element=None,
element_grad_output=None, element_weight=None, element_C=None, element_grad_input=None,
element_accumulator=None,
cc: int = None, kernel_cc: int = None):
A, B, D = grad_output, weight, grad_input
element_A, element_B, element_D = element_grad_output, element_weight, element_grad_input
super().__init__(
"dgrad", A, B, C, D, alpha, beta, element,
element_A, element_B, element_C, element_D,
element_accumulator, cc, kernel_cc)
def run(self, grad_output=None, weight=None, C=None, grad_input=None, alpha=None, beta=None,
stride=(1, 1), padding=(0, 0), dilation=(1, 1), split_k=("serial", 1),
sync: bool = True, print_module: bool = False) -> Conv2dArguments:
#
A, B, D = grad_output, weight, grad_input
return super().run(
A, B, C, D, alpha, beta, stride, padding, dilation, split_k, sync, print_module)
class Conv2dWgrad(Conv2d):
def __init__(
self,
grad_output=None, input=None, C=None, grad_weight=None, alpha=1, beta=0,
element=None,
element_grad_output=None, element_input=None, element_C=None, element_grad_weight=None,
element_accumulator=None,
cc: int = None, kernel_cc: int = None):
A, B, D = grad_output, input, grad_weight
element_A, element_B, element_D = element_grad_output, element_input, element_grad_weight
super().__init__(
"wgrad", A, B, C, D, alpha, beta, element,
element_A, element_B, element_C, element_D,
element_accumulator, cc, kernel_cc)
def run(self, grad_output=None, input=None, C=None, grad_weight=None, alpha=None, beta=None,
stride=(1, 1), padding=(0, 0), dilation=(1, 1), split_k=("serial", 1),
sync: bool = True, print_module: bool = False) -> Conv2dArguments:
#
A, B, D = grad_output, input, grad_weight
return super().run(
A, B, C, D, alpha, beta, stride, padding, dilation, split_k, sync, print_module)
| cutlass-main | python/cutlass/op/conv.py |
#################################################################################################
#
# Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
"""
Utility functions for checking constraints on kernels and calculating kernel attributes
"""
import ctypes
import cutlass_bindings
import cutlass
from cutlass.backend.library import DataTypeSize, TileDescription
from cutlass.utils.datatypes import binding_type
def calculate_smem_usage_per_stage(td: TileDescription, operation_kind: cutlass.OperationKind) -> int:
"""
Returns the amount of shared memory in bytes consumed in a single stage of a kernel.
:param td: tile description to compute shared memory of
:type td: TileDescription
:param operation_kind: identifier for the type of operation being performed
:type operation_kind: cutlass.OperationKind
:return: number of bytes of shared memory consumed by a single stage
:rtype: int
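    As a worked example for the GEMM formula below (data type sizes are in bits, hence the division
    by 8), a hypothetical 128x128x8 threadblock with f32 operands uses:
    .. code-block:: python
        # A tile: 32 * 128 * 8 // 8 = 4096 bytes
        # B tile: 32 * 8 * 128 // 8 = 4096 bytes
        # plus 32 bytes of stage barrier storage = 8224 bytes per stage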
"""
m, n, k = td.threadblock_shape
if operation_kind == cutlass.OperationKind.Gemm:
stage_barrier_bytes = 32
return (
(DataTypeSize[td.math_instruction.element_a] * m * k // 8)
+ (DataTypeSize[td.math_instruction.element_b] * k * n // 8)
+ stage_barrier_bytes
)
else:
raise Exception(f"No available shared memory calculation for operation kind {operation.operation_kind}")
def calculate_smem_usage(operation) -> int:
"""
    Returns the amount of shared memory in bytes consumed by a kernel.
    :param operation: operation for which shared memory usage is computed
    :return: number of bytes of shared memory consumed by the operation
    :rtype: int
"""
_per_stage = calculate_smem_usage_per_stage(operation.tile_description, operation.operation_kind)
return _per_stage * operation.tile_description.stages
def valid_stage_count(
cc: int,
td: TileDescription,
element_C: cutlass.DataType = None,
element_D: cutlass.DataType = None) -> tuple:
"""
Checks whether a device with `cc` supports the number of stages within `tile_description`, both
based on raw limits on the number of stages and based on shared memory capacity
:param cc: compute capability of device in question
:type cc: int
:param td: tile description to check
:type td: TileDescription
:param element_C: data type of operand C
:type element_C: cutlass.DataType
:param element_D: data type of operand D
:type element_D: cutlass.DataType
:return: tuple with the first element indicating whether the provided tile description is
valid for the provided device and the second element being an error message
:rtype: tuple
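    An illustrative usage sketch:
    .. code-block:: python
        valid, msg = valid_stage_count(80, td)
        if not valid:
            raise Exception(msg)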
"""
if cc == 90:
if (td.stages is None or td.stages == 0):
# Stage count of None or 0 for SM90 indicates that the CollectiveBuilder automatically
# determines the stage count to use. Thus, all settings are valid in these scenarios.
return (True, "")
else:
cutlass.logger.warning(
"Setting an explicit stage count for SM90 kernels currently may "
"result in compilation errors if the combination of tile shape, "
"stage count, and shared memory requirement of the epilogue exceeds "
"the available shared memory per SM.")
if td.stages <= 0:
return (False, f"Stage counts must be positive integers. Tile description has stage count of {td.stages}.")
if cc < 80 and td.stages != 2:
return (False, f"Tile description has stage count of {td.stages}, "
f"but only 2 stages are supported on SM{cc}.")
# The calculation below does not consider shared memory used by the epilogue and, thus,
# only catches cases in which the mainloop exceeds the device's shared memory capacity.
# This is not a concern for CUTLASS 2.x kernels, for which the shared memory of the
# mainloop and epilogue is shared.
smem_per_stage = calculate_smem_usage_per_stage(td, cutlass.OperationKind.Gemm)
smem_usage_mainloop = (smem_per_stage * td.stages)
smem_arch = cutlass.SharedMemPerCC[cc] << 10
if smem_usage_mainloop > smem_arch:
return ( False,
"Configuration uses too much shared memory. Consider reducing stage count or tile shape.\n"
f"Details:\n"
f"Mainloop uses {smem_per_stage} bytes of shared memory per stage, and "
f"{td.stages} stages for a total of {smem_usage_mainloop} bytes.\n"
f"The maxmium amount of shared memory that can be used per block on CC {cc} is {smem_arch}.")
return (True, "")
def valid_cluster_shape(cc: int, cluster_shape: list) -> tuple:
"""
Checks whether a device with `cc` supports a thread block cluster of shape `cluster_shape`.
:param cc: compute capability of device in question
:type cc: int
:param cluster_shape: dimensions of thread block cluster shape to check
:type cluster_shape: list
:return: tuple with the first element indicating whether the provided cluster shape is
valid for the provided device and the second element being an error message
:rtype: tuple
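    For example (illustrative), on SM90 a [2, 2, 1] cluster contains four thread blocks and is
    accepted, while a [4, 4, 1] cluster contains 16 thread blocks and is rejected:
    .. code-block:: python
        assert valid_cluster_shape(90, [2, 2, 1])[0]
        assert not valid_cluster_shape(90, [4, 4, 1])[0]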
"""
if cc < 90:
if cluster_shape != [1, 1, 1]:
return (False,
f"Cluster shape for pre-SM90 architectures must be [1, 1, 1]. Received cluster shape of "
f"{cluster_shape} for SM{cc}.")
else:
return (True, "")
if len(cluster_shape) != 3:
        return (False,
            f"Cluster shapes must be rank-3. Received {cluster_shape} (rank {len(cluster_shape)}).")
if cluster_shape[2] != 1:
return (False,
"CUTLASS kernels currently require the third dimension of cluster shape to be 1. "
f"Received cluster shape of {cluster_shape}.")
# The CUDA programming guide currently defines a maximum of 8 thread blocks per cluster
# as being portably supported (https://docs.nvidia.com/cuda/cuda-c-programming-guide/#thread-block-clusters).
# Current CUTLASS kernels only have non-unit cluster dimensions within the first two dimensions,
# so we check that the first two dimensions of the cluster shape do not exceed 8 thread blocks in total.
blocks_in_2d = cluster_shape[0] * cluster_shape[1]
if blocks_in_2d > 8:
return (False,
f"Thread block clusters with more than 8 thread blocks are currently unsupported on SM{cc}. "
f"Received cluster shape {cluster_shape}, which has {blocks_in_2d} thread blocks.")
return (True, "")
def valid_schedule(
cc: int,
kernel_schedule: cutlass.KernelScheduleType,
epilogue_schedule: cutlass.EpilogueScheduleType,
tile_scheduler: cutlass.TileSchedulerType) -> tuple:
"""
Checks that the kernel and epilogue schedules passed in are a valid combination for
a device of compute capability ``cc``.
:param cc: compute capability of device in question
:type cc: int
:param kernel_schedule: kernel schedule type
:type kernel_schedule: cutlass.KernelScheduleType
:param epilogue_schedule: epilogue schedule type
:type epilogue_schedule: cutlass.EpilogueScheduleType
:param tile_scheduler: tile scheduler type
:type tile_scheduler: cutlass.TileSchedulerType
:return: tuple with the first element indicating whether the provided schedules are
valid for the provided device and the second element being an error message
:rtype: tuple
"""
kernel_auto = (kernel_schedule == cutlass.KernelScheduleType.ScheduleAuto)
epilogue_auto = (epilogue_schedule == cutlass.EpilogueScheduleType.ScheduleAuto)
tile_scheduler_default = (tile_scheduler == cutlass.TileSchedulerType.Default)
if cc < 90 and not (kernel_auto and epilogue_auto and tile_scheduler_default):
return (False, "Non-default schedules are only supported on SM90 and beyond")
if (kernel_auto and not epilogue_auto) or (not kernel_auto and epilogue_auto):
return (False, "Kernel and epilogue schedules must either both be auto or neither be auto")
if not tile_scheduler_default:
if (tile_scheduler == cutlass.TileSchedulerType.StreamK) and (kernel_schedule != cutlass.KernelScheduleType.TmaWarpSpecializedCooperative):
return (False, "Stream-K tile scheduler is currently only supported with the cooperative kernel schedule")
return (True, "")
def alignment_or_default(alignment_provided: int, default_alignment: int) -> int:
"""
    Returns `alignment_provided` if it is set, otherwise `default_alignment`. An exception is raised
    if `alignment_provided` exceeds `default_alignment`.
:param alignment_provided: alignment preference specified. Can be None.
:type alignment_provided: int
:param default_alignment: alignment to use if `alignment_provided` is None
:type default_alignment: int
:return: alignment to use
:rtype: int
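    For example (illustrative):
    .. code-block:: python
        alignment_or_default(None, 4)  # returns 4
        alignment_or_default(2, 4)     # returns 2
        alignment_or_default(8, 4)     # raises an exception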
"""
if alignment_provided is not None:
if alignment_provided > default_alignment:
raise Exception(f"Alignment {alignment_provided} exceeds the maximum supported of {default_alignment}.")
return alignment_provided
return default_alignment
def update_alignment(alignment_provided:int, default_alignment: int) -> int:
"""
    Returns `alignment_provided` if it is set and does not exceed `default_alignment`. If it exceeds
    `default_alignment` but is a multiple of it, `default_alignment` is returned. Otherwise, an
    exception is raised. If `alignment_provided` is None, `default_alignment` is returned.
:param alignment_provided: alignment preference specified. Can be None.
:type alignment_provided: int
:param default_alignment: alignment to use if `alignment_provided` is None
:type default_alignment: int
:return: alignment to use
:rtype: int
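    For example (illustrative):
    .. code-block:: python
        update_alignment(None, 4)  # returns 4
        update_alignment(2, 4)     # returns 2
        update_alignment(8, 4)     # returns 4, since 8 is a multiple of 4
        update_alignment(6, 4)     # raises an exception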
"""
if alignment_provided is not None:
if alignment_provided > default_alignment:
if alignment_provided % default_alignment == 0:
return default_alignment
raise Exception(f"Alignment {alignment_provided} exceeds the maximum supported of {default_alignment}.")
return alignment_provided
return default_alignment
| cutlass-main | python/cutlass/utils/check.py |
#################################################################################################
#
# Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
from cutlass.utils.check import (
alignment_or_default,
update_alignment,
calculate_smem_usage,
calculate_smem_usage_per_stage,
valid_cluster_shape,
valid_schedule,
valid_stage_count,
)
| cutlass-main | python/cutlass/utils/__init__.py |
#################################################################################################
#
# Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
"""
Utility functions for converting between frontend datatypes and CUTLASS datatypes
"""
import cutlass_bindings
import cutlass
from cutlass.backend.library import (
DataTypeSize,
MathInstruction,
MathOperation,
ShortLayoutTypeNames,
TileDescription,
)
try:
import numpy as np
numpy_available = True
_library_to_numpy_dict = {
cutlass.DataType.f16: np.float16,
cutlass.DataType.f32: np.float32,
cutlass.DataType.f64: np.float64,
cutlass.DataType.s8: np.int8,
cutlass.DataType.s32: np.int32,
}
except ImportError:
numpy_available = False
_library_to_numpy_dict = {}
def numpy_library_type(inp) -> cutlass.DataType:
if numpy_available:
if inp == np.float16:
return cutlass.DataType.f16
elif inp == np.float32:
return cutlass.DataType.f32
elif inp == np.float64:
return cutlass.DataType.f64
elif inp == np.int8:
return cutlass.DataType.s8
elif inp == np.int32:
return cutlass.DataType.s32
return None
def numpy_type(inp):
return _library_to_numpy_dict.get(inp, None)
try:
import cupy as cp
cupy_available = True
_library_to_cupy_dict = {
cutlass.DataType.f16: cp.float16,
cutlass.DataType.f32: cp.float32,
cutlass.DataType.f64: cp.float64,
cutlass.DataType.s8: cp.int8,
cutlass.DataType.s32: cp.int32,
}
except ImportError:
cupy_available = False
_library_to_cupy_dict = {}
def cupy_library_type(inp) -> cutlass.DataType:
if cupy_available:
if inp == cp.float16:
return cutlass.DataType.f16
elif inp == cp.float32:
return cutlass.DataType.f32
elif inp == cp.float64:
return cutlass.DataType.f64
return None
def cupy_type(inp):
return _library_to_cupy_dict.get(inp, None)
try:
import torch
torch_available = True
    # torch.half, torch.float, and torch.double are aliases of torch.float16,
    # torch.float32, and torch.float64, respectively, so only one entry per type is needed
    _torch_to_library_dict = {
        torch.float16: cutlass.DataType.f16,
        torch.bfloat16: cutlass.DataType.bf16,
        torch.float32: cutlass.DataType.f32,
        torch.float64: cutlass.DataType.f64,
    }
    _library_to_torch_dict = {
        cutlass.DataType.f16: torch.float16,
        cutlass.DataType.bf16: torch.bfloat16,
        cutlass.DataType.f32: torch.float32,
        cutlass.DataType.f64: torch.float64,
    }
except ImportError:
torch_available = False
_torch_to_library_dict = {}
_library_to_torch_dict = {}
def torch_library_type(inp) -> cutlass.DataType:
return _torch_to_library_dict.get(inp, None)
def torch_type(inp):
return _library_to_torch_dict.get(inp, None)
try:
import bfloat16
bfloat16_available = True
except ImportError:
bfloat16_available = False
def bfloat16_library_type(inp) -> cutlass.DataType:
if bfloat16_available:
if inp == bfloat16.bfloat16:
return cutlass.DataType.bf16
# The return annotation is a string so that it is not evaluated when the bfloat16 package is unavailable
def bfloat16_type(inp) -> "bfloat16.bfloat16":
if bfloat16_available:
if inp == cutlass.DataType.bf16:
return bfloat16.bfloat16
# Mapping from library data type to Python-bound CUTLASS data type
library_to_binding_dict = {
cutlass.DataType.s8: cutlass_bindings.int8,
cutlass.DataType.s32: cutlass_bindings.int32,
cutlass.DataType.f16: cutlass_bindings.float16,
cutlass.DataType.bf16: cutlass_bindings.bfloat16,
cutlass.DataType.f32: cutlass_bindings.float32,
cutlass.DataType.f64: cutlass_bindings.float64,
cutlass.DataType.tf32: cutlass_bindings.tfloat32,
}
# Mapping from Python-bound CUTLASS data type to library data type
binding_to_library = {
cutlass_bindings.int8: cutlass.DataType.s8,
cutlass_bindings.int32: cutlass.DataType.s32,
cutlass_bindings.float16: cutlass.DataType.f16,
cutlass_bindings.bfloat16: cutlass.DataType.bf16,
cutlass_bindings.float32: cutlass.DataType.f32,
cutlass_bindings.float64: cutlass.DataType.f64,
cutlass_bindings.tfloat32: cutlass.DataType.tf32,
}
def binding_library_type(inp):
if inp in binding_to_library:
return binding_to_library[inp]
return None
def has_binding_type(inp: cutlass.DataType):
return inp in library_to_binding_dict
def library_to_binding(inp: cutlass.DataType):
if not has_binding_type(inp):
raise Exception(f"No available conversion from library type {inp} to Python-bound CUTLASS type")
return library_to_binding_dict[inp]
def library_type(inp):
if inp in cutlass.DataTypeSize.keys():
return inp
for cvt_fn in [
bfloat16_library_type,
cupy_library_type,
numpy_library_type,
torch_library_type,
binding_library_type,
]:
out = cvt_fn(inp)
if out is not None:
return out
raise Exception(f"No available conversion from type {inp} to a library type.")
def library_layout(layout):
if layout in cutlass.LayoutTag.keys():
return layout
# Convert Python-bound CUTLASS layout to profiler library layout
if layout == cutlass_bindings.RowMajor:
return cutlass.LayoutType.RowMajor
elif layout == cutlass_bindings.ColumnMajor:
return cutlass.LayoutType.ColumnMajor
elif layout == cutlass_bindings.TensorNHWC:
return cutlass.LayoutType.TensorNHWC
else:
raise Exception(f"No conversion available for layout {layout} to library layout.")
def binding_type(inp):
if inp in DataTypeSize.keys():
return inp
libtype = library_type(inp)
return library_to_binding(libtype)
def binding_layout(layout):
if layout in ShortLayoutTypeNames.keys():
return layout
elif layout == cutlass.LayoutType.RowMajor:
return cutlass_bindings.RowMajor
elif layout == cutlass.LayoutType.ColumnMajor:
return cutlass_bindings.ColumnMajor
elif layout == cutlass.LayoutType.TensorNHWC:
return cutlass_bindings.TensorNHWC
else:
raise Exception(f"No conversion available for layout {layout} to Python-bound CUTLASS layout.")
def _tensor_from_numpy(np_tensor):
    dtype = library_type(np_tensor.dtype)
    if np_tensor.flags.c_contiguous:
        layout = cutlass.LayoutType.RowMajor
    elif np_tensor.flags.f_contiguous:
        layout = cutlass.LayoutType.ColumnMajor
    else:
        raise Exception("Unable to infer a row- or column-major layout from a non-contiguous tensor.")
    return (dtype, layout)
def _tensor_from_torch(pt_tensor):
dtype = library_type(pt_tensor.dtype)
return (dtype, cutlass.LayoutType.RowMajor)
def get_datatype_and_layout(tensor):
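    """
    Returns a tuple of (cutlass.DataType, cutlass.LayoutType) inferred from a numpy/cupy/torch tensor.
    """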
if (numpy_available and isinstance(tensor, np.ndarray)) or (
cupy_available and isinstance(tensor, cp.ndarray)
):
return _tensor_from_numpy(tensor)
elif torch_available and isinstance(tensor, torch.Tensor):
return _tensor_from_torch(tensor)
else:
raise Exception(f"Unable to convert tensor of type {type(tensor)} to Python-bound CUTLASS datatype and layout.")
def get_tensor_shape(tensor):
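    """
    Returns the shape of the tensor in NHWC order. numpy/cupy tensors are assumed to already be laid
    out as NHWC, while torch tensors are assumed to be NCHW and have their dimensions permuted to NHWC.
    """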
if (numpy_available and isinstance(tensor, np.ndarray)) or (
cupy_available and isinstance(tensor, cp.ndarray)
):
return tensor.shape
elif torch_available and isinstance(tensor, torch.Tensor):
size = tensor.size()
return (size[0], size[2], size[3], size[1])
else:
raise Exception(f"Unable to convert tensor of type {type(tensor)} to Python-bound CUTLASS datatype and layout.")
def binding_opclass(opclass: cutlass.OpcodeClass):
if opclass == cutlass.OpcodeClass.TensorOp:
return cutlass_bindings.OpClass.TensorOp
elif opclass == cutlass.OpcodeClass.Simt:
return cutlass_bindings.OpClass.Simt
else:
raise Exception(f"Unable to convert opcode class of type {opclass} to Python-bound CUTLASS opcode class.")
_math_operation_value_map = {x.value: x for x in MathOperation}
def backend_math_operation(math_op: cutlass.MathOperation):
if math_op.value not in _math_operation_value_map.keys():
raise Exception(f"Unable to convert math operation of type {math_op} to backend math operation.")
return _math_operation_value_map[math_op.value]
def construct_backend_td(td: cutlass.TileDescription,
kernel_schedule: cutlass.KernelScheduleType,
epilogue_schedule: cutlass.EpilogueScheduleType,
tile_scheduler: cutlass.TileSchedulerType) -> TileDescription:
mi = td.math_instruction
backend_mi = MathInstruction(
mi.instruction_shape,
binding_type(mi.element_a),
binding_type(mi.element_b),
binding_type(mi.element_accumulator),
binding_opclass(mi.opcode_class),
backend_math_operation(mi.math_operation)
)
cluster_shape = td.cluster_shape if hasattr(td, "cluster_shape") else [1, 1, 1]
return TileDescription(td.threadblock_shape, td.stages, td.warp_count,
backend_mi, cluster_shape, kernel_schedule, epilogue_schedule, tile_scheduler)
def td_from_profiler_op(op) -> TileDescription:
"""
Converts the profiler's TileDescription in ``op`` into the backend TileDescription
:param op: profiler Operation
:returns: backend TileDescription
:rtype: cutlass.backend.TileDescription
"""
kschedule = op.kernel_schedule if hasattr(op, 'kernel_schedule') else None
eschedule = op.epilogue_schedule if hasattr(op, 'epilogue_schedule') else None
tschedule = op.tile_scheduler if hasattr(op, 'tile_scheduler') else None
return construct_backend_td(op.tile_description, kschedule, eschedule, tschedule)
def td_from_profiler_td(td: cutlass.backend.TileDescription) -> TileDescription:
"""
Converts the profiler's TileDescription into the backend TileDescription
:param td: profiler TileDescription
:type td: cutlass.TileDescription
:returns: backend TileDescription
:rtype: cutlass.backend.TileDescription
"""
return construct_backend_td(td, kernel_schedule=None, epilogue_schedule=None, tile_scheduler=None)
def to_camel_case(snake_str):
return "".join(x.capitalize() for x in snake_str.lower().split("_"))
def getattr_enum(obj, attr_name):
    # attr_name is expected to be in snake_case
camel_attr = to_camel_case(attr_name)
if hasattr(obj, camel_attr):
return getattr(obj, camel_attr)
else:
raise Exception(f"Invalid option: {attr_name}")
| cutlass-main | python/cutlass/utils/datatypes.py |
#################################################################################################
#
# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
import ctypes
import cutlass_bindings
from cutlass import (
DataType,
KernelScheduleType
)
from cutlass.backend.library import DataTypeSizeBytes
class GemmCoord_(ctypes.Structure):
_fields_ = [
("m", ctypes.c_int),
("n", ctypes.c_int),
("k", ctypes.c_int)
]
def __init__(self, gemm_coord) -> None:
for field_name, _ in self._fields_:
setattr(self, field_name, getattr(gemm_coord, field_name)())
class GemmCoordBatched_(ctypes.Structure):
"""
Wrapper around a GemmCoord that also contains batch count. This is used for encoding
batched GEMM inputs to CUTLASS 3 GEMMs.
"""
_fields_ = [
("m", ctypes.c_int),
("n", ctypes.c_int),
("k", ctypes.c_int),
("batch_count", ctypes.c_int)
]
def __init__(self, gemm_coord, batch_count) -> None:
for field_name, _ in self._fields_[:-1]:
setattr(self, field_name, getattr(gemm_coord, field_name)())
setattr(self, "batch_count", batch_count)
class MatrixCoord_(ctypes.Structure):
_fields_ = [
("row", ctypes.c_int),
("column", ctypes.c_int)
]
class dim3_(ctypes.Structure):
_fields_ = [
("x", ctypes.c_int),
("y", ctypes.c_int),
("z", ctypes.c_int)
]
class StrideBatched_(ctypes.Structure):
"""
    CUTLASS 3.0 strides for operands contain one static dimension and two variable dimensions. The
    variable dimensions represent the stride along the non-unit-stride dimension of the row/column-major
    layout and the batch stride. This structure encodes the two variable dimensions.
_fields_ = [
("major_stride", ctypes.c_int64),
("batch_stride", ctypes.c_int64)
]
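# Illustrative sketch (not part of the original module): for a row-major M x K operand,
# the non-unit (major) stride is the leading dimension K and the batch stride is M * K
# elements. These example values are assumptions for demonstration only.
def _example_stride_batched(m: int = 128, k: int = 64) -> StrideBatched_:
    return StrideBatched_(k, m * k)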
dtype2ctype = {
cutlass_bindings.float16: ctypes.c_uint16,
cutlass_bindings.float32: ctypes.c_float,
cutlass_bindings.float64: ctypes.c_double,
cutlass_bindings.int32: ctypes.c_int32,
}
class GenericMainloopArguments3x_(ctypes.Structure):
"""
Structure representing the superset of possible mainloop arguments.
This structure should not be passed to kernels directly, but, rather,
be used as an input to one of the more specific schedule arguments, which
will each select those arguments relevant to the particular schedule.
"""
_fields_ = [
("ptr_A", ctypes.c_void_p),
("stride_A", StrideBatched_),
("ptr_B", ctypes.c_void_p),
("stride_B", StrideBatched_),
("mma_promotion_interval", ctypes.c_int)
]
def get_mainloop_arguments_3x(
kernel_schedule: KernelScheduleType,
element_A,
element_B,
alignment_A: int,
alignment_B: int) -> ctypes.Structure:
"""
Returns the ctypes structure to be used for the 3.x kernel's mainloop parameters.
:param kernel_schedule: type of kernel schedule to be used in the mainloop
    :type kernel_schedule: cutlass.KernelScheduleType
:param element_A: data type of operand A
:param element_B: data type of operand B
:param alignment_A: alignment of operand A
:type alignment_A: int
:param alignment_B: alignment of operand B
:type alignment_B: int
:returns: ctypes structure to be used for the 3.x kernel's mainloop parameters
:rtype: ctypes.Structure
"""
class _MainloopArgumentsTma(ctypes.Structure):
_fields_ = [
("ptr_A", ctypes.c_void_p),
("stride_A", StrideBatched_),
("ptr_B", ctypes.c_void_p),
("stride_B", StrideBatched_),
("mma_promotion_interval", ctypes.c_int)
]
@staticmethod
def from_generic_mainloop_args(args: GenericMainloopArguments3x_):
return _MainloopArgumentsTma(
args.ptr_A, args.stride_A, args.ptr_B, args.stride_B,
args.mma_promotion_interval
)
class _MainloopArgumentsMultistage(ctypes.Structure):
_fields_ = [
("ptr_A", ctypes.c_void_p),
("stride_A", StrideBatched_),
("ptr_B", ctypes.c_void_p),
("stride_B", StrideBatched_),
]
@staticmethod
def from_generic_mainloop_args(args: GenericMainloopArguments3x_):
return _MainloopArgumentsMultistage(
args.ptr_A, args.stride_A, args.ptr_B, args.stride_B,
)
tma_alignment_bytes = 16
is_tma_aligned_A = ((DataTypeSizeBytes[element_A] * alignment_A) % tma_alignment_bytes) == 0
is_tma_aligned_B = ((DataTypeSizeBytes[element_B] * alignment_B) % tma_alignment_bytes) == 0
is_tma_aligned = is_tma_aligned_A and is_tma_aligned_B
if kernel_schedule == KernelScheduleType.Multistage:
return _MainloopArgumentsMultistage
elif kernel_schedule == KernelScheduleType.ScheduleAuto:
if is_tma_aligned:
return _MainloopArgumentsTma
else:
return _MainloopArgumentsMultistage
else:
if is_tma_aligned:
return _MainloopArgumentsTma
else:
raise Exception(f"Specified a kernel schedule using TMA ({kernel_schedule}), but "
"the provided data types and alignments are not properly aligned for "
"using TMA.")
def get_gemm_arguments_3x(mainloop_arguments, epilogue_functor):
_EpilogueOutputOpParams = epilogue_functor.epilogue_type
class _EpilogueArguments(ctypes.Structure):
_fields_ = [
("epilogue", _EpilogueOutputOpParams),
("ptr_C", ctypes.c_void_p),
("stride_C", StrideBatched_),
("ptr_D", ctypes.c_void_p),
("stride_D", StrideBatched_),
]
class _HardwareInfo(ctypes.Structure):
_fields_ = [
("device_id", ctypes.c_int),
("sm_count", ctypes.c_int)
]
class _GemmArguments(ctypes.Structure):
_fields_ = [
("mode", ctypes.c_int),
("problem_size", GemmCoordBatched_),
("mainloop", mainloop_arguments),
("epilogue", _EpilogueArguments),
("hw_info", _HardwareInfo),
("splits", ctypes.c_int)
]
return _GemmArguments, _EpilogueArguments, _EpilogueOutputOpParams, _HardwareInfo
def get_gemm_arguments(epilogue_functor):
_EpilogueOutputOpParams = epilogue_functor.epilogue_type
class _GemmArguments(ctypes.Structure):
_fields_ = [
# Arguments from UniversalArgumentsBase
("mode", ctypes.c_int),
("problem_size", GemmCoord_),
("batch_count", ctypes.c_int),
("batch_stride_D", ctypes.c_longlong),
# Remaining arguments
("epilogue", _EpilogueOutputOpParams),
("ptr_A", ctypes.c_void_p),
("ptr_B", ctypes.c_void_p),
("ptr_C", ctypes.c_void_p),
("ptr_D", ctypes.c_void_p),
("batch_stride_A", ctypes.c_longlong),
("batch_stride_B", ctypes.c_longlong),
("batch_stride_C", ctypes.c_longlong),
("stride_a", ctypes.c_longlong),
("stride_b", ctypes.c_longlong),
("stride_c", ctypes.c_longlong),
("stride_d", ctypes.c_longlong),
("lda", ctypes.c_longlong),
("ldb", ctypes.c_longlong),
("ldc", ctypes.c_longlong),
("ldd", ctypes.c_longlong),
("ptr_gather_A_indices", ctypes.c_void_p),
("ptr_gather_B_indices", ctypes.c_void_p),
("ptr_scatter_D_indices", ctypes.c_void_p)
]
return _GemmArguments, _EpilogueOutputOpParams
def get_gemm_arguments_streamk(epilogue_functor):
_EpilogueOutputOpParams = epilogue_functor.epilogue_type
class _GemmArguments(ctypes.Structure):
_fields_ = [
("mode", ctypes.c_int),
("problem_size", GemmCoord_),
("batch_count", ctypes.c_int),
("epilogue", _EpilogueOutputOpParams),
("ptr_A", ctypes.c_void_p),
("ptr_B", ctypes.c_void_p),
("ptr_C", ctypes.c_void_p),
("ptr_D", ctypes.c_void_p),
("batch_stride_A", ctypes.c_longlong),
("batch_stride_B", ctypes.c_longlong),
("batch_stride_C", ctypes.c_longlong),
("batch_stride_D", ctypes.c_longlong),
("stride_a", ctypes.c_longlong),
("stride_b", ctypes.c_longlong),
("stride_c", ctypes.c_longlong),
("stride_d", ctypes.c_longlong),
("lda", ctypes.c_longlong),
("ldb", ctypes.c_longlong),
("ldc", ctypes.c_longlong),
("ldd", ctypes.c_longlong),
("avail_sms", ctypes.c_int)
]
return _GemmArguments, _EpilogueOutputOpParams
###########################################################################################
# GEMM Grouped
###########################################################################################
def get_gemm_grouped_arguments(epilogue_functor):
_EpilogueOutputOpParams = epilogue_functor.epilogue_type
class _GEMMGroupedArguments(ctypes.Structure):
_fields_ = [
("problem_sizes", ctypes.c_void_p),
("problem_count", ctypes.c_int),
("threadblock_count", ctypes.c_int),
("output_op", _EpilogueOutputOpParams),
("ptr_A", ctypes.c_void_p),
("ptr_B", ctypes.c_void_p),
("ptr_C", ctypes.c_void_p),
("ptr_D", ctypes.c_void_p),
("lda", ctypes.c_void_p),
("ldb", ctypes.c_void_p),
("ldc", ctypes.c_void_p),
("ldd", ctypes.c_void_p),
("host_problem_sizes", ctypes.c_void_p)
]
return _GEMMGroupedArguments, _EpilogueOutputOpParams
############################################################################################
# Convolution2D
############################################################################################
class Conv2DProblemSize(ctypes.Structure):
_fields_ = [
("N", ctypes.c_int),
("H", ctypes.c_int),
("W", ctypes.c_int),
("C", ctypes.c_int),
("P", ctypes.c_int),
("Q", ctypes.c_int),
("K", ctypes.c_int),
("R", ctypes.c_int),
("S", ctypes.c_int),
("pad_h", ctypes.c_int),
("pad_w", ctypes.c_int),
("stride_h", ctypes.c_int),
("stride_w", ctypes.c_int),
("dilation_h", ctypes.c_int),
("dilation_w", ctypes.c_int),
("mode", ctypes.c_int), # kCrossCorrelation: 0, kConvolution: 1
("split_k_slices", ctypes.c_int),
("groups", ctypes.c_int)
]
def __init__(self, problem_size) -> None:
for field_name, _ in self._fields_:
setattr(self, field_name, getattr(problem_size, field_name))
class Layout4D(ctypes.Structure):
_fields_ = [("stride", ctypes.c_int * 3)]
def __init__(self, tensor_ref):
stride = tensor_ref.stride()
setattr(self, "stride", (stride.at(0), stride.at(1), stride.at(2)))
class TensorRef_(ctypes.Structure):
_fields_ = [
("ptr", ctypes.c_void_p),
("layout", Layout4D)
]
def __init__(self, tensor_ref):
setattr(self, "ptr", tensor_ref.data())
setattr(self, "layout", Layout4D(tensor_ref.layout()))
class TensorRef2D_(ctypes.Structure):
_fields_ = [
("ptr", ctypes.c_void_p),
("stride", ctypes.c_int)
]
def get_conv2d_arguments(epilogue_functor):
_EpilogueOutputOpParams = epilogue_functor.epilogue_type
class _Conv2dArguments(ctypes.Structure):
_fields_ = [
("problem_size", Conv2DProblemSize),
("ref_A", TensorRef_),
("ref_B", TensorRef_),
("ref_C", TensorRef_),
("ref_D", TensorRef_),
("output_op", _EpilogueOutputOpParams),
("split_k_mode", ctypes.c_int)
]
return _Conv2dArguments, _EpilogueOutputOpParams
############################################################################################
# Reduction
############################################################################################
def get_reduction_params(epilogue_functor):
_EpilogueOutputParams = epilogue_functor.epilogue_type
class _ReductionParams(ctypes.Structure):
_fields_ = [
("problem_size", MatrixCoord_),
("partitions", ctypes.c_int),
("partition_stride", ctypes.c_longlong),
("workspace", TensorRef2D_),
("destination", TensorRef2D_),
("source", TensorRef2D_),
("output_op", _EpilogueOutputParams),
]
return _ReductionParams, _EpilogueOutputParams
| cutlass-main | python/cutlass/backend/c_types.py |
#################################################################################################
#
# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
import numpy as np
import rmm
class PoolMemoryManager:
def __init__(self, init_pool_size: int, max_pool_size: int) -> None:
self.pool = rmm.mr.PoolMemoryResource(
rmm.mr.CudaMemoryResource(),
initial_pool_size=init_pool_size,
maximum_pool_size=max_pool_size
)
self.mr = rmm.mr.TrackingResourceAdaptor(self.pool)
rmm.mr.set_current_device_resource(self.mr)
def get_allocated_size(self):
return self.mr.get_allocated_bytes()
def pool_size(self):
return self.pool.pool_size()
def todevice(host_data, dtype=np.float32):
"""
    Copy host_data (a Python list or NumPy array) to device memory
"""
if isinstance(host_data, list):
return rmm.DeviceBuffer.to_device(np.array(host_data, dtype=dtype).tobytes())
elif isinstance(host_data, np.ndarray):
return rmm.DeviceBuffer.to_device(host_data.tobytes())
def device_mem_alloc(size):
return rmm.DeviceBuffer(size=size)
def align_size(size, alignment=256):
return ((size + alignment - 1) // alignment) * alignment
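# Illustrative sketch (not part of the original module): align_size rounds a byte count
# up to the next multiple of the alignment, e.g. 1000 -> 1024 with the default 256-byte
# alignment. No GPU is required to evaluate it.
def _example_align_size() -> int:
    assert align_size(1000) == 1024
    assert align_size(256) == 256
    return align_size(1, alignment=64)  # -> 64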
def get_allocated_size():
device_resource = rmm.mr.get_current_device_resource()
return device_resource.get_allocated_bytes()
| cutlass-main | python/cutlass/backend/memory_manager.py |
#################################################################################################
#
# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
import ctypes
import json
import os
import sqlite3
import tempfile
from cuda import cuda, nvrtc
import cutlass_bindings
from cutlass import CACHE_FILE, CUDA_INSTALL_PATH, CUTLASS_PATH, logger
from cutlass.backend.gemm_operation import GemmOperationUniversal
from cutlass.backend.library import ApiVersion
from cutlass.backend.utils.device import device_cc
from cutlass.backend.utils.software import SubstituteTemplate
import subprocess
IncludeTemplate = r"""#include "${include}"
"""
def compile_with_nvcc(cmd, source, error_file):
succeed = True
try:
subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
except subprocess.CalledProcessError as e:
error_message = e.output.decode()
with open(error_file, "w") as error_out:
error_log = "Compilation error for the following kernel: \n"
error_log += source
error_log += "\nError Message:\n"
error_log += error_message
error_out.write(error_log)
succeed = False
if not succeed:
# Print the error log to stdout if log level is set to warning or higher
# verbosity. Otherwise, simply point to the error log file.
logger.warning(error_log)
raise Exception(f"Invalid Kernel. See '{error_file}' for details.")
class CompilationOptions:
"""
Compilation options.
"""
def __init__(self, flags, arch, include_paths=[]):
self.includes = []
self.include_paths = include_paths
self.flags = flags
self.arch = arch
def get_str(self):
options = ""
for flag in self.flags:
options += " " + flag
for incl in self.include_paths:
options += " --include-path=%s" % incl
arch_flag = " -arch=sm_%d" % self.arch
if self.arch == 90:
arch_flag += "a"
options += arch_flag
return options
def get(self):
options = []
for flag in self.flags:
options.append(bytes(str.encode(flag)))
for incl in self.include_paths:
options.append(bytes(str.encode("--include-path=%s" % incl)))
arch_flag = " -arch=sm_%d" % self.arch
if self.arch == 90:
arch_flag += "a"
options.append(bytes(str.encode(arch_flag)))
return options
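# Illustrative sketch (not part of the original module): CompilationOptions assembles an
# nvcc/nvrtc command-line fragment from flags, include paths, and the target compute
# capability (with the "a" suffix appended for SM90). The flag and include path below
# are arbitrary example values.
def _example_compilation_options() -> str:
    opts = CompilationOptions(["-std=c++17"], arch=90, include_paths=["/opt/example/include"])
    # Returns " -std=c++17 --include-path=/opt/example/include -arch=sm_90a"
    return opts.get_str()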
def convertToBinaryData(filename):
with open(filename, "rb") as file:
blobData = file.read()
return blobData
def CDLLBin(host_binary):
tempfile.tempdir = "./"
temp_so = tempfile.NamedTemporaryFile(prefix="host_func", suffix=".so", delete=True)
with open(temp_so.name, "wb") as file:
file.write(host_binary)
host_lib = ctypes.CDLL(temp_so.name)
return host_lib
class ArtifactManager:
"""
    Artifact manager that caches compiled kernels and host functions in a SQLite database
"""
def __init__(self) -> None:
connection = sqlite3.connect(CACHE_FILE)
cursor = connection.cursor()
# Create the table if it does not already exist
sqlite_create_table_query = """
CREATE TABLE IF NOT EXISTS compiled_operations(op_key TEXT NOT NULL UNIQUE,
cubin BLOB NOT NULL,
hostbin BLOB NOT NULL,
op_name TEXT NOT NULL,
op_attrs TEXT NOT NULL)
"""
cursor.execute(sqlite_create_table_query)
connection.commit()
cursor.close()
self._nvrtc_compile_options = ["-std=c++17", "-default-device"]
self._nvcc_compile_options = [
"-std=c++17",
"--expt-relaxed-constexpr",
"-Xcudafe --diag_suppress=esa_on_defaulted_function_ignored",
]
self.nvcc()
self.compiled_cache_device = cutlass_bindings.CompileCache()
self.compiled_cache_host = cutlass_bindings.CompileCache()
def nvrtc(self):
self.backend = "nvrtc"
self.default_compile_options = self._nvrtc_compile_options
def nvcc(self):
self.backend = "nvcc"
self.default_compile_options = self._nvcc_compile_options
def insert_operation(self, op_key, cubin, hostfile, op_name, op_attrs):
connection = sqlite3.connect(CACHE_FILE)
cursor = connection.cursor()
sqlite_insert_blob_query = """ INSERT OR IGNORE INTO compiled_operations (op_key, cubin, hostbin, op_name, op_attrs) VALUES (?, ?, ?, ?, ?)"""
hostbin = convertToBinaryData(hostfile)
data_tuple = (op_key, cubin, hostbin, op_name, json.dumps(op_attrs))
cursor.execute(sqlite_insert_blob_query, data_tuple)
connection.commit()
cursor.close()
def load_operation(self, op_key, extra_funcs):
connection = sqlite3.connect(CACHE_FILE)
cursor = connection.cursor()
sqlite_fetch_blob_query = """SELECT * from compiled_operations where op_key = ?"""
cursor.execute(sqlite_fetch_blob_query, (op_key,))
record = cursor.fetchall()
if len(record) == 0:
return False
for row in record:
key, cubin_image, host_binary, operation_name, op_attr = row
op_attr = json.loads(op_attr)
err, module = cuda.cuModuleLoadData(cubin_image)
if err != cuda.CUresult.CUDA_SUCCESS:
raise RuntimeError("Cuda Error: {}".format(err))
err, kernel = cuda.cuModuleGetFunction(module, bytes(str.encode(operation_name)))
self.compiled_cache_device.insert(key, kernel)
compiled_host_fns = {}
host_lib = CDLLBin(host_binary)
func_name = operation_name + "_get_params"
func = getattr(host_lib, func_name)
func.restype = ctypes.POINTER(ctypes.c_char * op_attr[0])
compiled_host_fns["get_args"] = func
func_name = operation_name + "_shared_memory_size"
func = getattr(host_lib, func_name)
compiled_host_fns["shared_memory_capacity"] = func()
for attr in op_attr:
if isinstance(attr, str):
func_name = operation_name + "_" + attr
func = getattr(host_lib, func_name)
# Set the return type of the function
if attr in extra_funcs and extra_funcs[attr] != None:
func.restype = extra_funcs[attr]
compiled_host_fns[attr] = func
self.compiled_cache_host.insert(key, compiled_host_fns)
return True
def emit_compile_(self, operation_list, compilation_options, host_compilation_options):
"""
        Compile a list of kernels and store them in the database
"""
source_buffer_device = ""
source_buffer_host = ""
# 1. include
includes = []
for operation in operation_list:
for incl in operation.emitter.includes:
if incl not in includes:
includes.append(incl)
includes_host = ["builtin_types.h", "device_launch_parameters.h", "stddef.h"] + includes
for incl in includes:
source_buffer_device += SubstituteTemplate(
IncludeTemplate,
{"include": incl},
)
for incl in includes_host:
if "/device/" not in incl:
source_buffer_host += SubstituteTemplate(
IncludeTemplate,
{"include": incl},
)
# 2. Operations
for operation in operation_list:
source_buffer_device += operation.emit()
source_buffer_host += operation.emit()
values = {
"operation_name": operation.name(),
"operation_suffix": operation.emitter.operation_suffix,
}
source_buffer_device += SubstituteTemplate(
operation.KernelTemplate,
values,
)
source_buffer_host += SubstituteTemplate(operation.HostTemplate, values)
if self.backend == "nvrtc":
# 3. compile
err, program = nvrtc.nvrtcCreateProgram(
str.encode(source_buffer_device),
bytes(str.encode("module.cu")),
0, [], [])
if err != nvrtc.nvrtcResult.NVRTC_SUCCESS:
raise RuntimeError("NVRTC Error: {}".format(err))
# Compile program
options = compilation_options.get()
err, = nvrtc.nvrtcCompileProgram(program, len(options), options)
if err != nvrtc.nvrtcResult.NVRTC_SUCCESS:
error_string = "NVRTC Error: {}\n".format(err)
# Get log from compilation
err, logSize = nvrtc.nvrtcGetProgramLogSize(program)
if err != nvrtc.nvrtcResult.NVRTC_SUCCESS:
raise RuntimeError("NVRTC Error: {}".format(err))
log = b" " * logSize
err, = nvrtc.nvrtcGetProgramLog(program, log)
if err != nvrtc.nvrtcResult.NVRTC_SUCCESS:
raise RuntimeError("NVRTC Error: {}".format(err))
raise RuntimeError(error_string + log.decode() + source_buffer_device)
# Get data from compilation
err, dataSize = nvrtc.nvrtcGetCUBINSize(program)
if err != nvrtc.nvrtcResult.NVRTC_SUCCESS:
raise RuntimeError("NVRTC Error: {}".format(err))
cubin_image = b" " * dataSize
(err,) = nvrtc.nvrtcGetCUBIN(program, cubin_image)
if err != nvrtc.nvrtcResult.NVRTC_SUCCESS:
raise RuntimeError("NVRTC Error: {}".format(err))
else: # with nvcc backend
# emit code
tempfile.tempdir = "./"
temp_cu = tempfile.NamedTemporaryFile(
prefix="kernel", suffix=".cu", delete=True)
temp_cubin = tempfile.NamedTemporaryFile(
prefix="kernel", suffix=".cubin", delete=True)
with open(temp_cu.name, "w") as file:
file.write(source_buffer_device)
# compile with nvcc
cmd_template = "${cuda_install_path}/bin/nvcc ${options} -cubin ${srcfile} -o ${tarfile}"
values = {
"cuda_install_path": CUDA_INSTALL_PATH,
"options": compilation_options.get_str(),
"srcfile": temp_cu.name,
"tarfile": temp_cubin.name,
}
cmd = SubstituteTemplate(cmd_template, values)
compile_with_nvcc(cmd, source_buffer_device, "./cutlass_python_compilation_device_error.txt")
# load the cubin image
with open(temp_cubin.name, "rb") as file:
cubin_image = file.read()
# Set up the host-side library code
cmd_template = (
"echo '%s'|${cuda_install_path}/bin/nvcc -x cu -Xcompiler=\"-fpermissive -w -fPIC\" ${options}"
% source_buffer_host
)
cmd = SubstituteTemplate(
cmd_template,
{
"cuda_install_path": CUDA_INSTALL_PATH,
"options": host_compilation_options.get_str(),
},
)
tempfile.tempdir = "./"
temp = tempfile.NamedTemporaryFile(
prefix="host_func", suffix=".so", delete=True)
cmd += " - -shared -o %s -lcudart -lcuda" % temp.name
compile_with_nvcc(cmd, source_buffer_host, error_file="./cutlass_python_compilation_host_error.txt")
host_lib = ctypes.CDLL(temp.name)
return cubin_image, host_lib, temp
def add_module(self, operations, compile_options=None, bypass_cache=False):
"""
Insert a new compiled device module
"""
include_paths = [
CUDA_INSTALL_PATH + "/include",
CUTLASS_PATH + "/include",
CUTLASS_PATH + "/tools/util/include",
CUTLASS_PATH + "/python/cutlass/cpp/include",
]
if device_cc() is not None:
arch = device_cc()
else:
# Find the maximum arch tag among the provided operations and compile for that target.
# Since we are compiling to .cubin files, only one architecture may be specified.
arch = max([op.arch for op in operations])
host_compile_options = CompilationOptions(
self._nvcc_compile_options, arch, include_paths)
if compile_options is None:
compile_options = CompilationOptions(
self.default_compile_options, arch, include_paths)
# save the cubin
operation_key = []
operation_list = []
for operation in operations:
# step 1: get kernel string as key
key = operation.rt_module.emit() + operation.procedural_name() + self.backend
            # step 2: check if the operation is in the cache
compiled_kernel = self.compiled_cache_device.at(key)
if compiled_kernel is None and not bypass_cache:
                hit = self.load_operation(key, getattr(operation.rt_module, "extra_funcs", {}))
if hit:
compiled_kernel = self.compiled_cache_device.at(key)
assert compiled_kernel is not None
if compiled_kernel is not None:
operation.rt_module.kernel = compiled_kernel
compiled_host_fns = self.compiled_cache_host.at(key)
assert compiled_host_fns is not None
                for fn_name in compiled_host_fns.keys():
                    setattr(operation.rt_module, fn_name, compiled_host_fns[fn_name])
operation.rt_module.initialize()
else:
operation_list.append(operation.rt_module)
operation_key.append(key)
if len(operation_list) > 0:
cubin_image, host_lib, host_file = self.emit_compile_(
operation_list, compile_options, host_compile_options)
err, module = cuda.cuModuleLoadData(cubin_image)
if err != cuda.CUresult.CUDA_SUCCESS:
raise RuntimeError("Cuda Error: {}".format(err))
operation_name = []
operation_attr = []
for operation, key in zip(operation_list, operation_key):
# get device kernels
err, operation.kernel = cuda.cuModuleGetFunction(
module,
bytes(str.encode(operation.name()))
)
operation_name.append(operation.name())
self.compiled_cache_device.insert(key, operation.kernel)
# get host functions
compiled_host_fns = {}
op_attr = []
# get param size
func_name = operation.name() + "_get_param_size"
func = getattr(host_lib, func_name)
param_size = func()
func_name = operation.name() + "_get_params"
func = getattr(host_lib, func_name)
func.argtype = operation.argtype
func.restype = ctypes.POINTER(ctypes.c_char * param_size)
setattr(operation, "get_args", func)
compiled_host_fns["get_args"] = func
# set shared memory size
func_name = operation.name() + "_shared_memory_size"
func = getattr(host_lib, func_name)
setattr(operation, "shared_memory_capacity", func())
compiled_host_fns["shared_memory_capacity"] = func()
# set the maximum dynamic shared size
operation.initialize()
# get extra functions
op_attr.append(param_size)
if hasattr(operation, "extra_funcs"):
for suffix, ret_type in operation.extra_funcs.items():
func_name = operation.name() + "_" + suffix
func = getattr(host_lib, func_name)
if ret_type is not None:
func.restype = ret_type
setattr(operation, suffix, func)
compiled_host_fns[suffix] = func
op_attr.append(suffix)
operation_attr.append(op_attr)
self.compiled_cache_host.insert(key, compiled_host_fns)
for (key, operation_name, operation_attr,) in zip(operation_key, operation_name, operation_attr):
self.insert_operation(
key, cubin_image, host_file.name, operation_name, operation_attr)
| cutlass-main | python/cutlass/backend/compiler.py |
################################################################################
#
# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
################################################################################
import copy
import ctypes
import enum
from cuda import cuda, cudart
import cutlass_bindings
import numpy as np
import rmm
from cutlass import (
EpilogueScheduleSuffixes,
EpilogueScheduleTag,
EpilogueScheduleType,
KernelScheduleSuffixes,
KernelScheduleTag,
KernelScheduleType,
TileSchedulerSuffixes,
TileSchedulerTag,
TileSchedulerType
)
from cutlass.backend.arguments import ArgumentBase
from cutlass.backend.c_types import (
GemmCoord_,
GemmCoordBatched_,
GenericMainloopArguments3x_,
StrideBatched_,
dim3_,
get_gemm_arguments,
get_gemm_arguments_3x,
get_gemm_arguments_streamk,
get_gemm_grouped_arguments,
get_mainloop_arguments_3x
)
from cutlass.backend.library import (
ApiVersion,
EmissionType,
ComplexTransformTag,
DataTypeNames,
DataTypeSize,
DataTypeTag,
GemmKind,
GemmKindNames,
LayoutTag,
MathOperation,
MathOperationTag,
OpcodeClassNames,
OpcodeClassTag,
OperationKind,
SchedulerMode,
SchedulerModeTag,
ShortComplexLayoutNames,
ShortDataTypeNames,
ShortLayoutTypeNames,
TensorDescription,
TileDescription,
api_version,
enum_auto,
get_complex_from_real,
)
from cutlass.backend.memory_manager import device_mem_alloc, todevice
from cutlass.backend.operation import ExecutableOperation, LaunchConfiguration
from cutlass.backend.tensor_ref import TensorRef
from cutlass.backend.type_hint import GemmOperation, Tensor
from cutlass.backend.utils.software import (
CheckPackages,
SubstituteTemplate,
device_sm_count,
)
if CheckPackages().check_torch():
import torch
################################################################################
#
# Data structure modeling a GEMM operation
#
################################################################################
def transpose_layout(layout: cutlass_bindings.layout):
if layout == cutlass_bindings.ColumnMajor:
return cutlass_bindings.RowMajor
elif layout == cutlass_bindings.RowMajor:
return cutlass_bindings.ColumnMajor
else:
raise ValueError("unsupported Layout {}".format(layout))
class GemmArguments2x(ArgumentBase):
"""
Argument wrapper for GEMM in CUTLASS 2. It encodes problem information and
    user-provided tensors into the kernel's arguments
:param operation: the GEMM operation to take the argument
:type operation: :class:`cutlass.backend.GemmOperationUniversal` |
:class:`cutlass.backend.GemmOperationGrouped`
:param problem_size: GEMM problem size gemm(M, N, K)
    :type problem_size: :class:`cutlass_bindings.gemm.GemmCoord`
:param A: tensor A
:type A: cuda.CUdeviceptr | numpy.ndarray | torch.Tensor | cupy.ndarray
:param B: tensor B
:type B: cuda.CUdeviceptr | numpy.ndarray | torch.Tensor | cupy.ndarray
:param C: tensor C
:type C: cuda.CUdeviceptr | numpy.ndarray | torch.Tensor | cupy.ndarray
:param D: tensor D
:type D: cuda.CUdeviceptr | numpy.ndarray | torch.Tensor | cupy.ndarray
:param gemm_mode: GEMM mode
:type gemm_mode: :class:`cutlass_bindings.gemm.Mode`
:param output_op: output operator, optional
:type output_op: :class:`cutlass.backend.LinearCombinationFunctorArguments`
"""
def __init__(
self, operation: "GemmOperation", problem_size: "cutlass_bindings.gemm.GemmCoord",
A: "Tensor", B: "Tensor", C: "Tensor", D: "Tensor",
gemm_mode: "cutlass_bindings.gemm.Mode" = cutlass_bindings.gemm.Mode.Gemm, **kwargs):
self.operation = operation
self.layout_A: cutlass_bindings.layout = operation.A.layout
self.layout_B: cutlass_bindings.layout = operation.B.layout
self.layout_C: cutlass_bindings.layout = operation.C.layout
self.element_A = operation.A.element
self.element_B = operation.B.element
self.element_C = operation.C.element
if (operation.C.layout in
[cutlass_bindings.RowMajorInterleaved32, cutlass_bindings.ColumnMajorInterleaved32]):
# reorder tensor B for interleaved layout output
B = self.reorder_tensor_B(B, problem_size)
super().__init__(A, B, C, D, **kwargs)
if operation.switched:
self.problem_size = cutlass_bindings.gemm.GemmCoord(
problem_size.n(), problem_size.m(), problem_size.k())
self.ptr_A, self.ptr_B = self.ptr_B, self.ptr_A
else:
self.problem_size = cutlass_bindings.gemm.GemmCoord(
problem_size.m(), problem_size.n(), problem_size.k())
        # If the number of elements in C equals problem_size.n(),
        # C is treated as the bias
if hasattr(self, "tensor_c_numel"):
if self.tensor_c_numel == self.problem_size.n() and self.problem_size.m() != 1:
self.bias = True
# get the leading dimension
self.lda = operation.A.layout.packed(self.problem_size.mk()).stride()
self.ldb = operation.B.layout.packed(self.problem_size.kn()).stride()
self.ldc = operation.C.layout.packed(self.problem_size.mn()).stride()
self.ldd = self.ldc
# stride 0 trick
if self.bias:
self.ldc = 0
if "output_op" in kwargs.keys() and gemm_mode != cutlass_bindings.gemm.Mode.GemmSplitKParallel:
self.output_op = kwargs["output_op"]
else:
self.output_op = self.operation.epilogue_type(1.0, 0.0)
# get number of slices on k dimension
self.gemm_mode = gemm_mode
if gemm_mode in [cutlass_bindings.gemm.Mode.Gemm, cutlass_bindings.gemm.Mode.GemmSplitKParallel]:
if "split_k_slices" in kwargs.keys():
self.batch_count = kwargs["split_k_slices"]
else:
self.batch_count = 1
self.split_k_slices = self.batch_count
if gemm_mode in [cutlass_bindings.gemm.Mode.Batched, cutlass_bindings.gemm.Mode.Array]:
if "batch" in kwargs.keys():
self.batch_count = kwargs["batch"]
else:
self.batch_count = 1
if "batch_strides" in kwargs:
self.batched_stride_A = kwargs["batch_strides"]["A"]
self.batched_stride_B = kwargs["batch_strides"]["B"]
self.batched_stride_C = kwargs["batch_strides"]["C"]
self.batched_stride_D = kwargs["batch_strides"]["D"]
else:
self.batched_stride_A = self.problem_size.m() * self.problem_size.k()
self.batched_stride_B = self.problem_size.n() * self.problem_size.k()
self.batched_stride_C = self.problem_size.m() * self.problem_size.n()
self.batched_stride_D = self.problem_size.m() * self.problem_size.n()
if self.bias:
self.batched_stride_C = self.problem_size.n()
# support GEMM Mode Array
if gemm_mode == cutlass_bindings.gemm.Mode.Array:
self.ptr_A_array = []
self.ptr_B_array = []
self.ptr_C_array = []
self.ptr_D_array = []
ptr_A_addr = int(self.ptr_A)
ptr_B_addr = int(self.ptr_B)
ptr_C_addr = int(self.ptr_C)
ptr_D_addr = int(self.ptr_D)
stride_A = self.batched_stride_A * DataTypeSize[self.element_A] // 8
stride_B = self.batched_stride_B * DataTypeSize[self.element_B] // 8
stride_C = self.batched_stride_C * DataTypeSize[self.element_C] // 8
stride_D = self.batched_stride_D * DataTypeSize[self.element_C] // 8
for _ in range(self.batch_count):
self.ptr_A_array.append(ptr_A_addr)
self.ptr_B_array.append(ptr_B_addr)
self.ptr_C_array.append(ptr_C_addr)
self.ptr_D_array.append(ptr_D_addr)
ptr_A_addr += stride_A
ptr_B_addr += stride_B
ptr_C_addr += stride_C
ptr_D_addr += stride_D
self.ptr_A_array_buffer = todevice(self.ptr_A_array, dtype=np.int64)
self.ptr_B_array_buffer = todevice(self.ptr_B_array, dtype=np.int64)
self.ptr_C_array_buffer = todevice(self.ptr_C_array, dtype=np.int64)
self.ptr_D_array_buffer = todevice(self.ptr_D_array, dtype=np.int64)
if isinstance(self.operation, GemmOperationUniversal):
self.initialize()
def reorder_tensor_B(self, tensor_B: "np.ndarray",
problem_size: "cutlass_bindings.gemm.GemmCoord"):
"""
Reorder tensor_B for interleaved layout
:param tensor_B: input tensor B
:type tensor_B: numpy.ndarray
:param problem_size: GEMM problem size
:type problem_size: :class:`cutlass_bindings.gemm.GemmCoord`
:return: reordered tensor B
:rtype: numpy.ndarray
"""
reordered_tensor_B = np.empty_like(tensor_B)
tensor_ref_B = self.get_tensor_ref(
tensor_B, self.element_B, self.layout_B, problem_size, "b"
)
reordered_tensor_ref_B = self.get_tensor_ref(
reordered_tensor_B, self.element_B, self.layout_B, problem_size, "b"
)
cutlass_bindings.gemm.host.reorder_column(
tensor_ref_B, reordered_tensor_ref_B, problem_size)
return reordered_tensor_B
def get_tensor_ref(
self, tensor, dtype, tensor_layout, problem_size, operand):
if operand == "a":
tensor_coord = problem_size.mk()
elif operand == "b":
tensor_coord = problem_size.kn()
elif operand in ["c", "d"]:
tensor_coord = problem_size.mn()
else:
raise ValueError("unknown operand: " + operand)
layout = tensor_layout.packed(tensor_coord)
return TensorRef(tensor, dtype, layout).tensor_ref
def get_arguments(self):
problem_size_ = GemmCoord_(self.problem_size)
grid_tiled_shape_ = GemmCoord_(
cutlass_bindings.gemm.GemmCoord(
self.grid_tiled_shape.x,
self.grid_tiled_shape.y,
self.grid_tiled_shape.z
)
)
if self.gemm_mode == cutlass_bindings.gemm.Mode.Array:
arguments = self.operation.argument_type(
# Arguments from UniversalArgumentsBase
self.gemm_mode,
problem_size_,
self.batch_count,
0,
# Remaining arguments
self.output_op,
int(self.ptr_A_array_buffer.ptr),
int(self.ptr_B_array_buffer.ptr),
int(self.ptr_C_array_buffer.ptr),
int(self.ptr_D_array_buffer.ptr),
0, 0, 0,
self.lda, self.ldb, self.ldc, self.ldd,
self.lda, self.ldb, self.ldc, self.ldd,
0, 0, 0
)
else:
arguments = self.operation.argument_type(
# Arguments from UniversalArgumentsBase
self.gemm_mode, problem_size_, self.batch_count, self.batched_stride_D,
# Remaining arguments
self.output_op,
int(self.ptr_A),
int(self.ptr_B),
int(self.ptr_C),
int(self.ptr_D),
self.batched_stride_A,
self.batched_stride_B,
self.batched_stride_C,
self.lda, self.ldb, self.ldc, self.ldd,
self.lda, self.ldb, self.ldc, self.ldd,
0, 0, 0
)
self.arguments = arguments, grid_tiled_shape_, self.gemm_k_size
def initialize(self):
# get launch configuration
launch_config = self.operation.rt_module.plan(self)
        # get the host and device workspace
device_workspace_size = self.operation.rt_module.get_device_workspace_size(self)
if device_workspace_size > 0:
self.workspace_buffer = device_mem_alloc(device_workspace_size)
workspace_ptr = self.workspace_buffer.ptr
err, = cuda.cuMemsetD32(
workspace_ptr, 0, device_workspace_size // 4)
else:
workspace_ptr = None
device_workspace = 0
if workspace_ptr is not None and self.gemm_mode == cutlass_bindings.gemm.Mode.GemmSplitKParallel:
            # in GEMM split-K parallel, the D pointer is redirected
            # to the workspace
self.ptr_D = cuda.CUdeviceptr(workspace_ptr)
elif workspace_ptr is not None and self.gemm_mode == cutlass_bindings.gemm.Mode.Gemm:
# in GEMM split-K serial
device_workspace = workspace_ptr
self.get_arguments()
arguments, grid_tiled_shape, gemm_k_size = self.arguments
res_arg = self.operation.rt_module.get_args(
ctypes.byref(arguments), ctypes.c_void_p(int(device_workspace)))
host_workspace = bytearray(res_arg.contents)
device_workspace = None
self.host_workspace = host_workspace
self.device_workspace = device_workspace
self.launch_config = launch_config
class GemmArguments2xStreamK(GemmArguments2x):
"""
Argument wrapper for stream-K GEMMs in CUTLASS 2. It encodes problem information and
    user-provided tensors into the kernel's arguments
:param operation: the GEMM operation to take the argument
:type operation: :class:`cutlass.backend.GemmOperationUniversal` |
:class:`cutlass.backend.GemmOperationGrouped`
:param problem_size: GEMM problem size gemm(M, N, K)
    :type problem_size: :class:`cutlass_bindings.gemm.GemmCoord`
:param A: tensor A
:type A: cuda.CUdeviceptr | numpy.ndarray | torch.Tensor | cupy.ndarray
:param B: tensor B
:type B: cuda.CUdeviceptr | numpy.ndarray | torch.Tensor | cupy.ndarray
:param C: tensor C
:type C: cuda.CUdeviceptr | numpy.ndarray | torch.Tensor | cupy.ndarray
:param D: tensor D
:type D: cuda.CUdeviceptr | numpy.ndarray | torch.Tensor | cupy.ndarray
:param gemm_mode: GEMM mode
:type gemm_mode: :class:`cutlass_bindings.gemm.Mode`
:param output_op: output operator, optional
:type output_op: :class:`cutlass.backend.LinearCombinationFunctorArguments`
"""
def __init__(
self, operation: "GemmOperation", problem_size: "cutlass_bindings.gemm.GemmCoord",
A: "Tensor", B: "Tensor", C: "Tensor", D: "Tensor",
gemm_mode: "cutlass_bindings.gemm.Mode" = cutlass_bindings.gemm.Mode.Gemm, **kwargs):
if gemm_mode not in [cutlass_bindings.gemm.Mode.Gemm, cutlass_bindings.gemm.Mode.Batched]:
raise Exception("Unsupporged GEMM mode {}.".format(gemm_mode))
super().__init__(operation, problem_size, A, B, C, D, gemm_mode, **kwargs)
def get_arguments(self):
batch_stride_A = self.problem_size.m() * self.problem_size.k()
batch_stride_B = self.problem_size.k() * self.problem_size.n()
batch_stride_C = self.problem_size.m() * self.problem_size.n()
batch_stride_D = self.problem_size.m() * self.problem_size.n()
arguments = self.operation.argument_type(
self.gemm_mode,
GemmCoord_(self.problem_size),
self.batch_count,
self.output_op,
int(self.ptr_A),
int(self.ptr_B),
int(self.ptr_C),
int(self.ptr_D),
batch_stride_A,
batch_stride_B,
batch_stride_C,
batch_stride_D,
self.lda, self.ldb, self.ldc, self.ldd, # strides
self.lda, self.ldb, self.ldc, self.ldd,
-1, # avail_sms
)
return arguments
def initialize(self):
# get the host and device workspace
device_workspace_size = self.operation.rt_module.get_device_workspace_size(self)
        # Override the computed size with a fixed 10 MB workspace allocation
        device_workspace_size = 10 << 20
if device_workspace_size > 0:
self.workspace_buffer = device_mem_alloc(device_workspace_size)
workspace_ptr = self.workspace_buffer.ptr
err, = cuda.cuMemsetD32(
workspace_ptr, 0, device_workspace_size // 4)
else:
workspace_ptr = None
device_workspace = 0
if workspace_ptr is not None and self.gemm_mode == cutlass_bindings.gemm.Mode.GemmSplitKParallel:
            # in GEMM split-K parallel, the D pointer is redirected
            # to the workspace
self.ptr_D = cuda.CUdeviceptr(workspace_ptr)
elif workspace_ptr is not None and self.gemm_mode == cutlass_bindings.gemm.Mode.Gemm:
# in GEMM split-K serial
device_workspace = workspace_ptr
arguments = self.get_arguments()
res_arg = self.operation.rt_module.get_args(
ctypes.byref(arguments),
ctypes.c_void_p(int(device_workspace)),
device_sm_count(),
self.operation.rt_module.occupancy
)
host_workspace = bytearray(res_arg.contents)
grid = self.operation.rt_module.get_grid_shape(
ctypes.byref(arguments),
device_sm_count(),
self.operation.rt_module.occupancy
)
device_workspace = None
self.host_workspace = host_workspace
self.device_workspace = device_workspace
self.launch_config = LaunchConfiguration(
[grid.m, grid.n, grid.k],
[self.operation.rt_module.threads, 1, 1],
self.operation.rt_module.shared_memory_capacity
)
class GemmArguments3x(GemmArguments2x):
"""
Argument wrapper for GEMM in CUTLASS 3. It encodes problem information and
    user-provided tensors into the kernel's arguments
:param operation: the GEMM operation to take the argument
:type operation: :class:`cutlass.backend.GemmOperationUniversal` |
:class:`cutlass.backend.GemmOperationGrouped`
:param problem_size: GEMM problem size gemm(M, N, K)
    :type problem_size: :class:`cutlass_bindings.gemm.GemmCoord`
:param A: tensor A
:type A: cuda.CUdeviceptr | numpy.ndarray | torch.Tensor | cupy.ndarray
:param B: tensor B
:type B: cuda.CUdeviceptr | numpy.ndarray | torch.Tensor | cupy.ndarray
:param C: tensor C
:type C: cuda.CUdeviceptr | numpy.ndarray | torch.Tensor | cupy.ndarray
:param D: tensor D
:type D: cuda.CUdeviceptr | numpy.ndarray | torch.Tensor | cupy.ndarray
:param gemm_mode: GEMM mode
:type gemm_mode: :class:`cutlass_bindings.gemm.Mode`
:param output_op: output operator, optional
:type output_op: :class:`cutlass.backend.LinearCombinationFunctorArguments`
"""
def __init__(
self, operation: "GemmOperation", problem_size: "cutlass_bindings.gemm.GemmCoord",
A: "Tensor", B: "Tensor", C: "Tensor", D: "Tensor",
gemm_mode: "cutlass_bindings.gemm.Mode" = cutlass_bindings.gemm.Mode.Gemm, **kwargs):
if gemm_mode not in [cutlass_bindings.gemm.Mode.Gemm, cutlass_bindings.gemm.Mode.Batched]:
raise Exception("Unsupporged GEMM mode {}.".format(gemm_mode))
super().__init__(operation, problem_size, A, B, C, D, gemm_mode, **kwargs)
def get_arguments(self):
problem_size_ = GemmCoordBatched_(self.problem_size, self.batch_count)
if self.batch_count > 1:
bsA = self.batched_stride_A
bsB = self.batched_stride_B
bsC = self.batched_stride_C
bsD = self.batched_stride_D
else:
bsA = 0
bsB = 0
bsC = 0
bsD = 0
stride_A = StrideBatched_(self.lda, bsA)
stride_B = StrideBatched_(self.ldb, bsB)
stride_C = StrideBatched_(self.ldc, bsC)
stride_D = StrideBatched_(self.ldd, bsD)
# Superset of potential mainloop arguments
generic_args = GenericMainloopArguments3x_(
int(self.ptr_A),
stride_A,
int(self.ptr_B),
stride_B,
4 # mma_promotion_interval
)
# Set of mainloop arguments needed for this kernel
mainloop = self.operation.rt_module.mainloop_args.from_generic_mainloop_args(generic_args)
epilogue = self.operation.rt_module.epilogue_args(
self.output_op,
int(self.ptr_C),
stride_C,
int(self.ptr_D),
stride_D,
)
# Set hardware info
hw_info = self.operation.rt_module.hw_info(0, device_sm_count())
self.arguments = self.operation.argument_type(
self.gemm_mode,
problem_size_,
mainloop,
epilogue,
hw_info,
)
return self.arguments
def initialize(self):
        # get the host and device workspace
device_workspace_size = self.operation.rt_module.get_device_workspace_size(self)
if device_workspace_size > 0:
self.workspace_buffer = device_mem_alloc(device_workspace_size)
workspace_ptr = self.workspace_buffer.ptr
err, = cuda.cuMemsetD32(
workspace_ptr, 0, device_workspace_size // 4)
else:
workspace_ptr = None
device_workspace = 0
if workspace_ptr is not None and self.gemm_mode == cutlass_bindings.gemm.Mode.GemmSplitKParallel:
            # in GEMM split-K parallel, the D pointer is redirected
            # to the workspace
self.ptr_D = cuda.CUdeviceptr(workspace_ptr)
elif workspace_ptr is not None and self.gemm_mode == cutlass_bindings.gemm.Mode.Gemm:
# in GEMM split-K serial
device_workspace = workspace_ptr
self.get_arguments()
res_arg = self.operation.rt_module.get_args(
ctypes.byref(self.arguments),
ctypes.c_void_p(int(device_workspace)),
)
host_workspace = bytearray(res_arg.contents)
grid = self.operation.rt_module.get_grid_shape(
ctypes.byref(self.arguments),
ctypes.c_void_p(int(device_workspace)),
)
block = self.operation.rt_module.get_block_shape()
device_workspace = None
self.host_workspace = host_workspace
self.device_workspace = device_workspace
self.launch_config = LaunchConfiguration(
[grid.x, grid.y, grid.z],
[block.x, block.y, block.z],
self.operation.rt_module.shared_memory_capacity,
)
def GemmArguments(
operation: "GemmOperation", problem_size: "cutlass_bindings.gemm.GemmCoord",
A: "Tensor", B: "Tensor", C: "Tensor", D: "Tensor",
gemm_mode: "cutlass_bindings.gemm.Mode" = cutlass_bindings.gemm.Mode.Gemm, **kwargs):
"""
    Argument wrapper for GEMM in CUTLASS 2 or 3. It returns either 2.x or 3.x arguments
    depending on the API version of `operation` and its swizzling functor.
:param operation: the GEMM operation to take the argument
:type operation: :class:`cutlass.backend.GemmOperationUniversal` |
:class:`cutlass.backend.GemmOperationGrouped`
:param problem_size: GEMM problem size gemm(M, N, K)
    :type problem_size: :class:`cutlass_bindings.gemm.GemmCoord`
:param A: tensor A
:type A: cuda.CUdeviceptr | numpy.ndarray | torch.Tensor | cupy.ndarray
:param B: tensor B
:type B: cuda.CUdeviceptr | numpy.ndarray | torch.Tensor | cupy.ndarray
:param C: tensor C
:type C: cuda.CUdeviceptr | numpy.ndarray | torch.Tensor | cupy.ndarray
:param D: tensor D
:type D: cuda.CUdeviceptr | numpy.ndarray | torch.Tensor | cupy.ndarray
:param gemm_mode: GEMM mode
:type gemm_mode: :class:`cutlass_bindings.gemm.Mode`
:param output_op: output operator, optional
:type output_op: :class:`cutlass.backend.LinearCombinationFunctorArguments`
"""
if isinstance(operation.swizzling_functor, cutlass_bindings.ThreadblockSwizzleStreamK):
if operation.api == ApiVersion.v3x:
raise Exception("Stream K is currently only supported in CUTLASS 2.x")
ArgClass = GemmArguments2xStreamK
else:
ArgClass = GemmArguments3x if operation.api == ApiVersion.v3x else GemmArguments2x
return ArgClass(operation, problem_size, A, B, C, D, gemm_mode, **kwargs)
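# Illustrative sketch (not part of the original module) of the dispatch rule implemented
# by GemmArguments above: a stream-K swizzling functor selects the stream-K wrapper
# (CUTLASS 2.x only); otherwise the wrapper is chosen by the operation's API version.
# `op` is a hypothetical operation object with `swizzling_functor` and `api` attributes.
def _example_select_argument_class(op) -> type:
    if isinstance(op.swizzling_functor, cutlass_bindings.ThreadblockSwizzleStreamK):
        return GemmArguments2xStreamK
    return GemmArguments3x if op.api == ApiVersion.v3x else GemmArguments2x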
class GemmGroupedArguments:
"""
Argument wrapper for GEMM Grouped. It encodes problem information and
    user-provided tensors into the kernel's arguments
:param operation: the GEMM Grouped operation to take the argument
:type operation: :class:`cutlass.backend.GemmOperationGrouped`
    :param problem_sizes: list of GEMM problem sizes gemm(M, N, K)
    :type problem_sizes: list[:class:`cutlass_bindings.gemm.GemmCoord`]
:param A: list of tensor A
:type A: list[cuda.CUdeviceptr | numpy.ndarray | torch.Tensor | cupy.ndarray]
:param B: list of tensor B
:type B: list[cuda.CUdeviceptr | numpy.ndarray | torch.Tensor | cupy.ndarray]
:param C: list of tensor C
:type C: list[cuda.CUdeviceptr | numpy.ndarray | torch.Tensor | cupy.ndarray]
:param D: list of tensor D
:type D: list[cuda.CUdeviceptr | numpy.ndarray | torch.Tensor | cupy.ndarray]
:param output_op: output operator, optional
:type output_op: :class:`cutlass.backend.LinearCombinationFunctorArguments`
"""
def __init__(
self, operation: "GemmOperationGrouped", problem_sizes: "list[cutlass_bindings.gemm.GemmCoord]",
A: "list[Tensor]", B: "list[Tensor]", C: "list[torch.Tensor]", D: "list[Tensor]", **kwargs):
# get number of problems in the group
self.problem_count = len(problem_sizes)
# check the input arguments
assert len(A) == self.problem_count
assert len(B) == self.problem_count
assert len(C) == self.problem_count
assert len(D) == self.problem_count
problem_size_host = []
self.ptr_A_host = []
self.ptr_B_host = []
self.ptr_C_host = []
self.ptr_D_host = []
lda_host = []
ldb_host = []
ldc_host = []
ldd_host = []
self.partitions = 1
self.operation = operation
# get the threadblock
threadblock_shape = operation.tile_description.threadblock_shape
self.threadblock_shape = cutlass_bindings.gemm.GemmCoord(
threadblock_shape[0],
threadblock_shape[1],
threadblock_shape[2],
)
self.threadblock_swizzle = operation.swizzling_functor
self.total_tiles = 0
self.gemm_arguments = []
# process the input arguments
for idx, problem_size in enumerate(problem_sizes):
M, N, K = problem_size.m(), problem_size.n(), problem_size.k()
temp_argument = GemmArguments2x(
operation=operation,
problem_size=cutlass_bindings.gemm.GemmCoord(M, N, K),
A=A[idx], B=B[idx], C=C[idx], D=D[idx])
self.gemm_arguments.append(temp_argument)
problem_size_host.append(
[temp_argument.problem_size.m(),
temp_argument.problem_size.n(),
temp_argument.problem_size.k()]
)
self.ptr_A_host.append(int(temp_argument.ptr_A))
lda_host.append(temp_argument.lda)
self.ptr_B_host.append(int(temp_argument.ptr_B))
ldb_host.append(temp_argument.ldb)
self.ptr_C_host.append(int(temp_argument.ptr_C))
ldc_host.append(temp_argument.ldc)
self.ptr_D_host.append(int(temp_argument.ptr_D))
ldd_host.append(temp_argument.ldd)
# get number of tiles
grid = self.threadblock_swizzle.get_grid_shape(
self.threadblock_swizzle.get_tiled_shape(
temp_argument.problem_size, self.threadblock_shape,
temp_argument.batch_count)
)
self.total_tiles += grid.x * grid.y * grid.z
self.problem_size_buffer = todevice(problem_size_host, np.int32)
self.ptr_A_buffer = todevice(self.ptr_A_host, np.int64)
self.ptr_B_buffer = todevice(self.ptr_B_host, np.int64)
self.ptr_C_buffer = todevice(self.ptr_C_host, np.int64)
self.ptr_D_buffer = todevice(self.ptr_D_host, np.int64)
self.lda_buffer = todevice(lda_host, np.int64)
self.ldb_buffer = todevice(ldb_host, np.int64)
self.ldc_buffer = todevice(ldc_host, np.int64)
self.ldd_buffer = todevice(ldd_host, np.int64)
if "output_op" in kwargs.keys():
self.alpha = kwargs["output_op"].alpha
self.beta = kwargs["output_op"].beta
else:
self.alpha = 1.0
self.beta = 0.0
if "output_op" in kwargs.keys():
self.output_op = kwargs["output_op"]
else:
self.output_op = self.operation.epilogue_type(1.0, 0.0)
# get host problem size
self.host_problem_size_ptr = np.array(problem_size_host, dtype=np.int32).__array_interface__["data"][0]
self.arguments = self.get_arguments()
self.initialize()
def get_arguments(self):
return self.operation.argument_type(
self.problem_size_buffer.ptr,
self.problem_count,
self.total_tiles,
self.output_op,
self.ptr_A_buffer.ptr,
self.ptr_B_buffer.ptr,
self.ptr_C_buffer.ptr,
self.ptr_D_buffer.ptr,
self.lda_buffer.ptr,
self.ldb_buffer.ptr,
self.ldc_buffer.ptr,
self.ldd_buffer.ptr,
ctypes.c_void_p(int(self.host_problem_size_ptr)),
)
def initialize(self):
# get launch configuration
launch_config = self.operation.rt_module.plan(self)
        # get the host and device workspace
device_workspace_size = self.operation.rt_module.get_device_workspace_size(self)
if device_workspace_size > 0:
self.workspace_buffer = device_mem_alloc(device_workspace_size)
workspace_ptr = self.workspace_buffer.ptr
err, = cuda.cuMemsetD32(
workspace_ptr, 0, device_workspace_size // 4)
else:
workspace_ptr = None
if self.operation.precompute_mode == SchedulerMode.Host:
device_workspace_ptr = self.operation.rt_module.host_precompute(
self, self.operation.rt_module.get_workspace_size(self),)
else:
device_workspace_ptr = 0
result = self.operation.rt_module.get_args(
ctypes.byref(self.arguments),
self.total_tiles,
ctypes.c_void_p(int(device_workspace_ptr)),
)
host_workspace = bytearray(result.contents)
device_workspace = None
self.host_workspace = host_workspace
self.device_workspace = device_workspace
self.launch_config = launch_config
def sync(self):
err, = cudart.cudaDeviceSynchronize()
        if err != cudart.cudaError_t.cudaSuccess:
raise RuntimeError("CUDA Error %s" % str(err))
for arg in self.gemm_arguments:
arg.sync(stream_sync=False)
################################################################################
# Base class for GEMM runtime module
################################################################################
class GemmRTbase(ExecutableOperation):
"""
    GemmRTbase manages the CUTLASS runtime components
"""
KernelTemplate = r"""
extern "C"
__global__ void
${operation_name}(${operation_name}${operation_suffix}::Params params) {
// Dynamic shared memory base pointer
extern __shared__ int SharedStorageBase[];
// Declare pointer to dynamic shared memory.
${operation_name}${operation_suffix}::SharedStorage *shared_storage =
reinterpret_cast<${operation_name}${operation_suffix}::SharedStorage *>(SharedStorageBase);
${operation_name}${operation_suffix}::invoke(params, *shared_storage);
}
"""
def __init__(self, operation: "GemmOperation"):
super().__init__(operation)
self.operation = operation
threadblock_shape = operation.tile_description.threadblock_shape
self.threadblock_shape = cutlass_bindings.gemm.GemmCoord(
threadblock_shape[0], threadblock_shape[1], threadblock_shape[2])
self.threadblock_swizzle = operation.swizzling_functor
# Threads per threadblock
self.threads = operation.tile_description.num_threads
def emit(self):
return self.emitter.emit(self.operation)
def can_implement(self, configuration, arguments):
raise NotImplementedError()
def get_host_workspace_size(self, arguments):
raise NotImplementedError()
def get_device_workspace_size(self, arguments):
return 0
def initialize(self):
err, = cuda.cuFuncSetAttribute(
self.kernel,
attrib=cuda.CUfunction_attribute.CU_FUNC_ATTRIBUTE_MAX_DYNAMIC_SHARED_SIZE_BYTES,
value=self.shared_memory_capacity)
if err != cuda.CUresult.CUDA_SUCCESS:
raise RuntimeError(
f"CUDA error on call to cuFuncSetAttribute: {cuda.cuGetErrorString(err)[1]}"
)
################################################################################
# Runtime module for GEMM Universal
################################################################################
class GemmRTUniversal(GemmRTbase):
"""
GemmRTUniversal manages the CUTLASS runtime components
"""
HostTemplate = r"""
extern "C" {
// Get the size of params in bytes
int ${operation_name}_get_param_size(){
return sizeof(${operation_name}${operation_suffix}::Params);
}
// Get the size of dynamic shared memory in bytes
int ${operation_name}_shared_memory_size() {
return int(sizeof(${operation_name}${operation_suffix}::SharedStorage));
}
// Get the params as byte array
char* ${operation_name}_get_params(${operation_name}_base::Arguments* argument, int* workspace){
${operation_name}_base::Params* params;
params = new ${operation_name}_base::Params(*argument,
-1, // SM count. Only used for stream-K
-1 // Occupancy. Only used for stream-K
);
// Semaphore holds the pointer to the workspace in the Params struct
params->semaphore = workspace;
char *bytes = ((char*)(params));
char *output = new char[sizeof(${operation_name}_base::Params)];
for (unsigned int i = 0; i < sizeof(${operation_name}_base::Params); i ++)
output[i] = bytes[i];
return output;
}
}
"""
def __init__(self, operation: "GemmOperation"):
super(GemmRTUniversal, self).__init__(operation)
self.emitter = EmitGemmUniversalInstance(
"_type", operation.direct_store, operation.visitor)
self.argument_type, self.epilogue_type = get_gemm_arguments(operation.epilogue_functor)
self.argtype = [
ctypes.POINTER(self.argument_type),
ctypes.POINTER(GemmCoord_), ctypes.c_int, ctypes.c_void_p
]
def plan(self, arguments):
grid = self.threadblock_swizzle.get_tiled_shape(
arguments.problem_size, self.threadblock_shape, arguments.batch_count
)
gemm_k_size = arguments.problem_size.k()
if arguments.gemm_mode in [cutlass_bindings.gemm.Mode.Gemm, cutlass_bindings.gemm.Mode.GemmSplitKParallel]:
alignk = max(max(128 // DataTypeSize[self.operation.A.element],
128 // DataTypeSize[self.operation.B.element]), 1)
gemm_k_size = (((arguments.problem_size.k() + arguments.batch_count - 1) //
arguments.batch_count + alignk - 1) // alignk) * alignk
if gemm_k_size:
grid_z = (arguments.problem_size.k() + gemm_k_size - 1) // gemm_k_size
grid = cutlass_bindings.gemm.GemmCoord(grid.m(), grid.n(), grid_z)
arguments.grid_tiled_shape = cutlass_bindings.dim3(grid.m(), grid.n(), grid.k())
grid = self.threadblock_swizzle.get_grid_shape(grid)
arguments.gemm_k_size = gemm_k_size
return LaunchConfiguration(
[grid.x, grid.y, grid.z],
[self.threads, 1, 1],
self.shared_memory_capacity)
def get_device_workspace_size(self, arguments: GemmArguments):
workspace_bytes = 0
if arguments.gemm_mode == cutlass_bindings.gemm.Mode.GemmSplitKParallel:
workspace_bytes = (DataTypeSize[arguments.operation.C.element]
* arguments.batched_stride_D * arguments.grid_tiled_shape.z // 8)
elif (arguments.gemm_mode == cutlass_bindings.gemm.Mode.Gemm and
arguments.split_k_slices > 1):
workspace_bytes = 4 * arguments.grid_tiled_shape.x * arguments.grid_tiled_shape.y
return workspace_bytes
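# ------------------------------------------------------------------------------
# Worked example (illustration only, never called by the library) of the
# split-K sizing done in GemmRTUniversal.plan and get_device_workspace_size
# above. It assumes batch_count carries the split-K factor in Gemm mode.
def _sketch_split_k_sizing(k, split_k_slices, element_bits_a, element_bits_b,
                           tiled_m, tiled_n):
    # alignk rounds the per-slice K extent to a multiple of 128 bits of the
    # narrower operand.
    alignk = max(max(128 // element_bits_a, 128 // element_bits_b), 1)
    gemm_k_size = (((k + split_k_slices - 1) // split_k_slices + alignk - 1)
                   // alignk) * alignk
    grid_z = (k + gemm_k_size - 1) // gemm_k_size
    # Serial split-K reserves 4 bytes per (m, n) output tile for synchronization.
    workspace_bytes = 4 * tiled_m * tiled_n if split_k_slices > 1 else 0
    return gemm_k_size, grid_z, workspace_bytes

# e.g. fp16 operands (alignk = 128 // 16 = 8), K = 4096 split 3 ways over an
# 8x8 tile grid: gemm_k_size = 1368, grid_z = 3, workspace_bytes = 256.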
class GemmRTUniversalStreamK(GemmRTUniversal):
"""
Manages the CUTLASS runtime components for 2.x stream K kernels
"""
HostTemplate = r"""
extern "C" {
// Get the size of params in bytes
int ${operation_name}_get_param_size(){
return sizeof(${operation_name}${operation_suffix}::Params);
}
// Get the size of dynamic shared memory in bytes
int ${operation_name}_shared_memory_size() {
return int(sizeof(${operation_name}${operation_suffix}::SharedStorage));
}
using GemmType = ${operation_name}_base;
// Get the params as byte array
char* ${operation_name}_get_params(GemmType::Arguments* argument, int* workspace,
int sm_count, int occupancy) {
GemmType::Params* params;
params = new GemmType::Params(*argument, sm_count, occupancy);
params->init_workspace(workspace);
char *bytes = ((char*)(params));
char *output = new char[sizeof(GemmType::Params)];
for (unsigned int i = 0; i < sizeof(GemmType::Params); i ++)
output[i] = bytes[i];
return output;
}
// Get the grid shape
dim3 ${operation_name}_get_grid_shape(GemmType::Arguments* args, int device_sms, int sm_occupancy) {
typename GemmType::Params params(*args, device_sms, sm_occupancy);
return params.get_grid_dims();
}
}
"""
def __init__(self, operation: "GemmOperation"):
super(GemmRTUniversalStreamK, self).__init__(operation)
self.extra_funcs = {
"get_grid_shape": GemmCoord_,
}
self._occupancy = None
self.argument_type, self.epilogue_type = get_gemm_arguments_streamk(operation.epilogue_functor)
@property
def occupancy(self):
if self._occupancy is None:
err, self._occupancy = cuda.cuOccupancyMaxActiveBlocksPerMultiprocessorWithFlags(
self.kernel, self.threads, self.shared_memory_capacity,
cuda.CUoccupancy_flags.CU_OCCUPANCY_DISABLE_CACHING_OVERRIDE)
if err != cuda.CUresult.CUDA_SUCCESS:
raise RuntimeError(
"CUDA error on call to cuOccupancyMaxActiveBlocksPerMultiprocessorWithFlags: "
f"{cuda.cuGetErrorString(err)[1]}")
return self._occupancy
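    # Note (illustration): the occupancy queried above, multiplied by the number
    # of SMs on the device, bounds how many persistent CTAs the stream-K
    # scheduler can keep resident; both values are forwarded to the generated
    # ${operation_name}_get_params / _get_grid_shape entry points in
    # HostTemplate, which pass them into GemmType::Params.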
################################################################################
# Runtime module for GEMM Universal within CUTLASS 3
################################################################################
class GemmRTUniversal3x(GemmRTUniversal):
"""
Manages the CUTLASS runtime components for 3.x kernels
"""
KernelTemplate = r"""
using Operator = ${operation_name}${operation_suffix};
extern "C"
__global__ __launch_bounds__(Operator::MaxThreadsPerBlock, Operator::MinBlocksPerMultiprocessor)
void ${operation_name}(__grid_constant__ typename Operator::Params const params) {
// Dynamic shared memory base pointer
extern __shared__ char smem[];
// Declare pointer to dynamic shared memory.
Operator op;
op(params, smem);
}
"""
HostTemplate = r"""
extern "C" {
// Get the size of params in bytes
int ${operation_name}_get_param_size(){
return sizeof(${operation_name}${operation_suffix}::Params);
}
// Get the size of dynamic shared memory in bytes
int ${operation_name}_shared_memory_size() {
return ${operation_name}${operation_suffix}::SharedStorageSize;
}
using GemmType = ${operation_name}_base;
// Get the workspace size
uint64_t ${operation_name}_get_kernel_workspace_size(GemmType::Arguments* argument) {
return GemmType::get_workspace_size(*argument);
}
// Get the params as byte array
char* ${operation_name}_get_params(GemmType::Arguments* argument, int* workspace){
GemmType::Params params = GemmType::to_underlying_arguments(*argument, workspace);
    char *bytes = ((char*)(&params));
char *output = new char[sizeof(GemmType::Params)];
for (unsigned int i = 0; i < sizeof(GemmType::Params); i ++)
output[i] = bytes[i];
return output;
}
// Get the total number of blocks for a persistent kernel
uint64_t ${operation_name}_get_persistent_tiled_blk_shape_mnl(GemmType::ProblemShape problem) {
auto problem_shape_MNKL = append<4>(problem, Int<1>{});
auto [problem_blocks_m, problem_blocks_n, problem_blocks_l] =
cutlass::gemm::kernel::detail::PersistentTileSchedulerSm90::get_tiled_cta_shape_mnl(
problem_shape_MNKL, GemmType::TileShape{}, GemmType::DispatchPolicy::ClusterShape{});
return problem_blocks_m * problem_blocks_n * problem_blocks_l;
}
// Get the grid shape
dim3 ${operation_name}_get_grid_shape(GemmType::Arguments* args, int* workspace) {
auto tmp_params = GemmType::to_underlying_arguments(*args, workspace);
return GemmType::get_grid_shape(tmp_params);
}
// Get the block shape
dim3 ${operation_name}_get_block_shape() {
return GemmType::get_block_shape();
}
}
"""
def __init__(self, operation: "GemmOperation"):
super(GemmRTUniversal3x, self).__init__(operation)
self.extra_funcs = {
"get_grid_shape": dim3_,
"get_block_shape": dim3_,
"get_persistent_tiled_blk_shape_mnl": ctypes.c_uint64,
"get_kernel_workspace_size": ctypes.c_uint64
}
self.emitter = EmitGemmUniversalInstance3x("_type")
self.mainloop_args = get_mainloop_arguments_3x(
operation.tile_description.kernel_schedule,
operation.A.element,
operation.B.element,
operation.A.alignment,
operation.B.alignment
)
self.argument_type, self.epilogue_args, self.epilogue_type, self.hw_info = get_gemm_arguments_3x(self.mainloop_args, operation.epilogue_functor)
def get_device_workspace_size(self, arguments: GemmArguments3x):
return self.get_kernel_workspace_size(ctypes.byref(arguments.get_arguments()))
class EmitGemmUniversalInstance3x:
"""Responsible for emitting a CUTLASS 3 template definition"""
def __init__(self, operation_suffix=""):
self.operation_suffix = operation_suffix
self.includes = [
"cutlass/cutlass.h",
"cute/tensor.hpp",
"cute/atom/mma_atom.hpp",
"cutlass/numeric_types.h",
"cutlass/gemm/collective/collective_builder.hpp",
"cutlass/gemm/kernel/sm90_tile_scheduler.hpp",
"cutlass/gemm/kernel/gemm_universal.hpp",
"cutlass/epilogue/collective/collective_builder.hpp",
"cutlass/epilogue/collective/default_epilogue.hpp",
"cutlass/epilogue/thread/linear_combination.h"
]
self.gemm_template_kernel = """
using namespace cute;
using CollectiveEpilogue =
typename cutlass::epilogue::collective::CollectiveBuilder<
${arch}, ${opcode_class},
cute::Shape<cute::_${threadblock_shape_m}, cute::_${threadblock_shape_n}, cute::_${threadblock_shape_k}>,
cute::Shape<cute::_${cluster_m},cute::_${cluster_n},cute::_${cluster_k}>,
cutlass::epilogue::collective::EpilogueTileAuto,
${element_accumulator}, ${element_epilogue},
${element_c}, ${layout_c}, ${align_c},
${element_d}, ${layout_d}, ${align_d},
${epilogue_schedule}
>::CollectiveOp;
using CollectiveMainloop =
typename cutlass::gemm::collective::CollectiveBuilder<
${arch}, ${opcode_class},
${element_a}, ${layout_a}, ${align_a},
${element_b}, ${layout_b}, ${align_b},
${element_accumulator},
cute::Shape<cute::_${threadblock_shape_m}, cute::_${threadblock_shape_n}, cute::_${threadblock_shape_k}>,
cute::Shape<cute::_${cluster_m},cute::_${cluster_n},cute::_${cluster_k}>,
${stage_count_type},
${kernel_schedule}
>::CollectiveOp;
// Gemm operator ${operation_name}
using ${operation_name}_base = cutlass::gemm::kernel::GemmUniversal<
Shape<int,int,int,int>,
CollectiveMainloop,
CollectiveEpilogue,
${tile_scheduler}
>;
// Define named type
struct ${operation_name}${operation_suffix} :
public ${operation_name}_base { };
"""
self.gemm_template_device = self.gemm_template_kernel + """
// Define device-level operator
using DeviceKernel = cutlass::gemm::device::GemmUniversalAdapter<${operation_name}${operation_suffix}>;
"""
def emit(self, operation):
instance_layout_A, instance_layout_B, instance_layout_C, = \
(operation.A.layout, operation.B.layout, operation.C.layout)
# Support built-in epilogue functors or user-defined functions
epilogue_functor = operation.epilogue_functor.emit()
if operation.tile_description.stages is None or operation.tile_description.stages == 0:
stage_count_type = "cutlass::gemm::collective::StageCountAutoCarveout<sizeof(typename CollectiveEpilogue::SharedStorage)>"
else:
stage_count_type = "_" + str(operation.tile_description.stages)
if operation.emission_type == EmissionType.Kernel:
gemm_template = self.gemm_template_kernel
else:
gemm_template = self.gemm_template_device
kschedule = KernelScheduleType.ScheduleAuto
eschedule = EpilogueScheduleType.ScheduleAuto
tschedule = TileSchedulerType.Default
if operation.tile_description.kernel_schedule is not None:
kschedule = operation.tile_description.kernel_schedule
if operation.tile_description.epilogue_schedule is not None:
eschedule = operation.tile_description.epilogue_schedule
if operation.tile_description.tile_scheduler is not None:
tschedule = operation.tile_description.tile_scheduler
values = {
"operation_name": operation.procedural_name(),
"operation_suffix": self.operation_suffix,
"element_a": DataTypeTag[operation.A.element],
"layout_a": LayoutTag[instance_layout_A],
"element_b": DataTypeTag[operation.B.element],
"layout_b": LayoutTag[instance_layout_B],
"element_c": DataTypeTag[operation.C.element],
"layout_c": LayoutTag[instance_layout_C],
"element_d": DataTypeTag[operation.epilogue_functor.element_output],
"layout_d": LayoutTag[instance_layout_C],
"element_accumulator": DataTypeTag[operation.accumulator_type()],
"element_epilogue": DataTypeTag[operation.epilogue_functor.element_epilogue],
"epilogue_vector_length": str(operation.epilogue_functor.epilogue_vector_length),
"opcode_class": OpcodeClassTag[operation.tile_description.math_instruction.opcode_class],
"arch": "cutlass::arch::Sm%d" % operation.arch,
"threadblock_shape_m": str(operation.tile_description.threadblock_shape[0]),
"threadblock_shape_n": str(operation.tile_description.threadblock_shape[1]),
"threadblock_shape_k": str(operation.tile_description.threadblock_shape[2]),
"cluster_m": str(operation.tile_description.cluster_shape[0]),
"cluster_n": str(operation.tile_description.cluster_shape[1]),
"cluster_k": str(operation.tile_description.cluster_shape[2]),
"align_a": str(operation.A.alignment),
"align_b": str(operation.B.alignment),
"align_c": str(operation.C.alignment),
"align_d": str(operation.C.alignment),
"stage_count_type": stage_count_type,
"kernel_schedule": KernelScheduleTag[kschedule],
"epilogue_schedule": EpilogueScheduleTag[eschedule],
"tile_scheduler": TileSchedulerTag[tschedule]
}
values["epilogue_functor"] = operation.epilogue_functor.emit()
return SubstituteTemplate(gemm_template, values)
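# ------------------------------------------------------------------------------
# Illustration only: the emitters in this file fill C++ templates through a
# ${placeholder} convention. The library's own SubstituteTemplate helper is
# defined elsewhere in the backend; the sketch below reproduces the same
# behavior with the standard library so the template strings are easier to
# read in isolation.
def _sketch_substitute_template(template, values):
    import string
    return string.Template(template).safe_substitute(**values)

# _sketch_substitute_template("using T = ${element_a};", {"element_a": "cutlass::half_t"})
# -> "using T = cutlass::half_t;"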
###################################################################################################
# Runtime module for GEMM Grouped
###################################################################################################
class GemmRTGrouped(GemmRTbase):
"""
GemmRTGrouped manages the CUTLASS runtime components
"""
KernelTemplate = r"""
extern "C"
__global__ void
${operation_name}(${operation_name}${operation_suffix}::Params params) {
// Dynamic shared memory base pointer
extern __shared__ int SharedStorageBase[];
// Declare pointer to dynamic shared memory.
${operation_name}${operation_suffix}::SharedStorage *shared_storage =
reinterpret_cast<${operation_name}${operation_suffix}::SharedStorage *>(SharedStorageBase);
${operation_name}${operation_suffix} op;
op(params, *shared_storage);
}
"""
HostTemplate = r"""
extern "C" {
// precompute scheduling information
char * ${operation_name}_precompute(${operation_name}_base::Arguments const &args, int tile_count, size_t workspace_bytes) {
char* host_workspace = new char[workspace_bytes];
${operation_name}_base::ProblemVisitor::host_precompute(
args.host_problem_sizes,
args.problem_count,
args.threadblock_count,
(void*)host_workspace
);
return host_workspace;
}
// Get the size of params in bytes
int ${operation_name}_get_param_size(){
return sizeof(${operation_name}${operation_suffix}::Params);
}
// Get the size of dynamic shared memory in bytes
int ${operation_name}_shared_memory_size() {
return int(sizeof(${operation_name}${operation_suffix}::SharedStorage));
}
// Get the params as byte array
char* ${operation_name}_get_params(${operation_name}_base::Arguments* argument, int tile_count, void* workspace=nullptr){
${operation_name}_base::Params* params;
params = new ${operation_name}_base::Params(*argument, workspace, tile_count);
char *bytes = ((char*)(params));
char *output = new char[sizeof(${operation_name}_base::Params)];
for (unsigned int i = 0; i < sizeof(${operation_name}_base::Params); i ++)
output[i] = bytes[i];
return output;
}
}
"""
def __init__(self, operation: "GemmOperation"):
super(GemmRTGrouped, self).__init__(operation)
self.extra_funcs = {"precompute": None}
self.emitter = EmitGemmGroupedInstance("_type")
self.argument_type, self.epilogue_type = get_gemm_grouped_arguments(operation.epilogue_functor)
self.argtype = [ctypes.POINTER(self.argument_type), ctypes.c_int, ctypes.c_void_p]
def host_precompute(self, arguments, workspace_bytes):
self.precompute.argtype = [
self.argtype[0], ctypes.c_int, ctypes.c_longlong]
self.precompute.restype = ctypes.POINTER(ctypes.c_byte * workspace_bytes)
problem_info = self.precompute(
ctypes.byref(arguments.arguments),
arguments.total_tiles,
workspace_bytes)
problem_info_array = bytearray(problem_info.contents)
# copy to device memory
return rmm.DeviceBuffer.to_device(problem_info_array).ptr
def plan(self, arguments):
return LaunchConfiguration(
[arguments.total_tiles, 1, 1],
[self.threads, 1, 1],
self.shared_memory_capacity,
)
def get_workspace_size(self, arguments):
if self.operation.precompute_mode == SchedulerMode.Device:
return 0
elif self.operation.precompute_mode == SchedulerMode.Host:
total_tiles = arguments.total_tiles
entries_per_block = 1
            return 8 * entries_per_block * total_tiles  # 8 bytes of precomputed schedule per tile
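# ------------------------------------------------------------------------------
# Note (illustration): with SchedulerMode.Host the grouped-GEMM tile schedule is
# precomputed on the CPU (host_precompute above) and shipped to the device as a
# flat buffer of 8 bytes per tile, while SchedulerMode.Device needs no workspace
# because the kernel's ProblemVisitor derives the schedule on the fly.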
################################################################################
# Runtime module for GEMM and grouped GEMM
################################################################################
class GemmOperationBase:
"""
CUTLASS GEMM operation
"""
def __init__(
self, gemm_kind, arch, tile_description: TileDescription,
A: TensorDescription, B: TensorDescription, C: TensorDescription,
epilogue_functor, swizzling_functor=cutlass_bindings.IdentitySwizzle1,
api=ApiVersion.v2x, emission_type=EmissionType.Kernel, **kwargs):
self.operation_kind: OperationKind = OperationKind.Gemm
self.arch: int = arch
self.tile_description: TileDescription = tile_description
self.gemm_kind: GemmKind = gemm_kind
self.api = api
self.prefix = "3x" if self.api == ApiVersion.v3x else ""
self.emission_type = emission_type
# Optionally swap the TensorDescriptions for operands A and B and transpose their
# layouts. This is needed to mimic the transpose performed by device::GemmUniversal.
        # The code below uses deep copy to avoid overwriting the original TensorDescription
self.switched = (self.api != ApiVersion.v3x and
self.emission_type == EmissionType.Kernel and
C.layout == cutlass_bindings.ColumnMajor)
self.A, self.B, self.C = GemmOperationBase.get_operands(A, B, C, self.switched)
self.epilogue_functor = epilogue_functor
self.swizzling_functor = swizzling_functor()
if "direct_store" in kwargs:
self.direct_store = kwargs["direct_store"]
else:
self.direct_store = False
if "visitor" in kwargs:
self.visitor = kwargs["visitor"]
else:
self.visitor = False
@staticmethod
def get_operands(A: TensorDescription, B: TensorDescription, C: TensorDescription, swap: bool):
"""
Makes copies of A, B, and C, and possibly transposes their order. If ``swap`` is set,
A and B are swapped, and the layout of A, B, and C are transposed.
:param A: description of operand A
:type A: TensorDescription
:param B: description of operand B
:type B: TensorDescription
:param C: description of operand C
:type C: TensorDescription
:return: descriptions of operands A, B, and C
        :rtype: tuple[TensorDescription]
"""
if swap:
A_out = copy.deepcopy(B)
B_out = copy.deepcopy(A)
C_out = copy.deepcopy(C)
A_out.layout = transpose_layout(A_out.layout)
B_out.layout = transpose_layout(B_out.layout)
C_out.layout = transpose_layout(C_out.layout)
else:
A_out = copy.deepcopy(A)
B_out = copy.deepcopy(B)
C_out = copy.deepcopy(C)
return A_out, B_out, C_out
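    # Example of the swap performed above: when C is column-major, the
    # column-major problem D = A x B is recast as the row-major problem
    # D^T = B^T x A^T, so A and B trade places and all three layouts are
    # transposed -- mirroring the implicit transpose that device::GemmUniversal
    # performs, as noted in __init__.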
def run(self, arguments: GemmArguments) -> cuda.CUresult:
"""
Configure and launch the cuda kernel with input arguments
"""
if self.emission_type == EmissionType.Device:
raise Exception('Running a kernel via PyCUTLASS is only enabled with emission type "Kernel"')
err = self.rt_module.run(
arguments.host_workspace,
arguments.device_workspace,
arguments.launch_config,
)
if err != cuda.CUresult.CUDA_SUCCESS:
raise RuntimeError("CUDA Error %s" % str(err))
return err
def free(self):
if hasattr(self, "workspace_buffer"):
del self.workspace_buffer
def is_complex(self):
complex_operators = [
MathOperation.multiply_add_complex,
MathOperation.multiply_add_complex_gaussian,
MathOperation.multiply_add_complex_fast_f32,
]
return self.tile_description.math_instruction.math_operation in complex_operators
def is_planar_complex(self):
return self.gemm_kind in (GemmKind.PlanarComplex, GemmKind.PlanarComplexArray)
def accumulator_type(self):
accum = self.tile_description.math_instruction.element_accumulator
if self.is_complex():
return get_complex_from_real(accum)
return accum
def short_math_name(self):
if self.tile_description.math_instruction.math_operation == MathOperation.multiply_add_complex_gaussian:
return "g%s" % ShortDataTypeNames[self.accumulator_type()]
return ShortDataTypeNames[self.accumulator_type()]
def core_name(self):
"""The basic operation kind is prefixed with a letter indicating the accumulation type."""
inst_shape = ""
inst_operation = ""
intermediate_type = ""
math_operations_map = {
MathOperation.xor_popc: "xor",
}
if (self.tile_description.math_instruction.opcode_class == cutlass_bindings.OpClass.TensorOp or
self.tile_description.math_instruction.opcode_class == cutlass_bindings.OpClass.WmmaTensorOp):
math_op = self.tile_description.math_instruction.math_operation
math_op_string = math_operations_map[math_op] if math_op in math_operations_map.keys() else ""
if self.tile_description.math_instruction.instruction_shape is not None:
inst_shape = "%dx%dx%d" % tuple(
self.tile_description.math_instruction.instruction_shape)
else:
inst_shape = "Default"
inst_shape += math_op_string
if (self.tile_description.math_instruction.element_a != self.A.element and
self.tile_description.math_instruction.element_a != self.tile_description.math_instruction.element_accumulator):
intermediate_type = DataTypeNames[self.tile_description.math_instruction.element_a]
return "%s%s%s%s" % (self.short_math_name(), inst_shape, intermediate_type, GemmKindNames[self.gemm_kind])
def extended_name(self):
"""Append data types if they differ from compute type."""
if self.is_complex():
extended_name = "${core_name}"
else:
if (self.C.element != self.tile_description.math_instruction.element_accumulator and
self.A.element != self.tile_description.math_instruction.element_accumulator):
extended_name = "${element_c}_${core_name}_${element_a}"
elif (self.C.element == self.tile_description.math_instruction.element_accumulator and
self.A.element != self.tile_description.math_instruction.element_accumulator):
extended_name = "${core_name}_${element_a}"
else:
extended_name = "${core_name}"
extended_name = SubstituteTemplate(extended_name, {
"element_a": DataTypeNames[self.A.element],
"element_c": DataTypeNames[self.C.element],
"core_name": self.core_name(),
})
return extended_name
def extended_name_3x(self):
"""Generates a string representing the MMA atom. Assumes accumulator type is C type."""
extended_name = "{core_name}_{element_a}_{element_b}_{element_acc}_{element_c}".format(
element_a=DataTypeNames[self.A.element],
element_b=DataTypeNames[self.B.element],
element_acc=DataTypeNames[self.tile_description.math_instruction.element_accumulator],
element_c=DataTypeNames[self.C.element],
core_name=self.core_name())
return extended_name
def layout_name(self):
if self.is_complex() or self.is_planar_complex():
return "%s%s" % (
ShortComplexLayoutNames[(self.A.layout, self.A.complex_transform)],
ShortComplexLayoutNames[(self.B.layout, self.B.complex_transform)]
)
return "%s%s" % (ShortLayoutTypeNames[self.A.layout], ShortLayoutTypeNames[self.B.layout])
# Generates a short string representing the ABC layout tags (e.g. ntn or tnn)
def layout_name_3x(self):
if self.is_complex() or self.is_planar_complex():
return "{}{}{}".format(
ShortComplexLayoutNames[(self.A.layout, self.A.complex_transform)],
ShortComplexLayoutNames[(self.B.layout, self.B.complex_transform)],
ShortComplexLayoutNames[(self.C.layout, self.C.complex_transform)])
else:
return "{}{}{}".format(
ShortLayoutTypeNames[self.A.layout],
ShortLayoutTypeNames[self.B.layout],
ShortLayoutTypeNames[self.C.layout])
# Generates a short string representing underlying kernel schedule type
def kernel_schedule_name_3x(self):
if self.tile_description.kernel_schedule is None:
return KernelScheduleSuffixes[KernelScheduleType.ScheduleAuto]
else:
return KernelScheduleSuffixes[self.tile_description.kernel_schedule]
# Generates a short string representing underlying epilogue schedule type
def epilogue_schedule_name_3x(self):
if self.tile_description.epilogue_schedule is None:
return EpilogueScheduleSuffixes[EpilogueScheduleType.ScheduleAuto]
else:
return EpilogueScheduleSuffixes[self.tile_description.epilogue_schedule]
def procedural_name(self):
"""The full procedural name indicates architecture, extended name, tile size, and layout."""
opcode_class_name = OpcodeClassNames[self.tile_description.math_instruction.opcode_class]
if self.api == ApiVersion.v3x and self.arch >= 90:
kernel_name_template = "cutlass{p}_sm{ar}_{op}_{ex}_{tbm}x{tbn}x{tbk}_{cm}x{cn}x{ck}_{l}_{s}_align{al}{k}{e}"
return kernel_name_template.format(
p=self.prefix,
ar=self.arch,
op=opcode_class_name,
ex=self.extended_name_3x(),
tbm=self.tile_description.threadblock_shape[0],
tbn=self.tile_description.threadblock_shape[1],
tbk=self.tile_description.threadblock_shape[2],
cm=self.tile_description.cluster_shape[0],
cn=self.tile_description.cluster_shape[1],
ck=self.tile_description.cluster_shape[2],
l=self.tile_description.stages,
s=self.layout_name_3x(),
al=str(self.A.alignment),
k=self.kernel_schedule_name_3x(),
e=self.epilogue_schedule_name_3x()
)
else:
threadblock = self.tile_description.procedural_name()
return "cutlass{p}_sm{ar}_{op}_{ex}_{tb}_{l}_align{a}".format(
p=self.prefix,
ar=self.arch,
op=opcode_class_name,
ex=self.extended_name(),
tb=threadblock,
l=self.layout_name(),
a=str(self.A.alignment)
)
def configuration_name(self):
"""The full procedural name indicates architecture, extended name, tile size, and layout."""
return self.procedural_name()
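# ------------------------------------------------------------------------------
# Illustration only: shape of the 2.x kernel name produced by procedural_name()
# above. With assumed values -- arch 80, a tensor-op opcode class, row-major A
# and B ("tt"), and alignment 8 -- the format string expands along the lines of
#   cutlass_sm80_tensorop_<extended_name>_<tile_description>_tt_align8
# where <extended_name> comes from extended_name() and <tile_description> from
# TileDescription.procedural_name(). The 3.x branch additionally encodes the
# cluster shape, stage count, and kernel/epilogue schedule suffixes.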
class GemmOperationUniversal(GemmOperationBase):
def __init__(self, arch, tile_description: TileDescription, A: TensorDescription, B, C,
epilogue_functor, swizzling_functor=cutlass_bindings.IdentitySwizzle1, **kwargs):
api = api_version(arch, tile_description.math_instruction.opcode_class, A.element)
super(GemmOperationUniversal, self).__init__(GemmKind.Universal, arch, tile_description,
A, B, C, epilogue_functor, swizzling_functor,
api=api, **kwargs, )
if api == ApiVersion.v3x:
if swizzling_functor == cutlass_bindings.ThreadblockSwizzleStreamK:
raise Exception("Stream K is currently only supported for CUTLASS 2.x kernels")
self.rt_module = GemmRTUniversal3x(self)
else:
if swizzling_functor == cutlass_bindings.ThreadblockSwizzleStreamK:
self.rt_module = GemmRTUniversalStreamK(self)
else:
self.rt_module = GemmRTUniversal(self)
self.argument_type = self.rt_module.argument_type
self.epilogue_type = self.rt_module.epilogue_type
def device_op(self):
"""
Returns a new GemmOperationUniversal object that is constructed with emission type
``EmissionType.Device``. Since the device-emitted kernel does not require swapping,
        any swapping performed by the kernel-emitted operation is reversed.
:return: operation ready for device-level code emission
        :rtype: GemmOperationUniversal
"""
A, B, C = GemmOperationBase.get_operands(self.A, self.B, self.C, self.switched)
return GemmOperationUniversal(self.arch, self.tile_description, A, B, C,
self.epilogue_functor, type(self.swizzling_functor),
emission_type=EmissionType.Device, direct_store=self.direct_store,
visitor=self.visitor)
class GemmOperationGrouped(GemmOperationBase):
def __init__(self, arch, tile_description: TileDescription, A: TensorDescription, B, C,
epilogue_functor, swizzling_functor=cutlass_bindings.IdentitySwizzle1, **kwargs):
super(GemmOperationGrouped, self).__init__(GemmKind.Grouped, arch, tile_description,
A, B, C, epilogue_functor, swizzling_functor, **kwargs)
assert "precompute_mode" in kwargs.keys(), "missing keyword arguement 'precompute_mode'."
self.precompute_mode = kwargs["precompute_mode"]
self.rt_module = GemmRTGrouped(self)
self.argument_type = self.rt_module.argument_type
self.epilogue_type = self.rt_module.epilogue_type
def device_op(self):
"""
Returns a new GemmOperationGrouped object that is constructed with emission type
``EmissionType.Device``. Since the device-emitted kernel does not require swapping,
        any swapping performed by the kernel-emitted operation is reversed.
:return: operation ready for device-level code emission
:rtype: GemmOperationGrouped
"""
A, B, C = GemmOperationBase.get_operands(self.A, self.B, self.C, self.switched)
return GemmOperationGrouped(
self.arch, self.tile_description, A, B, C, self.epilogue_functor,
type(self.swizzling_functor), emission_type=EmissionType.Device,
direct_store=self.direct_store, precompute_mode=self.precompute_mode, )
###################################################################################################
#
# Emits single instances of a CUTLASS device-wide operator
#
###################################################################################################
class EmitGemmUniversalInstance:
"""Responsible for emitting a CUTLASS template definition"""
def __init__(
self,
operation_suffix="",
direct_store=False,
visitor=False,
):
self.operation_suffix = operation_suffix
self.direct_store = direct_store
self.visitor = visitor
self.includes = [
"cutlass/cutlass.h",
"cutlass/numeric_types.h",
"cutlass/arch/arch.h",
"cutlass/arch/mma.h",
"cutlass/layout/matrix.h",
"cutlass/gemm/device/gemm.h",
"cutlass/gemm/device/gemm_universal_adapter.h",
"cutlass/gemm/kernel/default_gemm_universal.h",
]
if self.visitor:
self.includes += [
"gemm/gemm_universal_with_visitor.h",
"epilogue/epilogue_visitor_with_layernorm.h",
"epilogue/epilogue_visitor_generic.h",
]
if self.direct_store:
self.includes.append(
"cutlass/epilogue/threadblock/default_epilogue_direct_store.h"
)
self.gemm_template_kernel = """
// Gemm operator ${operation_name}
using ${operation_name}_base =
typename cutlass::gemm::kernel::DefaultGemmUniversal<
${element_a}, ${layout_a}, ${transform_a}, ${align_a},
${element_b}, ${layout_b}, ${transform_b}, ${align_b},
${element_c}, ${layout_c},
${element_accumulator},
${opcode_class},
${arch},
cutlass::gemm::GemmShape<${threadblock_shape_m}, ${threadblock_shape_n}, ${threadblock_shape_k}>,
cutlass::gemm::GemmShape<${warp_shape_m}, ${warp_shape_n}, ${warp_shape_k}>,
cutlass::gemm::GemmShape<${instruction_shape_m}, ${instruction_shape_n}, ${instruction_shape_k}>,
${epilogue_functor},
${swizzling_functor},
${stages},
${math_operation}
>::GemmKernel;
// Define named type
struct ${operation_name}${operation_suffix} :
public ${operation_name}_base { };
"""
self.gemm_template_device = """
// Gemm operator ${operation_name}
using DeviceKernel =
typename cutlass::gemm::device::GemmUniversal<
// Data type and layout of operand A
${element_a}, ${layout_a},
// Data type and layout of operand B
${element_b}, ${layout_b},
// Data type and layout of operand C
${element_c}, ${layout_c},
// Data type of accumulator
${element_accumulator},
// Class of operation
${opcode_class},
// Compute capability of the target kernel
${arch},
// Threadblock tile shape
cutlass::gemm::GemmShape<${threadblock_shape_m}, ${threadblock_shape_n}, ${threadblock_shape_k}>,
// Warp tile shape
cutlass::gemm::GemmShape<${warp_shape_m}, ${warp_shape_n}, ${warp_shape_k}>,
// Instruction shape
cutlass::gemm::GemmShape<${instruction_shape_m}, ${instruction_shape_n}, ${instruction_shape_k}>,
// Epilogue functor
${epilogue_functor},
// Swizzling function
${swizzling_functor},
// Number of pipeline stages
${stages},
// Alignment of operands A and B
${align_a}, ${align_b},
// Type of math operation
${math_operation},
// Complex transform types of operands A and B
${transform_a}, ${transform_b}
>;
"""
self.gemm_template_direct_store = """
// Gemm operator ${operation_name}
using ${operation_name}_default =
typename cutlass::gemm::kernel::DefaultGemmUniversal<
${element_a}, ${layout_a}, ${transform_a}, ${align_a},
${element_b}, ${layout_b}, ${transform_b}, ${align_b},
${element_c}, ${layout_c},
${element_accumulator},
${opcode_class},
${arch},
cutlass::gemm::GemmShape<${threadblock_shape_m}, ${threadblock_shape_n}, ${threadblock_shape_k}>,
cutlass::gemm::GemmShape<${warp_shape_m}, ${warp_shape_n}, ${warp_shape_k}>,
cutlass::gemm::GemmShape<${instruction_shape_m}, ${instruction_shape_n}, ${instruction_shape_k}>,
${epilogue_functor},
${swizzling_functor},
${stages},
${math_operation}
>::GemmKernel;
using ${operation_name}_base =
cutlass::gemm::kernel::GemmUniversal<
${operation_name}_default::Mma,
cutlass::epilogue::threadblock::DefaultEpilogueDirectStore<
${operation_name}_default::Epilogue
>::Epilogue,
${operation_name}_default::ThreadblockSwizzle
>;
// Define named type
struct ${operation_name}${operation_suffix} :
public ${operation_name}_base { };
"""
self.gemm_template_visitor = """
// Gemm operator ${operation_name}
using ${operation_name}_default =
typename cutlass::gemm::kernel::DefaultGemmUniversal<
${element_a}, ${layout_a}, ${transform_a}, ${align_a},
${element_b}, ${layout_b}, ${transform_b}, ${align_b},
${element_c}, ${layout_c},
${element_accumulator},
${opcode_class},
${arch},
cutlass::gemm::GemmShape<${threadblock_shape_m}, ${threadblock_shape_n}, ${threadblock_shape_k}>,
cutlass::gemm::GemmShape<${warp_shape_m}, ${warp_shape_n}, ${warp_shape_k}>,
cutlass::gemm::GemmShape<${instruction_shape_m}, ${instruction_shape_n}, ${instruction_shape_k}>,
${elementwise_epilogue_functor},
${swizzling_functor},
${stages},
${math_operation}
>::GemmKernel;
${epilogue_visitor}
using ${operation_name}_Epilogue = typename cutlass::epilogue::threadblock::EpilogueWithVisitorFromExistingEpilogue<
${operation_name}_EpilogueVisitor,
typename ${operation_name}_default::Epilogue>::Epilogue;
using ${operation_name}_base =
cutlass::gemm::kernel::GemmUniversalwithEpilogueVisitor<
${operation_name}_default::Mma,
${operation_name}_Epilogue,
${operation_name}_default::ThreadblockSwizzle
>;
// Define named type
struct ${operation_name}${operation_suffix} :
public ${operation_name}_base { };
"""
def instance_template(self):
return """
${compile_guard_start}
manifest.append(new ${gemm_kind}<
cutlass::gemm::device::GemmUniversalAdapter<${operation_name}>
>("${operation_name}"));
${compile_guard_end}
"""
def emit(self, operation):
threadblock_shape = operation.tile_description.threadblock_shape
warp_count = operation.tile_description.warp_count
warp_shape = [threadblock_shape[idx] // warp_count[idx] for idx in range(3)]
instance_layout_A, instance_layout_B, instance_layout_C = \
(operation.A.layout, operation.B.layout, operation.C.layout)
if operation.emission_type == EmissionType.Kernel:
if self.direct_store:
gemm_template = self.gemm_template_direct_store
elif self.visitor:
gemm_template = self.gemm_template_visitor
else:
gemm_template = self.gemm_template_kernel
else:
gemm_template = self.gemm_template_device
values = {
"operation_name": operation.procedural_name(),
"operation_suffix": self.operation_suffix,
"element_a": DataTypeTag[operation.A.element],
"layout_a": LayoutTag[instance_layout_A],
"element_b": DataTypeTag[operation.B.element],
"layout_b": LayoutTag[instance_layout_B],
"element_c": DataTypeTag[operation.C.element],
"layout_c": LayoutTag[instance_layout_C],
"element_accumulator": DataTypeTag[operation.accumulator_type()],
"opcode_class": OpcodeClassTag[operation.tile_description.math_instruction.opcode_class],
"arch": "cutlass::arch::Sm%d" % operation.arch,
"threadblock_shape_m": str(operation.tile_description.threadblock_shape[0]),
"threadblock_shape_n": str(operation.tile_description.threadblock_shape[1]),
"threadblock_shape_k": str(operation.tile_description.threadblock_shape[2]),
"warp_shape_m": str(warp_shape[0]),
"warp_shape_n": str(warp_shape[1]),
"warp_shape_k": str(warp_shape[2]),
"instruction_shape_m": str(operation.tile_description.math_instruction.instruction_shape[0]),
"instruction_shape_n": str(operation.tile_description.math_instruction.instruction_shape[1]),
"instruction_shape_k": str(operation.tile_description.math_instruction.instruction_shape[2]),
"swizzling_functor": operation.swizzling_functor.tag(),
"stages": str(operation.tile_description.stages),
"align_a": str(operation.A.alignment),
"align_b": str(operation.B.alignment),
"transform_a": ComplexTransformTag[operation.A.complex_transform],
"transform_b": ComplexTransformTag[operation.B.complex_transform],
"math_operation": MathOperationTag[operation.tile_description.math_instruction.math_operation],
}
if self.visitor:
values["epilogue_visitor"] = operation.epilogue_functor.emit(operation)
values["elementwise_epilogue_functor"] = operation.epilogue_functor.elementwise_functor.emit()
else:
values["epilogue_functor"] = operation.epilogue_functor.emit()
return SubstituteTemplate(gemm_template, values)
class EmitGemmGroupedInstance:
"""Responsible for emitting a CUTLASS template definition"""
def __init__(self, operation_suffix=""):
self.operation_suffix = operation_suffix
self.includes = [
"cutlass/cutlass.h",
"cutlass/numeric_types.h",
"cutlass/arch/arch.h",
"cutlass/arch/mma.h",
"cutlass/layout/matrix.h",
"cutlass/gemm/kernel/gemm_grouped.h",
"cutlass/gemm/kernel/default_gemm_grouped.h",
]
self.gemm_template_kernel = """
// Gemm operator ${operation_name}
using ${operation_name}_base =
typename cutlass::gemm::kernel::DefaultGemmGrouped<
${element_a}, ${layout_a}, ${transform_a}, ${align_a},
${element_b}, ${layout_b}, ${transform_b}, ${align_b},
${element_c}, ${layout_c},
${element_accumulator},
${opcode_class},
${arch},
cutlass::gemm::GemmShape<${threadblock_shape_m}, ${threadblock_shape_n}, ${threadblock_shape_k}>,
cutlass::gemm::GemmShape<${warp_shape_m}, ${warp_shape_n}, ${warp_shape_k}>,
cutlass::gemm::GemmShape<${instruction_shape_m}, ${instruction_shape_n}, ${instruction_shape_k}>,
${epilogue_functor},
${swizzling_functor},
${stages},
${precompute_mode},
${math_operation}
>::GemmKernel;
// Define named type
struct ${operation_name}${operation_suffix} :
public ${operation_name}_base { };
"""
self.gemm_template_device = (
self.gemm_template_kernel
+ """
using DeviceKernel = cutlass::gemm::device::GemmGrouped<${operation_name}_base>;
"""
)
def instance_template(self):
return """
${compile_guard_start}
manifest.append(new ${gemm_kind}<
cutlass::gemm::device::GemmGrouped<${operation_name}>
>("${operation_name}"));
${compile_guard_end}
"""
def emit(self, operation):
threadblock_shape = operation.tile_description.threadblock_shape
warp_count = operation.tile_description.warp_count
warp_shape = [threadblock_shape[idx] // warp_count[idx] for idx in range(3)]
instance_layout_A, instance_layout_B, instance_layout_C = \
(operation.A.layout, operation.B.layout, operation.C.layout)
# Support built-in epilogue functors or user-defined functions
epilogue_functor = operation.epilogue_functor.emit()
values = {
"operation_name": operation.procedural_name(),
"operation_suffix": self.operation_suffix,
"element_a": DataTypeTag[operation.A.element],
"layout_a": LayoutTag[instance_layout_A],
"element_b": DataTypeTag[operation.B.element],
"layout_b": LayoutTag[instance_layout_B],
"element_c": DataTypeTag[operation.C.element],
"layout_c": LayoutTag[instance_layout_C],
"element_accumulator": DataTypeTag[operation.accumulator_type()],
"opcode_class": OpcodeClassTag[operation.tile_description.math_instruction.opcode_class],
"arch": "cutlass::arch::Sm%d" % operation.arch,
"threadblock_shape_m": str(operation.tile_description.threadblock_shape[0]),
"threadblock_shape_n": str(operation.tile_description.threadblock_shape[1]),
"threadblock_shape_k": str(operation.tile_description.threadblock_shape[2]),
"warp_shape_m": str(warp_shape[0]),
"warp_shape_n": str(warp_shape[1]),
"warp_shape_k": str(warp_shape[2]),
"instruction_shape_m": str(operation.tile_description.math_instruction.instruction_shape[0]),
"instruction_shape_n": str(operation.tile_description.math_instruction.instruction_shape[1]),
"instruction_shape_k": str(operation.tile_description.math_instruction.instruction_shape[2]),
"epilogue_functor": epilogue_functor,
"swizzling_functor": operation.swizzling_functor.tag(),
"stages": str(operation.tile_description.stages),
"align_a": str(operation.A.alignment),
"align_b": str(operation.B.alignment),
"transform_a": ComplexTransformTag[operation.A.complex_transform],
"transform_b": ComplexTransformTag[operation.B.complex_transform],
"precompute_mode": SchedulerModeTag[operation.precompute_mode],
"math_operation": MathOperationTag[operation.tile_description.math_instruction.math_operation],
}
if operation.emission_type == EmissionType.Kernel:
gemm_template = self.gemm_template_kernel
else:
gemm_template = self.gemm_template_device
return SubstituteTemplate(gemm_template, values)
| cutlass-main | python/cutlass/backend/gemm_operation.py |
"""
Common data types and string names for them. This file is similar to /tools/library/scripts/library.py,
but uses the Pybind-bound CUTLASS data types as keys to many of its dictionaries.
"""
import enum
import cutlass_bindings
from cutlass import EpilogueScheduleType, KernelScheduleType, TileSchedulerType
# The following block implements enum.auto() for Python 3.5 variants that don't include it such
# as the default 3.5.2 on Ubuntu 16.04.
#
# https://codereview.stackexchange.com/questions/177309/reimplementing-pythons-enum-auto-for-compatibility
try:
from enum import auto as enum_auto
except ImportError:
__cutlass_library_auto_enum = 0
def enum_auto() -> int:
global __cutlass_library_auto_enum
i = __cutlass_library_auto_enum
__cutlass_library_auto_enum += 1
return i
ShortDataTypeNames = {
cutlass_bindings.int32: "i",
cutlass_bindings.float16: "h",
cutlass_bindings.float32: "s",
cutlass_bindings.float64: "d",
cutlass_bindings.dtype.cf32: "c",
cutlass_bindings.dtype.cf64: "z",
}
DataTypeNames = {
cutlass_bindings.dtype.b1: "b1",
cutlass_bindings.dtype.u4: "u4",
cutlass_bindings.dtype.u8: "u8",
cutlass_bindings.dtype.u16: "u16",
cutlass_bindings.dtype.u32: "u32",
cutlass_bindings.dtype.u64: "u64",
cutlass_bindings.dtype.s4: "s4",
cutlass_bindings.int8: "s8",
cutlass_bindings.dtype.s16: "s16",
cutlass_bindings.int32: "s32",
cutlass_bindings.dtype.s64: "s64",
cutlass_bindings.float16: "f16",
cutlass_bindings.bfloat16: "bf16",
cutlass_bindings.float32: "f32",
cutlass_bindings.tfloat32: "tf32",
cutlass_bindings.float64: "f64",
cutlass_bindings.dtype.cf16: "cf16",
cutlass_bindings.dtype.cbf16: "cbf16",
cutlass_bindings.dtype.cf32: "cf32",
cutlass_bindings.dtype.ctf32: "ctf32",
cutlass_bindings.dtype.cf64: "cf64",
cutlass_bindings.dtype.cu4: "cu4",
cutlass_bindings.dtype.cu8: "cu8",
cutlass_bindings.dtype.cu16: "cu16",
cutlass_bindings.dtype.cu32: "cu32",
cutlass_bindings.dtype.cu64: "cu64",
cutlass_bindings.dtype.cs4: "cs4",
cutlass_bindings.dtype.cs8: "cs8",
cutlass_bindings.dtype.cs16: "cs16",
cutlass_bindings.dtype.cs32: "cs32",
cutlass_bindings.dtype.cs64: "cs64",
}
DataTypeTag = {
cutlass_bindings.dtype.b1: "cutlass::uint1b_t",
cutlass_bindings.dtype.u4: "cutlass::uint4b_t",
cutlass_bindings.dtype.u8: "uint8_t",
cutlass_bindings.dtype.u16: "uint16_t",
cutlass_bindings.dtype.u32: "uint32_t",
cutlass_bindings.dtype.u64: "uint64_t",
cutlass_bindings.dtype.s4: "cutlass::int4b_t",
cutlass_bindings.int8: "int8_t",
cutlass_bindings.dtype.s16: "int16_t",
cutlass_bindings.int32: "int32_t",
cutlass_bindings.dtype.s64: "int64_t",
cutlass_bindings.float16: "cutlass::half_t",
cutlass_bindings.bfloat16: "cutlass::bfloat16_t",
cutlass_bindings.float32: "float",
cutlass_bindings.tfloat32: "cutlass::tfloat32_t",
cutlass_bindings.float64: "double",
cutlass_bindings.dtype.cf16: "cutlass::complex<cutlass::half_t>",
cutlass_bindings.dtype.cbf16: "cutlass::complex<cutlass::bfloat16_t>",
cutlass_bindings.dtype.cf32: "cutlass::complex<float>",
cutlass_bindings.dtype.ctf32: "cutlass::complex<cutlass::tfloat32_t>",
cutlass_bindings.dtype.cf64: "cutlass::complex<double>",
cutlass_bindings.dtype.cu4: "cutlass::complex<cutlass::uint4b_t>",
cutlass_bindings.dtype.cu8: "cutlass::complex<cutlass::uint8_t>",
cutlass_bindings.dtype.cu16: "cutlass::complex<cutlass::uint16_t>",
cutlass_bindings.dtype.cu32: "cutlass::complex<cutlass::uint32_t>",
cutlass_bindings.dtype.cu64: "cutlass::complex<cutlass::uint64_t>",
cutlass_bindings.dtype.cs4: "cutlass::complex<cutlass::int4b_t>",
cutlass_bindings.dtype.cs8: "cutlass::complex<cutlass::int8_t>",
cutlass_bindings.dtype.cs16: "cutlass::complex<cutlass::int16_t>",
cutlass_bindings.dtype.cs32: "cutlass::complex<cutlass::int32_t>",
cutlass_bindings.dtype.cs64: "cutlass::complex<cutlass::int64_t>",
}
DataTypeSize = {
cutlass_bindings.dtype.b1: 1,
cutlass_bindings.dtype.u4: 4,
cutlass_bindings.dtype.u8: 8,
cutlass_bindings.dtype.u16: 16,
cutlass_bindings.dtype.u32: 32,
cutlass_bindings.dtype.u64: 64,
cutlass_bindings.dtype.s4: 4,
cutlass_bindings.int8: 8,
cutlass_bindings.dtype.s16: 16,
cutlass_bindings.int32: 32,
cutlass_bindings.dtype.s64: 64,
cutlass_bindings.float16: 16,
cutlass_bindings.bfloat16: 16,
cutlass_bindings.float32: 32,
cutlass_bindings.tfloat32: 32,
cutlass_bindings.float64: 64,
cutlass_bindings.dtype.cf16: 32,
cutlass_bindings.dtype.cbf16: 32,
cutlass_bindings.dtype.cf32: 64,
cutlass_bindings.dtype.ctf32: 32,
cutlass_bindings.dtype.cf64: 128,
cutlass_bindings.dtype.cu4: 8,
cutlass_bindings.dtype.cu8: 16,
cutlass_bindings.dtype.cu16: 32,
cutlass_bindings.dtype.cu32: 64,
cutlass_bindings.dtype.cu64: 128,
cutlass_bindings.dtype.cs4: 8,
cutlass_bindings.dtype.cs8: 16,
cutlass_bindings.dtype.cs16: 32,
cutlass_bindings.dtype.cs32: 64,
cutlass_bindings.dtype.cs64: 128,
}
class DataTypeSizeBytes:
"""
Static class to mimic the `DataTypeSize` dictionary, but with checks for whether the
data type key is less than a full byte or a non-integer number of bytes.
"""
@staticmethod
def __class_getitem__(datatype):
"""
        Returns the size of the data type in bytes. Raises an exception if the data type
is either less than a full byte or a non-integer number of bytes in size.
:param datatype: data type to query
:return: number of bytes the data type occupies
:rtype: int
"""
bits = DataTypeSize[datatype]
if bits < 8:
raise Exception(
"Data type {} is less than one byte in size.".format(datatype)
)
elif bits % 8 != 0:
raise Exception(
"Data type {} is not an integer number of bytes.".format(datatype)
)
return bits // 8
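# ------------------------------------------------------------------------------
# Usage sketch (illustration only): DataTypeSizeBytes behaves like DataTypeSize
# but reports whole bytes and rejects types that are sub-byte or a fractional
# number of bytes wide.
def _sketch_data_type_size_bytes():
    assert DataTypeSizeBytes[cutlass_bindings.float16] == 2   # 16 bits
    assert DataTypeSizeBytes[cutlass_bindings.float32] == 4   # 32 bits
    try:
        DataTypeSizeBytes[cutlass_bindings.dtype.u4]          # 4 bits -> raises
    except Exception:
        pass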
ComplexTransformTag = {
cutlass_bindings.complex_transform.none: "cutlass::ComplexTransform::kNone",
cutlass_bindings.complex_transform.conj: "cutlass::ComplexTransform::kConjugate",
}
RealComplexBijection = [
(cutlass_bindings.float16, cutlass_bindings.dtype.cf16),
(cutlass_bindings.float32, cutlass_bindings.dtype.cf32),
(cutlass_bindings.float64, cutlass_bindings.dtype.cf64),
]
def is_complex(data_type):
for r, c in RealComplexBijection:
if data_type == c:
return True
return False
def get_complex_from_real(real_type):
for r, c in RealComplexBijection:
if real_type == r:
return c
return cutlass_bindings.dtype.invalid
def get_real_from_complex(complex_type):
for r, c in RealComplexBijection:
if complex_type == c:
return r
return cutlass_bindings.dtype.invalid
class ComplexMultiplyOp(enum.Enum):
multiply_add = enum_auto()
gaussian = enum_auto()
class MathOperation(enum.Enum):
multiply_add = enum_auto()
multiply_add_saturate = enum_auto()
xor_popc = enum_auto()
multiply_add_fast_bf16 = enum_auto()
multiply_add_fast_f16 = enum_auto()
multiply_add_fast_f32 = enum_auto()
multiply_add_complex_fast_f32 = enum_auto()
multiply_add_complex = enum_auto()
multiply_add_complex_gaussian = enum_auto()
MathOperationNames = {
MathOperation.multiply_add: "multiply_add",
MathOperation.multiply_add_saturate: "multiply_add_saturate",
MathOperation.xor_popc: "xor_popc",
MathOperation.multiply_add_fast_bf16: "multiply_add_fast_bf16",
MathOperation.multiply_add_fast_f16: "multiply_add_fast_f16",
MathOperation.multiply_add_fast_f32: "multiply_add_fast_f32",
MathOperation.multiply_add_complex_fast_f32: "multiply_add_complex_fast_f32",
MathOperation.multiply_add_complex: "multiply_add_complex",
MathOperation.multiply_add_complex_gaussian: "multiply_add_complex_gaussian",
}
MathOperationTag = {
MathOperation.multiply_add: "cutlass::arch::OpMultiplyAdd",
MathOperation.multiply_add_saturate: "cutlass::arch::OpMultiplyAddSaturate",
MathOperation.xor_popc: "cutlass::arch::OpXorPopc",
MathOperation.multiply_add_fast_bf16: "cutlass::arch::OpMultiplyAddFastBF16",
MathOperation.multiply_add_fast_f16: "cutlass::arch::OpMultiplyAddFastF16",
MathOperation.multiply_add_fast_f32: "cutlass::arch::OpMultiplyAddFastF32",
MathOperation.multiply_add_complex_fast_f32: "cutlass::arch::OpMultiplyAddComplexFastF32",
MathOperation.multiply_add_complex: "cutlass::arch::OpMultiplyAddComplex",
MathOperation.multiply_add_complex_gaussian: "cutlass::arch::OpMultiplyAddGaussianComplex",
}
LayoutTag = {
cutlass_bindings.ColumnMajor: "cutlass::layout::ColumnMajor",
cutlass_bindings.RowMajor: "cutlass::layout::RowMajor",
cutlass_bindings.layout.ColumnMajorInterleaved2: "cutlass::layout::ColumnMajorInterleaved<2>",
cutlass_bindings.layout.RowMajorInterleaved2: "cutlass::layout::RowMajorInterleaved<2>",
cutlass_bindings.ColumnMajorInterleaved32: "cutlass::layout::ColumnMajorInterleaved<32>",
cutlass_bindings.RowMajorInterleaved32: "cutlass::layout::RowMajorInterleaved<32>",
cutlass_bindings.layout.ColumnMajorInterleaved64: "cutlass::layout::ColumnMajorInterleaved<64>",
cutlass_bindings.layout.RowMajorInterleaved64: "cutlass::layout::RowMajorInterleaved<64>",
cutlass_bindings.TensorNHWC: "cutlass::layout::TensorNHWC",
cutlass_bindings.layout.TensorNDHWC: "cutlass::layout::TensorNDHWC",
cutlass_bindings.layout.TensorNCHW: "cutlass::layout::TensorNCHW",
cutlass_bindings.layout.TensorNGHWC: "cutlass::layout::TensorNGHWC",
cutlass_bindings.TensorNC32HW32: "cutlass::layout::TensorNCxHWx<32>",
cutlass_bindings.TensorC32RSK32: "cutlass::layout::TensorCxRSKx<32>",
cutlass_bindings.layout.TensorNC64HW64: "cutlass::layout::TensorNCxHWx<64>",
cutlass_bindings.layout.TensorC64RSK64: "cutlass::layout::TensorCxRSKx<64>",
}
TransposedLayout = {
cutlass_bindings.ColumnMajor: cutlass_bindings.RowMajor,
cutlass_bindings.RowMajor: cutlass_bindings.ColumnMajor,
cutlass_bindings.layout.ColumnMajorInterleaved2: cutlass_bindings.layout.RowMajorInterleaved2,
cutlass_bindings.layout.RowMajorInterleaved2: cutlass_bindings.layout.ColumnMajorInterleaved2,
cutlass_bindings.ColumnMajorInterleaved32: cutlass_bindings.RowMajorInterleaved32,
cutlass_bindings.RowMajorInterleaved32: cutlass_bindings.ColumnMajorInterleaved32,
cutlass_bindings.layout.ColumnMajorInterleaved64: cutlass_bindings.layout.RowMajorInterleaved64,
cutlass_bindings.layout.RowMajorInterleaved64: cutlass_bindings.layout.ColumnMajorInterleaved64,
cutlass_bindings.TensorNHWC: cutlass_bindings.TensorNHWC,
}
ShortLayoutTypeNames = {
cutlass_bindings.ColumnMajor: "n",
cutlass_bindings.layout.ColumnMajorInterleaved2: "n2",
cutlass_bindings.ColumnMajorInterleaved32: "n32",
cutlass_bindings.layout.ColumnMajorInterleaved64: "n64",
cutlass_bindings.RowMajor: "t",
cutlass_bindings.layout.RowMajorInterleaved2: "t2",
cutlass_bindings.RowMajorInterleaved32: "t32",
cutlass_bindings.layout.RowMajorInterleaved64: "t64",
cutlass_bindings.TensorNHWC: "nhwc",
cutlass_bindings.layout.TensorNDHWC: "ndhwc",
cutlass_bindings.layout.TensorNCHW: "nchw",
cutlass_bindings.layout.TensorNGHWC: "nghwc",
cutlass_bindings.TensorNC32HW32: "nc32hw32",
cutlass_bindings.layout.TensorNC64HW64: "nc64hw64",
cutlass_bindings.TensorC32RSK32: "c32rsk32",
cutlass_bindings.layout.TensorC64RSK64: "c64rsk64",
}
ShortComplexLayoutNames = {
(cutlass_bindings.ColumnMajor, cutlass_bindings.complex_transform.none): "n",
(cutlass_bindings.ColumnMajor, cutlass_bindings.complex_transform.conj): "c",
(cutlass_bindings.RowMajor, cutlass_bindings.complex_transform.none): "t",
(cutlass_bindings.RowMajor, cutlass_bindings.complex_transform.conj): "h",
}
OpcodeClassNames = {
cutlass_bindings.OpClass.Simt: "simt",
cutlass_bindings.OpClass.TensorOp: "tensorop",
cutlass_bindings.OpClass.WmmaTensorOp: "wmma_tensorop",
cutlass_bindings.OpClass.SparseTensorOp: "sptensorop",
}
OpcodeClassTag = {
cutlass_bindings.OpClass.Simt: "cutlass::arch::OpClassSimt",
cutlass_bindings.OpClass.TensorOp: "cutlass::arch::OpClassTensorOp",
cutlass_bindings.OpClass.WmmaTensorOp: "cutlass::arch::OpClassWmmaTensorOp",
cutlass_bindings.OpClass.SparseTensorOp: "cutlass::arch::OpClassSparseTensorOp",
}
class OperationKind(enum.Enum):
Gemm = enum_auto()
Conv2d = enum_auto()
Conv3d = enum_auto()
OperationKindNames = {
OperationKind.Gemm: "gemm",
OperationKind.Conv2d: "conv2d",
OperationKind.Conv3d: "conv3d",
}
ArchitectureNames = {
50: "maxwell",
60: "pascal",
61: "pascal",
70: "volta",
75: "turing",
80: "ampere",
90: "hopper",
}
SharedMemPerCC = {
70: 96 << 10, # 96KB of SMEM
72: 96 << 10, # 96KB of SMEM
75: 64 << 10, # 64KB of SMEM
80: 160 << 10, # 164KB of SMEM - 4KB reserved for the driver
86: 100 << 10, # 100KB of SMEM
87: 160 << 10, # 164KB of SMEM - 4KB reserved for the driver
89: 100 << 10, # 100KB of SMEM
90: 227 << 10, # 228KB of SMEM - 1KB reserved for the driver
}
class GemmKind(enum.Enum):
Gemm = enum_auto()
Sparse = enum_auto()
Universal = enum_auto()
PlanarComplex = enum_auto()
PlanarComplexArray = enum_auto()
Grouped = enum_auto()
GemmKindNames = {
GemmKind.Gemm: "gemm",
GemmKind.Sparse: "spgemm",
GemmKind.Universal: "gemm",
GemmKind.PlanarComplex: "gemm_planar_complex",
GemmKind.PlanarComplexArray: "gemm_planar_complex_array",
GemmKind.Grouped: "gemm_grouped",
}
class SwizzlingFunctor(enum.Enum):
Identity1 = enum_auto()
Identity2 = enum_auto()
Identity4 = enum_auto()
Identity8 = enum_auto()
Horizontal = enum_auto()
BatchedIdentity1 = enum_auto()
StridedDgradIdentity1 = enum_auto()
StridedDgradIdentity4 = enum_auto()
StridedDgradHorizontal = enum_auto()
SwizzlingFunctorTag = {
cutlass_bindings.IdentitySwizzle1: "cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<1>",
SwizzlingFunctor.Identity2: "cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<2>",
SwizzlingFunctor.Identity4: "cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<4>",
SwizzlingFunctor.Identity8: "cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<8>",
SwizzlingFunctor.Horizontal: "cutlass::gemm::threadblock::GemmHorizontalThreadblockSwizzle",
SwizzlingFunctor.BatchedIdentity1: "cutlass::gemm::threadblock::GemmBatchedIdentityThreadblockSwizzle",
SwizzlingFunctor.StridedDgradIdentity1: "cutlass::conv::threadblock::StridedDgradIdentityThreadblockSwizzle<1>",
SwizzlingFunctor.StridedDgradIdentity4: "cutlass::conv::threadblock::StridedDgradIdentityThreadblockSwizzle<4>",
SwizzlingFunctor.StridedDgradHorizontal: "cutlass::conv::threadblock::StridedDgradHorizontalThreadblockSwizzle",
}
class SchedulerMode(enum.Enum):
Device = enum_auto()
Host = enum_auto()
SchedulerModeTag = {
SchedulerMode.Device: "cutlass::gemm::kernel::GroupScheduleMode::kDeviceOnly",
SchedulerMode.Host: "cutlass::gemm::kernel::GroupScheduleMode::kHostPrecompute",
}
ShortSchedulerModeNames = {SchedulerMode.Device: "Device", SchedulerMode.Host: "Host"}
ConvKindTag = {
cutlass_bindings.conv.Operator.fprop: "cutlass::conv::Operator::kFprop",
cutlass_bindings.conv.Operator.dgrad: "cutlass::conv::Operator::kDgrad",
cutlass_bindings.conv.Operator.wgrad: "cutlass::conv::Operator::kWgrad",
}
ConvKindNames = {
cutlass_bindings.conv.Operator.fprop: "fprop",
cutlass_bindings.conv.Operator.dgrad: "dgrad",
cutlass_bindings.conv.Operator.wgrad: "wgrad",
}
IteratorAlgorithmTag = {
cutlass_bindings.conv.IteratorAlgorithm.analytic: "cutlass::conv::IteratorAlgorithm::kAnalytic",
cutlass_bindings.conv.IteratorAlgorithm.optimized: "cutlass::conv::IteratorAlgorithm::kOptimized",
cutlass_bindings.conv.IteratorAlgorithm.fixed_channels: "cutlass::conv::IteratorAlgorithm::kFixedChannels",
cutlass_bindings.conv.IteratorAlgorithm.few_channels: "cutlass::conv::IteratorAlgorithm::kFewChannels",
}
IteratorAlgorithmNames = {
cutlass_bindings.conv.IteratorAlgorithm.analytic: "analytic",
cutlass_bindings.conv.IteratorAlgorithm.optimized: "optimized",
cutlass_bindings.conv.IteratorAlgorithm.fixed_channels: "fixed_channels",
cutlass_bindings.conv.IteratorAlgorithm.few_channels: "few_channels",
}
class StrideSupport(enum.Enum):
Strided = enum_auto()
Unity = enum_auto()
StrideSupportTag = {
StrideSupport.Strided: "cutlass::conv::StrideSupport::kStrided",
StrideSupport.Unity: "cutlass::conv::StrideSupport::kUnity",
}
StrideSupportNames = {
StrideSupport.Strided: "",
StrideSupport.Unity: "unity_stride",
}
class ConvMode(enum.Enum):
CrossCorrelation = enum_auto()
Convolution = enum_auto()
ConvModeTag = {
ConvMode.CrossCorrelation: "cutlass::conv::Mode::kCrossCorrelation",
ConvMode.Convolution: "cutlass::conv::Mode::kConvolution",
}
class MathInstruction:
"""
Description of the lowest-level matrix-multiply-accumulate operation to be used in a kernel
"""
def __init__(
self,
instruction_shape,
element_a,
element_b,
element_accumulator,
opcode_class=cutlass_bindings.OpClass.Simt,
math_operation=MathOperation.multiply_add,
):
"""
:param instruction_shape: size of the [M, N, K] dimensions of the instruction
:type instruction_shape: list or tuple
:param element_a: data type of operand A
:param element_b: data type of operand B
:param element_accumulator: data type used in accumulation
:param opcode_class: higher-level class of the instruction (e.g., SIMT or Tensor Core)
:type opcode_class: cutlass_bindings.OpClass
:param math_operation: the type of low-level operation to be performed (e.g., multiply accumulate)
:type math_operation: MathOperation
"""
self.instruction_shape = instruction_shape
self.element_a = element_a
self.element_b = element_b
self.element_accumulator = element_accumulator
self.opcode_class = opcode_class
self.math_operation = math_operation
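# Illustrative sketch (not part of the original module): a tensor-op MathInstruction for
# f16 inputs accumulating in f32. The [16, 8, 16] shape and the data types are example
# values only; any instruction shape supported by the target architecture can be described
# the same way.
def _example_tensorop_math_instruction() -> "MathInstruction":
    """Build a hypothetical f16-in / f32-accumulate tensor-op instruction description."""
    return MathInstruction(
        instruction_shape=[16, 8, 16],
        element_a=cutlass_bindings.float16,
        element_b=cutlass_bindings.float16,
        element_accumulator=cutlass_bindings.float32,
        opcode_class=cutlass_bindings.OpClass.TensorOp,
        math_operation=MathOperation.multiply_add,
    )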
class TileDescription:
"""
Description of a tile of computation to be performed in the kernel, encompassing threadblock, cluster, and warp shapes,
stage count, and math instruction specification
"""
def __init__(
self,
threadblock_shape,
stages,
warp_count,
math_instruction,
cluster_shape=[1, 1, 1],
kernel_schedule: KernelScheduleType = None,
epilogue_schedule: EpilogueScheduleType = None,
tile_scheduler: TileSchedulerType = None,
):
"""
:param threadblock_shape: shape of a threadblock tile
:type threadblock_shape: list or tuple
:param stages: number of pipeline stages in the operation. For SM90 kernels, this can be set to `None` and the maximum
number of stages that can be supported for an operation on a given architecture will be computed at a later time
:type stages: int or None
:param warp_count: number of warps in each [M, N, K] dimension of a threadblock tile
:type warp_count: list, tuple, or None
:param math_instruction: specification of the instruction type and shape to be performed and the types of its operands
:type math_instruction: MathInstruction
:param cluster_shape: number of threadblocks in the [X, Y, Z] dimensions of a threadblock cluster
:param kernel_schedule: type of kernel schedule to use (only available for SM90+)
:type kernel_schedule: cutlass.KernelScheduleType
:param epilogue_schedule: type of epilogue schedule to use (only available for SM90+)
:type epilogue_schedule: cutlass.EpilogueScheduleType
:param tile_scheduler: type of tile scheduler to use (only available for SM90+)
:type tile_scheduler: cutlass.TileSchedulerType
"""
if ((kernel_schedule is None and epilogue_schedule is not None) or
(kernel_schedule is not None and epilogue_schedule is None)):
raise Exception("Kernel and epilogue schedule must either both be Auto or neither be Auto.")
self.threadblock_shape = threadblock_shape
self.cluster_shape = cluster_shape
self.kernel_schedule = kernel_schedule
self.epilogue_schedule = epilogue_schedule
self.tile_scheduler = tile_scheduler
self.stages = stages
self.math_instruction = math_instruction
self.instruction_shape = math_instruction.instruction_shape
# Number of warps along x, y, z directions
self.warp_count = warp_count
def clone_and_update(self, td: dict):
attrs = {
"cluster_shape": None,
"threadblock_shape": None,
"warp_count": None,
"stages": None,
"instruction_shape": None,
"kernel_schedule": None,
"epilogue_schedule": None,
"tile_scheduler": None
}
for key in attrs.keys():
if key in td.keys():
attrs[key] = td[key]
else:
attrs[key] = getattr(self, key)
mi = MathInstruction(
attrs["instruction_shape"],
self.math_instruction.element_a,
self.math_instruction.element_b,
self.math_instruction.element_accumulator,
self.math_instruction.opcode_class,
self.math_instruction.math_operation
)
return TileDescription(
attrs["threadblock_shape"], attrs["stages"],
attrs["warp_count"], mi, attrs["cluster_shape"],
attrs["kernel_schedule"], attrs["epilogue_schedule"]
)
@property
def num_threads(self):
"""
Returns the number of threads in the threadblock
:return: number of threads in the threadblock
:rtype: int or None (if warp count is None)
"""
if self.warp_count is not None:
threads = 32
for cnt in self.warp_count:
threads *= cnt
return threads
return None
def procedural_name(self):
"""
Returns a name identifying the tile description
:return: name identifying the tile description
:rtype: str
"""
emit_stages = 0 if self.stages is None else self.stages
name = "%dx%dx%d_%dx%d_%dx%d" % (
self.cluster_shape[0],
self.cluster_shape[1],
self.cluster_shape[2],
self.threadblock_shape[0],
self.threadblock_shape[1],
self.threadblock_shape[2],
emit_stages
)
return name
def __str__(self):
"""
Returns a string containing each of the tile description's values
:return: contents of tile description
:rtype: str
"""
if self.kernel_schedule is not None:
kschedule = self.kernel_schedule
else:
kschedule = KernelScheduleType.ScheduleAuto
if self.epilogue_schedule is not None:
eschedule = self.epilogue_schedule
else:
eschedule = EpilogueScheduleType.ScheduleAuto
if self.tile_scheduler is not None:
tschedule = self.tile_scheduler.name
else:
tschedule = "None"
return f"""
{{
ClusterShape: {self.cluster_shape}
ThreadblockShape: {self.threadblock_shape}
WarpCount: {self.warp_count}
Stages: {self.stages if self.stages is not None else 'Auto'}
InstructionShape: {self.math_instruction.instruction_shape}
Kernel schedule: {kschedule.name}
Epilogue schedule: {eschedule.name}
TileScheduler: {tschedule}
}}"""
class TensorDescription:
def __init__(self, element, layout, alignment=1,
complex_transform=cutlass_bindings.complex_transform.none):
self.element = element
self.layout = layout
self.alignment = min(128 // DataTypeSize[self.element], alignment)
self.complex_transform = complex_transform
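# Illustrative sketch (values are examples only): the constructor above clamps the requested
# alignment so that a single access never exceeds 128 bits; an f64 operand asking for
# alignment 16 is therefore reduced to min(128 // 64, 16) == 2.
def _example_alignment_clamp() -> int:
    """Show the 128-bit alignment clamp for a hypothetical f64 row-major operand."""
    desc = TensorDescription(cutlass_bindings.float64, cutlass_bindings.RowMajor, alignment=16)
    return desc.alignment  # == 2, because an f64 element is 64 bits wide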
def CalculateSmemUsagePerStage(operation):
"""
Returns the amount of shared memory in bytes consumed in a single stage of a kernel.
:param operation: operation for which the per-stage shared memory usage should be computed
:type operation: cutlass.backend.Operation
:return: number of bytes of shared memory consumed by a single stage
:rtype: int
"""
m, n, k = operation.tile_description.threadblock_shape
if operation.operation_kind == OperationKind.Gemm:
stage_barrier_bytes = 32
return (
(DataTypeSize[operation.A.element] * m * k // 8)
+ (DataTypeSize[operation.B.element] * k * n // 8)
+ stage_barrier_bytes
)
else:
raise Exception("Unsupported operation kind {}.".format(operation.operation_kind))
def CalculateSmemUsage(operation):
"""
Returns the amount of shared memory in bytes consumed by a kernel.
:param operation: operation for which the total shared memory usage should be computed
:type operation: cutlass.backend.Operation
:return: number of bytes of shared memory consumed across all stages of the kernel
:rtype: int
"""
return operation.tile_description.stages * CalculateSmemUsagePerStage(operation)
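# Illustrative arithmetic sketch (hypothetical tile, standalone helper): for a GEMM with
# 16-bit A/B operands, a 128x128x32 threadblock, and 4 stages, the formulas above give
# (16*128*32)//8 + (16*32*128)//8 + 32 = 16416 bytes per stage, roughly 64 KB in total.
def _example_gemm_smem_bytes(bits_ab: int = 16, m: int = 128, n: int = 128, k: int = 32, stages: int = 4) -> int:
    """Reproduce the per-stage shared memory arithmetic for a hypothetical GEMM tile."""
    per_stage = (bits_ab * m * k // 8) + (bits_ab * k * n // 8) + 32  # A tile + B tile + stage barrier
    return stages * per_stage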
class ApiVersion(enum.Enum):
"""
Differentiate between CUTLASS 2.x and 3.x API versions
"""
v2x = enum_auto()
v3x = enum_auto()
def api_version(arch, opclass, datatype):
"""
Returns whether the architecture, opcode class, and datatype in question require using CUTLASS 2.x
or 3.x for code emission.
:param arch: compute capability of device on which to run
:type arch: int
:param opclass: class of the operation being performed
:type opclass: cutlass_bindings.OpClass
:param datatype: data type to be used in operation (assumes that ElementA and ElementB are the same)
:return: API version to be used in code emission
:rtype: ApiVersion
"""
if (arch >= 90 and
opclass == cutlass_bindings.OpClass.TensorOp and
(datatype != cutlass_bindings.float64)):
return ApiVersion.v3x
else:
return ApiVersion.v2x
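# Illustrative usage sketch (values are examples only): SM90 tensor-op kernels on non-f64
# data resolve to the CUTLASS 3.x emission path; everything else falls back to 2.x.
def _example_api_selection() -> bool:
    """Return True when an SM90 f16 tensor-op kernel selects the 3.x API."""
    return api_version(90, cutlass_bindings.OpClass.TensorOp, cutlass_bindings.float16) == ApiVersion.v3x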
class EmissionType(enum.Enum):
"""
Tags for whether to emit a kernel- or device-level operation
"""
Kernel = enum_auto()
Device = enum_auto()
| cutlass-main | python/cutlass/backend/library.py |
################################################################################
#
# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
################################################################################
GemmOperation = "Union[GemmOperationUniversal, GemmOperationGrouped]"
Tensor = "Union[cuda.CUdeviceptr, np.ndarray, torch.Tensor, cp.ndarray]"
| cutlass-main | python/cutlass/backend/type_hint.py |
################################################################################
#
# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
################################################################################
from typing import Union
import ctypes
from cuda import cuda, cudart
import cutlass_bindings
import numpy as np
from cutlass.backend.c_types import MatrixCoord_, TensorRef2D_, get_reduction_params
from cutlass.backend.frontend import NumpyFrontend, TorchFrontend
from cutlass.backend.library import (
DataTypeNames,
DataTypeSize,
DataTypeTag,
TensorDescription,
)
from cutlass.backend.operation import ExecutableOperation, LaunchConfiguration
from cutlass.backend.utils.software import CheckPackages, SubstituteTemplate
if CheckPackages().check_torch():
import torch
class ReductionOperation:
pass
class ReductionArguments:
"""
Arguments of reduction
"""
def __init__(
self,
operation: ReductionOperation,
problem_size: "list[int]",
partitions: int,
workspace: cuda.CUdeviceptr,
destination: "Union[cuda.CUdeviceptr, np.ndarray, torch.Tensor]",
source: "Union[cuda.CUdeviceptr, np.ndarray, torch.Tensor]",
**kwargs,
) -> None:
# tensor_C can be interpreted as the bias with bias=True in keyword args
if "bias" in kwargs.keys():
self.bias = kwargs["bias"]
else:
# by default, tensor_C is not bias
self.bias = False
self.operation = operation
#: pointer to the workspace
self.ptr_workspace = workspace
#: number of split-k partitions
self.partitions = partitions
if isinstance(destination, np.ndarray):
self.host_D = destination
self.destination_buffer = NumpyFrontend.argument(destination, True)
self.source_buffer = NumpyFrontend.argument(source, False)
self.ptr_destination = cuda.CUdeviceptr(self.destination_buffer.ptr)
self.ptr_source = cuda.CUdeviceptr(self.source_buffer.ptr)
elif CheckPackages().check_torch() and isinstance(destination, torch.Tensor):
self.ptr_destination = TorchFrontend.argument(destination)
self.ptr_source = TorchFrontend.argument(source)
elif isinstance(destination, cuda.CUdeviceptr):
self.ptr_destination = destination
self.ptr_source = source
else:
raise TypeError("unknown Type")
self.problem_size = MatrixCoord_(problem_size[0], problem_size[1])
self.partition_stride = (
problem_size[0] * problem_size[1] * DataTypeSize[operation.C.element] // 8
)
if "output_op" in kwargs.keys():
self.output_op = kwargs["output_op"]
else:
self.output_op = self.operation.epilogue_type(1.0, 0.0)
# get arguments
self.get_arguments()
@staticmethod
def get_tensor_ref(
extent: "tuple[int]",
device_ptr: cuda.CUdeviceptr,
layout: cutlass_bindings.layout,
):
if layout == cutlass_bindings.RowMajor:
return TensorRef2D_(int(device_ptr), extent[1])
else:
raise ValueError("unknown layout type")
def get_arguments(self):
ref_workspace = ReductionArguments.get_tensor_ref(
extent=[
self.problem_size.row,
self.problem_size.column,
],
device_ptr=self.ptr_workspace,
layout=cutlass_bindings.RowMajor,
)
if self.bias:
ref_source = ReductionArguments.get_tensor_ref(
extent=[0, 0],
device_ptr=self.ptr_source,
layout=cutlass_bindings.RowMajor,
)
else:
ref_source = ReductionArguments.get_tensor_ref(
extent=[
self.problem_size.row,
self.problem_size.column,
],
device_ptr=self.ptr_source,
layout=cutlass_bindings.RowMajor,
)
ref_destination = ReductionArguments.get_tensor_ref(
extent=[
self.problem_size.row,
self.problem_size.column,
],
device_ptr=self.ptr_destination,
layout=cutlass_bindings.RowMajor,
)
self.c_arguments = self.operation.argument_type(
self.problem_size,
self.partitions,
self.partition_stride,
ref_workspace,
ref_destination,
ref_source,
self.output_op,
)
params_ = self.operation.rt_module.get_args(ctypes.byref(self.c_arguments))
self.host_workspace = bytearray(params_.contents)
def sync(self):
(err,) = cudart.cudaDeviceSynchronize()
if err != cuda.CUresult.CUDA_SUCCESS:
raise RuntimeError("CUDA Error %s" % str(err))
if hasattr(self, "host_D"):
(err,) = cuda.cuMemcpyDtoH(
self.host_D,
self.ptr_destination,
self.host_D.size * self.host_D.itemsize,
)
if err != cuda.CUresult.CUDA_SUCCESS:
raise RuntimeError("CUDA Error %s" % str(err))
def free(self):
if hasattr(self, "destination_buffer"):
del self.destination_buffer
if hasattr(self, "source_buffer"):
del self.source_buffer
class ReductionRT(ExecutableOperation):
"""
ReductionRT manages the CUTLASS runtime components for reduction
"""
KernelTemplate = r"""
extern "C"
__global__ void
${operation_name}(${operation_name}${operation_suffix}::Params params) {
// Dynamic shared memory base pointer
extern __shared__ int SharedStorageBase[];
// Declare pointer to dynamic shared memory.
${operation_name}${operation_suffix}::SharedStorage *shared_storage =
reinterpret_cast<${operation_name}${operation_suffix}::SharedStorage *>(SharedStorageBase);
${operation_name}${operation_suffix} op;
op(params, *shared_storage);
}
"""
HostTemplate = r"""
extern "C" {
// Get the size of params in bytes
int ${operation_name}_get_param_size(){
return sizeof(${operation_name}${operation_suffix}::Params);
}
// Get the size of dynamic shared memory in bytes
int ${operation_name}_shared_memory_size() {
return int(sizeof(${operation_name}${operation_suffix}::SharedStorage));
}
// Get the params as byte array
char* ${operation_name}_get_params(${operation_name}${operation_suffix}::Params* params){
char *bytes = ((char*)(params));
char *output = new char[sizeof(${operation_name}${operation_suffix}::Params)];
for (unsigned int i = 0; i < sizeof(${operation_name}${operation_suffix}::Params); i ++)
output[i] = bytes[i];
return output;
}
}
"""
def __init__(self, operation: ReductionOperation):
super().__init__(operation)
self.operation: ReductionOperation = operation
self.emitter = EmitReductionInstance("_type")
self.elements_per_access = self.operation.count
(
self.argument_type,
self.epilogue_type,
) = get_reduction_params(operation.epilogue_functor)
self.argtype = [ctypes.POINTER(self.argument_type)]
def emit(self):
return self.emitter.emit(self.operation)
def plan(self, arguments: ReductionArguments):
block_shape = [
self.operation.shape.column() // self.elements_per_access,
self.operation.shape.row(),
1,
]
grid_shape = [
(arguments.problem_size.row + self.operation.shape.row() - 1)
// self.operation.shape.row(),
(arguments.problem_size.column + self.operation.shape.column() - 1)
// self.operation.shape.column(),
1,
]
return LaunchConfiguration(
grid_shape,
block_shape,
self.shared_memory_capacity,
)
def initialize(self):
(err,) = cuda.cuFuncSetAttribute(
self.kernel,
attrib=cuda.CUfunction_attribute.CU_FUNC_ATTRIBUTE_MAX_DYNAMIC_SHARED_SIZE_BYTES,
value=self.shared_memory_capacity,
)
if err != cuda.CUresult.CUDA_SUCCESS:
raise RuntimeError("Cuda Error: {}".format(err))
class ReductionOperation:
"""
CUTLASS Reduction Operation
shape: shape of CTA
epilogue_functor: output operator
"""
def __init__(
self,
shape: cutlass_bindings.MatrixCoord,
C: TensorDescription,
element_accumulator,
element_workspace=None,
element_compute=None,
epilogue_functor=None,
count: int = 1,
partitions_per_stage: int = 4,
) -> None:
"""Constructor"""
self.shape = shape
#: epilogue functor (default: LinearCombination)
self.epilogue_functor = epilogue_functor
#: datatype of accumulator
self.element_accumulator = element_accumulator
if element_workspace is None:
#: datatype of workspace
self.element_workspace = element_accumulator
else:
#: datatype of workspace
self.element_workspace = element_workspace
if element_compute is None:
#: datatype of workspace
self.element_compute = element_accumulator
else:
#: datatype of workspace
self.element_compute = element_compute
#: datatype of output
self.element_output = C.element
#: operand C
self.C: TensorDescription = C
#: reduce op processing size
self.count: int = count
#: number of partitions to reduce per stage
self.partitions_per_stage: int = partitions_per_stage
self.rt_module: ReductionRT = ReductionRT(self)
self.argument_type = self.rt_module.argument_type
self.epilogue_type = self.rt_module.epilogue_type
#
def extended_name(self):
extend_name = "${element_workspace}_${element_accumulator}_${element_compute}_${element_output}"
return SubstituteTemplate(
extend_name,
{
"element_workspace": DataTypeNames[self.element_workspace],
"element_accumulator": DataTypeNames[self.element_accumulator],
"element_compute": DataTypeNames[self.element_compute],
"element_output": DataTypeNames[self.element_output],
},
)
#
def configuration_name(self):
"""The full procedural name indicates architecture, extended name, tile size"""
configuration_name = "cutlass_reduce_split_k_${extended_name}_${threadblock}"
threadblock = "%dx%d" % (
self.shape.row(),
self.shape.column(),
)
return SubstituteTemplate(
configuration_name,
{
"extended_name": self.extended_name(),
"threadblock": threadblock,
},
)
#
def procedural_name(self):
"""The full procedural name indicates architeture, extended name, tile size"""
return self.configuration_name()
def run(self, arguments: ReductionArguments) -> cuda.CUresult:
"""
Configure and launch the cuda kernel with input arguments
"""
# get launch configuration
launch_config = self.rt_module.plan(arguments)
# get the host and device workspace
host_workspace = arguments.host_workspace
device_workspace = None
# launch the kernel
err = self.rt_module.run(
host_workspace,
device_workspace,
launch_config,
)
if err != cuda.CUresult.CUDA_SUCCESS:
raise RuntimeError("CUDA Error %s" % str(err))
return err
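# Illustrative naming sketch (values are examples only): for an all-f32 configuration with a
# 4x128 CTA shape, extended_name() and configuration_name() above resolve to
# "cutlass_reduce_split_k_f32_f32_f32_f32_4x128". The helper below merely re-runs the same
# template substitution with those example values; it is not part of the operation class.
def _example_reduction_name() -> str:
    """Substitute example values into the reduction naming template."""
    return SubstituteTemplate(
        "cutlass_reduce_split_k_${extended_name}_${threadblock}",
        {"extended_name": "f32_f32_f32_f32", "threadblock": "4x128"},
    )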
class EmitReductionInstance:
def __init__(self, operation_suffix="") -> None:
self.operation_suffix = operation_suffix
self.includes = [
"cutlass/cutlass.h",
"cutlass/numeric_types.h",
"cutlass/arch/arch.h",
"cutlass/arch/mma.h",
"cutlass/layout/matrix.h",
"cutlass/gemm/device/gemm.h",
"cutlass/gemm/device/gemm_universal_adapter.h",
"cutlass/gemm/kernel/default_gemm_universal.h",
"cutlass/reduction/kernel/reduce_split_k.h",
"cutlass/reduction/thread/reduction_operators.h",
]
self.template = """
// Reduction kernel instance
using ${operation_name}_base =
typename cutlass::reduction::kernel::ReduceSplitK<
cutlass::MatrixShape<${shape_row}, ${shape_column}>,
${epilogue_functor},
cutlass::reduction::thread::ReduceAdd<
${element_accumulator},
${element_output},
${count}>,
${partition_per_stage}>;
struct ${operation_name}${operation_suffix}:
public ${operation_name}_base { };
"""
def emit(self, operation: ReductionOperation):
epilogue_vector_length = int(
min(
operation.C.alignment * DataTypeSize[operation.C.element],
128,
)
/ DataTypeSize[operation.C.element]
)
values = {
"operation_name": operation.configuration_name(),
"operation_suffix": self.operation_suffix,
"shape_row": str(operation.shape.row()),
"shape_column": str(operation.shape.column()),
"epilogue_functor": operation.epilogue_functor.emit(),
"element_output": DataTypeTag[operation.element_output],
"epilogue_vector_length": str(epilogue_vector_length),
"element_accumulator": DataTypeTag[operation.element_accumulator],
"element_compute": DataTypeTag[operation.element_compute],
"element_workspace": DataTypeTag[operation.element_workspace],
"count": str(operation.count),
"partition_per_stage": str(operation.partitions_per_stage),
}
return SubstituteTemplate(self.template, values)
| cutlass-main | python/cutlass/backend/reduction_operation.py |
#################################################################################################
#
# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
from typing import Union
from cuda import cuda, cudart
import numpy as np
from cutlass.backend.frontend import CupyFrontend, NumpyFrontend, TorchFrontend
from cutlass.backend.utils.software import CheckPackages
torch_available = CheckPackages().check_torch()
if torch_available:
import torch
cupy_available = CheckPackages().check_cupy()
if cupy_available:
import cupy as cp
class ArgumentBase:
"""
Base class for operation arguments
"""
def __init__(
self,
A: "Union[cuda.CUdeviceptr, np.ndarray, torch.Tensor, cp.ndarray]",
B: "Union[cuda.CUdeviceptr, np.ndarray, torch.Tensor, cp.ndarray]",
C: "Union[cuda.CUdeviceptr, np.ndarray, torch.Tensor, cp.ndarray]",
D: "Union[cuda.CUdeviceptr, np.ndarray, torch.Tensor, cp.ndarray]",
**kwargs,
) -> None:
# tensor_C can be interpreted as the bias with bias=True in keyword args
if "bias" in kwargs.keys():
self.bias = kwargs["bias"]
else:
# by default, tensor_C is not bias
self.bias = False
# preprocessing input tensors
if isinstance(A, np.ndarray):
self.host_D = D
self.buffer_A = NumpyFrontend.argument(A, False)
self.buffer_B = NumpyFrontend.argument(B, False)
self.buffer_C = NumpyFrontend.argument(C, False)
self.buffer_D = NumpyFrontend.argument(D, True)
self.ptr_A = self.buffer_A.ptr
self.ptr_B = self.buffer_B.ptr
self.ptr_C = self.buffer_C.ptr
self.ptr_D = self.buffer_D.ptr
# number of elements in C
self.tensor_c_numel = C.size
elif torch_available and isinstance(A, torch.Tensor):
self.ptr_A = TorchFrontend.argument(A)
self.ptr_B = TorchFrontend.argument(B)
self.ptr_C = TorchFrontend.argument(C)
self.ptr_D = TorchFrontend.argument(D)
# number of elements in C
self.tensor_c_numel = C.numel()
elif isinstance(A, cuda.CUdeviceptr):
self.ptr_A = A
self.ptr_B = B
self.ptr_C = C
self.ptr_D = D
elif cupy_available and isinstance(A, cp.ndarray):
self.ptr_A = CupyFrontend.argument(A)
self.ptr_B = CupyFrontend.argument(B)
self.ptr_C = CupyFrontend.argument(C)
self.ptr_D = CupyFrontend.argument(D)
# number of elements in C
self.tensor_c_numel = C.size
else:
raise TypeError("Unsupported Frontend. Only support numpy and torch")
def sync(self, stream_sync=True):
if stream_sync:
(err,) = cudart.cudaDeviceSynchronize()
if err != cuda.CUresult.CUDA_SUCCESS:
raise RuntimeError("CUDA Error %s" % str(err))
if hasattr(self, "host_D"):
(err,) = cuda.cuMemcpyDtoH(
self.host_D,
self.ptr_D,
self.host_D.size * self.host_D.itemsize,
)
if err != cuda.CUresult.CUDA_SUCCESS:
raise RuntimeError("CUDA Error %s" % str(err))
| cutlass-main | python/cutlass/backend/arguments.py |
################################################################################
#
# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
################################################################################
from cuda import cuda
import numpy as np
from cutlass.backend.memory_manager import device_mem_alloc, todevice
from cutlass.backend.utils.software import CheckPackages
if CheckPackages().check_torch():
import torch
if CheckPackages().check_cupy():
import cupy as cp
class NumpyFrontend:
"""
Frontend node for numpy
"""
@staticmethod
def argument(np_tensor: "np.ndarray", is_output: "bool") -> cuda.CUdeviceptr:
"""Convert the input numpy tensor to CUDA device pointer
:param np_tensor: input numpy nd array
:param is_output: whether the tensor is output
:return: CUDA device pointer
"""
# copy the data to device
if is_output:
return device_mem_alloc(np_tensor.size * np_tensor.itemsize)
else:
return todevice(np_tensor)
class TorchFrontend:
"""
Frontend node for torch
"""
@staticmethod
def argument(torch_tensor: "torch.Tensor") -> cuda.CUdeviceptr:
"""Convert the input torch tensor to CUDA device pointer
:param torch_tensor: input torch tensor
:return: CUDA device pointer
"""
# check the device of torch_tensor
if not torch_tensor.is_cuda:
torch_tensor = torch_tensor.to("cuda")
return cuda.CUdeviceptr(torch_tensor.data_ptr())
class CupyFrontend:
"""
Frontend node for cupy
"""
@staticmethod
def argument(cupy_ndarray: "cp.ndarray"):
return cuda.CUdeviceptr(int(cupy_ndarray.data.ptr))
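# Illustrative usage sketch (hypothetical array; requires an initialized CUDA context when
# called): the numpy frontend stages host data on the device, while the torch and cupy
# frontends wrap an existing device allocation in a cuda.CUdeviceptr.
def _example_stage_numpy_input(host_tensor: "np.ndarray"):
    """Stage a host numpy array on the device as a kernel input; callers read the returned buffer's `.ptr`."""
    return NumpyFrontend.argument(host_tensor, is_output=False)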
| cutlass-main | python/cutlass/backend/frontend.py |
# module-wide variables
import os
from cutlass.backend.arguments import *
from cutlass.backend.c_types import *
from cutlass.backend.compiler import ArtifactManager
from cutlass.backend.conv2d_operation import *
from cutlass.backend.epilogue import *
from cutlass.backend.frontend import *
from cutlass.backend.gemm_operation import *
from cutlass.backend.library import *
from cutlass.backend.memory_manager import PoolMemoryManager
from cutlass.backend.operation import *
from cutlass.backend.parser import *
from cutlass.backend.reduction_operation import *
from cutlass.backend.tensor_ref import *
from cutlass.backend.type_hint import *
from cutlass.backend.utils import *
from cutlass.backend.utils.device import device_cc
from cutlass.backend.utils.software import (
CheckPackages,
SubstituteTemplate,
device_sm_count,
get_memory_pool,
)
compiler = ArtifactManager()
| cutlass-main | python/cutlass/backend/__init__.py |
################################################################################
#
# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
################################################################################
# from typeguard import typechecked
import ctypes
from typing import Union
from cuda import cuda
import cutlass_bindings
import numpy as np
from cutlass.backend.arguments import ArgumentBase
from cutlass.backend.c_types import Conv2DProblemSize, TensorRef_, get_conv2d_arguments
from cutlass.backend.library import (
EmissionType,
ConvKindNames,
ConvKindTag,
DataTypeNames,
DataTypeSize,
DataTypeTag,
IteratorAlgorithmNames,
IteratorAlgorithmTag,
LayoutTag,
MathOperation,
MathOperationTag,
OpcodeClassNames,
OpcodeClassTag,
OperationKind,
ShortDataTypeNames,
ShortLayoutTypeNames,
StrideSupport,
StrideSupportTag,
TensorDescription,
TileDescription,
get_complex_from_real,
)
from cutlass.backend.memory_manager import device_mem_alloc
from cutlass.backend.operation import ExecutableOperation, LaunchConfiguration
from cutlass.backend.tensor_ref import TensorRef
from cutlass.backend.utils.software import CheckPackages, SubstituteTemplate
if CheckPackages().check_torch():
import torch
# @typechecked
class Conv2dArguments(ArgumentBase):
"""
Argument wrapper for Conv2d. It encodes problem information and
user-provided tensors into the kernel's argument.
:param operation: the Conv2d operation to take the argument
:type operation: :class:`cutlass.backend.Conv2dOperation`
:param problem_size: the Conv2d problem size
:type problem_size: :class:`cutlass_bindings.conv.Conv2dProblemSize`
:param A: tensor A
:type A: cuda.CUdeviceptr | numpy.ndarray | torch.Tensor | cupy.ndarray
:param B: tensor B
:type B: cuda.CUdeviceptr | numpy.ndarray | torch.Tensor | cupy.ndarray
:param C: tensor C
:type C: cuda.CUdeviceptr | numpy.ndarray | torch.Tensor | cupy.ndarray
:param D: tensor D
:type D: cuda.CUdeviceptr | numpy.ndarray | torch.Tensor | cupy.ndarray
:param split_k_mode: conv2d split K mode, defaults to cutlass_bindings.conv.SplitKMode.Serial
:type split_k_mode: cutlass_bindings.conv.SplitKMode, optional
:param output_op: output operator, optional
:type output_op: :class:`cutlass.backend.LinearCombinationFunctorArguments`
"""
def __init__(
self,
operation: "Conv2dOperation",
problem_size: "cutlass_bindings.conv.Conv2dProblemSize",
A: "Union[cuda.CUdeviceptr, np.ndarray, torch.Tensor]",
B: "Union[cuda.CUdeviceptr, np.ndarray, torch.Tensor]",
C: "Union[cuda.CUdeviceptr, np.ndarray, torch.Tensor]",
D: "Union[cuda.CUdeviceptr, np.ndarray, torch.Tensor]",
split_k_mode: "cutlass_bindings.conv.SplitKMode" = cutlass_bindings.conv.SplitKMode.Serial,
**kwargs,
) -> None:
self.operation = operation
#: convolution kind
self.conv_kind: cutlass_bindings.conv.Operator = operation.conv_kind
self.layout_A: cutlass_bindings.layout = operation.A.layout
self.layout_B: cutlass_bindings.layout = operation.B.layout
self.layout_C: cutlass_bindings.layout = operation.C.layout
self.element_A = operation.A.element
self.element_B = operation.B.element
self.element_C = operation.C.element
if self.layout_C == cutlass_bindings.TensorNC32HW32:
B = self.reorder_tensor_B(B, problem_size)
super().__init__(A, B, C, D, **kwargs)
# preprocessing output ops
if "split_k_slices" in kwargs.keys() and kwargs["split_k_slices"] > 1:
self.split_k_mode = split_k_mode
self.split_k_slices = kwargs["split_k_slices"]
else:
self.split_k_mode = cutlass_bindings.conv.SplitKMode.Serial
self.split_k_slices = 1
if "output_op" in kwargs.keys() and self.split_k_mode != cutlass_bindings.conv.SplitKMode.Parallel:
self.output_op = kwargs["output_op"]
else:
self.output_op = self.operation.epilogue_type(1.0, 0.0)
#: problem_size
self.problem_size: cutlass_bindings.conv.Conv2dProblemSize = problem_size
self.problem_size.split_k_slices = self.split_k_slices
if hasattr(self, "tensor_c_numel"):
c_coord = cutlass_bindings.conv.implicit_gemm_tensor_c_extent(
self.conv_kind, problem_size)
if self.tensor_c_numel == c_coord.at(3) and self.tensor_c_numel < c_coord.size():
self.bias = True
#
# initialize the argument
#
self.initialize()
# @typechecked
def reorder_tensor_B(self, tensor_B: "np.ndarray",
problem_size: "cutlass_bindings.conv.Conv2dProblemSize"):
"""
Reorder tensor_B for interleaved layout
:param tensor_B: input tensor B
:type tensor_B: numpy.ndarray
:param problem_size: Conv2d problem size
:type problem_size: :class:`cutlass_bindings.conv.Conv2dProblemSize`
:return: reordered tensor B
:rtype: numpy.ndarray
"""
reordered_tensor_B = np.empty_like(tensor_B)
tensor_ref_B = self.get_tensor_ref(
tensor_B, self.element_B, self.layout_B, problem_size, "b")
reordered_tensor_ref_B = self.get_tensor_ref(
reordered_tensor_B, self.element_B, self.layout_B, problem_size, "b")
cutlass_bindings.conv.host.reorder_convK(
reordered_tensor_ref_B, tensor_ref_B, self.conv_kind, problem_size)
return reordered_tensor_B
def get_tensor_ref(
self, tensor, dtype, tensor_layout, problem_size, operand):
if operand == "a":
tensor_coord = cutlass_bindings.conv.implicit_gemm_tensor_a_extent(
self.conv_kind, problem_size)
elif operand == "b":
tensor_coord = cutlass_bindings.conv.implicit_gemm_tensor_b_extent(
self.conv_kind, problem_size)
elif operand in ["c", "d"]:
tensor_coord = cutlass_bindings.conv.implicit_gemm_tensor_c_extent(
self.conv_kind, problem_size)
else:
raise ValueError("unknown operand: " + operand)
# Zero stride trick
if operand == "c" and self.bias:
tensor_coord = cutlass_bindings.Tensor4DCoord(0, 0, 0, 0)
layout = tensor_layout.packed(tensor_coord)
return TensorRef(tensor, dtype, layout).tensor_ref
def get_arguments(self, semaphore):
ref_A = TensorRef_(self.get_tensor_ref(
self.ptr_A, self.element_A, self.layout_A, self.problem_size, "a"))
ref_B = TensorRef_(self.get_tensor_ref(
self.ptr_B, self.element_B, self.layout_B, self.problem_size, "b"))
ref_C = TensorRef_(self.get_tensor_ref(
self.ptr_C, self.element_C, self.layout_C, self.problem_size, "c"))
ref_D = TensorRef_(self.get_tensor_ref(
self.ptr_D, self.element_C, self.layout_C, self.problem_size, "d"))
self.c_arguments = self.operation.argument_type(
Conv2DProblemSize(self.problem_size),
ref_A, ref_B, ref_C, ref_D, self.output_op, self.split_k_mode)
self.semaphore = semaphore
def initialize(self):
# Get launch configuration
self.launch_config = self.operation.rt_module.plan(self)
# Allocate and initialize device workspace
device_workspace_size = self.operation.rt_module.get_device_workspace_size(self)
if device_workspace_size > 0:
self.workspace_buffer = device_mem_alloc(device_workspace_size)
workspace_ptr = self.workspace_buffer.ptr
err, = cuda.cuMemsetD32(
workspace_ptr, 0, device_workspace_size // 4)
else:
workspace_ptr = None
# Get kernel params as a bytearray
semaphore = 0
if (workspace_ptr is not None
and self.split_k_mode == cutlass_bindings.conv.SplitKMode.Parallel):
self.ptr_D = workspace_ptr
elif (workspace_ptr is not None
and self.split_k_mode == cutlass_bindings.conv.SplitKMode.Serial):
semaphore = workspace_ptr
self.get_arguments(semaphore)
params_ = self.operation.rt_module.get_args(
ctypes.byref(self.c_arguments), ctypes.c_void_p(int(self.semaphore)))
self.host_workspace = bytearray(params_.contents)
self.device_workspace = None
def sync(self):
"""
Synchronize the arguments. If the tensors were provided as host arrays,
copy the result back from device to host.
"""
return super().sync()
# @typechecked
class Conv2dRT(ExecutableOperation):
"""
Conv2dRT manages the CUTLASS runtime components
"""
KernelTemplate = r"""
extern "C"
__global__ void
${operation_name}(${operation_name}${operation_suffix}::Params params) {
// Dynamic shared memory base pointer
extern __shared__ int SharedStorageBase[];
// Declare pointer to dynamic shared memory.
${operation_name}${operation_suffix}::SharedStorage *shared_storage =
reinterpret_cast<${operation_name}${operation_suffix}::SharedStorage *>(SharedStorageBase);
${operation_name}${operation_suffix} op;
op(params, *shared_storage);
}
"""
HostTemplate = r"""
extern "C" {
// Get the size of params in bytes
int ${operation_name}_get_param_size(){
return sizeof(${operation_name}${operation_suffix}::Params);
}
// Get the size of dynamic shared memory in bytes
int ${operation_name}_shared_memory_size() {
return int(sizeof(${operation_name}${operation_suffix}::SharedStorage));
}
// Get the params as byte array
char* ${operation_name}_get_params(${operation_name}${operation_suffix}::Arguments* arguments, int *semaphore=nullptr){
typename ${operation_name}${operation_suffix}::Params* params;
params = new ${operation_name}${operation_suffix}::Params(*arguments, semaphore);
char *bytes = ((char*)(params));
char *output = new char[sizeof(${operation_name}${operation_suffix}::Params)];
for (unsigned int i = 0; i < sizeof(${operation_name}${operation_suffix}::Params); i ++)
output[i] = bytes[i];
return output;
}
}
"""
def __init__(self, operation: "Conv2dOperation"):
super().__init__(operation)
self.argument_type, self.epilogue_type = get_conv2d_arguments(operation.epilogue_functor)
self.argtype = [ctypes.POINTER(self.argument_type), ctypes.c_void_p]
self.conv_kind = operation.conv_kind
self.operation: Conv2dOperation = operation
self.emitter = EmitConv2dInstance("_type")
self.threads: int = operation.tile_description.num_threads
self.swizzle_functor = operation.swizzling_functor
def emit(self):
return self.emitter.emit(self.operation)
def get_device_workspace_size(self, arguments: Conv2dArguments):
workspace_bytes = 0
launch_config = arguments.launch_config
self.conv_kind = self.operation.conv_kind
if arguments.split_k_mode == cutlass_bindings.conv.SplitKMode.Parallel:
problem_size = arguments.problem_size
workspace_bytes = DataTypeSize[self.operation.C.element] \
* launch_config.grid[2] * cutlass_bindings.conv.implicit_gemm_tensor_c_size(
self.conv_kind, problem_size
) // 8
elif arguments.split_k_mode == cutlass_bindings.conv.SplitKMode.Serial and \
arguments.split_k_slices > 1:
workspace_bytes = launch_config.grid[0] * launch_config.grid[1] * 4
return workspace_bytes
# @typechecked
def plan(self, arguments: Conv2dArguments):
tile_size = cutlass_bindings.gemm.GemmCoord(
self.operation.tile_description.threadblock_shape[0],
self.operation.tile_description.threadblock_shape[1],
self.operation.tile_description.threadblock_shape[2],
)
grid = self.swizzle_functor.get_grid_shape(
self.swizzle_functor.get_tiled_shape(
self.conv_kind, arguments.problem_size,
tile_size, arguments.split_k_slices
)
)
return LaunchConfiguration(
[grid.x, grid.y, grid.z], [self.threads, 1, 1],
self.shared_memory_capacity)
def initialize(self):
err, = cuda.cuFuncSetAttribute(
self.kernel,
attrib=cuda.CUfunction_attribute.CU_FUNC_ATTRIBUTE_MAX_DYNAMIC_SHARED_SIZE_BYTES,
value=self.shared_memory_capacity)
if err != cuda.CUresult.CUDA_SUCCESS:
raise RuntimeError("Cuda Error: {}".format(err))
class Conv2dOperation:
"""
CUTLASS Conv2d operation description.
:param conv_kind: convolution operator
:type conv_kind: :class:`cutlass_bindings.conv.Operator`
:param iterator_algorithm: Selects among several implementation
variants trading off performance with simplicity
:type iterator_algorithm: :class:`cutlass_bindings.conv.IteratorAlgorithm`
:param arch: GPU compute capability (sm_xx)
:type arch: int
:param tile_description: tile description
:type tile_description: :class:`cutlass.backend.TileDescription`
:param A: tensor A description
:type A: :class:`cutlass.backend.TensorDescription`
:param B: tensor B description
:type B: :class:`cutlass.backend.TensorDescription`
:param C: tensor C description
:type C: :class:`cutlass.backend.TensorDescription`
:param stride_support: distinguish among partial specializations that \
accelerate certain problems where convolution stride is unit \
:type stride_support: :class:`cutlass_bindings.conv.StrideSupport`
:param epilogue_functor: convolution epilogue functor
:type epilogue_functor: :class:`EpilogueFunctor`
:param swizzling_functor: threadblock swizzling functor
"""
def __init__(
self,
conv_kind: cutlass_bindings.conv.Operator,
iterator_algorithm: cutlass_bindings.conv.IteratorAlgorithm,
arch: int,
tile_description: TileDescription,
A: TensorDescription,
B: TensorDescription,
C: TensorDescription,
stride_support,
epilogue_functor,
swizzling_functor=cutlass_bindings.IdentitySwizzle1,
emission_type=EmissionType.Kernel,
**kwargs
):
self.operation_kind: OperationKind = OperationKind.Conv2d
self.arch: int = arch
self.tile_description: TileDescription = tile_description
self.conv_kind = conv_kind
self.A: TensorDescription = A
self.B: TensorDescription = B
self.C: TensorDescription = C
self.epilogue_functor = epilogue_functor
self.iterator_algorithm = iterator_algorithm
self.stride_support = stride_support
self.swizzling_functor = swizzling_functor()
self.emission_type = emission_type
self.rt_module: Conv2dRT = Conv2dRT(self)
self.argument_type = self.rt_module.argument_type
self.epilogue_type = self.rt_module.epilogue_type
def run(self, arguments: Conv2dArguments) -> cuda.CUresult:
"""
Launch the cuda kernel with input arguments
:param arguments: conv2d arguments
:type arguments: :class:`cutlass.backend.Conv2dArguments`
"""
# launch the kernel
err = self.rt_module.run(
arguments.host_workspace,
arguments.device_workspace,
arguments.launch_config,
)
if err != cuda.CUresult.CUDA_SUCCESS:
raise RuntimeError("CUDA Error %s" % str(err))
return err
#
# Get function name
#
def procedural_name(self):
"""The full procedural name indicates architecture, extended name, tile size, and layout."""
return self.configuration_name()
#
def configuration_name(self):
"""The full procedural name indicates architecture, extended name, tile size, and layout."""
opcode_class_name = OpcodeClassNames[
self.tile_description.math_instruction.opcode_class
]
threadblock = "%dx%d_%dx%d" % (
self.tile_description.threadblock_shape[0],
self.tile_description.threadblock_shape[1],
self.tile_description.threadblock_shape[2],
self.tile_description.stages,
)
if self.stride_support == StrideSupport.Unity:
configuration_name = "cutlass_sm${arch}_${opcode_class}_${extended_name}_${threadblock}_${layout}_unity_stride_align${alignment}"
else:
configuration_name = "cutlass_sm${arch}_${opcode_class}_${extended_name}_${threadblock}_${layout}_align${alignment}"
return SubstituteTemplate(
configuration_name,
{
"arch": str(self.arch),
"opcode_class": opcode_class_name,
"extended_name": self.extended_name(),
"threadblock": threadblock,
"layout": self.layout_name(),
"alignment": "%d" % self.A.alignment
},
)
#
def extended_name(self):
"""Append data types if they differ from compute type."""
if self.C.element != self.tile_description.math_instruction.element_accumulator and \
self.A.element != self.tile_description.math_instruction.element_accumulator:
extended_name = "${element_c}_${core_name}_${element_a}"
elif self.C.element == self.tile_description.math_instruction.element_accumulator and \
self.A.element != self.tile_description.math_instruction.element_accumulator:
extended_name = "${core_name}_${element_a}"
else:
extended_name = "${core_name}"
extended_name = SubstituteTemplate(extended_name, {
"element_a": DataTypeNames[self.A.element],
"element_c": DataTypeNames[self.C.element],
"core_name": self.core_name(),
})
return extended_name
#
def layout_name(self):
return "%s" % (ShortLayoutTypeNames[self.A.layout])
#
def core_name(self):
"""The basic operation kind is prefixed with a letter indicating the accumulation type."""
intermediate_type = ""
if self.tile_description.math_instruction.opcode_class == cutlass_bindings.OpClass.TensorOp:
inst_shape = "%dx%dx%d" % tuple(
self.tile_description.math_instruction.instruction_shape)
if self.tile_description.math_instruction.element_a != self.A.element and \
self.tile_description.math_instruction.element_a != self.accumulator_type():
intermediate_type = DataTypeNames[self.tile_description.math_instruction.element_a]
else:
inst_shape = ""
return "%s%s%s%s_%s" % (
ShortDataTypeNames[self.accumulator_type()],
inst_shape,
intermediate_type,
ConvKindNames[self.conv_kind],
IteratorAlgorithmNames[self.iterator_algorithm]
)
#
def is_complex(self):
complex_operators = [
MathOperation.multiply_add_complex,
MathOperation.multiply_add_complex_gaussian,
]
return self.tile_description.math_instruction.math_operation in complex_operators
#
def accumulator_type(self):
accum = self.tile_description.math_instruction.element_accumulator
if self.is_complex():
return get_complex_from_real(accum)
return accum
def device_op(self):
"""
Returns a new Conv2dOperation object that is constructed with emission type
``EmissionType.Device``.
:return: operation ready for device-level code emission
:rtype: Conv2dOperation
"""
return Conv2dOperation(
self.conv_kind, self.iterator_algorithm, self.arch, self.tile_description,
self.A, self.B, self.C, self.stride_support, self.epilogue_functor, type(self.swizzling_functor),
emission_type=EmissionType.Device)
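# Illustrative naming sketch (values below are placeholders, not a real kernel name): the
# template in configuration_name() above expands to strings of the form
#   cutlass_sm<arch>_<opcode_class>_<extended_name>_<MxN_KxStages>_<layout>_align<N>
# The helper below only re-runs the substitution with example values; it is not part of the
# operation class.
def _example_conv2d_configuration_name() -> str:
    """Substitute placeholder values into the non-unity-stride conv2d naming template."""
    return SubstituteTemplate(
        "cutlass_sm${arch}_${opcode_class}_${extended_name}_${threadblock}_${layout}_align${alignment}",
        {
            "arch": "80",
            "opcode_class": "simt",
            "extended_name": "sfprop_analytic",
            "threadblock": "64x64_8x2",
            "layout": "nhwc",
            "alignment": "1",
        },
    )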
###################################################################################################
#
# Emits single instances of a CUTLASS device-wide operator
#
###################################################################################################
class EmitConv2dInstance:
def __init__(self, operation_suffix=""):
self.operation_suffix = operation_suffix
self.includes = [
"cutlass/cutlass.h",
"cutlass/conv/kernel/default_conv2d_fprop.h",
"cutlass/conv/kernel/default_conv2d_dgrad.h",
"cutlass/conv/kernel/default_conv2d_wgrad.h"
]
self.template = """
// Conv2d${conv_kind_name} ${iterator_algorithm_name} kernel instance "${operation_name}"
using ${operation_name}_base =
typename cutlass::conv::kernel::DefaultConv2d${conv_kind_name}<
${element_a},
${layout_a},
${element_b},
${layout_b},
${element_c},
${layout_c},
${element_accumulator},
${opcode_class},
${arch},
cutlass::gemm::GemmShape<${threadblock_shape_m}, ${threadblock_shape_n}, ${threadblock_shape_k}>,
cutlass::gemm::GemmShape<${warp_shape_m}, ${warp_shape_n}, ${warp_shape_k} >,
cutlass::gemm::GemmShape<${instruction_shape_m}, ${instruction_shape_n}, ${instruction_shape_k}>,
${epilogue_functor},
${swizzling_functor},
${stages},
${math_operator},
${iterator_algorithm},
${stride_support},
${align_a},
${align_b}
>::Kernel;
struct ${operation_name}${operation_suffix}:
public ${operation_name}_base { };
"""
self.template_device = """
// Conv2d operation ${operation_name}
using Conv2d${conv_kind_name}Kernel = typename cutlass::conv::kernel::DefaultConv2d${conv_kind_name}<
${element_a},
${layout_a},
${element_b},
${layout_b},
${element_c},
${layout_c},
${element_accumulator},
${opcode_class},
${arch},
cutlass::gemm::GemmShape<${threadblock_shape_m}, ${threadblock_shape_n}, ${threadblock_shape_k}>,
cutlass::gemm::GemmShape<${warp_shape_m}, ${warp_shape_n}, ${warp_shape_k} >,
cutlass::gemm::GemmShape<${instruction_shape_m}, ${instruction_shape_n}, ${instruction_shape_k}>,
${epilogue_functor},
${swizzling_functor},
${stages},
${math_operator},
${iterator_algorithm},
${stride_support},
${align_a},
${align_b}
>::Kernel;
using DeviceKernel =
typename cutlass::conv::device::ImplicitGemmConvolution<Conv2d${conv_kind_name}Kernel>;
"""
def emit(self, operation):
warp_shape = [int(operation.tile_description.threadblock_shape[idx] /
operation.tile_description.warp_count[idx]) for idx in range(3)]
epilogue_vector_length = int(min(
operation.C.alignment * DataTypeSize[operation.C.element], 128) / DataTypeSize[operation.C.element])
values = {
"operation_name": operation.procedural_name(),
"operation_suffix": self.operation_suffix,
"conv_kind": ConvKindTag[operation.conv_kind],
"conv_kind_name": ConvKindNames[operation.conv_kind].capitalize(),
"element_a": DataTypeTag[operation.A.element],
"layout_a": LayoutTag[operation.A.layout],
"element_b": DataTypeTag[operation.B.element],
"layout_b": LayoutTag[operation.B.layout],
"element_c": DataTypeTag[operation.C.element],
"layout_c": LayoutTag[operation.C.layout],
"element_accumulator": DataTypeTag[operation.accumulator_type()],
"opcode_class": OpcodeClassTag[operation.tile_description.math_instruction.opcode_class],
"arch": "cutlass::arch::Sm%d" % operation.arch,
"threadblock_shape_m": str(operation.tile_description.threadblock_shape[0]),
"threadblock_shape_n": str(operation.tile_description.threadblock_shape[1]),
"threadblock_shape_k": str(operation.tile_description.threadblock_shape[2]),
"warp_shape_m": str(warp_shape[0]),
"warp_shape_n": str(warp_shape[1]),
"warp_shape_k": str(warp_shape[2]),
"instruction_shape_m": str(operation.tile_description.math_instruction.instruction_shape[0]),
"instruction_shape_n": str(operation.tile_description.math_instruction.instruction_shape[1]),
"instruction_shape_k": str(operation.tile_description.math_instruction.instruction_shape[2]),
"epilogue_vector_length": str(epilogue_vector_length),
"epilogue_functor": operation.epilogue_functor.emit(),
"swizzling_functor": operation.swizzling_functor.tag(),
"stages": str(operation.tile_description.stages),
"iterator_algorithm": IteratorAlgorithmTag[operation.iterator_algorithm],
"iterator_algorithm_name": IteratorAlgorithmNames[operation.iterator_algorithm].capitalize(),
"stride_support": StrideSupportTag[operation.stride_support],
"math_operator": "cutlass::arch::OpMultiplyAddComplex" if operation.is_complex() else MathOperationTag[operation.tile_description.math_instruction.math_operation],
"align_a": str(operation.A.alignment),
"align_b": str(operation.B.alignment),
}
if operation.emission_type == EmissionType.Kernel:
conv2d_template = self.template
else:
conv2d_template = self.template_device
return SubstituteTemplate(conv2d_template, values)
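# Illustrative sketch of the shape arithmetic performed in emit() above: the warp
# shape divides the threadblock tile evenly by the warp count, and the epilogue
# vector length is capped at a 128-bit access. The example values assume a
# 128x128x8 threadblock, a 4x2x1 warp count, and an f32 output with alignment 1.
def _sketch_emit_derived_shapes(threadblock_shape, warp_count, alignment_c, bits_c):
    warp_shape = [threadblock_shape[idx] // warp_count[idx] for idx in range(3)]
    epilogue_vector_length = min(alignment_c * bits_c, 128) // bits_c
    return warp_shape, epilogue_vector_length
assert _sketch_emit_derived_shapes([128, 128, 8], [4, 2, 1], 1, 32) == ([32, 64, 8], 1)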
| cutlass-main | python/cutlass/backend/conv2d_operation.py |
################################################################################
#
# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
################################################################################
from cuda import cuda
import cutlass_bindings
import numpy as np
from cutlass.backend.utils.software import CheckPackages
cupy_available = CheckPackages().check_cupy()
if cupy_available:
import cupy as cp
torch_available = CheckPackages().check_torch()
if torch_available:
import torch
class TensorRef:
"""
Python Wrapper for cutlass_bindings.TensorRef
"""
def __init__(self, tensor, dtype, layout) -> None:
if isinstance(tensor, np.ndarray):
ptr = cuda.CUdeviceptr(tensor.__array_interface__["data"][0])
elif torch_available and isinstance(tensor, torch.Tensor):
ptr = cuda.CUdeviceptr(tensor.data_ptr())
        elif cupy_available and isinstance(tensor, cp.ndarray):
ptr = cuda.CUdeviceptr(int(tensor.data.ptr))
elif isinstance(tensor, cuda.CUdeviceptr):
ptr = tensor
elif isinstance(tensor, int):
ptr = cuda.CUdeviceptr(tensor)
else:
raise NotImplementedError(tensor)
        # dtype(0) constructs a zero of the requested data type so that the correct
        # get_tensor_ref overload is selected among data types sharing the same layout
self.tensor_ref = cutlass_bindings.get_tensor_ref(int(ptr), dtype(0), layout)
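# Illustrative sketch of the pointer-extraction dispatch above, restricted to the
# NumPy branch so it runs without a GPU: torch tensors and cupy arrays follow the
# same pattern through tensor.data_ptr() and tensor.data.ptr respectively.
def _sketch_extract_raw_pointer(array):
    if isinstance(array, np.ndarray):
        # __array_interface__["data"] is a (pointer, read_only_flag) pair
        return array.__array_interface__["data"][0]
    raise NotImplementedError(type(array))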
| cutlass-main | python/cutlass/backend/tensor_ref.py |
################################################################################
#
# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
################################################################################
import ctypes
from cuda import __version__, cuda
from cutlass.backend.utils.device import device_cc
_version_splits = [int(x) for x in __version__.split("rc")[0].split(".")]
supports_cluster_launch = device_cc() >= 90 and (
_version_splits[0] > 11 or (_version_splits[0] == 11 and _version_splits[1] >= 8)
)
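# Illustrative sketch of the gating logic above: the cuda-python version string
# (e.g. "11.8.0" or "12.0.0rc1") is stripped of any "rc" suffix and split on dots,
# and cluster launch additionally requires an SM90+ device.
def _sketch_supports_cluster_launch(version_string, compute_capability):
    splits = [int(x) for x in version_string.split("rc")[0].split(".")]
    cuda_new_enough = splits[0] > 11 or (splits[0] == 11 and splits[1] >= 8)
    return compute_capability >= 90 and cuda_new_enough
assert _sketch_supports_cluster_launch("11.8.0", 90)
assert not _sketch_supports_cluster_launch("11.7.1", 90)
assert not _sketch_supports_cluster_launch("12.0.0rc1", 80)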
class LaunchConfiguration:
def __init__(self, grid=[1, 1, 1], block=[1, 1, 1], smem=0):
self.grid = grid
self.block = block
self.shared_memory_capacity = smem
class ExecutableOperation:
def __init__(self, operation):
self.operation = operation
self.module = None
self.kernel = None
def name(self):
return self.operation.procedural_name()
def emit(self):
return ""
def can_implement(self, configuration, arguments):
raise NotImplementedError()
def get_host_workspace_size(self, arguments):
raise NotImplementedError()
def get_device_workspace_size(self, arguments):
raise NotImplementedError()
def plan(self, arguments):
raise NotImplementedError()
def initialize(self, host_workspace, device_workspace, launch_config, arguments, stream=cuda.CUstream(0)):
raise NotImplementedError()
def run_with_clusters(self, launch_config, kernel_params, stream=cuda.CUstream(0)):
if hasattr(self.operation, "tile_description") and hasattr(self.operation.tile_description, "cluster_shape"):
attr = cuda.CUlaunchAttribute()
attr.value.clusterDim.x, attr.value.clusterDim.y, attr.value.clusterDim.z = self.operation.tile_description.cluster_shape
attr.id = cuda.CUstreamAttrID.CU_LAUNCH_ATTRIBUTE_CLUSTER_DIMENSION
attrs = [attr]
# Allow for non-portable cluster sizes
err, = cuda.cuFuncSetAttribute(
self.kernel, cuda.CUfunction_attribute.CU_FUNC_ATTRIBUTE_NON_PORTABLE_CLUSTER_SIZE_ALLOWED, 1)
if err != cuda.CUresult.CUDA_SUCCESS:
return err
else:
attrs = []
config = cuda.CUlaunchConfig()
config.gridDimX, config.gridDimY, config.gridDimZ = launch_config.grid
        config.blockDimX, config.blockDimY, config.blockDimZ = launch_config.block
config.sharedMemBytes = launch_config.shared_memory_capacity
config.hStream = stream
config.attrs = attrs
config.numAttrs = len(attrs)
err, = cuda.cuLaunchKernelEx(
config, f=self.kernel, kernelParams=kernel_params, extra=0)
return err
def run_without_clusters(self, launch_config, kernel_params, stream=cuda.CUstream(0)):
err, = cuda.cuLaunchKernel(
self.kernel,
launch_config.grid[0], launch_config.grid[1], launch_config.grid[2],
launch_config.block[0], launch_config.block[1], launch_config.block[2],
launch_config.shared_memory_capacity,
stream,
kernel_params,
0)
return err
def run(self, host_workspace, device_workspace, launch_config, stream=cuda.CUstream(0)):
cArg = (ctypes.c_char * len(host_workspace)).from_buffer(host_workspace)
packed = (ctypes.c_void_p * 1)()
packed[0] = ctypes.addressof(cArg)
if supports_cluster_launch:
return self.run_with_clusters(launch_config, packed, stream)
else:
return self.run_without_clusters(launch_config, packed, stream)
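# Illustrative sketch of the argument packing done in run() above: the host
# workspace (a bytearray holding the kernel's parameter struct) is wrapped in a
# ctypes char array, and a one-element array of void pointers to it is what
# cuLaunchKernel / cuLaunchKernelEx receive as kernelParams. The caller must keep
# the char array alive for the duration of the launch.
def _sketch_pack_kernel_params(host_workspace: bytearray):
    c_arg = (ctypes.c_char * len(host_workspace)).from_buffer(host_workspace)
    packed = (ctypes.c_void_p * 1)()
    packed[0] = ctypes.addressof(c_arg)
    return packed, c_arg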
| cutlass-main | python/cutlass/backend/operation.py |
################################################################################
#
# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
################################################################################
import ast
import ctypes
import inspect
import textwrap
from typing import Generic, TypeVar
from cuda import cuda, cudart
import numpy as np
from treelib import Tree
from cutlass.backend.epilogue import (
AccumulatorOp,
BinaryOp,
ColumnBroadcastOp,
ColumnReductionOp,
RowBroadcastOp,
RowReductionOp,
TensorInputOp,
TensorOutputOp,
UnaryOp,
)
from cutlass.backend.frontend import NumpyFrontend
from cutlass.backend.utils.software import SubstituteTemplate
import cutlass.backend as backend
################################################################################
# Type annotation for input arguments
################################################################################
Ttype = TypeVar("Ttype")
Dtype = TypeVar("Dtype")
class NDArray(np.ndarray, Generic[Ttype, Dtype]):
pass
################################################################################
# Operations
################################################################################
operators = {
ast.Add: "Add",
ast.Div: "Div",
ast.Eq: "Equal",
ast.Mult: "Mult",
}
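# Illustrative sketch of how the `operators` table is consulted while walking the
# AST of a user-defined epilogue: the right-hand side of "D = alpha * accum + C"
# parses into nested ast.BinOp nodes whose operator types index into the table.
def _sketch_lookup_binop_names(expression):
    names = []
    for node in ast.walk(ast.parse(expression, mode="eval")):
        if isinstance(node, ast.BinOp):
            names.append(operators[type(node.op)])
    return names
assert _sketch_lookup_binop_names("alpha * accum + C") == ["Add", "Mult"]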
################################################################################
# AST Node abstractions
################################################################################
class UnaryNode:
cnt = 0
    # Concept: created from an ast.Call node, or lowered from a BinOpNode by the binary-to-unary pass
def __init__(
self,
element_accumulator,
element_compute,
elements_per_access,
node,
args,
) -> None:
if isinstance(node, BinOpNode):
self.op = node.op
elif isinstance(node, ast.Call):
if isinstance(node.func, ast.Name):
self.op = node.func.id
elif isinstance(node.func, ast.Attribute):
self.op = node.func.value.id
else:
raise TypeError
else:
raise TypeError
self.tag = "Unary" + self.op + str(UnaryNode.cnt)
self.id = self.op + str(UnaryNode.cnt)
self.args = args
UnaryNode.cnt += 1
self.type = "tensor"
self.epilogue_op = getattr(backend, self.op)(element_compute)
# data types
self.element_accumulator = element_accumulator
self.element_compute = element_compute
self.elements_per_access = elements_per_access
def get_epilogue_node(self, visitors):
self.epilogue_node = UnaryOp(
self.element_accumulator,
self.element_compute,
self.elements_per_access,
*visitors,
self.epilogue_op,
)
def get_argument(self, visitor_args, kwargs):
epilogue_ops = []
for arg in self.args:
try:
epilogue_ops.append(kwargs[arg])
except:
epilogue_ops.append(arg) # direct arguments like constant
self.argument = self.epilogue_node.argument_type(
self.epilogue_op.argument_type(*epilogue_ops),
*visitor_args,
)
class BinOpNode:
cnt = 0
# Concept: this is created by the BinOp Node in python ast
def __init__(
self,
element_accumulator,
element_compute,
elements_per_access,
node,
) -> None:
self.op = operators[type(node.op)]
self.tag = "Binary" + self.op + str(BinOpNode.cnt)
self.id = self.op + str(BinOpNode.cnt)
self.args = None
BinOpNode.cnt += 1
self.type = "tensor"
self.epilogue_op = getattr(backend, "Vector" + self.op)(element_compute)
# data types
self.element_accumulator = element_accumulator
self.element_compute = element_compute
self.elements_per_access = elements_per_access
def get_epilogue_node(self, visitors):
self.epilogue_node = BinaryOp(
self.element_accumulator,
self.element_compute,
self.elements_per_access,
*visitors,
self.epilogue_op,
)
def get_argument(self, visitor_args, kwargs):
self.argument = self.epilogue_node.argument_type(
self.epilogue_op.argument_type(self.args),
*visitor_args,
)
class NameNode:
# Concept: this is created by the Name Node in python ast
def __init__(self, node) -> None:
try:
self.id = node.id
except:
self.id = node.targets[0].id
self.tag = self.id
class ScalarInputNode(NameNode):
# Concept: scalar
def __init__(self, node) -> None:
super().__init__(node)
self.tag = "Scalar:" + self.tag
self.type = "scalar"
class AccumulatorNode(NameNode):
# Concept: VisitorOpAccumulator
def __init__(
self,
element_accumulator,
elements_per_access,
node,
) -> None:
super().__init__(node)
self.tag = "Accum:" + self.tag
self.type = "tensor"
self.element_accumulator = element_accumulator
self.elements_per_access = elements_per_access
def get_epilogue_node(self, visitors):
self.epilogue_node = AccumulatorOp(
self.element_accumulator,
self.elements_per_access,
)
def get_argument(self, visitor_args, kwargs):
self.argument = self.epilogue_node.argument_type()
class TensorInputNode(NameNode):
# Concept: VisitorOpTensorInput
def __init__(self, element_accumulator, node) -> None:
super().__init__(node)
self.tag = "TensorInput:" + self.tag
self.type = "tensor"
self.element_accumulator = element_accumulator
def get_epilogue_node(self, *args):
self.epilogue_node = TensorInputOp(self.element_accumulator)
def get_argument(self, visitor_args, kwargs):
self.argument = self.epilogue_node.argument_type(
kwargs[self.id + "_ptr"],
kwargs["problem_size"][1],
kwargs["problem_size"][0] * kwargs["problem_size"][1],
)
class RowBroadcastNode(NameNode):
# Concept: VisitorOpRowBroadcast
def __init__(
self,
element_accumulator,
element_fragment,
node,
) -> None:
super().__init__(node)
#
self.tag = "RowBroadcast:" + self.tag
self.type = "tensor"
self.element_accumulator = element_accumulator
self.element_fragment = element_fragment
def get_epilogue_node(self, *args):
self.epilogue_node = RowBroadcastOp(
self.element_accumulator,
self.element_fragment,
)
def get_argument(self, visitor_args, kwargs):
self.argument = self.epilogue_node.argument_type(
kwargs[self.id + "_ptr"],
kwargs["problem_size"][1],
)
class ColumnBroadcastNode(NameNode):
# Concept: VisitorOpColumnBroadcast
def __init__(
self,
element_accumulator,
element_fragment,
node,
) -> None:
super().__init__(node)
self.tag = "ColumnBroadcast:" + self.tag
self.type = "tensor"
self.element_accumulator = element_accumulator
self.element_fragment = element_fragment
def get_epilogue_node(self, *args):
self.epilogue_node = ColumnBroadcastOp(
self.element_accumulator,
self.element_fragment,
)
def get_argument(self, visitor_args, kwargs):
self.argument = self.epilogue_node.argument_type(
kwargs[self.id + "_ptr"],
kwargs["problem_size"][0],
)
class TensorOutputNode(NameNode):
# Concept: VisitorOpTensorOutput
def __init__(self, element_accumulator, node) -> None:
super().__init__(node)
self.tag = "TensorOutput:" + self.tag
self.type = "tensor"
self.element_accumulator = element_accumulator
def get_epilogue_node(self, visitors):
self.epilogue_node = TensorOutputOp(self.element_accumulator, *visitors)
def get_argument(self, visitor_args, kwargs):
self.argument = self.epilogue_node.argument_type(
kwargs[self.id + "_ptr"],
kwargs["problem_size"][1],
*visitor_args,
kwargs["problem_size"][0] * kwargs["problem_size"][1],
)
class RowReductionNode:
# Concept: RowReductionOp
def __init__(
self,
element_accumulator,
element_reduction,
element_reduction_accumulator,
id,
factor,
) -> None:
#
self.id = id
self.tag = "RowReduction:" + self.id
self.type = "tensor"
self.element_accumulator = element_accumulator
self.element_reduction = element_reduction
self.element_reduction_accumulator = element_reduction_accumulator
self.factor = factor
def get_epilogue_node(self, visitors):
self.epilogue_node = RowReductionOp(
self.element_accumulator,
self.element_reduction,
self.element_reduction_accumulator,
*visitors,
)
def get_batch_stride(self, problem_size):
return problem_size[0] * ((problem_size[1] + self.factor - 1) // self.factor)
def get_argument(self, visitor_args, kwargs):
self.argument = self.epilogue_node.argument_type(
kwargs[self.id + "_ptr"],
*visitor_args,
self.get_batch_stride(kwargs["problem_size"]),
)
class ColumnReductionNode:
# Concept: ColumnReductionOp
def __init__(
self,
element_accumulator,
element_reduction,
element_reduction_accumulator,
id,
factor,
) -> None:
#
self.id = id
self.tag = "ColumnReduction:" + self.id
self.type = "tensor"
self.element_accumulator = element_accumulator
self.element_reduction = element_reduction
self.element_reduction_accumulator = element_reduction_accumulator
self.factor = factor
def get_epilogue_node(self, visitors):
self.epilogue_node = ColumnReductionOp(
self.element_accumulator,
self.element_reduction,
self.element_reduction_accumulator,
*visitors,
)
def get_batch_stride(self, problem_size):
return problem_size[1] * ((problem_size[0] + self.factor - 1) // self.factor)
def get_argument(self, visitor_args, kwargs):
self.argument = self.epilogue_node.argument_type(
kwargs[self.id + "_ptr"],
*visitor_args,
self.get_batch_stride(kwargs["problem_size"]),
)
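# Illustrative sketch of the batch strides above: a row reduction keeps, for every
# row, one partial result per threadblock tile along N, and a column reduction
# keeps, for every column, one partial result per threadblock tile along M, so the
# per-batch storage is the extent in one dimension times the tile count in the other.
def _sketch_reduction_batch_strides(problem_size, threadblock_shape):
    m, n = problem_size
    row_reduction_stride = m * ((n + threadblock_shape[1] - 1) // threadblock_shape[1])
    column_reduction_stride = n * ((m + threadblock_shape[0] - 1) // threadblock_shape[0])
    return row_reduction_stride, column_reduction_stride
assert _sketch_reduction_batch_strides((512, 256), [128, 128, 32]) == (512 * 2, 256 * 4)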
################################################################################
# Epilogue parser function
################################################################################
class EpilogueAST(ast.NodeVisitor):
def __init__(
self,
epilogue,
tile_description,
element_accumulator,
elements_per_access,
element_compute,
element_output,
) -> None:
#
self.tile_description = tile_description
self.element_accumulator = element_accumulator
self.elements_per_access = elements_per_access
self.element_compute = element_compute
self.element_output = element_output
self.epilogue = epilogue
self.source = textwrap.dedent(inspect.getsource(epilogue.__call__))
self.ast_tree = ast.parse(self.source)
self.epilogue_tree = Tree()
# print(ast.dump(self.ast_tree, indent=4)) # For Debug purpose
# input arguments
self.input_args = {}
# return nodes
self.returns = []
# reduction source nodes
self.reduction_source = {}
# stack used to keep the parent node id
self.stack = []
# visit the AST
self.visit(self.ast_tree)
# visit the name node
def visit_Name(self, node):
# append the return ids into self.returns
if self.stack[-1] == "return":
self.returns.append(node.id)
else:
# accum is produced from accumulator node
if node.id == "accum":
name_node = AccumulatorNode(
self.element_accumulator,
self.elements_per_access,
node,
)
else:
# for input nodes
if node.id in self.input_args.keys():
type = self.input_args[node.id][0]
if type == "tensor":
name_node = TensorInputNode(
self.element_accumulator,
node,
)
elif type == "row":
name_node = RowBroadcastNode(
self.element_accumulator,
self.element_compute,
node,
)
elif type == "column":
name_node = ColumnBroadcastNode(
self.element_accumulator,
self.element_compute,
node,
)
elif type == "scalar":
name_node = ScalarInputNode(node)
else:
raise ValueError(type)
# for output nodes
else:
name_node = TensorOutputNode(
self.element_accumulator,
node,
)
self.epilogue_tree.create_node(
name_node.tag,
name_node.id,
data=name_node,
parent=self.stack[-1],
)
def visit_Assign(self, node):
pre_assign_node = self.epilogue_tree.get_node(node.targets[0].id)
if pre_assign_node is None:
# The assign is to a root node
# skip the reduction nodes
if isinstance(node.value, ast.Call):
if isinstance(node.value.func, ast.Name):
func_type = node.value.func.id
elif isinstance(node.value.func, ast.Attribute):
func_type = node.value.func.value.id
else:
raise TypeError
if func_type == "reduction_op":
self.reduction_source[node.value.args[0].id] = [
node.value.args[1].value,
node.value.args[2].value,
node.targets[0].id,
]
return
name_node = TensorOutputNode(self.element_accumulator, node)
self.epilogue_tree.create_node(
name_node.tag,
name_node.id,
data=name_node,
)
self.stack.append(name_node.id)
else:
if (
node.targets[0].id in self.returns
or node.targets[0].id in self.reduction_source.keys()
):
self.stack.append(node.targets[0].id)
else:
self.stack.append(
pre_assign_node.predecessor(self.epilogue_tree.identifier)
)
self.epilogue_tree.remove_node(node.targets[0].id)
# get child tag
self.visit(node.value)
self.stack.pop()
def visit_Call(self, node):
if isinstance(node.func, ast.Name):
func_type = node.func.id
elif isinstance(node.func, ast.Attribute):
func_type = node.func.value.id
else:
raise TypeError
if func_type == "reduction_op":
self.visit(node.args[0])
else:
arg_list = []
for idx, arg in enumerate(node.args):
if idx == 0:
continue
if isinstance(arg, ast.Constant):
arg_list.append(arg.value)
elif isinstance(arg, ast.Name):
arg_list.append(arg.id)
else:
raise TypeError
unary_node = UnaryNode(
self.element_accumulator,
self.element_compute,
self.elements_per_access,
node,
arg_list,
)
self.epilogue_tree.create_node(
unary_node.tag,
unary_node.id,
parent=self.stack[-1],
data=unary_node,
)
self.stack.append(unary_node.id)
self.visit(node.args[0])
self.stack.pop()
def visit_BinOp(self, node):
binop = BinOpNode(
self.element_accumulator,
self.element_compute,
self.elements_per_access,
node,
)
self.epilogue_tree.create_node(
binop.tag,
binop.id,
data=binop,
parent=self.stack[-1],
)
self.stack.append(binop.id)
self.visit(node.left)
self.visit(node.right)
self.stack.pop()
def visit_Return(self, node):
self.stack.append("return")
self.visit(node.value)
self.stack.pop()
    # A function definition
def visit_FunctionDef(self, node: ast.FunctionDef):
# visit args
for arg in node.args.args:
if arg.arg == "self":
continue
if isinstance(arg.annotation, ast.Constant):
self.input_args[arg.arg] = [
arg.annotation.value,
]
# visit the assign in the reverse order
for idx in range(len(node.body)):
self.visit(node.body[-1 - idx])
#
# Tree optimization pass
#
# pass 1: lower Binary to Unary
def pass_binary_2_unary(self, tree, nid):
node = tree.get_node(nid)
if isinstance(node.data, BinOpNode):
lhs_node = tree.get_node(node.successors(tree.identifier)[0])
left_type = lhs_node.data.type
rhs_node = tree.get_node(node.successors(tree.identifier)[1])
right_type = rhs_node.data.type
if left_type == "scalar" and right_type == "tensor":
node.data = UnaryNode(
self.element_accumulator,
self.element_compute,
self.elements_per_access,
node.data,
[
lhs_node.data.id,
],
)
node.tag = node.data.tag
tree.remove_node(lhs_node.data.id)
self.pass_binary_2_unary(tree, rhs_node.data.id)
elif left_type == "tensor" and right_type == "scalar":
node.data = UnaryNode(
self.element_accumulator,
self.element_compute,
self.elements_per_access,
node.data,
[
                        rhs_node.data.id,
],
)
node.tag = node.data.tag
tree.remove_node(rhs_node.data.id)
self.pass_binary_2_unary(tree, lhs_node.data.id)
else:
self.pass_binary_2_unary(tree, lhs_node.data.id)
self.pass_binary_2_unary(tree, rhs_node.data.id)
else:
for child in node.successors(tree.identifier):
self.pass_binary_2_unary(tree, child)
# pass 2: inject reduction nodes
def pass_inject_reduction(self, tree, nid):
node = tree.get_node(nid)
if isinstance(node.data, TensorOutputNode):
if node.data.id in self.reduction_source.keys():
direction = self.reduction_source[node.data.id][0]
target = self.reduction_source[node.data.id][-1]
if direction == "row":
reduction_node = RowReductionNode(
self.element_accumulator,
self.element_output,
self.element_accumulator,
target,
self.tile_description.threadblock_shape[1],
)
elif direction == "column":
reduction_node = ColumnReductionNode(
self.element_accumulator,
self.element_output,
self.element_accumulator,
target,
self.tile_description.threadblock_shape[0],
)
else:
raise ValueError(direction)
child_nid = node.successors(tree.identifier)[0]
# if this output node is injected only for reduction
if node.data.id not in self.returns:
                    # the reduction config comes from the reduction_source dict
node.data = reduction_node
node.tag = reduction_node.tag
self.pass_inject_reduction(tree, child_nid)
# if this output node is also a tensor output, inject reduction as its children
else:
# get child node
tree.create_node(
reduction_node.tag,
reduction_node.id,
data=reduction_node,
parent=node.data.id,
)
tree.move_node(
child_nid,
reduction_node.id,
)
child = tree.get_node(child_nid)
for grand_child in child.successors(tree.identifier):
self.pass_inject_reduction(tree, grand_child)
else:
for child in node.successors(tree.identifier):
self.pass_inject_reduction(tree, child)
else:
for child in node.successors(tree.identifier):
self.pass_inject_reduction(tree, child)
def pass_inject_epilogue_op(self, tree, nid):
node = tree.get_node(nid)
visitors = []
for child in node.successors(tree.identifier):
visitors.append(self.pass_inject_epilogue_op(tree, child))
node.data.get_epilogue_node(visitors)
return node.data.epilogue_node
def get_arguments(self, tree, nid, kwargs):
node = tree.get_node(nid)
visitor_args = []
for child in node.successors(tree.identifier):
visitor_args.append(self.get_arguments(tree, child, kwargs))
node.data.get_argument(visitor_args, kwargs)
return node.data.argument
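# Illustrative sketch of the front end of EpilogueAST: the user epilogue's __call__
# source is retrieved with inspect, dedented, and parsed into an AST whose
# FunctionDef / Assign / Return nodes drive the visitor passes above. The example
# epilogue below is hypothetical and is never invoked.
def _sketch_parse_epilogue_source():
    class _ExampleEpilogue:
        def __call__(self, accum, alpha: "scalar", C: "tensor"):
            D = alpha * accum + C
            return D
    source = textwrap.dedent(inspect.getsource(_ExampleEpilogue.__call__))
    tree = ast.parse(source)
    return isinstance(tree.body[0], ast.FunctionDef)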
class EpilogueVisitTree:
KernelTemplate = """
${visitor}
using ${operation_name}_EpilogueVisitor = cutlass::epilogue::threadblock::EpilogueVisitorGeneric<${visitor_name}>;
"""
def __init__(
self,
elementwise_functor,
tile_description,
element_accumulator,
elements_per_access,
element_compute,
element_output,
) -> None:
#
# data types
self.tile_description = tile_description
self.element_accumulator = element_accumulator
self.elements_per_access = elements_per_access
self.element_compute = element_compute
self.element_output = element_output
self.elementwise_functor = elementwise_functor
pass
def initialize(self):
function = EpilogueAST(
self,
self.tile_description,
self.element_accumulator,
self.elements_per_access,
self.element_compute,
self.element_output,
)
#
tree = function.epilogue_tree
self.tree = tree
function.pass_binary_2_unary(self.tree, self.tree.root)
function.pass_inject_reduction(self.tree, self.tree.root)
function.pass_inject_epilogue_op(self.tree, self.tree.root)
visitor = self.tree.get_node(self.tree.root).data.epilogue_node
self.visitor = visitor
class _Argument(ctypes.Structure):
_fields_ = [
(
"visitor_arg",
visitor.argument_type,
)
]
def __init__(self, **kwargs) -> None:
# process input args
_kwargs = {}
for input_key in function.input_args.keys():
if input_key == "accum":
continue
if function.input_args[input_key][0] == "scalar":
continue
# tensor input
else:
setattr(
self,
"buffer_tensor_" + input_key,
NumpyFrontend.argument(
kwargs[input_key],
False,
),
)
setattr(
self,
input_key + "_ptr",
int(
getattr(
self,
"buffer_tensor_" + input_key,
).ptr
),
)
_kwargs[input_key + "_ptr"] = getattr(
self,
input_key + "_ptr",
)
# process the return args
for ret in function.returns:
setattr(
self,
"buffer_tensor_" + ret,
NumpyFrontend.argument(kwargs[ret], True),
)
setattr(
self,
ret + "_ptr",
int(
getattr(
self,
"buffer_tensor_" + ret,
).ptr
),
)
_kwargs[ret + "_ptr"] = getattr(self, ret + "_ptr")
setattr(
self,
"host_tensor_" + ret,
kwargs[ret],
)
_kwargs.update(kwargs)
function.get_arguments(tree, tree.root, _kwargs)
self.visitor_arg = tree.get_node(tree.root).data.argument
def sync(self, stream_sync=True):
if stream_sync:
(err,) = cudart.cudaDeviceSynchronize()
if err != cuda.CUresult.CUDA_SUCCESS:
raise RuntimeError("CUDA Error %s" % str(err))
for ret in function.returns:
(err,) = cuda.cuMemcpyDtoH(
getattr(
self,
"host_tensor_" + ret,
),
cuda.CUdeviceptr(getattr(self, ret + "_ptr")),
getattr(
self,
"host_tensor_" + ret,
).size
* getattr(
self,
"host_tensor_" + ret,
).itemsize,
)
if err != cuda.CUresult.CUDA_SUCCESS:
raise RuntimeError("CUDA Error %s" % str(err))
pass
self.epilogue_type = _Argument
def emit(self, operation):
values = {
"visitor": self.visitor.emit(operation),
"operation_name": operation.procedural_name(),
"visitor_name": self.visitor.instance_name,
}
return SubstituteTemplate(self.KernelTemplate, values)
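# Illustrative sketch of how KernelTemplate above is rendered: ${placeholder}
# tokens are replaced with emitted C++ fragments. A minimal stand-in based on
# string.Template is shown; the real SubstituteTemplate helper lives in
# cutlass.backend.utils.software and may differ in details.
def _sketch_substitute_template(template, values):
    from string import Template
    return Template(template).safe_substitute(**values)
assert _sketch_substitute_template(
    "using ${name}_EpilogueVisitor = Generic<${visitor}>;",
    {"name": "gemm_op", "visitor": "VisitorTree0"},
) == "using gemm_op_EpilogueVisitor = Generic<VisitorTree0>;"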
| cutlass-main | python/cutlass/backend/parser.py |
################################################################################
#
# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
################################################################################
import ctypes
from cuda import cuda, cudart
import cutlass_bindings
import numpy as np
from scipy.special import erf
from cutlass.backend.c_types import MatrixCoord_
from cutlass.backend.frontend import NumpyFrontend
from cutlass.backend.library import DataTypeTag
from cutlass.backend.utils.software import CheckPackages, SubstituteTemplate
dtype2ctype = {
cutlass_bindings.int8: ctypes.c_int8,
cutlass_bindings.float16: ctypes.c_uint16,
cutlass_bindings.float32: ctypes.c_float,
cutlass_bindings.float64: ctypes.c_double,
cutlass_bindings.int32: ctypes.c_int32
}
#################################################################################################
#
# Epilogue Functors
#
#################################################################################################
class EpilogueFunctorBase:
"""
Base class for thread-level epilogue functors
"""
def __init__(self) -> None:
pass
def emit(self, tag, template_argument):
template = """${tag}<${arguments}>"""
arguments = ""
for idx, arg in enumerate(template_argument):
arguments += arg
if idx < len(template_argument) - 1:
arguments += ", "
values = {
"tag": tag,
"arguments": arguments,
}
return SubstituteTemplate(template, values)
class LinearCombination(EpilogueFunctorBase):
"""
Apply a linear combination operator to an array of elements
D = alpha * accumulator + beta * source
:param element_output: data type used to load and store tensors
:param epilogue_vector_length: number of elements computed per operation.
Usually it is 128/sizeof_bits<ElementOutput_>, but we use 64 and 32 sometimes
when there are not enough data to store
:param element_accumulator: Accumulator data type
:param element_epilogue: data type used to compute linear combination
"""
tag = "cutlass::epilogue::thread::LinearCombination"
def __init__(
self, element_output, epilogue_vector_length,
element_accumulator=None, element_epilogue=None) -> None:
super().__init__()
if element_accumulator is None:
element_accumulator = element_output
if element_epilogue is None:
element_epilogue = element_output
self.element_output = element_output
self.element_accumulator = element_accumulator
self.element_epilogue = element_epilogue
self.epilogue_vector_length = epilogue_vector_length
self.template_arguments = [
DataTypeTag[element_output],
str(epilogue_vector_length),
DataTypeTag[element_accumulator],
DataTypeTag[element_epilogue],
]
# get epilogue output op type
c_element_epilogue = dtype2ctype[self.element_epilogue]
element_epilogue = self.element_epilogue
class _EpilogueOutputOpParams(ctypes.Structure):
_fields_ = [
("alpha", c_element_epilogue),
("beta", c_element_epilogue),
("alpha_ptr", ctypes.c_void_p),
("beta_ptr", ctypes.c_void_p)
]
def __init__(self, alpha, beta, *args) -> None:
self.alpha = element_epilogue(alpha).storage
self.beta = element_epilogue(beta).storage
self.epilogue_type = _EpilogueOutputOpParams
def emit(self):
return super().emit(self.tag, self.template_arguments)
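# Illustrative sketch of the string emit() above produces: the functor tag followed
# by its comma-joined template arguments. "float" and 4 are example values for an
# f32 epilogue with a vector length of 4, not tied to any particular kernel.
def _sketch_render_epilogue_functor(tag, template_arguments):
    return "%s<%s>" % (tag, ", ".join(template_arguments))
assert _sketch_render_epilogue_functor(
    "cutlass::epilogue::thread::LinearCombination",
    ["float", "4", "float", "float"],
) == "cutlass::epilogue::thread::LinearCombination<float, 4, float, float>"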
class LinearCombinationClamp(LinearCombination):
"""
Applies a linear combination operator to an array of elements then clamps
the output before converting to the output element type.
D = alpha * accumulator + beta * source + uniform
:param element_output: data type used to load and store tensors
:param epilogue_vector_length: number of elements computed per operation.
Usually it is 128/sizeof_bits<ElementOutput_>, but we use 64 and 32 sometimes
when there are not enough data to store
:param element_accumulator: Accumulator data type
:param element_epilogue: data type used to compute linear combination
"""
tag = "cutlass::epilogue::thread::LinearCombinationClamp"
def __init__(
self, element_output, epilogue_vector_length,
element_accumulator=None, element_epilogue=None) -> None:
# Base constructor
super().__init__(
element_output,
epilogue_vector_length,
element_accumulator,
element_epilogue,
)
c_element_epilogue = dtype2ctype[self.element_epilogue]
element_epilogue = self.element_epilogue
class _EpilogueOutputOpParams(ctypes.Structure):
_fields_ = [
("alpha", c_element_epilogue),
("beta", c_element_epilogue),
("alpha_ptr", ctypes.c_void_p),
("beta_ptr", ctypes.c_void_p),
]
def __init__(self, alpha, beta, *args) -> None:
self.alpha = element_epilogue(alpha).storage
self.beta = element_epilogue(beta).storage
self.epilogue_type = _EpilogueOutputOpParams
class FastLinearCombinationClamp(EpilogueFunctorBase):
"""
Applies a linear combination operator to an array of elements then clamps
the output before converting to the output element type.
D = alpha * accumulator + beta * source
    Note: this functor is applicable only when problem_size_K <= 256 for signed
    int8 gemm or problem_size_K <= 128 for unsigned int8 gemm; otherwise, use the
    default LinearCombinationClamp above.
:param element_output: data type used to load and store tensors
:param epilogue_vector_length: number of elements computed per operation.
Usually it is 128/sizeof_bits<ElementOutput_>, but we use 64 and 32 sometimes
when there are not enough data to store
"""
tag = "cutlass::epilogue::thread::FastLinearCombinationClamp"
def __init__(self, element_output, epilogue_vector_length, *args) -> None:
super().__init__()
self.template_arguments = [
DataTypeTag[element_output], str(epilogue_vector_length)
]
self.element_accumulator = cutlass_bindings.int32
self.element_epilogue = cutlass_bindings.float32
# get epilogue output op
c_element_epilogue = dtype2ctype[self.element_epilogue]
element_epilogue = self.element_epilogue
class _EpilogueOutputOpParams(ctypes.Structure):
_fields_ = [
("alpha", c_element_epilogue),
("beta", c_element_epilogue),
("alpha_ptr", ctypes.c_void_p),
("beta_ptr", ctypes.c_void_p),
]
def __init__(self, alpha, beta, *args) -> None:
self.alpha = element_epilogue(alpha).storage
self.beta = element_epilogue(beta).storage
self.epilogue_type = _EpilogueOutputOpParams
def emit(self):
return super().emit(self.tag, self.template_arguments)
class LinearCombinationGeneric(LinearCombination):
"""
Applies a linear combination operator followed by an activation function
to an array of elements.
D = activation(alpha * accumulator + beta * source)
:param activation_functor: input activation functor
:param element_output: data type used to load and store tensors
:param epilogue_vector_length: number of elements computed per operation.
Usually it is 128/sizeof_bits<ElementOutput_>, but we use 64 and 32 sometimes
when there are not enough data to store
:param element_accumulator: Accumulator data type
:param element_epilogue: data type used to compute linear combination
"""
tag = "cutlass::epilogue::thread::LinearCombinationGeneric"
def __init__(
self, activation_functor,
element_output, epilogue_vector_length,
element_accumulator=None, element_epilogue=None) -> None:
super().__init__(
element_output,
epilogue_vector_length,
element_accumulator,
element_epilogue,
)
self.template_arguments = [
activation_functor.emit()] + self.template_arguments
self.activation_functor = activation_functor
self.element_epilogue = element_epilogue
# get epilogue output op
self.epilogue_type = self.activation_functor.epilogue_output_op(self.element_epilogue)
class ActivationFunctor:
"""
Base class for frequently used activation functions
"""
def __init__(self, element_compute) -> None:
pass
@staticmethod
def numpy(x: np.ndarray):
raise NotImplementedError()
def emit(self):
return self.tag
@staticmethod
def epilogue_output_op(element_epilogue):
c_element_epilogue = dtype2ctype[element_epilogue]
class _EpilogueOutputOpParams(ctypes.Structure):
_fields_ = [
("alpha", c_element_epilogue),
("beta", c_element_epilogue),
("alpha_ptr", ctypes.c_void_p),
("beta_ptr", ctypes.c_void_p),
]
def __init__(self, alpha, beta, *args) -> None:
self.alpha = element_epilogue(alpha).storage
self.beta = element_epilogue(beta).storage
return _EpilogueOutputOpParams
# identity operator
class identity(ActivationFunctor):
tag = "cutlass::epilogue::thread::Identity"
    @staticmethod
    def numpy(x: np.ndarray):
return x
# ReLu operator,
class relu(ActivationFunctor):
tag = "cutlass::epilogue::thread::ReLu"
def __init__(self, element_compute):
super().__init__(element_compute)
class _Arguments(ctypes.Structure):
_fields_ = [
("threshold", dtype2ctype[element_compute])
]
def __init__(self, threshold=0.0) -> None:
self.threshold = element_compute(threshold).storage
self.argument_type = _Arguments
def emit_visitor(self):
return "cutlass::ReLUVisitor"
@staticmethod
def numpy(x: np.ndarray):
return np.maximum(x, 0)
# Leaky ReLu operator
class leaky_relu(ActivationFunctor):
tag = "cutlass::epilogue::thread::LeakyReLU"
def __init__(self, element_compute) -> None:
super().__init__(element_compute)
class _Arguments(ctypes.Structure):
_fields_ = [
("leaky_alpha", dtype2ctype[element_compute])
]
def __init__(self, leaky_alpha) -> None:
self.leaky_alpha = element_compute(leaky_alpha).storage
self.argument_type = _Arguments
def emit_visitor(self):
return "cutlass::LeakyReLUVisitor"
@staticmethod
def numpy(x: np.ndarray, leaky_alpha):
return np.maximum(x, 0) + np.minimum(x, 0) * leaky_alpha
def epilogue_output_op(self, element_epilogue):
c_element_epilogue = dtype2ctype[element_epilogue]
class _EpilogueOutputOpParams(ctypes.Structure):
_fields_ = [
("alpha", c_element_epilogue),
("beta", c_element_epilogue),
("alpha_ptr", ctypes.c_void_p),
("beta_ptr", ctypes.c_void_p),
("leaky_alpha", c_element_epilogue)
]
def __init__(self, alpha, beta, leaky_alpha=0.2, *args) -> None:
self.alpha = element_epilogue(alpha).storage
self.beta = element_epilogue(beta).storage
self.alpha_ptr = 0
self.beta_ptr = 0
self.leaky_alpha = element_epilogue(leaky_alpha).storage
return _EpilogueOutputOpParams
# Tanh operator
class tanh(ActivationFunctor):
tag = "cutlass::epilogue::thread::Tanh"
def __init__(self, element_compute) -> None:
super().__init__(element_compute)
class _Arguments(ctypes.Structure):
_fields_ = [("tmp", ctypes.c_int)]
def __init__(self, *args) -> None:
self.tmp = 0
self.argument_type = _Arguments
def emit_visitor(self):
return "cutlass::TanhVisitor"
@staticmethod
def numpy(x: np.ndarray):
return np.tanh(x)
def sigmoid_op(x: np.ndarray):
return 1.0 / (1.0 + np.exp(-x))
# Sigmoid operator
class sigmoid(ActivationFunctor):
tag = "cutlass::epilogue::thread::Sigmoid"
@staticmethod
def numpy(x: np.ndarray):
return sigmoid_op(x)
# SiLu operator
class silu(ActivationFunctor):
tag = "cutlass::epilogue::thread::SiLu"
@staticmethod
def numpy(x: np.ndarray):
return x * sigmoid_op(x)
# Hardswish operator
class hardswish(ActivationFunctor):
tag = "cutlass::epilogue::thread::HardSwish"
@staticmethod
def numpy(x: np.ndarray):
relu6 = np.minimum(np.maximum(x + 3.0, 0), 6.0)
return x * relu6 / 6.0
# GELU operator
class gelu(ActivationFunctor):
tag = "cutlass::epilogue::thread::GELU"
@staticmethod
def numpy(x: np.ndarray):
return 0.5 * x * (1 + erf(x / np.sqrt(2.0)))
# reduction operator
def reduction_op(tensor, direction, math, factor):
batch, m, n = tensor.shape
if math == "Add":
if direction == "row":
num_cta_n = (n + factor - 1) // factor
reduction = np.transpose(
np.sum(tensor.reshape(batch, m, num_cta_n, factor), axis=-1),
axes=[0, 2, 1]).flatten()
elif direction == "column":
num_cta_m = (m + factor - 1) // factor
reduction = np.sum(
tensor.reshape(batch, num_cta_m, factor, n), axis=-2).flatten()
else:
raise NotImplementedError
return reduction
else:
raise NotImplementedError
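# Illustrative usage of the NumPy reference above: reducing a (batch, m, n)
# accumulator along rows with a tile width (factor) of 2 sums pairs of columns per
# row and lays the partial results out tile-major.
def _sketch_reduction_op_usage():
    tensor = np.arange(8, dtype=np.float32).reshape(1, 2, 4)
    # per-row tile sums are [[1, 5], [9, 13]]; tile-major flattening gives [1, 9, 5, 13]
    result = reduction_op(tensor, "row", "Add", 2)
    return np.allclose(result, np.array([1.0, 9.0, 5.0, 13.0], dtype=np.float32))
assert _sketch_reduction_op_usage()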
################################################################################
# Epilogue Visitor
################################################################################
class LayerNorm(EpilogueFunctorBase):
"""
    Epilogue visitor that fuses the elementwise epilogue with computation of the
    mean and (shifted-K) variance partial results consumed by a subsequent layer
    normalization.
    :param elementwise_functor: elementwise epilogue functor (e.g. LinearCombination)
    :param element_variance: data type of the variance output; defaults to the functor's output type
    :param element_mean: data type of the mean output; defaults to the functor's output type
    :param element_layer_norm_compute: data type used for the layer-norm computation; defaults to the functor's compute type
    :param shifted_k: whether the shifted-K algorithm is used to compute the variance
"""
KernelTemplate = """
cutlass::epilogue::threadblock::EpilogueVisitorLayerNorm<
cutlass::gemm::GemmShape<${threadblock_shape_m}, ${threadblock_shape_n}, ${threadblock_shape_k}>,
${operation_name}_default::kThreadCount,
${operation_name}_default::Epilogue::OutputTileIterator,
${operation_name}_default::Epilogue::AccumulatorFragmentIterator::AccumulatorTile,
${element_compute}, // element_compute
${element_variance}, // element_variance
${element_mean}, // element_mean
${element_layer_norm_compute}, // element_layer_norm_compute
${epilogue_functor},
${shifted_k}>;
"""
headers = [
"gemm/gemm_universal_with_visitor.h",
"epilogue/epilogue_visitor_with_layernorm.h"
]
def __init__(
self, elementwise_functor,
element_variance=None, element_mean=None,
element_layer_norm_compute=None, shifted_k=True, ) -> None:
super().__init__()
self.elementwise_functor = elementwise_functor
self.element_compute = elementwise_functor.element_epilogue
self.element_output = elementwise_functor.element_output
        # Fall back to the elementwise functor's types when not provided explicitly
        self.element_variance = element_variance if element_variance is not None else self.element_output
        self.element_mean = element_mean if element_mean is not None else self.element_output
        self.element_layer_norm_compute = (
            element_layer_norm_compute
            if element_layer_norm_compute is not None
            else self.element_compute
        )
if shifted_k:
self.shifted_k = "true"
else:
self.shifted_k = "false"
# get epilogue output op
elementwise_params_type = self.elementwise_functor.epilogue_type
class _EpilogueVisitorParams(ctypes.Structure):
_fields_ = [
("element_wise", elementwise_params_type),
("ptr_Variance", ctypes.c_void_p),
("ptr_Mean_", ctypes.c_void_p),
("ptr_Shifted_K_", ctypes.c_void_p),
("extent", MatrixCoord_),
]
def __init__(self, elementwise_params, variance, mean, shift_k, extent) -> None:
self.element_wise = elementwise_params
if isinstance(variance, np.ndarray):
self.buffer_variance = NumpyFrontend.argument(variance, False)
self.buffer_mean = NumpyFrontend.argument(mean, False)
self.buffer_shift_k = NumpyFrontend.argument(shift_k, False)
self.ptr_Variance = int(self.buffer_variance.ptr)
self.ptr_Mean_ = int(self.buffer_mean.ptr)
self.ptr_Shifted_K_ = int(self.buffer_shift_k.ptr)
self.extent = MatrixCoord_(extent[0], extent[1])
self.host_variance = variance
self.host_mean = mean
self.host_shift_k = shift_k
def sync(self, stream_sync=True):
if stream_sync:
err, = cudart.cudaDeviceSynchronize()
if err != cuda.CUresult.CUDA_SUCCESS:
raise RuntimeError("CUDA Error %s" % str(err))
err, = cuda.cuMemcpyDtoH(
self.host_variance,
cuda.CUdeviceptr(self.ptr_Variance),
self.host_variance.size * self.host_variance.itemsize)
err, = cuda.cuMemcpyDtoH(
self.host_mean,
cuda.CUdeviceptr(self.ptr_Mean_),
self.host_mean.size * self.host_mean.itemsize)
err, = cuda.cuMemcpyDtoH(
self.host_shift_k,
cuda.CUdeviceptr(self.ptr_Shifted_K_),
self.host_shift_k.size * self.host_shift_k.itemsize)
if err != cuda.CUresult.CUDA_SUCCESS:
raise RuntimeError("CUDA Error %s" % str(err))
self.epilogue_type = _EpilogueVisitorParams
def emit(self, operation):
values = {
"threadblock_shape_m": str(operation.tile_description.threadblock_shape[0]),
"threadblock_shape_n": str(operation.tile_description.threadblock_shape[1]),
"threadblock_shape_k": str(operation.tile_description.threadblock_shape[2]),
"operation_name": operation.procedural_name(),
"element_compute": DataTypeTag[self.element_compute],
"element_variance": DataTypeTag[self.element_variance],
"element_mean": DataTypeTag[self.element_mean],
"element_layer_norm_compute": DataTypeTag[self.element_layer_norm_compute],
"epilogue_functor": self.elementwise_functor.emit(),
"shifted_k": self.shifted_k,
}
return SubstituteTemplate(self.KernelTemplate, values)
class AccumulatorOp:
Template = """
using ${instance_name} = cutlass::epilogue::threadblock::VisitorOpAccumulator<${element_accumulator}, ${elements_per_access}>;
"""
counter = 0
def __init__(self, element_accumulator, elements_per_access) -> None:
self.element_accumulator = element_accumulator
self.elements_per_access = elements_per_access
self.instance_name = "AccumulatorOp%d" % AccumulatorOp.counter
AccumulatorOp.counter += 1
class _Arguments(ctypes.Structure):
_fields_ = [("tmp", ctypes.c_int)]
def __init__(self):
self.tmp = 0
self.argument_type = _Arguments
def emit(self, *args):
values = {
"instance_name": self.instance_name,
"element_accumulator": DataTypeTag[self.element_accumulator],
"elements_per_access": str(self.elements_per_access),
}
return SubstituteTemplate(self.Template, values)
class LinearCombinationOp:
Template = """
${visitor_a}
${visitor_b}
using ${instance_name} = cutlass::epilogue::threadblock::VisitorOpLinearCombination<
${element_accumulator}, ${element_compute},
${elements_per_access}, ${visitor_a_name}, ${visitor_b_name}>;
"""
counter = 0
def __init__(self, element_accumulator, element_compute,
elements_per_access, visitor_a, visitor_b) -> None:
self.element_accumulator = element_accumulator
self.element_compute = element_compute
self.elements_per_access = elements_per_access
self.visitor_a = visitor_a
self.visitor_b = visitor_b
self.instance_name = "LinearCombinationOp%d" % LinearCombinationOp.counter
LinearCombinationOp.counter += 1
class _Arguments(ctypes.Structure):
_fields_ = [
("alpha", dtype2ctype[self.element_compute]),
("beta", dtype2ctype[self.element_compute]),
("visitor_a", self.visitor_a.argument_type),
("visitor_b", self.visitor_b.argument_type)
]
def __init__(self, alpha, beta, visitor_a_arg, visitor_b_arg) -> None:
self.alpha = element_compute(alpha).storage
self.beta = element_compute(beta).storage
self.visitor_a = visitor_a_arg
self.visitor_b = visitor_b_arg
self.argument_type = _Arguments
def emit(self, operation):
values = {
"instance_name": self.instance_name,
"element_accumulator": DataTypeTag[self.element_accumulator],
"element_compute": DataTypeTag[self.element_compute],
"elements_per_access": str(self.elements_per_access),
"visitor_a_name": self.visitor_a.instance_name,
"visitor_b_name": self.visitor_b.instance_name,
"visitor_a": self.visitor_a.emit(operation),
"visitor_b": self.visitor_b.emit(operation)
}
return SubstituteTemplate(self.Template, values)
class VectorAdd:
def __init__(self, *args) -> None:
class _Arguments(ctypes.Structure):
_fields_ = [("tmp", ctypes.c_int)]
def __init__(self, *args) -> None:
self.tmp = 0
self.argument_type = _Arguments
def emit(self):
return "cutlass::VectorAdd"
class VectorMult:
def __init__(self, *args) -> None:
class _Arguments(ctypes.Structure):
_fields_ = [("tmp", ctypes.c_int)]
def __init__(self, *args) -> None:
self.tmp = 0
self.argument_type = _Arguments
def emit(self):
return "cutlass::VectorMult"
class BinaryOp:
Template = """
${visitor_a}
${visitor_b}
using ${instance_name} = cutlass::epilogue::threadblock::VisitorOpBinary<
${element_accumulator}, ${element_compute},
${elements_per_access}, ${visitor_a_name}, ${visitor_b_name}, ${binary_op}>;
"""
counter = 0
def __init__(self, element_accumulator, element_compute,
elements_per_access, visitor_a, visitor_b, binary_op) -> None:
self.element_accumulator = element_accumulator
self.element_compute = element_compute
self.elements_per_access = elements_per_access
self.visitor_a = visitor_a
self.visitor_b = visitor_b
self.binary_op = binary_op
self.instance_name = "BinaryOp%d" % BinaryOp.counter
BinaryOp.counter += 1
class _Arguments(ctypes.Structure):
_fields_ = [
("binary_param", binary_op.argument_type),
("visitor_a", self.visitor_a.argument_type),
("visitor_b", self.visitor_b.argument_type)
]
def __init__(self, binary_param, visitor_a_arg, visitor_b_arg) -> None:
self.binary_param = binary_param
self.visitor_a = visitor_a_arg
self.visitor_b = visitor_b_arg
self.argument_type = _Arguments
def emit(self, operation):
values = {
"instance_name": self.instance_name,
"element_accumulator": DataTypeTag[self.element_accumulator],
"element_compute": DataTypeTag[self.element_compute],
"elements_per_access": str(self.elements_per_access),
"visitor_a_name": self.visitor_a.instance_name,
"visitor_b_name": self.visitor_b.instance_name,
"visitor_a": self.visitor_a.emit(operation),
"visitor_b": self.visitor_b.emit(operation),
"binary_op": self.binary_op.emit()
}
return SubstituteTemplate(self.Template, values)
class Mult:
def __init__(self, element_compute) -> None:
class _Arguments(ctypes.Structure):
_fields_ = [
("alpha", dtype2ctype[element_compute])
]
def __init__(self, alpha) -> None:
self.alpha = element_compute(alpha).storage
self.argument_type = _Arguments
def emit_visitor(self):
return "cutlass::Mult"
class UnaryOp:
Template = """
${visitor}
using ${instance_name} = cutlass::epilogue::threadblock::VisitorOpUnary<
${element_accumulator}, ${element_compute},
${elements_per_access}, ${visitor_name}, ${unary_op}>;
"""
counter = 0
def __init__(self, element_accumulator, element_compute,
elements_per_access, visitor, unary_op) -> None:
self.element_accumulator = element_accumulator
self.element_compute = element_compute
self.elements_per_access = elements_per_access
self.visitor = visitor
self.unary_op = unary_op
self.instance_name = "UnaryOp%d" % UnaryOp.counter
UnaryOp.counter += 1
class _Arguments(ctypes.Structure):
_fields_ = [
("unary_param", unary_op.argument_type),
("visitor_arg", self.visitor.argument_type),
]
def __init__(self, unary_param, visitor_arg) -> None:
self.unary_param = unary_param
self.visitor_arg = visitor_arg
self.argument_type = _Arguments
def emit(self, operation):
values = {
"instance_name": self.instance_name,
"element_accumulator": DataTypeTag[self.element_accumulator],
"element_compute": DataTypeTag[self.element_compute],
"elements_per_access": str(self.elements_per_access),
"visitor_name": self.visitor.instance_name,
"unary_op": self.unary_op.emit_visitor(),
"visitor": self.visitor.emit(operation),
}
return SubstituteTemplate(self.Template, values)
class RowBroadcastOp:
Template = """
using ${instance_name} = cutlass::epilogue::threadblock::VisitorOpRowBroadcast<
${element_accumulator}, ${element_fragment}, ${input_tile_iterator}>;
"""
counter = 0
def __init__(self, element_accumulator, element_fragment) -> None:
self.element_accumulator = element_accumulator
self.element_fragment = element_fragment
self.instance_name = "RowBroadcastOp%d" % RowBroadcastOp.counter
RowBroadcastOp.counter += 1
class _Arguments(ctypes.Structure):
_fields_ = [
("broadcast_ptr", ctypes.c_void_p),
("batch_stride", ctypes.c_longlong)
]
def __init__(self, broadcast_ptr, batch_stride=0):
self.broadcast_ptr = int(broadcast_ptr)
self.batch_stride = batch_stride
self.argument_type = _Arguments
def emit(self, operation):
values = {
"instance_name": self.instance_name,
"element_accumulator": DataTypeTag[self.element_accumulator],
"element_fragment": DataTypeTag[self.element_fragment],
"input_tile_iterator": operation.procedural_name() + "_default::Epilogue::OutputTileIterator"
}
return SubstituteTemplate(self.Template, values)
class ColumnBroadcastOp:
Template = """
using ${instance_name} = cutlass::epilogue::threadblock::VisitorOpColumnBroadcast<
${element_accumulator}, ${element_fragment}, ${input_tile_iterator}>;
"""
counter = 0
def __init__(self, element_accumulator, element_fragment) -> None:
self.element_accumulator = element_accumulator
self.element_fragment = element_fragment
self.instance_name = "ColumnBroadcastOp%d" % ColumnBroadcastOp.counter
ColumnBroadcastOp.counter += 1
class _Arguments(ctypes.Structure):
_fields_ = [
("broadcast_ptr", ctypes.c_void_p),
("batch_stride", ctypes.c_longlong)
]
def __init__(self, broadcast_ptr, batch_stride=0):
self.broadcast_ptr = int(broadcast_ptr)
self.batch_stride = batch_stride
self.argument_type = _Arguments
def emit(self, operation):
values = {
"instance_name": self.instance_name,
"element_accumulator": DataTypeTag[self.element_accumulator],
"element_fragment": DataTypeTag[self.element_fragment],
"input_tile_iterator": operation.procedural_name() + "_default::Epilogue::OutputTileIterator"
}
return SubstituteTemplate(self.Template, values)
class TensorInputOp:
Template = """
using ${instance_name} = cutlass::epilogue::threadblock::VisitorOpTensorInput<
${element_accumulator}, ${input_tile_iterator}>;
"""
counter = 0
def __init__(self, element_accumulator) -> None:
self.element_accumulator = element_accumulator
self.instance_name = "TensorInputOp%d" % TensorInputOp.counter
TensorInputOp.counter += 1
class _Arguments(ctypes.Structure):
_fields_ = [
("input_ptr", ctypes.c_void_p),
("ldt", ctypes.c_int),
("batch_stride", ctypes.c_longlong)
]
def __init__(self, input_ptr, ldt, batch_stride=0) -> None:
self.input_ptr = int(input_ptr)
self.ldt = ldt
self.batch_stride = batch_stride
self.argument_type = _Arguments
def emit(self, operation):
values = {
"instance_name": self.instance_name,
"element_accumulator": DataTypeTag[self.element_accumulator],
"input_tile_iterator": operation.procedural_name() + "_default::Epilogue::OutputTileIterator"
}
return SubstituteTemplate(self.Template, values)
class TensorOutputOp:
Template = """
${visitor}
using ${instance_name} = cutlass::epilogue::threadblock::VisitorOpTensorOutput<
${element_accumulator}, ${output_tile_iterator}, ${visitor_name}>;
"""
counter = 0
def __init__(self, element_accumulator, visitor) -> None:
self.element_accumulator = element_accumulator
self.visitor = visitor
self.instance_name = "TensorOutputOp%d" % TensorOutputOp.counter
TensorOutputOp.counter += 1
class _Arguments(ctypes.Structure):
_fields_ = [
("output_ptr", ctypes.c_void_p),
("ldt", ctypes.c_int),
("batch_stride", ctypes.c_longlong),
("visitor_arg", self.visitor.argument_type)
]
def __init__(self, output_ptr, ldt, visitor_arg, batch_stride=0) -> None:
self.output_ptr = int(output_ptr)
self.ldt = int(ldt)
self.visitor_arg = visitor_arg
self.batch_stride = batch_stride
self.argument_type = _Arguments
def emit(self, operation):
values = {
"instance_name": self.instance_name,
"element_accumulator": DataTypeTag[self.element_accumulator],
"output_tile_iterator": operation.procedural_name() + "_default::Epilogue::OutputTileIterator",
"visitor_name": self.visitor.instance_name,
"visitor": self.visitor.emit(operation),
}
return SubstituteTemplate(self.Template, values)
class ColumnReductionOp:
Template = """
${visitor}
using ${instance_name} = cutlass::epilogue::threadblock::VisitorOpColumnReduction<
cutlass::gemm::GemmShape<${threadblock_shape_m}, ${threadblock_shape_n}, ${threadblock_shape_k}>,
${element_accumulator}, ${element_reduction}, ${element_reduction_accumulator},
${output_tile_iterator}, ${visitor_name}>;
"""
counter = 0
def __init__(self, element_accumulator, element_reduction,
element_reduction_accumulator, visitor) -> None:
self.element_accumulator = element_accumulator
self.element_reduction = element_reduction
self.element_reduction_accumulator = element_reduction_accumulator
self.visitor = visitor
self.instance_name = "ColumnReductionOp%d" % ColumnReductionOp.counter
ColumnReductionOp.counter += 1
class _Arguments(ctypes.Structure):
_fields_ = [
("reduction_ptr", ctypes.c_void_p),
("batch_stride", ctypes.c_longlong),
("visitor_arg", self.visitor.argument_type)
]
def __init__(self, reduction_ptr, visitor_arg, batch_stride=0) -> None:
self.reduction_ptr = reduction_ptr
self.batch_stride = batch_stride
self.visitor_arg = visitor_arg
self.argument_type = _Arguments
def emit(self, operation):
values = {
"instance_name": self.instance_name,
"threadblock_shape_m": str(operation.tile_description.threadblock_shape[0]),
"threadblock_shape_n": str(operation.tile_description.threadblock_shape[1]),
"threadblock_shape_k": str(operation.tile_description.threadblock_shape[2]),
"element_accumulator": DataTypeTag[self.element_accumulator],
"element_reduction": DataTypeTag[self.element_reduction],
"element_reduction_accumulator": DataTypeTag[self.element_reduction_accumulator],
"output_tile_iterator": operation.procedural_name() + "_default::Epilogue::OutputTileIterator",
"visitor_name": self.visitor.instance_name,
"visitor": self.visitor.emit(operation),
}
return SubstituteTemplate(self.Template, values)
class RowReductionOp:
Template = """
${visitor}
using ${instance_name} = cutlass::epilogue::threadblock::VisitorOpRowReduction<
cutlass::gemm::GemmShape<${threadblock_shape_m}, ${threadblock_shape_n}, ${threadblock_shape_k}>,
${element_accumulator}, ${element_reduction}, ${element_reduction_accumulator},
${output_tile_iterator}, ${visitor_name}>;
"""
counter = 0
def __init__(self, element_accumulator, element_reduction,
element_reduction_accumulator, visitor) -> None:
self.element_accumulator = element_accumulator
self.element_reduction = element_reduction
self.element_reduction_accumulator = element_reduction_accumulator
self.visitor = visitor
self.instance_name = "RowReductionOp%d" % RowReductionOp.counter
RowReductionOp.counter += 1
class _Arguments(ctypes.Structure):
_fields_ = [
("reduction_ptr", ctypes.c_void_p),
("batch_stride", ctypes.c_longlong),
("visitor_arg", self.visitor.argument_type)
]
def __init__(self, reduction_ptr, visitor_arg, batch_stride=0) -> None:
self.reduction_ptr = reduction_ptr
self.visitor_arg = visitor_arg
self.batch_stride = batch_stride
self.argument_type = _Arguments
def emit(self, operation):
values = {
"instance_name": self.instance_name,
"threadblock_shape_m": str(operation.tile_description.threadblock_shape[0]),
"threadblock_shape_n": str(operation.tile_description.threadblock_shape[1]),
"threadblock_shape_k": str(operation.tile_description.threadblock_shape[2]),
"element_accumulator": DataTypeTag[self.element_accumulator],
"element_reduction": DataTypeTag[self.element_reduction],
"element_reduction_accumulator": DataTypeTag[self.element_reduction_accumulator],
"output_tile_iterator": operation.procedural_name() + "_default::Epilogue::OutputTileIterator",
"visitor_name": self.visitor.instance_name,
"visitor": self.visitor.emit(operation),
}
return SubstituteTemplate(self.Template, values)
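# ---------------------------------------------------------------------------------
# Hedged illustration (not part of the original source): the visitor ops above are
# meant to nest through their ``visitor`` argument, and each ``emit()`` call prepends
# its visitor's C++ ``using`` declaration to its own. ``operation`` is a placeholder
# for any operation object exposing ``procedural_name()`` and
# ``tile_description.threadblock_shape``; ``element`` stands for a cutlass_bindings
# data type (e.g. float32) that is a valid key of DataTypeTag.
def _example_emit_row_reduction_epilogue(operation, element):
    accumulator = TensorInputOp(element)                     # reads the accumulator tile
    store_d = TensorOutputOp(element, visitor=accumulator)   # writes D through its visitor
    row_reduce = RowReductionOp(element, element, element, visitor=store_d)
    # The emitted string contains the nested `using` declarations for the whole visitor tree.
    return row_reduce.emit(operation)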
| cutlass-main | python/cutlass/backend/epilogue.py |
#################################################################################################
#
# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
from bfloat16 import bfloat16
import cutlass_bindings
import numpy as np
from cutlass.backend import compiler
from cutlass.backend.gemm_operation import GemmGroupedArguments, GemmOperationGrouped
from cutlass.backend.library import DataTypeSize, ShortDataTypeNames
from cutlass.backend.memory_manager import get_allocated_size
from cutlass.backend.test.gemm_testbed import getTensorRef, getTensorView, transpose
class TestbedGrouped:
def __init__(self, operation: GemmOperationGrouped, seed: int = 2080) -> None:
compiler.add_module([operation])
self.seed = seed
self.operation = operation
element_size = DataTypeSize[operation.A.element]
self.dtype_A = self.numpy_type(operation.A.element)
self.dtype_B = self.numpy_type(operation.B.element)
self.dtype_C = self.numpy_type(operation.C.element)
self.dtype_D = self.numpy_type(operation.C.element)
if element_size == 1:
self.scope_max = 1
self.scope_min = 0
elif element_size <= 8:
self.scope_max = 1
self.scope_min = -1
elif element_size == 16:
self.scope_max = 4
self.scope_min = -4
else:
self.scope_max = 8
self.scope_min = -8
#: compute type
self.compute_type = operation.epilogue_functor.element_epilogue
self.accumulator_type = (
operation.tile_description.math_instruction.element_accumulator
)
@staticmethod
def numpy_type(type):
if type == cutlass_bindings.float64:
return np.float64
elif type == cutlass_bindings.float32:
return np.float32
elif type == cutlass_bindings.float16:
return np.float16
elif type == cutlass_bindings.bfloat16:
return bfloat16
elif type == cutlass_bindings.int32:
return np.int32
elif type == cutlass_bindings.int8:
return np.int8
else:
raise ValueError("unsupported type: %s" % ShortDataTypeNames[type])
def uniform_init(self, size, dtype):
if dtype in [np.float32, np.float16, bfloat16, np.float64]:
return np.ceil(
np.random.uniform(
low=self.scope_min - 0.5, high=self.scope_max - 0.5, size=size
).astype(dtype)
)
else:
return np.random.uniform(
low=self.scope_min - 1, high=self.scope_max + 1, size=size
).astype(dtype)
def print_problem_size(self, p):
problem_size = "problem: %d, %d, %d\n" % (p.m(), p.n(), p.k())
print(problem_size)
def run(self, problem_count: int, alpha: float = 1.0, beta: float = 0.0) -> bool:
assert get_allocated_size() == 0, (
"%d byte of pool memory is not released in previous run"
% get_allocated_size()
)
# initialize
passed = False
np.random.seed(self.seed)
# generate the problem sizes
problem_sizes = []
tensor_As = []
tensor_Bs = []
tensor_Cs = []
tensor_Ds = []
tensor_D_refs = []
for i in range(problem_count):
if self.dtype_A == np.int8:
if i == 0:
problem_size = cutlass_bindings.gemm.GemmCoord(48, 16, 32)
else:
problem_size = cutlass_bindings.gemm.GemmCoord(
16 * np.random.randint(0, 64) + 48,
16 * np.random.randint(0, 64) + 48,
16 * np.random.randint(0, 64) + 48,
)
else:
if i == 0:
problem_size = cutlass_bindings.gemm.GemmCoord(48, 16, 8)
else:
problem_size = cutlass_bindings.gemm.GemmCoord(
8 * np.random.randint(0, 64) + 24,
8 * np.random.randint(0, 64) + 24,
8 * np.random.randint(0, 64) + 24,
)
tensor_As.append(
self.uniform_init(
size=(problem_size.m() * problem_size.k(),), dtype=self.dtype_A
)
)
tensor_Bs.append(
self.uniform_init(
size=(problem_size.n() * problem_size.k(),), dtype=self.dtype_B
)
)
tensor_Cs.append(
self.uniform_init(
size=(problem_size.m() * problem_size.n(),), dtype=self.dtype_C
)
)
tensor_Ds.append(
np.zeros(
shape=(problem_size.m() * problem_size.n(),), dtype=self.dtype_D
)
)
tensor_D_refs.append(
np.ones(
shape=(problem_size.m() * problem_size.n(),), dtype=self.dtype_D
)
)
problem_sizes.append(problem_size)
arguments = GemmGroupedArguments(
operation=self.operation,
problem_sizes=problem_sizes,
A=tensor_As,
B=tensor_Bs,
C=tensor_Cs,
D=tensor_Ds,
output_op=self.operation.epilogue_type(alpha, beta),
)
self.operation.run(arguments)
arguments.sync()
#
# Reference check
#
alpha = self.compute_type(alpha).value()
beta = self.compute_type(beta).value()
init_acc = self.accumulator_type(0).value()
for idx, problem_size in enumerate(problem_sizes):
if self.operation.switched:
tensor_ref_A = getTensorRef(
tensor_As[idx],
problem_size,
"a",
transpose(self.operation.B.layout),
)
tensor_ref_B = getTensorRef(
tensor_Bs[idx],
problem_size,
"b",
transpose(self.operation.A.layout),
)
tensor_ref_C = getTensorRef(
tensor_Cs[idx],
problem_size,
"c",
transpose(self.operation.C.layout),
)
tensor_ref_D_ref = getTensorRef(
tensor_D_refs[idx],
problem_size,
"d",
transpose(self.operation.C.layout),
)
else:
tensor_ref_A = getTensorRef(
tensor_As[idx], problem_size, "a", self.operation.A.layout
)
tensor_ref_B = getTensorRef(
tensor_Bs[idx], problem_size, "b", self.operation.B.layout
)
tensor_ref_C = getTensorRef(
tensor_Cs[idx], problem_size, "c", self.operation.C.layout
)
tensor_ref_D_ref = getTensorRef(
tensor_D_refs[idx], problem_size, "d", self.operation.C.layout
)
tensor_view_D_ref = getTensorView(
tensor_D_refs[idx], problem_size, "d", self.operation.C.layout
)
cutlass_bindings.test.gemm.host.gemm(
problem_size,
alpha,
tensor_ref_A,
tensor_ref_B,
beta,
tensor_ref_C,
tensor_ref_D_ref,
init_acc,
)
tensor_view_D = getTensorView(
tensor_Ds[idx], problem_size, "d", self.operation.C.layout
)
passed = cutlass_bindings.test.gemm.host.equals(
tensor_view_D, tensor_view_D_ref
)
try:
assert passed
except AssertionError:
self.print_problem_size(problem_size)
del arguments
assert get_allocated_size() == 0, (
"%d byte of pool memory is not released after current run"
% get_allocated_size()
)
return passed
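# Hedged usage sketch (not part of the original file): a grouped GEMM operation
# assembled elsewhere (e.g., via GemmOperationGrouped) is verified against the host
# reference over a couple of dozen randomly generated problem sizes.
def _example_run_grouped_testbed(grouped_operation: GemmOperationGrouped) -> bool:
    testbed = TestbedGrouped(operation=grouped_operation)
    # Non-trivial alpha/beta exercise the epilogue as well as the mainloop.
    return testbed.run(problem_count=24, alpha=2.0, beta=1.0)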
| cutlass-main | python/cutlass/backend/test/gemm_grouped_testbed.py |
#################################################################################################
#
# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
import os
import re
import subprocess
from time import sleep
from bfloat16 import bfloat16
import cutlass_bindings
import numpy as np
from cutlass.backend import compiler
from cutlass.backend.conv2d_operation import Conv2dArguments, Conv2dOperation
from cutlass.backend.library import DataTypeSize, ShortDataTypeNames, StrideSupport
from cutlass.backend.memory_manager import get_allocated_size
from cutlass.backend.reduction_operation import ReductionArguments, ReductionOperation
from cutlass.backend.test.profiler import GpuTimer
from cutlass.backend.utils.software import SubstituteTemplate
def getTensorRef(tensor, tensor_layout, conv_kind, problem_size, operand):
ptr = tensor.__array_interface__["data"][0]
if operand == "a":
tensor_coord = cutlass_bindings.conv.implicit_gemm_tensor_a_extent(
conv_kind, problem_size
)
elif operand == "b":
tensor_coord = cutlass_bindings.conv.implicit_gemm_tensor_b_extent(
conv_kind, problem_size
)
elif operand in ["c", "d"]:
tensor_coord = cutlass_bindings.conv.implicit_gemm_tensor_c_extent(
conv_kind, problem_size
)
else:
raise ValueError("unknown operand: " + operand)
layout = tensor_layout.packed(tensor_coord)
if tensor.dtype == np.float64:
return cutlass_bindings.TensorRefF64NHWC(ptr, layout)
elif tensor.dtype == np.float32:
return cutlass_bindings.TensorRefF32NHWC(ptr, layout)
elif tensor.dtype == np.float16:
return cutlass_bindings.TensorRefF16NHWC(ptr, layout)
    elif tensor.dtype == bfloat16:
return cutlass_bindings.TensorRefBF16NHWC(ptr, layout)
elif tensor.dtype == np.int32:
return cutlass_bindings.TensorRefS32NHWC(ptr, layout)
elif tensor.dtype == np.int8:
if tensor_layout == cutlass_bindings.TensorNC32HW32:
return cutlass_bindings.TensorRefS8NC32HW32(ptr, layout)
elif tensor_layout == cutlass_bindings.TensorC32RSK32:
return cutlass_bindings.TensorRefS8C32RSK32(ptr, layout)
else:
return cutlass_bindings.TensorRefS8NHWC(ptr, layout)
else:
raise ValueError("unsupported data type")
def getTensorView(tensor, tensor_layout, conv_kind, problem_size, operand):
tensor_ref = getTensorRef(tensor, tensor_layout, conv_kind, problem_size, operand)
if operand == "a":
tensor_coord = cutlass_bindings.conv.implicit_gemm_tensor_a_extent(
conv_kind, problem_size
)
elif operand == "b":
tensor_coord = cutlass_bindings.conv.implicit_gemm_tensor_b_extent(
conv_kind, problem_size
)
elif operand in ["c", "d"]:
tensor_coord = cutlass_bindings.conv.implicit_gemm_tensor_c_extent(
conv_kind, problem_size
)
else:
raise ValueError("unknown operand: " + operand)
if tensor.dtype == np.float64:
return cutlass_bindings.TensorViewF64NHWC(tensor_ref, tensor_coord)
elif tensor.dtype == np.float32:
return cutlass_bindings.TensorViewF32NHWC(tensor_ref, tensor_coord)
elif tensor.dtype == np.float16:
return cutlass_bindings.TensorViewF16NHWC(tensor_ref, tensor_coord)
elif tensor.dtype == bfloat16:
return cutlass_bindings.TensorViewBF16NHWC(tensor_ref, tensor_coord)
elif tensor.dtype == np.int32:
return cutlass_bindings.TensorViewS32NHWC(tensor_ref, tensor_coord)
elif tensor.dtype == np.int8:
if tensor_layout == cutlass_bindings.TensorNC32HW32:
return cutlass_bindings.TensorViewS8NC32HW32(tensor_ref, tensor_coord)
elif tensor_layout == cutlass_bindings.TensorC32RSK32:
return cutlass_bindings.TensorViewS8C32RSK32(tensor_ref, tensor_coord)
else:
return cutlass_bindings.TensorViewS8NHWC(tensor_ref, tensor_coord)
else:
raise ValueError("unsupported data type")
class Conv2dLauncher:
"""
    Launcher that runs the operation on a given problem size
"""
def __init__(
self,
operation: "Conv2dOperation",
seed: int = 2080,
interleaved=False,
verification=True,
profiling=False,
warmup_iterations=500,
iterations=500,
compilation_mode="nvcc",
**kwargs,
) -> None:
self.enable_cached_results = True
self.interleaved = interleaved
# create the reduction kernel
self.reduction_operation = ReductionOperation(
shape=cutlass_bindings.MatrixCoord(4, 32 * operation.C.alignment),
C=operation.C,
element_accumulator=operation.tile_description.math_instruction.element_accumulator,
element_compute=operation.epilogue_functor.element_epilogue,
epilogue_functor=operation.epilogue_functor,
count=operation.C.alignment,
)
#: verify the output result
self.verification = verification
#: profile the kernel's runtime
self.profiling = profiling
self.timer = GpuTimer()
self.warmup_iterations = warmup_iterations
self.iterations = iterations
if "sleep" in kwargs.keys():
self.sleep_time = kwargs["sleep"]
else:
self.sleep_time = 0
#
# Compile the operator
#
if compilation_mode == "nvcc":
compiler.nvcc()
elif compilation_mode == "nvrtc":
compiler.nvrtc()
else:
raise Exception(f"Unexpected compilation mode {compilation_mode}")
compiler.add_module([operation, self.reduction_operation])
self.operation = operation
self.dtype_A = Conv2dLauncher.numpy_type(operation.A.element)
self.layout_A = operation.A.layout
self.dtype_B = Conv2dLauncher.numpy_type(operation.B.element)
self.layout_B = operation.B.layout
self.dtype_C = Conv2dLauncher.numpy_type(operation.C.element)
self.layout_C = operation.C.layout
self.dtype_D = Conv2dLauncher.numpy_type(operation.C.element)
self.layout_D = operation.C.layout
accumulator_size = DataTypeSize[
operation.tile_description.math_instruction.element_accumulator
]
element_size = DataTypeSize[operation.A.element]
if element_size <= 8:
self.randomization_max = 1
elif element_size == 16:
if accumulator_size <= 16:
self.randomization_max = 2
else:
self.randomization_max = 4
else:
self.randomization_max = 7
# Seed
self.seed = seed
self.conv_kind = operation.conv_kind
#
# Get the host reference function
#
self.element_compute = operation.epilogue_functor.element_epilogue
self.host_conv2d = cutlass_bindings.test.conv.host.conv2d
self.timer = GpuTimer()
@staticmethod
def numpy_type(type):
if type == cutlass_bindings.float64:
return np.float64
elif type == cutlass_bindings.float32:
return np.float32
elif type == cutlass_bindings.float16:
return np.float16
elif type == cutlass_bindings.bfloat16:
return bfloat16
elif type == cutlass_bindings.int32:
return np.int32
elif type == cutlass_bindings.int8:
return np.int8
else:
raise ValueError("unsupported type: %s" % ShortDataTypeNames[type])
def print_problem_size(self, p, split_k_mode=1):
print(
"nhwc_%dx%dx%dx%d_krsc_%dx%dx%dx%d_padding_%dx%d_stride_%dx%d_dilation_%dx%d_splitkslices_%d_splitkmode_%d"
% (
p.N,
p.H,
p.W,
p.C,
p.K,
p.R,
p.S,
p.C,
p.pad_h,
p.pad_w,
p.stride_h,
p.stride_w,
p.dilation_h,
p.dilation_w,
p.split_k_slices,
split_k_mode,
)
)
def uniform_init(self, size, dtype):
if dtype in [np.float32, np.float16, bfloat16, np.float64]:
return np.ceil(
np.random.uniform(
low=-self.randomization_max - 0.5, high=self.randomization_max - 0.5, size=size
).astype(dtype)
)
else:
return np.random.uniform(
low=-self.randomization_max - 1, high=self.randomization_max + 1, size=size
).astype(dtype)
def eq_gemm_size(self, problem_size):
n = problem_size.N
p = problem_size.P
q = problem_size.Q
k = problem_size.K
r = problem_size.R
s = problem_size.S
c = problem_size.C
h = problem_size.H
w = problem_size.W
if self.conv_kind == cutlass_bindings.conv.Operator.fprop:
return cutlass_bindings.gemm.GemmCoord(n * p * q, k, r * s * c)
elif self.conv_kind == cutlass_bindings.conv.Operator.dgrad:
return cutlass_bindings.gemm.GemmCoord(n * h * w, c, k * r * s)
else:
return cutlass_bindings.gemm.GemmCoord(k, r * s * c, n * p * q)
def bytes(self, problem_size, alpha, beta):
mnk = self.eq_gemm_size(problem_size)
bytes_ = (
(DataTypeSize[self.operation.A.element] * mnk.m() // 8) * mnk.k()
+ (DataTypeSize[self.operation.B.element] * mnk.n() // 8) * mnk.k()
+ (DataTypeSize[self.operation.C.element] * mnk.m() // 8) * mnk.n()
)
if beta != 0:
bytes_ += (DataTypeSize[self.operation.C.element] * mnk.m() // 8) * mnk.n()
return bytes_
def flops(self, problem_size):
mnk = self.eq_gemm_size(problem_size)
flops_mainloop_ = mnk.m() * mnk.n() * mnk.k() * 2
flops_epilogue_ = mnk.m() * mnk.n() * 2
# Adjust mainloop flop for dgrad stride
if self.conv_kind == cutlass_bindings.conv.Operator.dgrad:
flops_mainloop_ = flops_mainloop_ // (
problem_size.stride_h * problem_size.stride_w
)
flops_total_ = flops_mainloop_ + flops_epilogue_
return flops_total_
def host_reference(self, problem_size, tensor_A, tensor_B, tensor_C, alpha, beta):
if self.element_compute == cutlass_bindings.float16:
alpha = cutlass_bindings.float16(alpha)
beta = cutlass_bindings.float16(beta)
elif self.element_compute == cutlass_bindings.int32:
alpha = int(alpha)
beta = int(beta)
else:
alpha = alpha
beta = beta
# if cached result is loaded
cached_result_loaded = False
if self.enable_cached_results:
# get problem key
cached_test_key = cutlass_bindings.test.conv.host.CreateCachedConv2dTestKey(
self.conv_kind,
problem_size,
alpha,
beta,
getTensorView(
tensor_A, self.layout_A, self.conv_kind, problem_size, "a"
),
getTensorView(
tensor_B, self.layout_B, self.conv_kind, problem_size, "b"
),
getTensorView(
tensor_C, self.layout_C, self.conv_kind, problem_size, "c"
),
)
cached_test_result = cutlass_bindings.test.conv.host.CachedTestResult()
conv2d_result_cache_name = "cached_results_SM%d_%d.txt" % (
self.operation.arch,
self.seed,
)
cached_results = cutlass_bindings.test.conv.host.CachedTestResultListing(
conv2d_result_cache_name
)
# CachedTestResultListing cached_results(conv2d_result_cache_name);
cached = cached_results.find(cached_test_key)
cached_result_loaded = cached[0]
if cached_result_loaded:
cached_test_result = cached[1]
if not cached_result_loaded:
# compute the conv2d on host
tensor_D_ref = np.ones_like(tensor_C)
tensor_ref_A = getTensorRef(
tensor_A, self.layout_A, self.conv_kind, problem_size, "a"
)
tensor_ref_B = getTensorRef(
tensor_B, self.layout_B, self.conv_kind, problem_size, "b"
)
tensor_ref_C = getTensorRef(
tensor_C, self.layout_C, self.conv_kind, problem_size, "c"
)
tensor_ref_D_ref = getTensorRef(
tensor_D_ref, self.layout_D, self.conv_kind, problem_size, "d"
)
self.host_conv2d(
self.conv_kind,
problem_size,
tensor_ref_A,
tensor_ref_B,
tensor_ref_C,
tensor_ref_D_ref,
alpha,
beta,
)
tensor_view_D_ref = getTensorView(
tensor_D_ref, self.layout_D, self.conv_kind, problem_size, "d"
)
if self.enable_cached_results:
cached_test_result.D = cutlass_bindings.test.conv.host.TensorHash(
tensor_view_D_ref
)
cached_results = (
cutlass_bindings.test.conv.host.CachedTestResultListing(
conv2d_result_cache_name
)
)
cached_results.append(cached_test_key, cached_test_result)
cached_results.write(conv2d_result_cache_name)
else:
return tensor_D_ref
return cached_test_result.D
def equal(self, tensor_D, tensor_D_ref, problem_size):
if self.enable_cached_results:
tensor_view_D = getTensorView(
tensor_D, self.layout_D, self.conv_kind, problem_size, "d"
)
tensor_D_hash = cutlass_bindings.test.conv.host.TensorHash(tensor_view_D)
return tensor_D_hash == tensor_D_ref
else:
tensor_view_D = getTensorView(
tensor_D, self.layout_D, self.conv_kind, problem_size, "d"
)
tensor_view_D_ref = getTensorView(
tensor_D_ref, self.layout_D, self.conv_kind, problem_size, "d"
)
return cutlass_bindings.test.conv.host.equals(
tensor_view_D, tensor_view_D_ref
)
def run_cutlass_profiler(
self,
problem_size,
split_k_mode=cutlass_bindings.conv.SplitKMode.Serial,
alpha=1.0,
beta=0.0,
):
if split_k_mode == cutlass_bindings.conv.SplitKMode.Serial:
split_k_mode_ = "serial"
else:
split_k_mode_ = "parallel"
cutlass_path = os.getenv("CUTLASS_PATH")
assert (
cutlass_path is not None
), "Environment variable 'CUTLASS_PATH' is not defined."
values = {
"profiler_path": cutlass_path + "/build/tools/profiler/cutlass_profiler",
"kernel_name": self.operation.procedural_name(),
"verification_providers": "device",
"provider": "cutlass",
"n": str(problem_size.N),
"h": str(problem_size.H),
"w": str(problem_size.W),
"c": str(problem_size.C),
"k": str(problem_size.K),
"r": str(problem_size.R),
"s": str(problem_size.S),
"p": str(problem_size.P),
"q": str(problem_size.Q),
"pad_h": str(problem_size.pad_h),
"pad_w": str(problem_size.pad_w),
"stride_h": str(problem_size.stride_h),
"stride_w": str(problem_size.stride_w),
"dilation_h": str(problem_size.dilation_h),
"dilation_w": str(problem_size.dilation_w),
"split_k_slices": str(problem_size.split_k_slices),
"split_k_mode": split_k_mode_,
"alpha": str(alpha),
"beta": str(beta),
"warmup": str(self.warmup_iterations),
"profile": str(self.iterations),
}
cmd_template = (
"${profiler_path} --kernels=${kernel_name} --verification-providers=${verification_providers}"
" --providers=${provider} --n=${n} --h=${h} --w=${w} --c=${c} --k=${k} --r=${r} --s=${s} --p=${p}"
" --q=${q} --pad_h=${pad_h} --pad_w=${pad_w} --stride_h={stride_h} --stride_w=${stride_w}"
" --dilation_h=${dilation_h} --dilation_w=${dilation_w} --warmup-iterations=${warmup} --profiling-iterations=${profile}"
" --split_k_slices=${split_k_slices} --alpha=${alpha} --beta=${beta} --split_k_mode=${split_k_mode}"
)
cmd = SubstituteTemplate(cmd_template, values)
result = subprocess.getoutput(cmd)
m = re.search(r"Runtime:\s+(?P<runtime>\d+.\d+)", result)
runtime = float(m.group("runtime"))
m = re.search(r"Bytes:\s+(?P<bytes>\d+)", result)
bytes = int(m.group("bytes"))
m = re.search(r"FLOPs:\s+(?P<flops>\d+)", result)
flops = int(m.group("flops"))
# check if the problem size matches
assert bytes == self.bytes(problem_size, alpha, beta)
assert flops == self.flops(problem_size)
return runtime
def run(
self,
problem_size,
split_k_mode=cutlass_bindings.conv.SplitKMode.Serial,
alpha=1.0,
beta=0.0,
):
assert get_allocated_size() == 0, (
"%d byte of pool memory is not released in previous run"
% get_allocated_size()
)
#
# Initialize input and output tensors
#
tensor_A_size = cutlass_bindings.conv.implicit_gemm_tensor_a_size(
self.conv_kind, problem_size
)
tensor_B_size = cutlass_bindings.conv.implicit_gemm_tensor_b_size(
self.conv_kind, problem_size
)
tensor_C_size = cutlass_bindings.conv.implicit_gemm_tensor_c_size(
self.conv_kind, problem_size
)
np.random.seed(self.seed)
tensor_A = self.uniform_init(size=(tensor_A_size,), dtype=self.dtype_A)
tensor_B = self.uniform_init(size=(tensor_B_size,), dtype=self.dtype_B)
tensor_C = self.uniform_init(size=(tensor_C_size,), dtype=self.dtype_C)
tensor_D = np.zeros(shape=(tensor_C_size,), dtype=self.dtype_D)
#
# Launch kernel
#
arguments = Conv2dArguments(
operation=self.operation,
problem_size=problem_size,
A=tensor_A,
B=tensor_B,
C=tensor_C,
D=tensor_D,
output_op=self.operation.epilogue_type(alpha, beta),
split_k_slices=problem_size.split_k_slices,
split_k_mode=split_k_mode,
)
if split_k_mode == cutlass_bindings.conv.SplitKMode.Parallel:
implicit_gemm_size = cutlass_bindings.conv.implicit_gemm_problem_size(
self.operation.conv_kind, arguments.problem_size
)
reduction_arguments = ReductionArguments(
self.reduction_operation,
problem_size=[implicit_gemm_size.m(), implicit_gemm_size.n()],
partitions=problem_size.split_k_slices,
workspace=arguments.ptr_D,
destination=tensor_D,
source=tensor_C,
output_op=self.reduction_operation.epilogue_type(alpha, beta),
)
self.operation.run(arguments)
if split_k_mode == cutlass_bindings.conv.SplitKMode.Parallel:
self.reduction_operation.run(reduction_arguments)
passed = True
if self.verification:
if split_k_mode == cutlass_bindings.conv.SplitKMode.Parallel:
reduction_arguments.sync()
else:
arguments.sync()
tensor_D_ref = self.host_reference(
problem_size, tensor_A, tensor_B, tensor_C, alpha, beta
)
passed = self.equal(tensor_D, tensor_D_ref, problem_size)
try:
assert passed
except AssertionError:
self.print_problem_size(problem_size, split_k_mode)
if self.profiling:
sleep(self.sleep_time)
for _ in range(self.warmup_iterations):
self.operation.run(arguments)
if split_k_mode == cutlass_bindings.conv.SplitKMode.Parallel:
self.reduction_operation.run(reduction_arguments)
self.timer.start()
            for _ in range(self.iterations):
self.operation.run(arguments)
if split_k_mode == cutlass_bindings.conv.SplitKMode.Parallel:
self.reduction_operation.run(reduction_arguments)
self.timer.stop_and_wait()
runtime = self.timer.duration(self.iterations)
# free memory
del arguments
if split_k_mode == cutlass_bindings.conv.SplitKMode.Parallel:
del reduction_arguments
assert get_allocated_size() == 0, (
"%d byte of pool memory is not released after current run"
% get_allocated_size()
)
if self.profiling:
return runtime
return passed
########################################################################################################
# TestAllConv: Runs cutlass::conv::device::ImplicitGemmConvolution operator and compares it with reference
# TestAllConv runs conv operator on default conv problem sizes from test::conv::device::TestbedConv2dProblemSizes
# Additionally, each conv2d test can provide conv problem sizes (conv_test_sizes) and a blacklist of sizes
# (conv_blacklist_sizes)
############################################################################################################
def test_all_conv2d_from_compilation_mode(
operation: Conv2dOperation,
conv_test_sizes,
interleaved,
compilation_mode):
passed = True
testbed = Conv2dLauncher(operation, interleaved=interleaved, compilation_mode=compilation_mode)
#
# Get conv problem sizes to run conv operator
#
conv_problems = cutlass_bindings.test.conv.TestbedConv2dProblemSizes(64)
# Vector of conv2d problem sizes to avoid duplicate runs
conv_tested_sizes = []
# Flatten 2D problem_vectors into a 1D problem sizes
problem_sizes = conv_problems.conv2d_default_sizes
problem_sizes = [conv_problem for conv_problem in problem_sizes] + conv_test_sizes
# Sweep conv2d problem sizes (split-k-mode=kSerial, split-k-slices=1, alpha=1.0, beta=0.0)
for conv_problem in problem_sizes:
if conv_problem in conv_tested_sizes:
continue
# skip channel dimension % 32 != 0 for interleaved case
if interleaved:
if conv_problem.K % 32 != 0 or conv_problem.C % 32 != 0:
continue
#
# Procedurally disable certain cases
#
# CUTLASS DGRAD's *unity* stride specialization only support stride {1, 1}
if (
operation.conv_kind == cutlass_bindings.conv.Operator.dgrad
and operation.stride_support == StrideSupport.Unity
):
if not ((conv_problem.stride_h == 1) and (conv_problem.stride_w == 1)):
continue
if not interleaved:
# Fixed channels algorithm requires channel count to match access size
if (
operation.iterator_algorithm
== cutlass_bindings.conv.IteratorAlgorithm.fixed_channels
):
if conv_problem.C != operation.A.alignment:
continue
# Few channels algorithm requires channel count to match access size
if (
operation.iterator_algorithm
== cutlass_bindings.conv.IteratorAlgorithm.few_channels
):
if conv_problem.C % operation.A.alignment:
continue
# CUTLASS DGRAD's *strided* stride specialization supports all stride {stride_h, stride_w}
# Although strided dgrad works for all stride combinations, we are only going
# to run strided dgrad for non-unity strides
if (
operation.conv_kind == cutlass_bindings.conv.Operator.dgrad
and operation.stride_support == StrideSupport.Strided
):
if (conv_problem.stride_h == 1) and (conv_problem.stride_w == 1):
continue
#
# Test
#
# push back tested problem size to avoid re-running duplicates
conv_tested_sizes.append(conv_problem)
passed = testbed.run(conv_problem)
if not passed:
return False
if interleaved:
return True
#
# filter the cases for split K
#
# Small-channels convolution can't run here.
if operation.iterator_algorithm in [
cutlass_bindings.conv.IteratorAlgorithm.fixed_channels,
cutlass_bindings.conv.IteratorAlgorithm.few_channels,
]:
return True
    # CUTLASS DGRAD's *strided* specialization does not support split-k mode
if (
operation.conv_kind == cutlass_bindings.conv.Operator.dgrad
and operation.stride_support == StrideSupport.Strided
):
conv_problem = cutlass_bindings.conv.Conv2dProblemSize(
cutlass_bindings.Tensor4DCoord(1, 56, 56, 8),
cutlass_bindings.Tensor4DCoord(8, 1, 1, 8),
cutlass_bindings.Tensor4DCoord(0, 0, 0, 0),
cutlass_bindings.MatrixCoord(2, 2),
cutlass_bindings.MatrixCoord(1, 1),
cutlass_bindings.conv.Mode.cross_correlation,
1,
1,
)
passed = testbed.run(conv_problem)
return passed
    # Sweep split-k slices using serial and parallel reduction with non-unity alpha and non-zero beta for
    # a single conv2d problem size. Convolution unit tests take a long time to run, so only sweep parameters
    # that are absolutely necessary to catch functional bugs. The code below provides the option to sweep
    # alpha and beta for local testing, but only runs one value for each.
conv2d_split_k_test_size = cutlass_bindings.conv.Conv2dProblemSize(
cutlass_bindings.Tensor4DCoord(1, 17, 11, 288),
cutlass_bindings.Tensor4DCoord(160, 3, 3, 288),
cutlass_bindings.Tensor4DCoord(1, 1, 1, 1),
cutlass_bindings.MatrixCoord(1, 1),
cutlass_bindings.MatrixCoord(1, 1),
cutlass_bindings.conv.Mode.cross_correlation,
1,
1,
)
split_k_modes = [
cutlass_bindings.conv.SplitKMode.Parallel,
cutlass_bindings.conv.SplitKMode.Serial,
]
split_k_slices = [1, 2, 3, 4, 201]
problem_alpha = [
2.0,
]
problem_beta = [
2.0,
]
for split_k_mode in split_k_modes:
for split_k_slice in split_k_slices:
for alpha in problem_alpha:
for beta in problem_beta:
passed = testbed.run(
conv2d_split_k_test_size.reset_split_k_slices(split_k_slice),
split_k_mode,
alpha,
beta,
)
return passed
def test_all_conv2d(
operation: Conv2dOperation,
conv_test_sizes=[],
interleaved=False,
compilation_modes=["nvcc", "nvrtc"]):
for compilation_mode in compilation_modes:
passed = test_all_conv2d_from_compilation_mode(operation, conv_test_sizes, interleaved, compilation_mode)
if not passed:
return False
return True
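# Hedged usage sketch (not part of the original file): sweep the default conv2d
# problem sizes plus one extra size for an operation built elsewhere, restricting
# the sweep to nvcc compilation. The extra problem size below is illustrative only.
def _example_test_all_conv2d(operation: Conv2dOperation) -> bool:
    extra_size = cutlass_bindings.conv.Conv2dProblemSize(
        cutlass_bindings.Tensor4DCoord(1, 8, 8, 32),
        cutlass_bindings.Tensor4DCoord(16, 3, 3, 32),
        cutlass_bindings.Tensor4DCoord(1, 1, 1, 1),
        cutlass_bindings.MatrixCoord(1, 1),
        cutlass_bindings.MatrixCoord(1, 1),
        cutlass_bindings.conv.Mode.cross_correlation,
        1,
        1,
    )
    return test_all_conv2d(operation, conv_test_sizes=[extra_size], compilation_modes=["nvcc"])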
| cutlass-main | python/cutlass/backend/test/conv2d_testbed.py |
################################################################################
#
# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
################################################################################
from cutlass.backend.test.conv2d_testbed import *
from cutlass.backend.test.gemm_grouped_testbed import *
from cutlass.backend.test.gemm_testbed import *
from cutlass.backend.test.profiler import *
| cutlass-main | python/cutlass/backend/test/__init__.py |
#################################################################################################
#
# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
import cutlass
import cutlass_bindings
from cutlass import EpilogueScheduleSuffixes, KernelScheduleSuffixes
from cutlass.utils.datatypes import binding_opclass, binding_type
from cutlass.backend import library
from cutlass.backend.test.gemm_testbed import test_all_gemm
from cutlass.backend.utils.software import SubstituteTemplate
class Layout:
"""
Utility class to map transpose and non-transpose terminology to row- and column-major terminology
"""
T = cutlass_bindings.RowMajor
N = cutlass_bindings.ColumnMajor
class LayoutCombination:
"""
    Utility class defining all combinations of row- and column-major layouts for operands to a GEMM
"""
NNN = (Layout.N, Layout.N, Layout.N)
NNT = (Layout.N, Layout.N, Layout.T)
NTN = (Layout.N, Layout.T, Layout.N)
NTT = (Layout.N, Layout.T, Layout.T)
TNN = (Layout.T, Layout.N, Layout.N)
TNT = (Layout.T, Layout.N, Layout.T)
TTN = (Layout.T, Layout.T, Layout.N)
TTT = (Layout.T, Layout.T, Layout.T)
def get_name(
layouts,
alignments,
element_output,
element_accumulator,
element_epilogue,
cluster_shape,
threadblock_shape,
stages,
element_a,
element_b,
arch,
opclass,
kernel_schedule=None,
epilogue_schedule=None,
suffix="",
):
"""
Generates a procedural name for a test case.
:param layouts: indexable container of layouts of A, B, and C operands
:param alignments: indexable container of alignments of A, B, and C operands
:param element_output: data type of the output element
:param element_accumulator: data type used in accumulation
:param element_epilogue: data type used in computing the epilogue
:param cluster_shape: indexable container of dimensions of threadblock cluster to be launched
:param threadblock_shape: indexable container of dimensions of threadblock tiles
:param stages: number of pipeline stages to use in the kernel
:type stages: int
:param element_a: data type of operand A
:param element_b: data type of operand B
:param arch: compute capability of kernel being generated
:type arch: int
:param opclass: class of operation being performed (e.g., SIMT, Tensor Core)
:type opclass: cutlass_bindings.OpClass
:param kernel_schedule: kernel_schedule type
:type kernel_schedule: cutlass.KernelScheduleType
:param epilogue_schedule: epilogue_schedule type
:type epilogue_schedule: cutlass.EpilogueScheduleType
:param suffix: additional string to add to the suffix of the name
:type suffix: str
:return: str
"""
name_format = "test_SM${arch}_Device_Gemm_${eA}${lA}_${eB}${lB}_${eC}${lC}_${opclass}_${acc}_${tbM}x${tbN}x${tbK}_${cM}x${cN}x${cK}_${stages}_align${aA}-${aB}-${aC}${k}${e}${suffix}"
return SubstituteTemplate(
name_format,
{
"arch": str(arch),
"eA": library.DataTypeNames[binding_type(element_a)],
"eB": library.DataTypeNames[binding_type(element_b)],
"eC": library.DataTypeNames[binding_type(element_output)],
"lA": library.ShortLayoutTypeNames[layouts[0]],
"lB": library.ShortLayoutTypeNames[layouts[1]],
"lC": library.ShortLayoutTypeNames[layouts[2]],
"opclass": library.OpcodeClassNames[binding_opclass(opclass)],
"acc": library.DataTypeNames[binding_type(element_accumulator)],
"cM": str(cluster_shape[0]),
"cN": str(cluster_shape[1]),
"cK": str(cluster_shape[2]),
"tbM": str(threadblock_shape[0]),
"tbN": str(threadblock_shape[1]),
"tbK": str(threadblock_shape[2]),
"stages": str(stages) if stages is not None else "auto",
"aA": str(alignments[0]),
"aB": str(alignments[1]),
"aC": str(alignments[2]),
"k": "" if kernel_schedule is None else KernelScheduleSuffixes[kernel_schedule],
"e": "" if epilogue_schedule is None else EpilogueScheduleSuffixes[epilogue_schedule],
"suffix": "" if suffix is None else suffix,
},
)
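# Hedged sketch (not part of the original file): build the procedural name for a
# hypothetical SM80 f16 tensor-op GEMM test. The cutlass.DataType and
# cutlass.OpcodeClass member names are assumptions about the cutlass enums.
def _example_gemm_test_name() -> str:
    return get_name(
        layouts=LayoutCombination.TNT,
        alignments=[8, 8, 8],
        element_output=cutlass.DataType.f16,
        element_accumulator=cutlass.DataType.f32,
        element_epilogue=cutlass.DataType.f32,
        cluster_shape=[1, 1, 1],
        threadblock_shape=[128, 128, 32],
        stages=3,
        element_a=cutlass.DataType.f16,
        element_b=cutlass.DataType.f16,
        arch=80,
        opclass=cutlass.OpcodeClass.TensorOp,
        suffix="_example",
    )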
def get_name_conv2d(
arch,
conv_kind,
element,
element_accumulator,
element_output,
opclass,
threadblock_shape,
warp_count,
instruction_shape,
stages,
iterator_algorithm,
swizzle,
split_k_mode,
split_k_slices,
activation
):
"""
Generates a procedural name for a test case for conv2d
:param arch: compute capability of kernel being generated
:type arch: int
:param conv_kind: the convolution type (i.e. fprop, dgrad, wgrad)
:type conv_kind: str
:param iterator_algorithm: the iterator algorithm applied
:type iterator_algorithm: cutlass_bindings.conv.IteratorAlgorithm
:param element_a: data type of operand A
:param element_b: data type of operand B
:param element_c: data type of operand C
:param element_accumulator: data type used in accumulation
:param opclass: class of operation being performed (e.g., SIMT, Tensor Core)
:type opclass: cutlass_bindings.OpClass
:param threadblock_shape: indexable container of dimensions of threadblock tiles
:param stages: number of pipeline stages to use in the kernel
:type stages: int
:param stride_support: stride support of dgrad
:param alignment: int
:type alignment: int
:return: str
"""
if iterator_algorithm is None:
iterator_algorithm = "AUTO"
if swizzle is None:
swizzle = 1
name_format = "test_SM${arch}_Device_Conv2d_${conv_kind}_${iter_alg}_ImplicitGemm_${eA}nhwc_${eB}nhwc_${eC}nhwc_${opclass}_${acc}_${tbM}x${tbN}x${tbK}_${wM}x${wN}x${wK}_${IM}${IN}${IK}_stage${stages}_swizzle${swizzle}_${split_k_mode}${split_k_slices}_${activation}"
return SubstituteTemplate(
name_format,
{
"arch": str(arch),
"conv_kind": conv_kind,
"iter_alg": iterator_algorithm,
"eA": library.DataTypeNames[binding_type(element)],
"eB": library.DataTypeNames[binding_type(element)],
"eC": library.DataTypeNames[binding_type(element_output)],
"opclass": opclass,
"acc": library.DataTypeNames[binding_type(element_accumulator)],
"tbM": str(threadblock_shape[0]),
"tbN": str(threadblock_shape[1]),
"tbK": str(threadblock_shape[2]),
"wM": str(threadblock_shape[0] // warp_count[0]),
"wN": str(threadblock_shape[1] // warp_count[1]),
"wK": str(threadblock_shape[2] // warp_count[2]),
"IM": str(instruction_shape[0]),
"IN": str(instruction_shape[1]),
"IK": str(instruction_shape[2]),
"stages": str(stages),
"swizzle": str(swizzle),
"split_k_mode": split_k_mode,
"split_k_slices": str(split_k_slices),
"activation": activation
}
)
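# Hedged sketch (not part of the original file): build the procedural name for a
# hypothetical SM80 fprop f16 tensor-op conv2d test. The string-valued arguments
# (conv_kind, opclass, iterator_algorithm, split_k_mode, activation) are substituted
# verbatim into the name template, so the values below are illustrative assumptions.
def _example_conv2d_test_name() -> str:
    return get_name_conv2d(
        arch=80,
        conv_kind="fprop",
        element=cutlass.DataType.f16,
        element_accumulator=cutlass.DataType.f32,
        element_output=cutlass.DataType.f16,
        opclass="tensorop",
        threadblock_shape=[128, 128, 64],
        warp_count=[2, 2, 1],
        instruction_shape=[16, 8, 16],
        stages=3,
        iterator_algorithm="optimized",
        swizzle=1,
        split_k_mode="serial",
        split_k_slices=1,
        activation="identity",
    )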
def add_test_gemm(
cls=None,
cc=None,
element=None,
layouts=None,
alignments=None,
element_output=None,
element_accumulator=None,
cluster_shape=None,
threadblock_shape=None,
warp_count=None,
stages=None,
opclass=None,
swizzle=None,
kernel_schedule=None,
epilogue_schedule=None,
compilation_modes=['nvcc', 'nvrtc']):
"""
Create test-running functions with the given specification and set it as a method of ``cls``.
:param cls: class to which the generated method will be added
:type cls: type
:param cc: compute capability to compile for
:type cc: int
:param element: data type of A and B operands
:type element: cutlass.DataType.f16
:param layouts: layouts of A, B, and C operands
:type layouts: list or tuple
    :param alignments: alignments of A, B, and C operands
:type alignments: list or tuple
:param element_output: data type of the output element
:type element_output: cutlass.DataType
:param element_accumulator: data type used in accumulation
:type element_accumulator: cutlass.DataType
:param cluster_shape: dimensions of clusters
:type cluster_shape: list or tuple
:param threadblock_shape: dimensions of threadblock tiles
:type threadblock_shape: list or tuple
:param warp_count: warps to be launched per threadblock dimension
:type warp_count: list or tuple
:param stages: number of pipeline stages to use in the kernel
:type stages: int
:param opclass: class of operation being performed (e.g., SIMT, Tensor Core)
:type opclass: cutlass.OpClass
:param swizzle: threadblock swizzling functor
:param kernel_schedule: kernel schedule to use
:type kernel_schedule: cutlass.KernelScheduleType
:param epilogue_schedule: epilogue schedule to use
:type epilogue_schedule: cutlass.EpilogueScheduleType
:param compilation_modes: list of compilers to used in testing the kernel (options: 'nvrtc', 'nvcc')
:type compilation_modes: list
"""
for compilation_mode in compilation_modes:
        # Bind the loop variable eagerly so each generated test keeps its own compilation mode
        def run(self, compilation_mode=compilation_mode):
"""
Dynamically-generated function that constructs a GEMM operation and verifies it against
multiple test cases.
"""
element_A = element
element_B = element
layout_A, layout_B, layout_C = layouts
alignment_A, alignment_B, alignment_C = alignments
plan = cutlass.op.Gemm(element_A=element_A, element_B=element_B,
element_C=element_output, element_D=element_output,
layout_A=layout_A, layout_B=layout_B, layout_C=layout_C,
element_accumulator=element_accumulator,
kernel_cc=cc)
plan.opclass = opclass
if swizzle is not None:
plan.swizzling_functor = swizzle
td = plan.tile_descriptions()[0]
td.threadblock_shape = threadblock_shape
td.stages = stages
if warp_count is not None:
td.warp_count = warp_count
td.cluster_shape = cluster_shape
op = plan.construct(tile_description=td, alignment_A=alignment_A, alignment_B=alignment_B, alignment_C=alignment_C)
self.assertTrue(test_all_gemm(op, 'universal', compilation_mode=compilation_mode))
element_epilogue = element_accumulator
name = get_name(
layouts=layouts, alignments=alignments, element_output=element_output, element_accumulator=element_accumulator,
element_epilogue=element_epilogue, cluster_shape=cluster_shape, threadblock_shape=threadblock_shape,
stages=stages, element_a=element, element_b=element, arch=cc, opclass=opclass,
kernel_schedule=kernel_schedule, epilogue_schedule=epilogue_schedule, suffix=f'_{compilation_mode}')
setattr(cls, name, run)
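# Hedged usage sketch (not part of the original file): attach a generated SM80 f16
# tensor-op GEMM test to a fresh unittest.TestCase subclass. The cutlass.DataType and
# cutlass.OpcodeClass member names are assumptions about the cutlass enums; the
# generated method only runs when a unittest runner collects it.
def _example_register_gemm_test():
    import unittest
    class ExampleGemmTests(unittest.TestCase):
        pass
    add_test_gemm(
        cls=ExampleGemmTests,
        cc=80,
        element=cutlass.DataType.f16,
        layouts=LayoutCombination.TNT,
        alignments=[8, 8, 8],
        element_output=cutlass.DataType.f16,
        element_accumulator=cutlass.DataType.f32,
        cluster_shape=[1, 1, 1],
        threadblock_shape=[128, 128, 32],
        warp_count=[2, 2, 1],
        stages=3,
        opclass=cutlass.OpcodeClass.TensorOp,
        compilation_modes=["nvcc"],
    )
    return ExampleGemmTests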
| cutlass-main | python/cutlass/backend/test/utils.py |
#################################################################################################
#
# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
import os
import re
import subprocess
from time import sleep
from bfloat16 import bfloat16
from cuda import cuda, cudart
import cutlass_bindings
import numpy as np
from cutlass.backend import compiler
from cutlass.backend.gemm_operation import GemmArguments, GemmOperationUniversal
from cutlass.backend.library import (
DataTypeSize,
DataTypeSizeBytes,
MathOperation,
ShortDataTypeNames,
)
from cutlass.backend.memory_manager import get_allocated_size
from cutlass.backend.reduction_operation import ReductionArguments, ReductionOperation
from cutlass.backend.test.profiler import GpuTimer
from cutlass.backend.utils.datatypes import to_cutlass
from cutlass.backend.utils.software import SubstituteTemplate
def transpose(layout):
if layout == cutlass_bindings.RowMajor:
return cutlass_bindings.ColumnMajor
elif layout == cutlass_bindings.ColumnMajor:
return cutlass_bindings.RowMajor
elif layout == cutlass_bindings.ColumnMajorInterleaved32:
return cutlass_bindings.RowMajorInterleaved32
elif layout == cutlass_bindings.RowMajorInterleaved32:
return cutlass_bindings.ColumnMajorInterleaved32
def getTensorRef(
tensor: np.ndarray,
problem_size: cutlass_bindings.gemm.GemmCoord,
operand: str,
layout: cutlass_bindings.layout,
batch_offset: int = 0,
):
ptr = tensor.__array_interface__["data"][0]
if operand == "a":
tensor_coord = problem_size.mk()
batch_stride = problem_size.m() * problem_size.k()
elif operand == "b":
tensor_coord = problem_size.kn()
batch_stride = problem_size.k() * problem_size.n()
elif operand in ["c", "d"]:
tensor_coord = problem_size.mn()
batch_stride = problem_size.m() * problem_size.n()
else:
raise ValueError("Unknown operand: " + operand)
elt_size = DataTypeSizeBytes[to_cutlass(tensor.dtype)]
ptr += batch_offset * batch_stride * elt_size
if layout == cutlass_bindings.RowMajor:
layout = cutlass_bindings.RowMajor.packed(tensor_coord)
layout_tag = "RowMajor"
elif layout == cutlass_bindings.ColumnMajor:
layout = cutlass_bindings.ColumnMajor.packed(tensor_coord)
layout_tag = "ColumnMajor"
elif layout == cutlass_bindings.ColumnMajorInterleaved32:
layout = cutlass_bindings.ColumnMajorInterleaved32.packed(tensor_coord)
layout_tag = "ColumnMajorInterleaved32"
elif layout == cutlass_bindings.RowMajorInterleaved32:
layout = cutlass_bindings.RowMajorInterleaved32.packed(tensor_coord)
layout_tag = "RowMajorInterleaved32"
else:
raise ValueError("unsupported layout")
if tensor.dtype == np.float32:
ref_name = "TensorRefF32" + layout_tag
elif tensor.dtype == np.float64:
ref_name = "TensorRefF64" + layout_tag
elif tensor.dtype == np.float16:
ref_name = "TensorRefF16" + layout_tag
elif tensor.dtype == bfloat16:
ref_name = "TensorRefBF16" + layout_tag
elif tensor.dtype == np.int8:
ref_name = "TensorRefS8" + layout_tag
elif tensor.dtype == np.int32:
ref_name = "TensorRefS32" + layout_tag
else:
raise ValueError("unsupported datatype %s" % ShortDataTypeNames[tensor.dtype])
return getattr(cutlass_bindings, ref_name)(ptr, layout)
def getTensorView(
tensor: np.ndarray,
problem_size: cutlass_bindings.gemm.GemmCoord,
operand: str,
layout: str,
batch_offset: int = 0,
):
tensor_ref = getTensorRef(tensor, problem_size, operand, layout, batch_offset)
if operand == "a":
tensor_coord = problem_size.mk()
elif operand == "b":
tensor_coord = problem_size.kn()
elif operand in ["c", "d"]:
tensor_coord = problem_size.mn()
else:
raise ValueError("Unknown operand: " + operand)
if layout == cutlass_bindings.RowMajor:
layout_tag = "RowMajor"
elif layout == cutlass_bindings.ColumnMajor:
layout_tag = "ColumnMajor"
elif layout == cutlass_bindings.ColumnMajorInterleaved32:
layout_tag = "ColumnMajorInterleaved32"
elif layout == cutlass_bindings.RowMajorInterleaved32:
layout_tag = "RowMajorInterleaved32"
else:
raise ValueError("unsupported layout")
if tensor.dtype == np.float32:
ref_name = "TensorViewF32" + layout_tag
elif tensor.dtype == np.float64:
ref_name = "TensorViewF64" + layout_tag
elif tensor.dtype == np.float16:
ref_name = "TensorViewF16" + layout_tag
elif tensor.dtype == bfloat16:
ref_name = "TensorViewBF16" + layout_tag
elif tensor.dtype == np.int32:
ref_name = "TensorViewS32" + layout_tag
elif tensor.dtype == np.int8:
ref_name = "TensorViewS8" + layout_tag
else:
raise ValueError("unsupported datatype")
return getattr(cutlass_bindings, ref_name)(tensor_ref, tensor_coord)
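# Hedged sketch (not part of the original file): wrap operand A of a GEMM problem
# in a TensorView for the host reference, assuming float32 data in row-major layout.
def _example_operand_a_view(problem_size: cutlass_bindings.gemm.GemmCoord):
    tensor_A = np.ones((problem_size.m() * problem_size.k(),), dtype=np.float32)
    return getTensorView(tensor_A, problem_size, "a", cutlass_bindings.RowMajor)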
class GemmUniversalLauncher:
def __init__(
self,
operation: "GemmOperationUniversal",
seed: int = 2080,
interleaved=False,
verification=True,
profiling=False,
warmup_iterations=500,
iterations=500,
compiler_mode: str = "nvcc",
**kwargs,
) -> None:
# create the reduction kernel
self.reduction_operation: ReductionOperation = ReductionOperation(
shape=cutlass_bindings.MatrixCoord(4, 32 * operation.C.alignment),
C=operation.C,
element_accumulator=operation.tile_description.math_instruction.element_accumulator,
element_compute=operation.epilogue_functor.element_epilogue,
epilogue_functor=operation.epilogue_functor,
count=operation.C.alignment,
)
self.math_operation = operation.tile_description.math_instruction.math_operation
#: verify the output result
self.verification = verification
#: profile the kernel's runtime
self.profiling = profiling
self.timer = GpuTimer()
self.warmup_iterations = warmup_iterations
self.iterations = iterations
if "sleep" in kwargs.keys():
self.sleep_time = kwargs["sleep"]
else:
self.sleep_time = 0
#
# Compile the operator
#
if compiler_mode == "nvcc":
compiler.nvcc()
elif compiler_mode == "nvrtc":
compiler.nvrtc()
else:
raise Exception(f"Unexpected compiler string {compiler_mode}")
op_list = [operation]
if operation.arch < 90:
# Split K via Python is currently only supported for pre-SM90 kernels
op_list.append(self.reduction_operation)
compiler.add_module(op_list, bypass_cache=True)
self.operation = operation
self.dtype_A = GemmUniversalLauncher.numpy_type(operation.A.element)
self.dtype_B = GemmUniversalLauncher.numpy_type(operation.B.element)
self.dtype_C = GemmUniversalLauncher.numpy_type(operation.C.element)
self.dtype_D = GemmUniversalLauncher.numpy_type(operation.C.element)
accumulator_size = DataTypeSize[
operation.tile_description.math_instruction.element_accumulator
]
element_size = DataTypeSize[operation.A.element]
if element_size == 1:
self.scope_max = 1
self.scope_min = 0
elif element_size <= 8:
self.scope_max = 1
self.scope_min = -1
elif element_size == 16:
self.scope_max = 4
self.scope_min = -4
else:
self.scope_max = 8
self.scope_min = -8
#: seed
self.seed: int = seed
#: whether the layout is interleaved
self.interleaved = interleaved
#: compute type
self.compute_type = operation.epilogue_functor.element_epilogue
self.accumulator_type = (
operation.tile_description.math_instruction.element_accumulator
)
def print_problem_size(self, p, mode, batch_count):
if mode == cutlass_bindings.gemm.Mode.Gemm:
mode = "Gemm"
elif mode == cutlass_bindings.gemm.Mode.Batched:
mode = "GemmBatched"
elif mode == cutlass_bindings.gemm.Mode.GemmSplitKParallel:
mode = "GemmSplitKParallel"
problem_size = "problem: %d, %d, %d\n batch_count: %d\n mode: %s" % (
p.m(),
p.n(),
p.k(),
batch_count,
mode,
)
print(problem_size)
@staticmethod
def numpy_type(type):
if type == cutlass_bindings.float64:
return np.float64
elif type == cutlass_bindings.float32:
return np.float32
elif type == cutlass_bindings.float16:
return np.float16
elif type == cutlass_bindings.bfloat16:
return bfloat16
elif type == cutlass_bindings.int32:
return np.int32
elif type == cutlass_bindings.int8:
return np.int8
else:
raise ValueError("unsupported type: %s" % ShortDataTypeNames[type])
def uniform_init(self, size, dtype):
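        # Floating-point tensors are drawn from [scope_min - 0.5, scope_max - 0.5),
        # cast to `dtype`, and rounded up with np.ceil so every element is integer-valued;
        # integer tensors are drawn directly and truncated by the astype() cast.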
if dtype in [np.float32, np.float16, bfloat16, np.float64]:
return np.ceil(
np.random.uniform(
low=self.scope_min - 0.5, high=self.scope_max - 0.5, size=size
).astype(dtype)
)
else:
return np.random.uniform(
low=self.scope_min - 1, high=self.scope_max + 1, size=size
).astype(dtype)
def reorder_tensor_B(self, tensor_B, problem_size):
reordered_tensor_B = np.empty_like(tensor_B)
tensor_ref_B = getTensorRef(
tensor_B, problem_size, "b", self.operation.B.layout
)
reordered_tensor_ref_B = getTensorRef(
reordered_tensor_B, problem_size, "b", self.operation.B.layout
)
cutlass_bindings.gemm.host.reorder_column(
tensor_ref_B, reordered_tensor_ref_B, problem_size
)
return reordered_tensor_B
def host_reference(self, problem_size, batch_count, tensor_A, tensor_B, tensor_C, alpha, beta):
tensor_D_ref = np.ones_like(tensor_C)
alpha = self.numpy_type(self.compute_type)(alpha)
beta = self.numpy_type(self.compute_type)(beta)
init_acc = 0
alpha = self.compute_type(alpha).value()
beta = self.compute_type(beta).value()
init_acc = self.accumulator_type(init_acc).value()
for i in range(batch_count):
if self.operation.switched:
tensor_ref_A = getTensorRef(
tensor_A,
problem_size,
"a",
transpose(self.operation.B.layout),
batch_offset=i,
)
tensor_ref_B = getTensorRef(
tensor_B,
problem_size,
"b",
transpose(self.operation.A.layout),
batch_offset=i,
)
tensor_ref_C = getTensorRef(
tensor_C,
problem_size,
"c",
transpose(self.operation.C.layout),
batch_offset=i,
)
tensor_ref_D_ref = getTensorRef(
tensor_D_ref,
problem_size,
"d",
transpose(self.operation.C.layout),
batch_offset=i,
)
else:
tensor_ref_A = getTensorRef(
tensor_A, problem_size, "a", self.operation.A.layout, batch_offset=i
)
tensor_ref_B = getTensorRef(
tensor_B, problem_size, "b", self.operation.B.layout, batch_offset=i
)
tensor_ref_C = getTensorRef(
tensor_C, problem_size, "c", self.operation.C.layout, batch_offset=i
)
tensor_ref_D_ref = getTensorRef(
tensor_D_ref,
problem_size,
"d",
self.operation.C.layout,
batch_offset=i,
)
if self.math_operation in [MathOperation.multiply_add_saturate]:
cutlass_bindings.test.gemm.host.gemm_saturate(
problem_size,
alpha,
tensor_ref_A,
tensor_ref_B,
beta,
tensor_ref_C,
tensor_ref_D_ref,
init_acc,
)
else:
cutlass_bindings.test.gemm.host.gemm(
problem_size,
alpha,
tensor_ref_A,
tensor_ref_B,
beta,
tensor_ref_C,
tensor_ref_D_ref,
init_acc,
)
return tensor_D_ref
def equal(self, tensor_D, tensor_D_ref, problem_size, batch_count):
for i in range(batch_count):
tensor_view_D = getTensorView(
tensor_D, problem_size, "d", self.operation.C.layout, batch_offset=i
)
tensor_view_D_ref = getTensorView(
tensor_D_ref, problem_size, "d", self.operation.C.layout, batch_offset=i
)
if not cutlass_bindings.test.gemm.host.equals(
tensor_view_D, tensor_view_D_ref
):
return False
return True
def bytes(self, problem_size, batch_count=1, alpha=1.0, beta=0.0):
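        # Data-movement estimate: read A (M x K) and B (K x N), write D (M x N);
        # reading C adds another M x N term when beta is nonzero.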
m = problem_size.m()
n = problem_size.n()
k = problem_size.k()
bytes = (
(DataTypeSize[self.operation.A.element] * m // 8) * k
+ (DataTypeSize[self.operation.B.element] * n // 8) * k
+ (DataTypeSize[self.operation.C.element] * m // 8) * n
)
if beta != 0:
bytes += (DataTypeSize[self.operation.C.element] * m // 8) * n
bytes *= batch_count
return bytes
def flops(self, problem_size, batch_count=1):
m = problem_size.m()
n = problem_size.n()
k = problem_size.k()
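        # Each of the M x N outputs accumulates K terms; each term costs one multiply and one add.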
flops_ = (m * n * k) * 2 * batch_count
return flops_
def run_cutlass_profiler(
self, mode, problem_size, batch_count=1, alpha=1.0, beta=0.0
):
cutlass_path = os.getenv("CUTLASS_PATH")
assert (
cutlass_path is not None
), "Environment variable 'CUTLASS_PATH' is not defined."
values = {
"profiler_path": cutlass_path + "/build/tools/profiler/cutlass_profiler",
"kernel_name": self.operation.procedural_name(),
"verification_providers": "device",
"provider": "cutlass",
"m": str(problem_size.m()),
"n": str(problem_size.n()),
"k": str(problem_size.k()),
"split_k_slices": str(batch_count),
"alpha": str(alpha),
"beta": str(beta),
"warmup": str(self.warmup_iterations),
"profile": str(self.iterations),
}
cmd_template = (
"${profiler_path} --kernels=${kernel_name} --verification-providers=${verification_providers}"
" --providers=${provider} --m=${m} --n=${n} --k=${k}"
)
cmd = SubstituteTemplate(cmd_template, values)
result = subprocess.getoutput(cmd)
m = re.search(r"Runtime:\s+(?P<runtime>\d+.\d+)", result)
runtime = float(m.group("runtime"))
m = re.search(r"Bytes:\s+(?P<bytes>\d+)", result)
bytes = int(m.group("bytes"))
m = re.search(r"FLOPs:\s+(?P<flops>\d+)", result)
flops = int(m.group("flops"))
# check if the problem size matches
        assert bytes == self.bytes(problem_size, alpha=alpha, beta=beta)
assert flops == self.flops(problem_size)
return runtime
def run(self, mode, problem_size, batch_count=1, split_k_slices=1, alpha=1.0, beta=0.0):
assert get_allocated_size() == 0, (
"%d byte of pool memory is not released in previous run"
% get_allocated_size()
)
np.random.seed(self.seed)
# Assign an actual batch count in cases where we are not running in batched mode.
# This is to differentiate between the number of split K slices and the batch count,
# which are overloaded within the single `batch_count` variable.
true_batch_count = (
batch_count if mode == cutlass_bindings.gemm.Mode.Batched else 1
)
tensor_A = self.uniform_init(
size=(problem_size.m() * problem_size.k() * true_batch_count,),
dtype=self.dtype_A,
)
tensor_B = self.uniform_init(
size=(problem_size.n() * problem_size.k() * true_batch_count,),
dtype=self.dtype_B,
)
tensor_C = self.uniform_init(
size=(problem_size.m() * problem_size.n() * true_batch_count,),
dtype=self.dtype_C,
)
tensor_D = np.zeros(
shape=(problem_size.m() * problem_size.n() * true_batch_count,),
dtype=self.dtype_D,
)
#
# Launch kernel
#
arguments = GemmArguments(
operation=self.operation,
problem_size=problem_size,
A=tensor_A,
B=tensor_B,
C=tensor_C,
D=tensor_D,
output_op=self.operation.epilogue_type(alpha, beta),
gemm_mode=mode,
split_k_slices=split_k_slices,
batch=batch_count,
)
if mode == cutlass_bindings.gemm.Mode.GemmSplitKParallel:
reduction_arguments = ReductionArguments(
self.reduction_operation,
problem_size=[problem_size.m(), problem_size.n()],
partitions=split_k_slices,
workspace=arguments.ptr_D,
destination=tensor_D,
source=tensor_C,
output_op=self.reduction_operation.epilogue_type(alpha, beta),
)
self.operation.run(arguments)
if mode == cutlass_bindings.gemm.Mode.GemmSplitKParallel:
self.reduction_operation.run(reduction_arguments)
passed = True
if self.verification:
if mode == cutlass_bindings.gemm.Mode.GemmSplitKParallel:
reduction_arguments.sync()
else:
arguments.sync()
tensor_D_ref = self.host_reference(
problem_size,
true_batch_count,
tensor_A,
tensor_B,
tensor_C,
alpha,
beta,
)
passed = self.equal(tensor_D, tensor_D_ref, problem_size, true_batch_count)
try:
assert passed
except AssertionError:
self.print_problem_size(problem_size, mode, batch_count)
if self.profiling:
sleep(self.sleep_time)
for _ in range(self.warmup_iterations):
self.operation.run(arguments)
if mode == cutlass_bindings.gemm.Mode.GemmSplitKParallel:
self.reduction_operation.run(reduction_arguments)
self.timer.start()
for _ in range(self.iterations):
self.operation.run(arguments)
if mode == cutlass_bindings.gemm.Mode.GemmSplitKParallel:
self.reduction_operation.run(reduction_arguments)
self.timer.stop_and_wait()
runtime = self.timer.duration(self.iterations)
# free memory and clear buffers
del arguments
if mode == cutlass_bindings.gemm.Mode.GemmSplitKParallel:
del reduction_arguments
assert get_allocated_size() == 0, (
"%d byte of pool memory is not released after current run"
% get_allocated_size()
)
if self.profiling:
return runtime
return passed
def test_all_gemm(operation: "GemmOperationUniversal", testcase="universal", compilation_mode="nvcc"):
passed = True
minimum_operand_element_size = min(
DataTypeSize[operation.A.element], DataTypeSize[operation.B.element]
)
opcode_class = operation.tile_description.math_instruction.opcode_class
if opcode_class == cutlass_bindings.OpClass.Simt:
alignment = 1
else:
alignment = 128 // minimum_operand_element_size
# int8_t gemm alignment constraints
if opcode_class == cutlass_bindings.OpClass.Simt and operation.A.element == cutlass_bindings.int8 and operation.A.layout == cutlass_bindings.ColumnMajor:
alignment_m = 4
else:
alignment_m = alignment
if (
opcode_class == cutlass_bindings.OpClass.Simt
and operation.B.element == cutlass_bindings.int8
and operation.A.layout == cutlass_bindings.RowMajor
):
alignment_n = 4
else:
alignment_n = alignment
if (
opcode_class == cutlass_bindings.OpClass.Simt
and operation.A.element == cutlass_bindings.int8
and operation.B.element == cutlass_bindings.int8
and (
operation.A.layout == cutlass_bindings.RowMajor
or operation.B.layout == cutlass_bindings.ColumnMajor
)
):
alignment_k = 4
else:
alignment_k = alignment
threadblock_k = operation.tile_description.threadblock_shape[2]
if testcase == "interleaved":
if operation.A.layout in [
cutlass_bindings.ColumnMajorInterleaved32,
cutlass_bindings.RowMajorInterleaved32,
]:
interleavedk = 32
else:
raise ValueError("Unknown layout")
# Split K mode via Python is currently only supported pre-SM90, and when stream K is not used.
# Stream K enables split-k functionality with mode `Gemm` and a non-unit batch count.
supports_split_k = operation.arch < 90 and not isinstance(
operation.swizzling_functor, cutlass_bindings.ThreadblockSwizzleStreamK
)
if testcase == "interleaved":
modes = [
cutlass_bindings.gemm.Mode.Gemm,
]
problem_size_m = [interleavedk, 512 + interleavedk]
problem_size_n = [interleavedk, 512 + interleavedk]
problem_size_k = [
interleavedk,
threadblock_k * operation.tile_description.stages + interleavedk,
]
problem_alpha = [1.0]
problem_beta = [0.0]
batch_counts = [
1,
]
elif testcase == "multistage":
modes = [
cutlass_bindings.gemm.Mode.Gemm,
]
problem_size_m = [16, 528]
problem_size_n = [16, 528]
problem_size_k = [
threadblock_k,
threadblock_k * operation.tile_description.stages
+ operation.tile_description.math_instruction.instruction_shape[2],
]
problem_alpha = [1.0]
problem_beta = [0.0]
batch_counts = [
1,
]
else: # universal
modes = [cutlass_bindings.gemm.Mode.Gemm]
batch_counts = [1, 2, 3, 5, 7]
if supports_split_k:
modes.append(cutlass_bindings.gemm.Mode.GemmSplitKParallel)
problem_size_m = [alignment_m, 512 - 3 * alignment_m]
problem_size_n = [alignment_n, 512 - 2 * alignment_n]
if operation.tile_description.stages is None:
stages_for_k_calc = 7
else:
stages_for_k_calc = operation.tile_description.stages
problem_size_k = [
alignment_k,
threadblock_k * stages_for_k_calc - alignment_k,
threadblock_k * stages_for_k_calc * 3 - alignment_k,
]
problem_alpha = [1.0]
problem_beta = [2.0]
testbed = GemmUniversalLauncher(operation, interleaved=(testcase == "interleaved"), compiler_mode=compilation_mode)
for mode in modes:
for m in problem_size_m:
for n in problem_size_n:
for k in problem_size_k:
for batch_count in batch_counts:
for alpha in problem_alpha:
for beta in problem_beta:
# skip very small K problems
if testcase == "universal":
if k // batch_count < 2 * threadblock_k:
continue
problem_size = cutlass_bindings.gemm.GemmCoord(m, n, k)
if supports_split_k:
split_k_slices = batch_count
else:
split_k_slices = 1
overridden_mode = mode
if (
mode == cutlass_bindings.gemm.Mode.Gemm
and batch_count > 1
):
overridden_mode = cutlass_bindings.gemm.Mode.Batched
passed = testbed.run(
overridden_mode,
problem_size,
batch_count,
split_k_slices,
alpha,
beta,
)
(err,) = cudart.cudaDeviceSynchronize()
if err != cuda.CUresult.CUDA_SUCCESS:
raise RuntimeError("CUDA Error %s" % str(err))
if not passed:
return False
return passed
| cutlass-main | python/cutlass/backend/test/gemm_testbed.py |
#################################################################################################
#
# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
from cuda import cuda, cudart
class GpuTimer:
def __init__(self) -> None:
self.events = [
cuda.cuEventCreate(cuda.CUevent_flags.CU_EVENT_DEFAULT)[1],
cuda.cuEventCreate(cuda.CUevent_flags.CU_EVENT_DEFAULT)[1],
]
def start(self, stream=cuda.CUstream(0)):
(err,) = cuda.cuEventRecord(self.events[0], stream)
if err != cuda.CUresult.CUDA_SUCCESS:
raise RuntimeError("CUDA Error %s" % str(err))
def stop(self, stream=cuda.CUstream(0)):
(err,) = cuda.cuEventRecord(self.events[1], stream)
if err != cuda.CUresult.CUDA_SUCCESS:
raise RuntimeError("CUDA Error %s" % str(err))
def stop_and_wait(self, stream=cuda.CUstream(0)):
self.stop(stream)
if stream:
(err,) = cuda.cuStreamSynchronize(stream)
if err != cuda.CUresult.CUDA_SUCCESS:
raise RuntimeError("CUDA Error %s" % str(err))
else:
(err,) = cudart.cudaDeviceSynchronize()
if err != cuda.CUresult.CUDA_SUCCESS:
raise RuntimeError("CUDA Error %s" % str(err))
def duration(self, iterations=1):
err, duration = cuda.cuEventElapsedTime(self.events[0], self.events[1])
if err != cuda.CUresult.CUDA_SUCCESS:
raise RuntimeError("CUDA Error %s" % str(err))
return duration / float(iterations)
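# Usage sketch (not part of the original module): a minimal, hedged example of
# driving GpuTimer. It assumes a CUDA-capable device is present; the runtime is
# initialized with cudaFree(0) so that the driver-API event calls above have a
# current context, and the timed loop body is only a placeholder for real work.
if __name__ == "__main__":
    cudart.cudaFree(0)  # force primary-context creation before using driver events
    timer = GpuTimer()
    iterations = 10
    timer.start()
    for _ in range(iterations):
        # Placeholder for real GPU work, e.g. operation.run(arguments)
        cudart.cudaDeviceSynchronize()
    timer.stop_and_wait()
    print("average iteration time (ms):", timer.duration(iterations))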
| cutlass-main | python/cutlass/backend/test/profiler.py |
#################################################################################################
#
# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
"""
Utility functions for interacting with the device
"""
from cuda import cudart
def check_cuda_errors(result: list):
"""
    Checks whether `result` contains a CUDA error and, if so, raises it as an exception. Otherwise,
returns the result contained in the remaining fields of `result`.
:param result: the results of the `cudart` method, consisting of an error code and any method results
:type result: list
:return: non-error-code results from the `results` parameter
"""
# `result` is of the format : (cudaError_t, result...)
err = result[0]
if err.value:
raise RuntimeError("CUDA error: {}".format(cudart.cudaGetErrorName(err)))
if len(result) == 1:
return None
elif len(result) == 2:
return result[1]
else:
return result[1:]
def device_cc(device: int = 0) -> int:
"""
Returns the compute capability of the device with ID `device`.
:param device: ID of the device to query
:type device: int
:return: compute capability of the queried device (e.g., 80 for SM80)
:rtype: int
"""
deviceProp = check_cuda_errors(cudart.cudaGetDeviceProperties(device))
major = str(deviceProp.major)
minor = str(deviceProp.minor)
return int(major + minor)
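# Usage sketch (not part of the original module): a minimal, hedged example of
# combining check_cuda_errors with another cudart call and reading the compute
# capability. Only check_cuda_errors and device_cc come from this module; the
# printed strings are illustrative.
if __name__ == "__main__":
    num_devices = check_cuda_errors(cudart.cudaGetDeviceCount())
    print("visible CUDA devices:", num_devices)
    print("compute capability of device 0:", device_cc(0))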
| cutlass-main | python/cutlass/backend/utils/device.py |
#################################################################################################
#
# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
from typing import Union
from bfloat16 import bfloat16
import cutlass_bindings
import numpy as np
from cutlass.backend.library import TensorDescription
from cutlass.backend.utils.software import CheckPackages
torch_available = CheckPackages().check_torch()
if torch_available:
import torch
class ReferenceModule:
def __init__(
self, A: TensorDescription, B: TensorDescription, C: TensorDescription
) -> None:
self.layout_A = A.layout
self.layout_B = B.layout
self.layout_C = C.layout
def run(
self,
A: np.ndarray,
B: np.ndarray,
C: np.ndarray,
problem_size: cutlass_bindings.gemm.GemmCoord,
alpha: float = 1.0,
beta: float = 0.0,
bias=False,
batch=1,
):
"""
Compute the reference result on CPU
Args:
A: dense operator with shape (M, K) in row-major and (K, M) in column-major
B: dense operator with shape (K, N) in row-major and (N, K) in column-major
C: dense operator with shape (M, N) in row-major and (N, M) in column-major
"""
M, N, K = problem_size.m(), problem_size.n(), problem_size.k()
if isinstance(A, np.ndarray):
if self.layout_A == cutlass_bindings.RowMajor:
A_row = np.reshape(A, newshape=(batch, M, K))
else:
A_col = np.reshape(A, newshape=(batch, K, M))
A_row = np.transpose(A_col, axes=(0, 2, 1))
if self.layout_B == cutlass_bindings.RowMajor:
B_row = np.reshape(B, newshape=(batch, K, N))
else:
B_col = np.reshape(B, newshape=(batch, N, K))
B_row = np.transpose(B_col, axes=(0, 2, 1))
if self.layout_C == cutlass_bindings.RowMajor:
if bias:
C_row = np.reshape(C, newshape=(batch, 1, N))
else:
C_row = np.reshape(C, newshape=(batch, M, N))
else:
if bias:
C_row = np.reshape(C, newshape=(batch, M, 1))
else:
C_col = np.reshape(C, newshape=(batch, N, M))
C_row = np.transpose(C_col, axes=(0, 2, 1))
if A_row.dtype == bfloat16:
# numpy's einsum doesn't support bfloat16
out_row = (
np.einsum(
"bik,bkj->bij",
A_row.astype(np.float32),
B_row.astype(np.float32),
)
* alpha
+ C_row * beta
)
out_row = out_row.astype(C_row.dtype)
else:
out_row = np.einsum("bik,bkj->bij", A_row, B_row) * alpha + C_row * beta
if self.layout_C == cutlass_bindings.ColumnMajor:
out = np.transpose(out_row, axes=(0, 2, 1))
else:
out = out_row
return out.ravel()
elif isinstance(A, torch.Tensor):
if self.layout_A == cutlass_bindings.RowMajor:
A_row = A.view((M, K))
else:
A_col = A.view((K, M))
A_row = torch.permute(A_col, (1, 0))
if self.layout_B == cutlass_bindings.RowMajor:
B_row = B.view((K, N))
else:
B_col = B.view((N, K))
B_row = torch.permute(B_col, (1, 0))
if self.layout_C == cutlass_bindings.RowMajor:
C_row = C.view((M, N))
else:
C_col = C.view((N, M))
C_row = torch.permute(C_col, (1, 0))
out_row = torch.matmul(A_row, B_row) * alpha + C_row * beta
if self.layout_C == cutlass_bindings.ColumnMajor:
out = torch.permute(out_row, (1, 0))
else:
out = out_row
return torch.flatten(out)
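# Illustrative helper (not called anywhere): a hedged sketch of driving
# ReferenceModule with flat NumPy inputs. The problem size and the
# TensorDescription construction below are assumptions made for illustration;
# only ReferenceModule and its run() signature come from this module.
def _example_reference_gemm():
    m, n, k = 64, 32, 16
    desc = TensorDescription(element=cutlass_bindings.float32, layout=cutlass_bindings.RowMajor)
    ref = ReferenceModule(A=desc, B=desc, C=desc)
    A = np.ones((m * k,), dtype=np.float32)
    B = np.ones((k * n,), dtype=np.float32)
    C = np.zeros((m * n,), dtype=np.float32)
    problem_size = cutlass_bindings.gemm.GemmCoord(m, n, k)
    # Returns the flattened D = alpha * (A @ B) + beta * C
    return ref.run(A, B, C, problem_size, alpha=1.0, beta=0.0)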
#####################################################################################################
# Conv2d
#####################################################################################################
if torch_available:
import torch
class Conv2dReferenceModule:
def __init__(
self,
A: TensorDescription,
B: TensorDescription,
C: TensorDescription,
kind: cutlass_bindings.conv.Operator.fprop,
) -> None:
self.layout_A = A.layout
self.layout_B = B.layout
self.layout_C = C.layout
self.kind = kind
def run(
self,
A: Union[np.ndarray, torch.Tensor],
B: Union[np.ndarray, torch.Tensor],
C: Union[np.ndarray, torch.Tensor],
problem_size,
alpha=1.0,
beta=0.0,
bias=False,
) -> np.ndarray:
"""
Compute the reference result on CPU
"""
n = problem_size.N
h = problem_size.H
w = problem_size.W
c = problem_size.C
k = problem_size.K
r = problem_size.R
s = problem_size.S
p = problem_size.P
q = problem_size.Q
stride_h = problem_size.stride_h
stride_w = problem_size.stride_w
pad_h = problem_size.pad_h
pad_w = problem_size.pad_w
dilation_h = problem_size.dilation_h
dilation_w = problem_size.dilation_w
groups = problem_size.groups
if isinstance(A, np.ndarray):
# the pytorch activation layout is NCHW
# weight layout is Cout Cin Kh Kw (also NCHW)
if self.layout_A == cutlass_bindings.TensorNHWC:
A_nhwc = np.reshape(A, newshape=(n, h, w, c))
A_torch_nhwc = torch.from_numpy(A_nhwc).to("cuda")
A_torch_nchw = torch.permute(A_torch_nhwc, (0, 3, 1, 2))
if self.layout_B == cutlass_bindings.TensorNHWC:
B_nhwc = np.reshape(B, newshape=(k, r, s, c))
B_torch_nhwc = torch.from_numpy(B_nhwc).to("cuda")
B_torch_nchw = torch.permute(B_torch_nhwc, (0, 3, 1, 2))
if self.layout_C == cutlass_bindings.TensorNHWC:
C_nhwc = np.reshape(C, newshape=(n, p, q, k))
C_torch_nhwc = torch.from_numpy(C_nhwc).to("cuda")
C_torch_nchw = torch.permute(C_torch_nhwc, (0, 3, 1, 2))
elif isinstance(A, torch.Tensor):
if self.kind == cutlass_bindings.conv.Operator.wgrad:
if self.layout_A == cutlass_bindings.TensorNHWC:
A_nhwc = A.view((n, p, q, k))
A_torch_nchw = torch.permute(A_nhwc, (0, 3, 1, 2))
if self.layout_B == cutlass_bindings.TensorNHWC:
B_nhwc = B.view((n, h, w, c))
B_torch_nchw = torch.permute(B_nhwc, (0, 3, 1, 2))
if self.layout_C == cutlass_bindings.TensorNHWC:
if bias:
C_nhwc = C.view((1, 1, 1, c))
else:
C_nhwc = C.view((k, r, s, c))
C_torch_nchw = torch.permute(C_nhwc, (0, 3, 1, 2))
elif self.kind == cutlass_bindings.conv.Operator.dgrad:
if self.layout_A == cutlass_bindings.TensorNHWC:
A_nhwc = A.view((n, p, q, k))
A_torch_nchw = torch.permute(A_nhwc, (0, 3, 1, 2))
if self.layout_B == cutlass_bindings.TensorNHWC:
B_nhwc = B.view((k, r, s, c))
B_torch_nchw = torch.permute(B_nhwc, (0, 3, 1, 2))
if self.layout_C == cutlass_bindings.TensorNHWC:
if bias:
C_nhwc = C.view((1, 1, 1, c))
else:
C_nhwc = C.view((n, h, w, c))
C_torch_nchw = torch.permute(C_nhwc, (0, 3, 1, 2))
else:
if self.layout_A == cutlass_bindings.TensorNHWC:
A_nhwc = A.view((n, h, w, c))
A_torch_nchw = torch.permute(A_nhwc, (0, 3, 1, 2))
if self.layout_B == cutlass_bindings.TensorNHWC:
B_nhwc = B.view((k, r, s, c))
B_torch_nchw = torch.permute(B_nhwc, (0, 3, 1, 2))
if self.layout_C == cutlass_bindings.TensorNHWC:
if bias:
C_nhwc = C.view((1, 1, 1, k))
else:
C_nhwc = C.view((n, p, q, k))
C_torch_nchw = torch.permute(C_nhwc, (0, 3, 1, 2))
if self.kind == cutlass_bindings.conv.Operator.fprop:
D_torch_nchw = (
alpha
* torch.nn.functional.conv2d(
A_torch_nchw,
B_torch_nchw,
stride=(stride_h, stride_w),
padding=(pad_h, pad_w),
dilation=(dilation_h, dilation_w),
groups=groups,
)
+ beta * C_torch_nchw
)
elif self.kind == cutlass_bindings.conv.Operator.dgrad:
D_torch_nchw = (
alpha
* torch.nn.grad.conv2d_input(
(n, c, h, w),
B_torch_nchw,
A_torch_nchw,
padding=(pad_h, pad_w),
stride=(stride_h, stride_w),
).to(torch.float32)
+ beta * C_torch_nchw
)
elif self.kind == cutlass_bindings.conv.Operator.wgrad:
D_torch_nchw = (
alpha
* torch.nn.grad.conv2d_weight(
B_torch_nchw,
(k, c, r, s),
A_torch_nchw,
padding=(pad_h, pad_w),
stride=(stride_h, stride_w),
).to(torch.float32)
+ beta * C_torch_nchw
)
if self.layout_C == cutlass_bindings.TensorNHWC:
if isinstance(A, np.ndarray):
D_torch_out = (
torch.permute(D_torch_nchw, (0, 2, 3, 1)).detach().cpu().numpy()
)
elif isinstance(A, torch.Tensor):
D_torch_out = torch.permute(D_torch_nchw, (0, 2, 3, 1))
return D_torch_out.flatten()
| cutlass-main | python/cutlass/backend/utils/reference_model.py |
################################################################################
#
# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
################################################################################
from cutlass.backend.utils.datatypes import *
from cutlass.backend.utils.device import check_cuda_errors, device_cc
from cutlass.backend.utils.reference_model import ReferenceModule
from cutlass.backend.utils.software import (
CheckPackages,
SubstituteTemplate,
device_sm_count,
get_memory_pool,
)
| cutlass-main | python/cutlass/backend/utils/__init__.py |
#################################################################################################
#
# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
"""
Utility functions for converting between frontend datatypes and CUTLASS datatypes
"""
import cutlass_bindings
from cutlass.backend.utils.software import CheckPackages
numpy_available = CheckPackages().check_numpy()
if numpy_available:
import numpy as np
numpy_to_cutlass_dict = {
np.float16: cutlass_bindings.float16,
np.float32: cutlass_bindings.float32,
np.float64: cutlass_bindings.float64,
np.int8: cutlass_bindings.int8,
np.int32: cutlass_bindings.int32,
np.dtype('float16'): cutlass_bindings.float16,
np.dtype('float32'): cutlass_bindings.float32,
np.dtype('float64'): cutlass_bindings.float64,
np.dtype('int8'): cutlass_bindings.int8,
np.dtype('int32'): cutlass_bindings.int32,
}
def numpy_to_cutlass(inp):
numpy_available = CheckPackages().check_numpy()
if numpy_available:
return numpy_to_cutlass_dict.get(inp, None)
cupy_available = CheckPackages().check_cupy()
if cupy_available:
import cupy as cp
cupy_to_cutlass_dict = {
cp.float16: cutlass_bindings.float16,
cp.float32: cutlass_bindings.float32,
cp.float64: cutlass_bindings.float64,
}
def cupy_to_cutlass(inp):
cupy_available = CheckPackages().check_cupy()
if cupy_available:
return cupy_to_cutlass_dict.get(inp, None)
torch_available = CheckPackages().check_torch()
if torch_available:
import torch
torch_to_cutlass_dict = {
torch.half: cutlass_bindings.float16,
torch.float16: cutlass_bindings.float16,
torch.float: cutlass_bindings.float32,
torch.float32: cutlass_bindings.float32,
torch.double: cutlass_bindings.float64,
torch.float64: cutlass_bindings.float64,
}
def torch_to_cutlass(inp):
if torch_available:
return torch_to_cutlass_dict.get(inp, None)
try:
import bfloat16
bfloat16_available = True
numpy_to_cutlass_dict[np.dtype(bfloat16.bfloat16)] = cutlass_bindings.bfloat16
except ImportError:
bfloat16_available = False
def bfloat16_to_cutlass(inp):
if bfloat16_available:
if inp == bfloat16.bfloat16:
return cutlass_bindings.bfloat16
def to_cutlass(inp):
for cvt_fn in [
bfloat16_to_cutlass,
cupy_to_cutlass,
numpy_to_cutlass,
torch_to_cutlass,
]:
out = cvt_fn(inp)
if out is not None:
return out
raise Exception(
"No available conversion from type {} to a CUTLASS type.".format(inp)
)
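# Usage sketch (not part of the original module): a minimal, hedged example of
# the conversion helpers above. NumPy dtypes map to their cutlass_bindings
# equivalents, and an unsupported frontend type raises.
if __name__ == "__main__":
    assert to_cutlass(np.float32) == cutlass_bindings.float32
    assert numpy_to_cutlass(np.dtype("int8")) == cutlass_bindings.int8
    try:
        to_cutlass(str)  # not a supported frontend numeric type
    except Exception as e:
        print(e)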
| cutlass-main | python/cutlass/backend/utils/datatypes.py |
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
import re
import sys
from cutlass.backend.memory_manager import PoolMemoryManager
class CheckPackages:
def __init__(self) -> None:
pass
    def check_cupy(self):
        if "cupy" in sys.modules:
            return True
        try:
            import cupy  # checks importability only
            return True
        except ImportError:
            print("cupy is not loaded.")
            return False
    def check_numpy(self):
        if "numpy" in sys.modules:
            return True
        try:
            import numpy  # checks importability only
            return True
        except ImportError:
            print("numpy is not loaded.")
            return False
    def check_torch(self):
        if "torch" in sys.modules:
            return True
        try:
            import torch  # checks importability only
            return True
        except ImportError:
            print("torch is not loaded.")
            return False
def SubstituteTemplate(template, values):
text = template
changed = True
while changed:
changed = False
for key, value in values.items():
regex = "\\$\\{%s\\}" % key
newtext = re.sub(regex, value, text)
if newtext != text:
changed = True
text = newtext
return text
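# Usage sketch (not part of the original module): a minimal, hedged example of
# SubstituteTemplate. Every ${key} placeholder is replaced with its value, and
# substitution repeats until the text stops changing, so a value may itself
# contain a placeholder that is resolved on a later pass. The template and
# values below are illustrative only.
if __name__ == "__main__":
    template = "${profiler_path} --m=${m} --n=${n}"
    values = {"profiler_path": "cutlass_profiler", "m": "128", "n": "${m}"}
    print(SubstituteTemplate(template, values))  # cutlass_profiler --m=128 --n=128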
def device_sm_count():
    # Query the number of SMs on the active device via the CUDA driver API
from cuda import cuda
_device = 0
err, _device_sm_count = cuda.cuDeviceGetAttribute(
cuda.CUdevice_attribute.CU_DEVICE_ATTRIBUTE_MULTIPROCESSOR_COUNT, _device
)
if err != cuda.CUresult.CUDA_SUCCESS:
raise Exception(
"Failed to retireve SM count. "
f"cuDeviceGetAttribute() failed with error: {cuda.cuGetErrorString(err)[1]}"
)
return _device_sm_count
def get_memory_pool(init_pool_size=0, max_pool_size=2 ** 34):
memory_pool = PoolMemoryManager(
init_pool_size=init_pool_size, max_pool_size=max_pool_size
)
return memory_pool
| cutlass-main | python/cutlass/backend/utils/software.py |
#################################################################################################
#
# Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
from cutlass.emit.pytorch import pytorch
| cutlass-main | python/cutlass/emit/__init__.py |
#################################################################################################
#
# Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
"""
Utilities for generating source for building a PyTorch CUDA extension that uses a CUTLASS kernel.
If specified, the extension can be JIT compiled via PyTorch's ``cpp_extension.load`` method.
Example usage with JIT compilation:
.. highlight:: python
.. code-block:: python
plan = cutlass.op.Gemm(element=torch.float32, layout=cutlass.LayoutType.RowMajor)
op = plan.construct()
mod = cutlass.emit.pytorch(op, 'cutlass_gemm', 80, jit=True)
# Generate inputs for the GEMM
A, B, C = [torch.ones((512, 512)).to('cuda') for _ in range(3)]
# Run the module
D = mod.run(A, B, C)
Example usage without JIT compilation:
.. highlight:: python
.. code-block:: python
plan = cutlass.op.Gemm(element=torch.float32, layout=cutlass.LayoutType.RowMajor)
op = plan.construct()
cutlass.emit.pytorch(op, 'cutlass_gemm', 80, jit=False, sourcedir='output')
After this call, the directory ``output`` contains ``setup.py``,
``cutlass_gemm.cpp``, and ``cutlass_gemm_kernel.cu``. The module can be built from
within ``output`` by running: ``TORCH_CUDA_ARCH_LIST="8.0" python setup.py develop --user``.
The module can later be used in Python via:
.. highlight:: python
.. code-block:: python
import torch
import cutlass_gemm
# Generate inputs for the GEMM
A, B, C = [torch.ones((512, 512)).to('cuda') for _ in range(3)]
# Run the module
D = cutlass_gemm.run(A, B, C)
"""
import logging
import os
import cutlass_bindings
from cutlass import CUTLASS_PATH, logger, swizzle
from cutlass.backend.gemm_operation import GemmOperationGrouped, GemmOperationUniversal
from cutlass.backend.conv2d_operation import Conv2dOperation
from cutlass.backend.library import ApiVersion, ConvKindNames
from cutlass.backend.utils.software import CheckPackages, SubstituteTemplate
from cutlass.emit import common
torch_available = CheckPackages().check_torch()
if torch_available:
import torch
_PYTORCH_CUDA_TEMPLATE = common._CSTYLE_AUTOGEN_COMMENT + """
#include <cuda_runtime.h>
#include <torch/extension.h>
#include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include "cutlass/cutlass.h"
#include "cutlass/util/device_memory.h"
// helper function allocating the memory
void* device_memory_allocation(size_t size, int device_id=0) {
if (size > 0) {
torch::Device device(torch::kCUDA, device_id);
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
torch::TensorOptions options = torch::TensorOptions().dtype(torch::kI8).device(device);
at::Tensor device_tensor = torch::empty({(long)size,}, options);
return reinterpret_cast<void*>(device_tensor.data_ptr());
} else {
return nullptr;
}
}
${includes}
${declaration}
${impl}
"""
_PYTORCH_GEMM_CPP_TEMPLATE = common._CSTYLE_AUTOGEN_COMMENT + """
#include <torch/extension.h>
#include <ATen/ATen.h>
#include <pybind11/stl.h>
// CUDA forward declarations
at::Tensor ${name}_kernel(const at::Tensor& A, const at::Tensor& B, at::optional<const at::Tensor> C=at::nullopt, float alpha=1.f, float beta=0.f);
// C++ interface
at::Tensor ${name}(const at::Tensor& A, const at::Tensor& B, at::optional<const at::Tensor> C=at::nullopt, float alpha=1.f, float beta=0.f) {
return ${name}_kernel(A, B, C, alpha, beta);
}
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
m.def("run", py::overload_cast<const at::Tensor&, const at::Tensor&, at::optional<const at::Tensor>, float, float>(&${name}), py::arg("A"), py::arg("B"), py::arg("C") = nullptr, py::arg("alpha") = 1.f, py::arg("beta") = 0.f);
}
"""
_PYTORCH_GROUPED_GEMM_CPP_TEMPLATE = common._CSTYLE_AUTOGEN_COMMENT + """
#include <torch/extension.h>
#include <ATen/ATen.h>
#include <pybind11/stl.h>
// CUDA forward declarations
std::vector<at::Tensor> ${name}_kernel(const std::vector<at::Tensor>& A, const std::vector<at::Tensor>& B, at::optional<const std::vector<at::Tensor>> C=at::nullopt, float alpha=1.f, float beta=0.f);
// C++ interface
std::vector<at::Tensor> ${name}(const std::vector<at::Tensor>& A, const std::vector<at::Tensor>& B, at::optional<const std::vector<at::Tensor>> C=at::nullopt, float alpha=1.f, float beta=0.f) {
return ${name}_kernel(A, B, C, alpha, beta);
}
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
m.def("run", py::overload_cast<const std::vector<at::Tensor>&, const std::vector<at::Tensor>&, at::optional<const std::vector<at::Tensor>>, float, float>(&${name}),
py::arg("A"), py::arg("B"), py::arg("C") = nullptr, py::arg("alpha") = 1.f, py::arg("beta") = 0.f);
}
"""
_PYTORCH_CONV2D_FPROP_CPP_TEMPLATE = common._CSTYLE_AUTOGEN_COMMENT + """
#include <torch/extension.h>
#include <ATen/ATen.h>
#include <pybind11/stl.h>
// CUDA forward declarations
at::Tensor ${name}_kernel(
const at::Tensor& A, const at::Tensor& B, at::optional<const at::Tensor> C=at::nullopt,
std::tuple<int, int> stride={1, 1}, std::tuple<int, int> padding={0, 0}, std::tuple<int, int> dilation={1, 1},
float alpha=1.f, float beta=0.f,
std::string split_k_mode="serial", int split_k_slices=1);
// C++ interface
at::Tensor ${name}(
const at::Tensor& A, const at::Tensor& B, at::optional<const at::Tensor> C=at::nullopt,
std::tuple<int, int> stride={1, 1}, std::tuple<int, int> padding={0, 0}, std::tuple<int, int> dilation={1, 1},
float alpha=1.f, float beta=0.f,
std::string split_k_mode="serial", int split_k_slices=1) {
return ${name}_kernel(A, B, C, stride, padding, dilation, alpha, beta, split_k_mode, split_k_slices);
}
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
m.def("run",
py::overload_cast<
const at::Tensor&, const at::Tensor&, at::optional<const at::Tensor>,
std::tuple<int, int>, std::tuple<int, int>, std::tuple<int, int>, float, float, std::string, int>(
&${name}), py::arg("A"), py::arg("B"), py::arg("C") = nullptr,
py::arg("stride") = std::make_tuple(1, 1), py::arg("padding") = std::make_tuple(1, 1), py::arg("dilation") = std::make_tuple(1, 1),
py::arg("alpha") = 1.f, py::arg("beta") = 0.f,
py::arg("split_k_mode") = "serial", py::arg("split_k_slices") = 1);
}
"""
_PYTORCH_CONV2D_GRAD_CPP_TEMPLATE = common._CSTYLE_AUTOGEN_COMMENT + """
#include <torch/extension.h>
#include <ATen/ATen.h>
#include <pybind11/stl.h>
// CUDA forward declarations
at::Tensor ${name}_kernel(
std::tuple<int, int, int, int> result_size, const at::Tensor& A, const at::Tensor& B, at::optional<const at::Tensor> C=at::nullopt,
std::tuple<int, int> stride={1, 1}, std::tuple<int, int> padding={0, 0}, std::tuple<int, int> dilation={1, 1},
float alpha=1.f, float beta=0.f,
std::string split_k_mode="serial", int split_k_slices=1);
// C++ interface
at::Tensor ${name}(
std::tuple<int, int, int, int> result_size, const at::Tensor& A, const at::Tensor& B, at::optional<const at::Tensor> C=at::nullopt,
std::tuple<int, int> stride={1, 1}, std::tuple<int, int> padding={0, 0}, std::tuple<int, int> dilation={1, 1},
float alpha=1.f, float beta=0.f,
std::string split_k_mode="serial", int split_k_slices=1) {
return ${name}_kernel(result_size, A, B, C, stride, padding, dilation, alpha, beta, split_k_mode, split_k_slices);
}
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
m.def("run",
py::overload_cast<
std::tuple<int, int, int, int>, const at::Tensor&, const at::Tensor&, at::optional<const at::Tensor>,
std::tuple<int, int>, std::tuple<int, int>, std::tuple<int, int>, float, float, std::string, int>(
&${name}), py::arg("result_size"), py::arg("A"), py::arg("B"), py::arg("C") = nullptr,
py::arg("stride") = std::make_tuple(1, 1), py::arg("padding") = std::make_tuple(1, 1), py::arg("dilation") = std::make_tuple(1, 1),
py::arg("alpha") = 1.f, py::arg("beta") = 0.f,
py::arg("split_k_mode") = "serial", py::arg("split_k_slices") = 1);
}
"""
_PYTORCH_GEMM_INCLUDES = {
ApiVersion.v2x: """
#include "cutlass/gemm/device/gemm_universal.h"
""",
ApiVersion.v3x: """
#include "cutlass/gemm/device/gemm_universal_adapter.h"
#include "cutlass/gemm/collective/collective_builder.hpp"
#include "cutlass/gemm/device/gemm_universal_adapter.h"
#include "cutlass/gemm/kernel/gemm_universal.hpp"
#include "cutlass/epilogue/collective/default_epilogue.hpp"
#include "cutlass/util/packed_stride.hpp"
""",
}
_PYTORCH_GROUPED_GEMM_INCLUDES = """
#include "cutlass/gemm/kernel/default_gemm_grouped.h"
#include "cutlass/gemm/device/gemm_grouped.h"
"""
_PYTORCH_CONV2D_INCLUDES = """
#include "cutlass/conv/kernel/default_conv2d_fprop.h"
#include "cutlass/conv/kernel/default_conv2d_dgrad.h"
#include "cutlass/conv/kernel/default_conv2d_wgrad.h"
#include "cutlass/conv/device/implicit_gemm_convolution.h"
"""
_CUTLASS_TYPE_TO_TORCH_TYPE = {
cutlass_bindings.float16: "torch::kF16",
cutlass_bindings.float32: "torch::kF32",
cutlass_bindings.float64: "torch::kF64",
cutlass_bindings.int8: "torch::I8",
cutlass_bindings.int32: "torch::I32",
}
_PYTORCH_GEMM_IMPL_TEMPLATE_2x = (
common._CUTLASS_KERNEL_RUN_GEMM_2x
+ """
at::Tensor ${name}_kernel(const at::Tensor& A, const at::Tensor& B, at::optional<const at::Tensor> C, float alpha, float beta) {
int M = A.size(0);
int N = B.size(1);
int K = A.size(1);
typename DeviceKernel::ElementC* ptrC = (C == at::nullopt) ?
nullptr :
reinterpret_cast<typename DeviceKernel::ElementC*>(C->contiguous().data_ptr());
at::Tensor D = B.new_empty({M, N}, ${torch_type_C});
cutlass::Status status = ${name}_kernel_run(M, N, K,
reinterpret_cast<typename DeviceKernel::ElementA*>(A.contiguous().data_ptr()),
reinterpret_cast<typename DeviceKernel::ElementB*>(B.contiguous().data_ptr()),
ptrC,
reinterpret_cast<typename DeviceKernel::ElementC*>(D.contiguous().data_ptr()),
ElementCompute(alpha), ElementCompute(beta));
TORCH_CHECK(status == cutlass::Status::kSuccess, "CUTLASS kernel failed");
return D;
}
"""
)
_PYTORCH_GEMM_IMPL_TEMPLATE_3x = (
common._CUTLASS_KERNEL_RUN_GEMM_3x
+ """
bool hw_info_queried = false;
cutlass::KernelHardwareInfo hw_info;
at::Tensor ${name}_kernel(const at::Tensor& A, const at::Tensor& B, at::optional<const at::Tensor> C, float alpha, float beta) {
int M = A.size(0);
int N = B.size(1);
int K = A.size(1);
int L = 1;
// Query hardware info if we haven't already
if (!hw_info_queried) {
hw_info.device_id = 0;
hw_info.sm_count = cutlass::KernelHardwareInfo::query_device_multiprocessor_count(hw_info.device_id);
}
typename DeviceKernel::ElementC* ptrC = (C == at::nullopt) ?
nullptr :
reinterpret_cast<typename DeviceKernel::ElementC*>(C->contiguous().data_ptr());
at::Tensor D = B.new_empty({M, N}, ${torch_type_C});
cutlass::Status status = ${name}_kernel_run(M, N, K, L,
reinterpret_cast<typename DeviceKernel::ElementA*>(A.contiguous().data_ptr()),
reinterpret_cast<typename DeviceKernel::ElementB*>(B.contiguous().data_ptr()),
ptrC,
reinterpret_cast<typename DeviceKernel::ElementC*>(D.contiguous().data_ptr()),
ElementCompute(alpha), ElementCompute(beta),
hw_info);
TORCH_CHECK(status == cutlass::Status::kSuccess, "CUTLASS kernel failed");
return D;
}
"""
)
_PYTORCH_GROUPED_GEMM_IMPL_TEMPLATE = (
common._CUTLASS_KERNEL_RUN_GROUPED_GEMM_2x
+ """
std::vector<at::Tensor> ${name}_kernel(const std::vector<at::Tensor>& A, const std::vector<at::Tensor>& B, at::optional<const std::vector<at::Tensor>> C, float alpha, float beta) {
size_t num = A.size();
// To avoid performing many small cudaMallocs and host-to-device copies,
// we serialize the grouped GEMM arguments on the host, allocate one
// large chunk of device memory, and perform a single cudaMemcpy to
// copy the host data to the device. Allocation overheads could be
// avoided by using a memory pool.
// Calculate the total size of the data to be copied from host to device
size_t total_size = sizeof(cutlass::gemm::GemmCoord) +
sizeof(DeviceKernel::ElementA*) +
sizeof(DeviceKernel::ElementB*) +
sizeof(DeviceKernel::ElementC*) +
sizeof(DeviceKernel::ElementC*) +
sizeof(int64_t) +
sizeof(int64_t) +
sizeof(int64_t);
total_size *= num;
// num * sizeof(cutlass::gemm::GemmCoord) may leave one at a non-multiple
// of sizeof(DeviceKernel::ElementA*) (which will be 64 on a 64-bit system).
// To ensure that we don't end up having misaligned loads in the kernel,
// we pad to the nearest multiple of 8.
//
// Note that, even on a 32-bit system (for which sizeof(X*) will not equal
// sizeof(int64_t)), only padding between the list of GemmCoords and the
// list of ptr_As is sufficient because the set of four equal-length lists of pointers
// (A*, B*, C*, D*) will ensure that the first list of int64_ts will always
// start on a multiple of 8.
int64_t padding = 8 - (total_size % 8);
total_size += padding;
uint8_t* host_data = new uint8_t[total_size];
cutlass::DeviceAllocation<uint8_t> device_data(total_size);
uint8_t* start = host_data;
cutlass::gemm::GemmCoord* problem_sizes_host = reinterpret_cast<cutlass::gemm::GemmCoord*>(start);
// Apply the padding after the list of GemmCoords
start += num * sizeof(cutlass::gemm::GemmCoord) + padding;
int64_t ptr_A_offset = start - host_data;
DeviceKernel::ElementA** ptr_A_host = reinterpret_cast<DeviceKernel::ElementA**>(start);
start += num * sizeof(DeviceKernel::ElementA*);
int64_t ptr_B_offset = start - host_data;
DeviceKernel::ElementB** ptr_B_host = reinterpret_cast<DeviceKernel::ElementB**>(start);
start += num * sizeof(DeviceKernel::ElementB*);
int64_t ptr_C_offset = start - host_data;
DeviceKernel::ElementC** ptr_C_host = reinterpret_cast<DeviceKernel::ElementC**>(start);
start += num * sizeof(DeviceKernel::ElementC*);
int64_t ptr_D_offset = start - host_data;
DeviceKernel::ElementC** ptr_D_host = reinterpret_cast<DeviceKernel::ElementC**>(start);
start += num * sizeof(DeviceKernel::ElementC*);
int64_t lda_offset = start - host_data;
int64_t* lda_host = reinterpret_cast<int64_t*>(start);
start += num * sizeof(int64_t);
int64_t ldb_offset = start - host_data;
int64_t* ldb_host = reinterpret_cast<int64_t*>(start);
start += num * sizeof(int64_t);
int64_t ldc_offset = start - host_data;
int64_t* ldc_host = reinterpret_cast<int64_t*>(start);
start += num * sizeof(int64_t);
std::vector<at::Tensor> D(num);
bool need_C = (C != at::nullopt) && (beta != 0.f);
for (size_t i = 0; i < num; ++i) {
int M = A[i].size(0);
int N = B[i].size(1);
int K = A[i].size(1);
*(problem_sizes_host + i) = {M, N, K};
*(ptr_A_host + i) = reinterpret_cast<typename DeviceKernel::ElementA*>(A[i].contiguous().data_ptr());
*(ptr_B_host + i) = reinterpret_cast<typename DeviceKernel::ElementB*>(B[i].contiguous().data_ptr());
if (need_C) {
*(ptr_C_host + i) = reinterpret_cast<typename DeviceKernel::ElementC*>(C->at(i).contiguous().data_ptr());
}
else {
*(ptr_C_host + i) = nullptr;
}
D[i] = B[i].new_empty({M, N}, ${torch_type_C});
*(ptr_D_host + i) = reinterpret_cast<typename DeviceKernel::ElementC*>(D[i].contiguous().data_ptr());
*(lda_host + i) = DeviceKernel::LayoutA::packed({M, K}).stride(0);
*(ldb_host + i) = DeviceKernel::LayoutB::packed({K, N}).stride(0);
*(ldc_host + i) = DeviceKernel::LayoutC::packed({M, N}).stride(0);
}
device_data.copy_from_host(host_data);
cutlass::Status status = ${name}_kernel_run(
num,
reinterpret_cast<cutlass::gemm::GemmCoord*>(device_data.get()),
reinterpret_cast<DeviceKernel::ElementA**>(device_data.get() + ptr_A_offset),
reinterpret_cast<DeviceKernel::ElementB**>(device_data.get() + ptr_B_offset),
reinterpret_cast<DeviceKernel::ElementC**>(device_data.get() + ptr_C_offset),
reinterpret_cast<DeviceKernel::ElementC**>(device_data.get() + ptr_D_offset),
reinterpret_cast<int64_t*>(device_data.get() + lda_offset),
reinterpret_cast<int64_t*>(device_data.get() + ldb_offset),
reinterpret_cast<int64_t*>(device_data.get() + ldc_offset),
reinterpret_cast<int64_t*>(device_data.get() + ldc_offset),
ElementCompute(alpha), ElementCompute(beta));
delete[] host_data;
TORCH_CHECK(status == cutlass::Status::kSuccess, "CUTLASS kernel failed");
return D;
}
"""
)
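# Illustrative helper (not used by the emitter): a hedged Python mirror of the
# host-side buffer layout that the grouped-GEMM template above serializes before
# its single cudaMemcpy. The element sizes are assumptions for a typical 64-bit
# build (GemmCoord is three ints, pointers and int64_t are 8 bytes). Note that
# the template adds 8 bytes of padding even when the coordinate list is already
# 8-byte aligned; this mirror reproduces that behavior.
def _grouped_gemm_buffer_offsets(num, sizeof_coord=12, sizeof_ptr=8, sizeof_i64=8):
    total_size = num * (sizeof_coord + 4 * sizeof_ptr + 3 * sizeof_i64)
    padding = 8 - (total_size % 8)
    total_size += padding
    offsets = {"problem_sizes": 0}
    cursor = num * sizeof_coord + padding
    for name in ("ptr_A", "ptr_B", "ptr_C", "ptr_D"):
        offsets[name] = cursor
        cursor += num * sizeof_ptr
    for name in ("lda", "ldb", "ldc"):
        offsets[name] = cursor
        cursor += num * sizeof_i64
    return offsets, total_size  # byte offset of each list and the total allocation size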
_PYTORCH_CONV2D_IMPL_TEMPLATE_2x = """
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
cutlass::Status status = ${name}_kernel_run(
&problem_size,
reinterpret_cast<typename UnderlyingKernel::ElementA*>(A.data_ptr()),
reinterpret_cast<typename UnderlyingKernel::ElementB*>(B.data_ptr()),
ptrC,
reinterpret_cast<typename UnderlyingKernel::ElementC*>(D.data_ptr()),
alpha, beta,
split_k_mode, stream, B.device().index());
TORCH_CHECK(status == cutlass::Status::kSuccess, "CUTLASS kernel failed");
return D;
}
"""
_PYTORCH_CONV2D_FPROP_IMPL_TEMPLATE_2x = (
common._CUTLASS_KERNEL_RUN_CONV2D_2x
+ """
at::Tensor ${name}_kernel(const at::Tensor& A, const at::Tensor& B, at::optional<const at::Tensor> C=at::nullopt,
std::tuple<int, int> stride={1, 1}, std::tuple<int, int> padding={0, 0}, std::tuple<int, int> dilation={1, 1},
float alpha=1.f, float beta=0.f, std::string split_k_mode="serial", int split_k_slices=1) {
int N, H, W, C_, K, R, S, P, Q;
N = A.size(0);
C_ = A.size(1);
H = A.size(2);
W = A.size(3);
K = B.size(0);
R = B.size(2);
S = B.size(3);
cutlass::conv::Conv2dProblemSize problem_size(
cutlass::Tensor4DCoord(N, H, W, C_),
cutlass::Tensor4DCoord(K, R, S, C_),
cutlass::Tensor4DCoord(std::get<0>(padding), std::get<0>(padding), std::get<1>(padding), std::get<1>(padding)),
cutlass::MatrixCoord(std::get<0>(stride), std::get<1>(stride)),
cutlass::MatrixCoord(std::get<0>(dilation), std::get<1>(dilation)),
cutlass::conv::Mode::kCrossCorrelation,
split_k_slices
);
P = problem_size.P;
Q = problem_size.Q;
typename UnderlyingKernel::ElementC* ptrC = (C == at::nullopt) ?
nullptr :
reinterpret_cast<typename UnderlyingKernel::ElementC*>(C->data_ptr());
torch::TensorOptions options = torch::TensorOptions().dtype(${torch_type_C}).device(B.device()).memory_format(at::MemoryFormat::ChannelsLast);
at::Tensor D = torch::zeros({N, K, P, Q}, options);
""" + _PYTORCH_CONV2D_IMPL_TEMPLATE_2x
)
_PYTORCH_CONV2D_DGRAD_IMPL_TEMPLATE_2x = (
common._CUTLASS_KERNEL_RUN_CONV2D_2x
+ """
at::Tensor ${name}_kernel(std::tuple<int, int, int, int> input_size, const at::Tensor& A, const at::Tensor& B, at::optional<const at::Tensor> C=at::nullopt,
std::tuple<int, int> stride={1, 1}, std::tuple<int, int> padding={0, 0}, std::tuple<int, int> dilation={1, 1}, float alpha=1.f, float beta=0.f,
std::string split_k_mode="serial", int split_k_slices=1) {
int N, H, W, C_, K, R, S;
N = std::get<0>(input_size);
C_ = std::get<1>(input_size);
H = std::get<2>(input_size);
W = std::get<3>(input_size);
K = B.size(0);
R = B.size(2);
S = B.size(3);
cutlass::conv::Conv2dProblemSize problem_size(
cutlass::Tensor4DCoord(N, H, W, C_),
cutlass::Tensor4DCoord(K, R, S, C_),
cutlass::Tensor4DCoord(std::get<0>(padding), std::get<0>(padding), std::get<1>(padding), std::get<1>(padding)),
cutlass::MatrixCoord(std::get<0>(stride), std::get<1>(stride)),
cutlass::MatrixCoord(std::get<0>(dilation), std::get<1>(dilation)),
cutlass::conv::Mode::kCrossCorrelation,
split_k_slices
);
typename UnderlyingKernel::ElementC* ptrC = (C == at::nullopt) ?
nullptr :
reinterpret_cast<typename UnderlyingKernel::ElementC*>(C->data_ptr());
torch::TensorOptions options = torch::TensorOptions().dtype(${torch_type_C}).device(B.device()).memory_format(at::MemoryFormat::ChannelsLast);
at::Tensor D = torch::empty({N, C_, H, W}, options);
""" + _PYTORCH_CONV2D_IMPL_TEMPLATE_2x
)
_PYTORCH_CONV2D_WGRAD_IMPL_TEMPLATE_2x = (
common._CUTLASS_KERNEL_RUN_CONV2D_2x
+ """
at::Tensor ${name}_kernel(std::tuple<int, int, int, int> weight_size, const at::Tensor& A, const at::Tensor& B, at::optional<const at::Tensor> C=at::nullopt,
std::tuple<int, int> stride={1, 1}, std::tuple<int, int> padding={0, 0}, std::tuple<int, int> dilation={1, 1}, float alpha=1.f, float beta=0.f,
std::string split_k_mode="serial", int split_k_slices=1) {
int N, H, W, C_, K, R, S;
K = std::get<0>(weight_size);
C_ = std::get<1>(weight_size);
R = std::get<2>(weight_size);
S = std::get<3>(weight_size);
N = B.size(0);
H = B.size(2);
W = B.size(3);
cutlass::conv::Conv2dProblemSize problem_size(
cutlass::Tensor4DCoord(N, H, W, C_),
cutlass::Tensor4DCoord(K, R, S, C_),
cutlass::Tensor4DCoord(std::get<0>(padding), std::get<0>(padding), std::get<1>(padding), std::get<1>(padding)),
cutlass::MatrixCoord(std::get<0>(stride), std::get<1>(stride)),
cutlass::MatrixCoord(std::get<0>(dilation), std::get<1>(dilation)),
cutlass::conv::Mode::kCrossCorrelation,
split_k_slices
);
typename UnderlyingKernel::ElementC* ptrC = (C == at::nullopt) ?
nullptr :
reinterpret_cast<typename UnderlyingKernel::ElementC*>(C->data_ptr());
torch::TensorOptions options = torch::TensorOptions().dtype(${torch_type_C}).device(B.device()).memory_format(at::MemoryFormat::ChannelsLast);
at::Tensor D = torch::empty({K, C_, R, S}, options);
""" + _PYTORCH_CONV2D_IMPL_TEMPLATE_2x
)
_PYTORCH_SETUP_PY = common._PYSTYLE_AUTOGEN_COMMENT + """
from setuptools import setup
from torch.utils.cpp_extension import BuildExtension, CUDAExtension
setup(
name='${name}',
ext_modules=[
CUDAExtension('${name}', [
'${name}.cpp',
'${name}_kernel.cu',
],
include_dirs=['${cutlass_path}/include', '${cutlass_path}/tools/util/include'],
extra_compile_args=['-std=c++17']
),
],
cmdclass={
'build_ext': BuildExtension
})
"""
def _generate_setup(name: str, sourcedir: str):
"""
Generates a setup.py file for the extension
:param name: name of the module to generate
:type name: str
:param sourcedir: directory to which generated source files should be written
:type sourcedir: str
"""
setup_py_file = os.path.join(sourcedir, "setup.py")
setup_source = SubstituteTemplate(
_PYTORCH_SETUP_PY, {"name": name, "cutlass_path": CUTLASS_PATH}
)
with open(setup_py_file, "w") as outfile:
outfile.write(setup_source)
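# Usage note (illustrative, not part of the generated output): the emitted setup.py can be
# built ahead of time from ``sourcedir`` with the standard PyTorch-extension flow, e.g.
#
#   TORCH_CUDA_ARCH_LIST="8.0" python setup.py develop
#
# which mirrors what _ArchListSetter below does automatically for the JIT path.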
class _ArchListSetter:
"""
Utility context manager for temporarily setting the value of the ``TORCH_CUDA_ARCH_LIST``
environment variable when building a PyTorch CUDA module.
    ``TORCH_CUDA_ARCH_LIST`` is a space-delimited list of compute capabilities for which a PyTorch
    CUDA module should be compiled.
For example, ``TORCH_CUDA_ARCH_LIST="7.0 8.0"`` would result in the inclusion of
``-gencode=arch=compute_70,code=sm_70`` and ``-gencode=arch=compute_80,code=sm_80`` in the
compilation of the module.
    This utility wraps the building of a PyTorch CUDA module with a setting of this environment
    variable according to the compute capability currently being targeted.
Example usage:
.. highlight:: python
.. code-block:: python
# Temporarily set TORCH_CUDA_ARCH_LIST="8.0"
with _ArchListSetter(80):
# Perform JIT compilation and loading of the module
mod = torch.utils.cpp_extension.load(...)
:param cc: compute capability
:type cc: int
"""
_TORCH_CUDA_ARCH_LIST = "TORCH_CUDA_ARCH_LIST"
def __init__(self, cc: int):
self.cc_str = ".".join(list(str(cc)))
def __enter__(self):
"""
Saves the old value of TORCH_CUDA_ARCH_LIST and reset it to the new value based on ``cc``
"""
self.old_arch_list = os.getenv(_ArchListSetter._TORCH_CUDA_ARCH_LIST)
os.environ[_ArchListSetter._TORCH_CUDA_ARCH_LIST] = self.cc_str
return self
def __exit__(self, exc_type, exc_val, traceback):
"""
Restores the old value of TORCH_CUDA_ARCH_LIST
"""
        if self.old_arch_list is None:
            # The variable was unset before entering the context; remove it rather than
            # assigning None (which would raise a TypeError).
            os.environ.pop(_ArchListSetter._TORCH_CUDA_ARCH_LIST, None)
        else:
            os.environ[_ArchListSetter._TORCH_CUDA_ARCH_LIST] = self.old_arch_list
def _jit(name: str, cc: int, cpp_file: str, cuda_file: str):
"""
JIT compiles and loads a PyTorch CUDA extension.
:param name: name of the module to generate
:type name: str
:param cc: compute capability of the device the module should target
:type cc: int
:param cpp_file: path to file containing extension's C++ interface
:type cpp_file: str
:param cuda_file: path to file containing extension's CUDA interface
:type cuda_file: str
:return: loaded PyTorch module
"""
from torch.utils.cpp_extension import load
extra_cuda_cflags = ["-std=c++17"]
if cc == 90:
# PyTorch does not currently add the sm_90a target when compute capability
# 9.0 is set within TORCH_CUDA_ARCH_LIST. Thus, we manually add the sm_90a target.
extra_cuda_cflags.append("-gencode=arch=compute_90a,code=sm_90a")
with _ArchListSetter(cc):
jitmodule = load(
name,
[cpp_file, cuda_file],
extra_cuda_cflags=extra_cuda_cflags,
extra_include_paths=[
os.path.join(CUTLASS_PATH, "include"),
os.path.join(CUTLASS_PATH, "tools/util/include"),
],
verbose=(logger.level == logging.DEBUG)
)
return jitmodule
def _pytorch_gemm(op, name: str, cc: int, jit: bool = False, sourcedir: str = ""):
"""
Generates source for building a PyTorch CUDA module that leverages the CUTLASS GEMM
specified by ``op``. If the ``jit`` parameter is set to true, the module is just-in-time
compiled, loaded, and returned.
:param op: operation to emit in the module
:param name: name of the module to generate
:type name: str
:param cc: compute capability of the device the module should target
:type cc: int
:param jit: whether the module should be just-in-time compiled
:type jit: bool
:param sourcedir: directory to which generated source files should be written
:type sourcedir: str
:return: loaded PyTorch module if ``jit=True`` or ``None`` otherwise
"""
if sourcedir != "" and not os.path.isdir(sourcedir):
os.makedirs(sourcedir)
cuda_file = os.path.join(sourcedir, name + "_kernel.cu")
extra_kw = {}
if op.api == ApiVersion.v3x:
impl_template = _PYTORCH_GEMM_IMPL_TEMPLATE_3x
else:
impl_template = _PYTORCH_GEMM_IMPL_TEMPLATE_2x
if isinstance(op.swizzling_functor, swizzle.ThreadblockSwizzleStreamK):
extra_kw["args"] = common._CUTLASS_KERNEL_ARGS_2x_STREAM_K
else:
extra_kw["args"] = common._CUTLASS_KERNEL_ARGS_2x
cuda_impl = SubstituteTemplate(impl_template, {"name": name, **extra_kw})
cuda_source = SubstituteTemplate(
_PYTORCH_CUDA_TEMPLATE,
{
"includes": _PYTORCH_GEMM_INCLUDES[op.api],
"declaration": op.rt_module.emit(),
"procedural_name": op.procedural_name(),
"impl": cuda_impl,
"torch_type_C": _CUTLASS_TYPE_TO_TORCH_TYPE[op.C.element],
},
)
with open(cuda_file, "w") as outfile:
outfile.write(cuda_source)
cpp_file = os.path.join(sourcedir, name + ".cpp")
cpp_source = SubstituteTemplate(
_PYTORCH_GEMM_CPP_TEMPLATE,
{"name": name, "description": f"CUTLASS {op.procedural_name()} GEMM"},
)
with open(cpp_file, "w") as outfile:
outfile.write(cpp_source)
_generate_setup(name, sourcedir)
if jit:
return _jit(name, cc, cpp_file, cuda_file)
return None
def _pytorch_grouped_gemm(
op, name: str, cc: int, jit: bool = False, sourcedir: str = ""
):
"""
Generates source for building a PyTorch CUDA module that leverages the CUTLASS grouped GEMM
specified by ``op``. If the ``jit`` parameter is set to true, the module is just-in-time
compiled, loaded, and returned.
:param op: operation to emit in the module
:param name: name of the module to generate
:type name: str
:param cc: compute capability of the device the module should target
:type cc: int
:param jit: whether the module should be just-in-time compiled
:type jit: bool
:param sourcedir: directory to which generated source files should be written
:type sourcedir: str
:return: loaded PyTorch module if ``jit=True`` or ``None`` otherwise
"""
if op.api != ApiVersion.v2x:
raise Exception("Grouped GEMM is currently only supported for CUTLASS 2.x")
if sourcedir != "" and not os.path.isdir(sourcedir):
os.makedirs(sourcedir)
cuda_file = os.path.join(sourcedir, name + "_kernel.cu")
cuda_impl = SubstituteTemplate(_PYTORCH_GROUPED_GEMM_IMPL_TEMPLATE, {"name": name})
cuda_source = SubstituteTemplate(
_PYTORCH_CUDA_TEMPLATE,
{
"includes": _PYTORCH_GROUPED_GEMM_INCLUDES,
"declaration": op.rt_module.emit(),
"procedural_name": op.procedural_name(),
"impl": cuda_impl,
"torch_type_C": _CUTLASS_TYPE_TO_TORCH_TYPE[op.C.element],
},
)
with open(cuda_file, "w") as outfile:
outfile.write(cuda_source)
cpp_file = os.path.join(sourcedir, name + ".cpp")
cpp_source = SubstituteTemplate(
_PYTORCH_GROUPED_GEMM_CPP_TEMPLATE,
{"name": name, "description": f"CUTLASS {op.procedural_name()} grouped GEMM"},
)
with open(cpp_file, "w") as outfile:
outfile.write(cpp_source)
_generate_setup(name, sourcedir)
if jit:
return _jit(name, cc, cpp_file, cuda_file)
return None
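# Illustrative call pattern for a generated grouped GEMM module (a sketch; assumes jit=True and
# that the grouped-GEMM CPP template earlier in this file binds the entry point as ``run``):
#
#   mod = _pytorch_grouped_gemm(op, "grouped_gemm_mod", cc=80, jit=True)
#   Ds = mod.run([A0, A1, A2], [B0, B1, B2])   # one output tensor per problem in the group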
def _pytorch_conv2d(op, name: str, cc: int, jit: bool = False, sourcedir: str = ""):
"""
Generates source for building a PyTorch CUDA module that leverages the CUTLASS Conv2d
specified by ``op``. If the ``jit`` parameter is set to true, the module is just-in-time
compiled, loaded, and returned.
:param op: operation to emit in the module
:param name: name of the module to generate
:type name: str
:param cc: compute capability of the device the module should target
:type cc: int
:param jit: whether the module should be just-in-time compiled
:type jit: bool
:param sourcedir: directory to which generated source files should be written
:type sourcedir: str
    Note that when the conv kind is ``dgrad`` or ``wgrad``, the size of the input ``(N, C, H, W)`` or
    weight ``(K, C, R, S)`` must be provided, because multiple values of H/W/R/S map to the same P/Q.
:return: loaded PyTorch module if ``jit=True`` or ``None`` otherwise
"""
if sourcedir != "" and not os.path.isdir(sourcedir):
os.makedirs(sourcedir)
cuda_file = os.path.join(sourcedir, name + "_kernel.cu")
extra_kw = {}
if op.conv_kind == cutlass_bindings.conv.Operator.fprop:
impl_template = _PYTORCH_CONV2D_FPROP_IMPL_TEMPLATE_2x
cpp_template = _PYTORCH_CONV2D_FPROP_CPP_TEMPLATE
elif op.conv_kind == cutlass_bindings.conv.Operator.dgrad:
impl_template = _PYTORCH_CONV2D_DGRAD_IMPL_TEMPLATE_2x
cpp_template = _PYTORCH_CONV2D_GRAD_CPP_TEMPLATE
elif op.conv_kind == cutlass_bindings.conv.Operator.wgrad:
impl_template = _PYTORCH_CONV2D_WGRAD_IMPL_TEMPLATE_2x
cpp_template = _PYTORCH_CONV2D_GRAD_CPP_TEMPLATE
extra_kw["conv_kind_name"] = ConvKindNames[op.conv_kind].capitalize()
extra_kw["torch_type_C"] = _CUTLASS_TYPE_TO_TORCH_TYPE[op.C.element]
cuda_impl = SubstituteTemplate(impl_template, {"name": name, **extra_kw})
cuda_source = SubstituteTemplate(
_PYTORCH_CUDA_TEMPLATE,
{
"includes": _PYTORCH_CONV2D_INCLUDES,
"declaration": op.rt_module.emit(),
"procedural_name": op.procedural_name(),
"impl": cuda_impl,
"torch_type_C": _CUTLASS_TYPE_TO_TORCH_TYPE[op.C.element],
},
)
with open(cuda_file, "w") as outfile:
outfile.write(cuda_source)
cpp_file = os.path.join(sourcedir, name + ".cpp")
cpp_source = SubstituteTemplate(
cpp_template,
{"name": name, "description": f"CUTLASS {op.procedural_name()} Conv2d"},
)
with open(cpp_file, "w") as outfile:
outfile.write(cpp_source)
_generate_setup(name, sourcedir)
if jit:
return _jit(name, cc, cpp_file, cuda_file)
return None
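# Illustrative call pattern for a generated dgrad module (a sketch; assumes jit=True and that the
# conv2d CPP template binds the entry point as ``run``). The explicit (N, C, H, W) tuple is needed
# because H and W cannot be recovered uniquely from the output extent P/Q when the stride exceeds one:
#
#   mod = _pytorch_conv2d(op, "conv2d_dgrad_mod", cc=80, jit=True)
#   grad_input = mod.run((N, C, H, W), grad_output, weight)   # default stride/padding/dilation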
def pytorch(op, name: str, cc: int, jit: bool = False, sourcedir: str = ""):
"""
Generates source for building a PyTorch CUDA module that leverages the CUTLASS kernel
specified by ``op``. If the ``jit`` parameter is set to true, the module is just-in-time
compiled, loaded, and returned.
The result of this method is files within ``sourcedir`` that can be used for building
a PyTorch module.
:param op: operation to emit in the module
:param name: name of the module to generate
:type name: str
:param cc: compute capability of the device the module should target
:type cc: int
:param jit: whether the module should be just-in-time compiled
:type jit: bool
:param sourcedir: directory to which generated source files should be written
:type sourcedir: str
:return: loaded PyTorch module (if ``jit=True``) or None
"""
device_op = op.device_op()
if isinstance(op, GemmOperationUniversal):
return _pytorch_gemm(device_op, name, cc, jit, sourcedir)
elif isinstance(op, GemmOperationGrouped):
return _pytorch_grouped_gemm(device_op, name, cc, jit, sourcedir)
elif isinstance(op, Conv2dOperation):
return _pytorch_conv2d(device_op, name, cc, jit, sourcedir)
else:
raise Exception(
f"Operation type {type(op)} is not currently supported for PyTorch emission."
)
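# End-to-end sketch (illustrative; ``plan`` and the tensor names are placeholders, and the example
# assumes the generated CPP template binds the kernel entry point as ``run``):
#
#   op = plan.construct()                      # e.g. a GemmOperationUniversal
#   mod = pytorch(op, name="cutlass_gemm", cc=80, jit=True, sourcedir="./out")
#   D = mod.run(A, B)                          # run the emitted CUTLASS kernel from PyTorch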
| cutlass-main | python/cutlass/emit/pytorch.py |
#################################################################################################
#
# Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
"""
Common utilities for emitting CUTLASS kernels
"""
import cutlass
# Strings used for printing information about the generation of emitted scripts
_AUTOGEN_STR = f"This file was automatically generated by the CUTLASS {cutlass.__version__} Python interface (https://github.com/nvidia/cutlass/python)"
_CSTYLE_AUTOGEN_COMMENT = f"""// {_AUTOGEN_STR}
"""
_PYSTYLE_AUTOGEN_COMMENT = f"""# {_AUTOGEN_STR}
"""
_CUTLASS_KERNEL_ARGS_2x = """
typename DeviceKernel::Arguments arguments {
cutlass::gemm::GemmUniversalMode::kGemm,
{M, N, K}, // problem size
1,
{alpha, beta},
A, B, C, D,
0, 0, 0, 0, // batch strides
DeviceKernel::LayoutA::packed({M, K}).stride(0), // lda
DeviceKernel::LayoutB::packed({K, N}).stride(0), // ldb
DeviceKernel::LayoutC::packed({M, N}).stride(0), // ldc
DeviceKernel::LayoutC::packed({M, N}).stride(0) // ldd
};
"""
_CUTLASS_KERNEL_ARGS_2x_STREAM_K = """
typename DeviceKernel::Arguments arguments {
cutlass::gemm::GemmUniversalMode::kGemm,
{M, N, K}, // problem size
1,
{alpha, beta},
A, B, C, D,
0, 0, 0, 0, // batch strides
DeviceKernel::LayoutA::packed({M, K}).stride(0), // lda
DeviceKernel::LayoutB::packed({K, N}).stride(0), // ldb
DeviceKernel::LayoutC::packed({M, N}).stride(0), // ldc
DeviceKernel::LayoutC::packed({M, N}).stride(0), // ldd
-1 // avail_sms
};
"""
_CUTLASS_KERNEL_RUN_GEMM_2x = """
using ElementCompute = typename DeviceKernel::EpilogueOutputOp::ElementCompute;
cutlass::Status ${name}_kernel_run(int M, int N, int K,
const DeviceKernel::ElementA* A, const DeviceKernel::ElementB* B, const DeviceKernel::ElementC* C, DeviceKernel::ElementC* D,
ElementCompute alpha, ElementCompute beta) {
${args}
size_t workspace_size = DeviceKernel::get_workspace_size(arguments);
cutlass::device_memory::allocation<uint8_t> workspace(workspace_size);
DeviceKernel gemm_op;
cutlass::Status status = gemm_op.initialize(arguments,
workspace.get(),
nullptr); // CUDA stream
if (status != cutlass::Status::kSuccess) {
return status;
}
status = gemm_op();
return status;
}
"""
_CUTLASS_KERNEL_RUN_GEMM_3x = """
using StrideA = typename DeviceKernel::GemmKernel::StrideA;
using StrideB = typename DeviceKernel::GemmKernel::StrideB;
using StrideC = typename DeviceKernel::GemmKernel::StrideC;
using StrideD = typename DeviceKernel::GemmKernel::StrideD;
using ElementCompute = typename DeviceKernel::EpilogueOutputOp::ElementCompute;
cutlass::Status ${name}_kernel_run(
int M, int N, int K, int L,
const DeviceKernel::ElementA* A, const DeviceKernel::ElementB* B, const DeviceKernel::ElementC* C, DeviceKernel::ElementC* D,
ElementCompute alpha, ElementCompute beta, const cutlass::KernelHardwareInfo& hw_info) {
typename DeviceKernel::Arguments arguments{
cutlass::gemm::GemmUniversalMode::kGemm,
{M, N, K, L}, // problem size
A, // ptrA
cutlass::make_cute_packed_stride(StrideA{}, cute::make_shape(M, K, L)), // stride A
B, // ptrB
cutlass::make_cute_packed_stride(StrideB{}, cute::make_shape(N, K, L)), // stride B
{
C, // ptrC
cutlass::make_cute_packed_stride(StrideC{}, cute::make_shape(M, N, L)), // stride C
D, // ptrD
cutlass::make_cute_packed_stride(StrideD{}, cute::make_shape(M, N, L)), // stride D
{alpha, beta},
},
hw_info
};
size_t workspace_size = DeviceKernel::get_workspace_size(arguments);
cutlass::device_memory::allocation<uint8_t> workspace(workspace_size);
DeviceKernel gemm_op;
cutlass::Status status = gemm_op.run(arguments,
workspace.get(),
nullptr); // CUDA stream
return status;
}
"""
_CUTLASS_KERNEL_RUN_GROUPED_GEMM_2x = """
using ElementCompute = typename DeviceKernel::EpilogueOutputOp::ElementCompute;
int threadblock_count = DeviceKernel::sufficient();
cutlass::Status ${name}_kernel_run(int problem_count, cutlass::gemm::GemmCoord* problem_sizes,
DeviceKernel::ElementA** A, DeviceKernel::ElementB** B, DeviceKernel::ElementC** C, DeviceKernel::ElementC** D,
int64_t* lda, int64_t* ldb, int64_t* ldc, int64_t* ldd,
ElementCompute alpha, ElementCompute beta) {
typename DeviceKernel::Arguments arguments {
problem_sizes,
problem_count,
threadblock_count,
{alpha, beta},
A, B, C, D,
lda, ldb, ldc, ldd
};
size_t workspace_size = DeviceKernel::get_workspace_size(arguments);
cutlass::device_memory::allocation<uint8_t> workspace(workspace_size);
DeviceKernel gemm_op;
cutlass::Status status = gemm_op.initialize(arguments,
workspace.get(),
nullptr); // CUDA stream
if (status != cutlass::Status::kSuccess) {
return status;
}
status = gemm_op();
return status;
}
"""
_CUTLASS_KERNEL_RUN_CONV2D_2x = """
using UnderlyingKernel = typename DeviceKernel::UnderlyingKernel;
namespace {
using TensorRefA = typename UnderlyingKernel::TensorRefA;
using TensorRefB = typename UnderlyingKernel::TensorRefB;
using TensorRefC = typename UnderlyingKernel::TensorRefC;
using ElementCompute = typename UnderlyingKernel::EpilogueOutputOp::ElementCompute;
}
template<typename TensorRef, typename Element>
TensorRef get_tensor_ref(cutlass::Tensor4DCoord tensor_coord, Element* ptr){
cutlass::layout::TensorNHWC layout = cutlass::layout::TensorNHWC::packed(tensor_coord);
TensorRef tensor_ref(ptr, layout);
return tensor_ref;
}
cutlass::Status ${name}_kernel_run(cutlass::conv::Conv2dProblemSize* problem_size,
UnderlyingKernel::ElementA* A, UnderlyingKernel::ElementB* B,
UnderlyingKernel::ElementC* C, UnderlyingKernel::ElementC* D,
ElementCompute alpha, ElementCompute beta, std::string split_k_mode,
cudaStream_t stream, int device_id=0) {
// create the tensor references
cutlass::Tensor4DCoord tensor_coord_A = cutlass::conv::implicit_gemm_tensor_a_extent(
cutlass::conv::Operator::k${conv_kind_name}, *problem_size
);
cutlass::Tensor4DCoord tensor_coord_B = cutlass::conv::implicit_gemm_tensor_b_extent(
cutlass::conv::Operator::k${conv_kind_name}, *problem_size
);
cutlass::Tensor4DCoord tensor_coord_C = cutlass::conv::implicit_gemm_tensor_c_extent(
cutlass::conv::Operator::k${conv_kind_name}, *problem_size
);
TensorRefA tensor_ref_A = get_tensor_ref<TensorRefA, UnderlyingKernel::ElementA>(tensor_coord_A, A);
TensorRefB tensor_ref_B = get_tensor_ref<TensorRefB, UnderlyingKernel::ElementB>(tensor_coord_B, B);
TensorRefC tensor_ref_C = get_tensor_ref<TensorRefC, UnderlyingKernel::ElementC>(tensor_coord_C, C);
TensorRefC tensor_ref_D = get_tensor_ref<TensorRefC, UnderlyingKernel::ElementC>(tensor_coord_C, D);
cutlass::conv::SplitKMode mode;
if (split_k_mode == "serial") {
mode = cutlass::conv::SplitKMode::kSerial;
} else if (split_k_mode == "parallel") {
mode = cutlass::conv::SplitKMode::kParallel;
} else {
throw std::runtime_error("Invalid split_k_mode: " + split_k_mode);
}
typename DeviceKernel::Arguments arguments{
*problem_size,
tensor_ref_A,
tensor_ref_B,
tensor_ref_C,
tensor_ref_D,
{alpha, beta},
mode
};
DeviceKernel implicit_gemm_op;
size_t workspace_size = implicit_gemm_op.get_workspace_size(arguments);
void* workspace_ptr = device_memory_allocation(workspace_size, device_id);
cutlass::Status status = implicit_gemm_op.can_implement(arguments);
if (status != cutlass::Status::kSuccess) {
return status;
}
status = implicit_gemm_op.initialize(arguments, workspace_ptr, stream);
if (status != cutlass::Status::kSuccess) {
return status;
}
//
// Launch initialized CUTLASS kernel
//
status = implicit_gemm_op(stream);
return status;
}
"""
| cutlass-main | python/cutlass/emit/common.py |
#################################################################################################
#
# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
import gen_turing_and_volta as api_generator
import gen_sample as sample_creater
import gen_cmake as cmake_creater
import gen_verify as verify_creater
import gen_device as b2b_fused_generator
import replace_fix_impl_header
import argparse
import os
import json
parser = argparse.ArgumentParser(description="Generates Fused Multi-GEMM CUTLASS Kernels")
parser.add_argument("--config-file", default="config.json", help="JSON file containing configuration to generate")
parser.add_argument("--gen-name", default="FusedMultiGemmForward", help="Specific the output name")
parser.add_argument("--output-dir", default="", help="Specifies the output dir")
parser.add_argument("--cutlass-dir", default="", help="Specifies the dependent CUTLASS repo dir")
parser.add_argument("--gen-include-cutlass-dir", default="", help="Specifies the generated CUTLASS code include dir, if needed.")
args = parser.parse_args()
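# Example invocation (illustrative):
#   python gen_all_code.py --config-file config.json --gen-name FusedMultiGemmForward \
#       --output-dir ./generated --cutlass-dir /path/to/cutlass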
gen_name = args.gen_name
cutlass_deps_dir = args.cutlass_dir
output_dir = args.output_dir
output_dir += "/"
cutlass_deps_root = args.gen_include_cutlass_dir
if cutlass_deps_root == '':
cutlass_deps_root = cutlass_deps_dir + "/include/"
cutlass_deps_root +='/'
if not os.path.exists(output_dir):
os.makedirs(output_dir)
if not os.path.exists(output_dir + "/" + "auto_gen"):
os.mkdir(output_dir + "/" + "auto_gen")
if not os.path.exists(output_dir + "/" + "fixed_impl"):
os.mkdir(output_dir + "/" + "fixed_impl" )
if not os.path.exists(output_dir + "/" + "sample"):
os.mkdir(output_dir + "/" + "sample" )
if not os.path.exists(output_dir + "/" + "auto_gen" + "/" + "device"):
os.mkdir(output_dir + "/" + "auto_gen" + "/" + "device")
if not os.path.exists(output_dir + "/" + "auto_gen" + "/" + "kernel"):
os.mkdir(output_dir + "/" + "auto_gen" + "/" + "kernel")
if not os.path.exists(output_dir + "/" + "auto_gen" + "/" + "threadblock"):
os.mkdir(output_dir + "/" + "auto_gen" + "/" + "threadblock")
with open(args.config_file, 'r') as infile:
gemm_info_dict = json.load(infile)
keys = sorted(gemm_info_dict.keys())
fuse_gemm_info = [gemm_info_dict[k] for k in keys]
for_cutlass_gen_user_include_header_file = [
cutlass_deps_root + "cutlass/epilogue/thread/linear_combination_leaky_relu.h",
cutlass_deps_root + "cutlass/epilogue/thread/linear_combination.h",
]
for_fused_wrapper = [
cutlass_deps_root + "cutlass/epilogue/thread/linear_combination_leaky_relu.h",
cutlass_deps_root + "cutlass/epilogue/thread/linear_combination.h",
"auto_gen/device/" + gen_name + ".h",
cutlass_deps_root + "cutlass/gemm/device/gemm_batched.h",
cutlass_deps_root + "cutlass/cutlass.h",
]
# Copy fixed implementation to the output directory
fix_impl = replace_fix_impl_header.replace_fix_impl("../fixed_impl/", output_dir +"/fixed_impl/", cutlass_deps_root)
fix_impl.gen_code()
auto_gen_output_dir = output_dir + "/auto_gen/"
project_root = ""
turing_plus = b2b_fused_generator.gen_device(fuse_gemm_info, gen_name, for_cutlass_gen_user_include_header_file, cutlass_deps_root, project_root, auto_gen_output_dir)
turing_plus.gen_code(75, 'hmma1688', False)
api = api_generator.gen_one_API(fuse_gemm_info, gen_name, for_fused_wrapper, output_dir)
api.gen_code()
# Generate C++ sample
os.system("cp ../leaky_bias.h " + output_dir + "/sample/")
os.system("cp ../utils.h " + output_dir + "/sample/")
sample_dir = output_dir + "/sample/"
sample = sample_creater.gen_test(fuse_gemm_info, gen_name, for_cutlass_gen_user_include_header_file, sample_dir)
sample.gen_cpp_sample()
cmake_gen = cmake_creater.gen_build_sys(cutlass_deps_dir, output_dir)
cmake_gen.gen_code()
verify = verify_creater.gen_verify(fuse_gemm_info, gen_name, for_fused_wrapper, output_dir)
verify.gen_code()
| cutlass-main | examples/44_multi_gemm_ir_and_codegen/ir_gen/gen_all_code.py |
#################################################################################################
#
# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
from typing import *
import helper
import gen_ir
import gen_kernel as gen_ker
class gen_device:
def __init__(self, fuse_gemm_info, gen_class_name, user_header_file, cutlass_deps_root, project_root, output_dir = "../"):
self.fuse_gemm_info = fuse_gemm_info
self.raw_gemm_info = fuse_gemm_info
self.b2b_num = len(fuse_gemm_info)
self.user_header_file = user_header_file
self.args = {}
        # device-side argument struct members
self.arg_member = []
self.gen_class_name = gen_class_name
self.gen_kernel_name = gen_class_name + "Kernel"
self.tempalte_args = []
self.__tempalate_arg_list = {'Stages': int, 'SplitKSerial': bool, 'IsBetaZero': bool, 'AlignmentA': int, 'AlignmentB': int}
self.file_name = output_dir + "/device/" +gen_class_name +".h"
self.sample_dir = output_dir
self.cutlass_deps_root = cutlass_deps_root
self.project_root = project_root
self.this_file_root = output_dir + "/device/"
self.first_use_1stage = False
## gen kernel
self.gen_kernel = gen_ker.gen_kernel(self.tempalte_args, self.gen_class_name, self.b2b_num, output_dir, cutlass_deps_root, project_root)
def __check_arg_type(self, temp_arg):
if temp_arg in self.__tempalate_arg_list.keys():
return self.__tempalate_arg_list[temp_arg]
find_sub = False
for candidate_arg in self.__tempalate_arg_list.keys():
if (temp_arg.find(candidate_arg) != -1):
return self.__tempalate_arg_list[candidate_arg]
return 'typename'
# def gen_B2b2bGemm_class():
def set_arch(self, sm_cap, mma_tp):
if sm_cap == 75 or sm_cap == 80 or sm_cap == 86:
self.arch = "cutlass::arch::Sm" + str(sm_cap)
        if mma_tp == 'hmma1688':
self.mma_shape = [16, 8, 8]
self.mma_tp = 'hmma'
        elif mma_tp == 'imma8816':
self.mma_tp = 'imma'
self.mma_shape = [8, 8, 16]
else:
return 0
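    # Illustrative: set_arch(75, 'hmma1688') selects arch "cutlass::arch::Sm75" with the HMMA
    # 16x8x8 instruction shape; set_arch(80, 'imma8816') selects Sm80 with IMMA 8x8x16.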
def gen_include_header(self):
code = '''\
/* Auto Generated code - Do not edit.*/
#pragma once
#include \"{cutlass_root}cutlass/cutlass.h\"
#include \"{cutlass_root}cutlass/numeric_types.h\"
#include \"{cutlass_root}cutlass/arch/arch.h\"
#include \"{cutlass_root}cutlass/device_kernel.h\"
#include \"{cutlass_root}cutlass/gemm/threadblock/threadblock_swizzle.h\"
#include \"{cutlass_root}cutlass/gemm/device/default_gemm_configuration.h\"
#include \"{cutlass_root}cutlass/epilogue/thread/linear_combination_relu.h\"
#include \"{cutlass_root}cutlass/epilogue/thread/linear_combination.h\"
#include \"{project_root}../kernel/b2b_gemm.h\"
#include \"{project_root}../kernel/default_b2b_gemm.h\"
'''.format(cutlass_root=self.cutlass_deps_root, project_root=self.project_root, this_file_root=self.this_file_root)
include_user_header = ""
for header in self.user_header_file:
include_user_header += "#include \"" + header + "\"\n"
return code + include_user_header
def gen_code(self, sm_cap, mma_tp, ifprint = True):
self.set_arch(sm_cap, mma_tp)
self.update_b2b_args()
print(self.fuse_gemm_info)
self.update_b2b_class_template_args()
func_code = self.gen_all_func()
member_var_code = "private:\n typename B2bGemmKernel::Params params_;\n"
gen_code = gen_ir.gen_template_class(self.gen_class_name, self.tempalte_args, func_code + member_var_code)
code = self.gen_include_header() + gen_ir.gen_namespace("cutlass", gen_ir.gen_namespace("gemm", gen_ir.gen_namespace("device", gen_code)))
if ifprint:
print(code)
print("[INFO]: Gen device code output Dir: is ", self.file_name)
with open(self.file_name, 'w+') as f:
f.write(code)
gen_kernel = self.gen_kernel.gen_code(self.first_use_1stage)
print(gen_kernel)
def update_b2b_class_template_args(self):
for arg in self.args.keys():
self.tempalte_args.append([self.__check_arg_type(arg), arg, self.args[arg]])
def update_b2b_args(self):
self.args['ElementA'] = helper.type_2_cutlass_type(self.fuse_gemm_info[0]['A_tp'])
self.args['LayoutA'] = helper.type_2_cutlass_type(self.fuse_gemm_info[0]['A_format'])
cnt = 0
warp_M_tile = 32
        # Determine the maximum N tile across all layers
Max_Ntile = 0
for layer in self.fuse_gemm_info:
n_tile = layer['mnk'][1]
if n_tile > Max_Ntile:
Max_Ntile = n_tile
if Max_Ntile >= 256:
warp_M_tile = 16
stages_temp = []
for layer in self.fuse_gemm_info:
cnt_str = str(cnt)
B_tp_str= 'ElementB' + cnt_str
B_format_str = 'LayoutB' + cnt_str
C_tp_str= 'ElementC' + cnt_str
C_format_str = 'LayoutC' + cnt_str
Acc_str = 'ElementAccumulator' + cnt_str
self.args[B_tp_str] = helper.type_2_cutlass_type(layer['B_tp'])
self.args[B_format_str] = helper.type_2_cutlass_type(layer['B_format'])
self.args[C_tp_str] = helper.type_2_cutlass_type(layer['C_tp'])
self.args[C_format_str] = helper.type_2_cutlass_type(layer['C_format'])
self.args[Acc_str] = helper.type_2_cutlass_type(layer['Acc_tp'])
mnk = layer['mnk'][:]
tile_mnk = mnk[:]
            tile_mnk[2] = 32  # force the K tile to 32
#N tile gen
if mnk[1] > 1024:
assert(0)
elif mnk[1] > 512:
tile_mnk[1] = 1024
elif mnk[1] > 256:
tile_mnk[1] = 512
elif mnk[1] > 128:
tile_mnk[1] = 256
elif mnk[1] > 64:
tile_mnk[1] = 128
elif mnk[1] > 32:
tile_mnk[1] = 64
else :
tile_mnk[1] = 32
if tile_mnk[1] == 512:
stages_temp.append(1)
else:
stages_temp.append(2)
tile_mnk[0] = 4 * warp_M_tile
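            # Illustrative: a layer with N = 96 falls in the (64, 128] bucket, so the threadblock
            # tile becomes [4 * warp_M_tile, 128, 32] with 2 stages.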
epilogue_setted_type = helper.get_epilogue_tp(layer)
cutlass_epilogue_name = "LinearCombinationRelu"
if epilogue_setted_type.lower() == 'leakyrelu':
cutlass_epilogue_name = "LinearCombinationLeakyRelu"
elif epilogue_setted_type.lower() == 'identity':
cutlass_epilogue_name = "LinearCombination"
epilogue_str = 'EpilogueOutputOp' + cnt_str
if cnt != len(self.fuse_gemm_info) - 1:
n = layer['mnk'][1]
Fragments = tile_mnk[1] // 8 * 2
self.args[epilogue_str] = "cutlass::epilogue::thread::" + cutlass_epilogue_name + "<ElementC0_, " + str(Fragments) +", ElementAccumulator0_, ElementAccumulator0_>"
else:
n = layer['mnk'][1]
                n_mod_8 = n % 8  # alignment of the final epilogue is chosen from n modulo 8 (see branches below)
N_align_elements = 1
if n_mod_8 == 0:
N_align_elements = 8
elif n_mod_8 == 4:
N_align_elements = 4
elif n_mod_8 == 2 or n_mod_8 == 6:
N_align_elements = 2
self.args[epilogue_str] = "cutlass::epilogue::thread::" + cutlass_epilogue_name+ "<ElementC0_, " + str(N_align_elements) + ", ElementAccumulator0_, ElementAccumulator0_>"
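                # Illustrative: n = 12 gives n % 8 == 4, so the final epilogue is instantiated
                # with an alignment of 4 elements.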
ThreadBlockShape_str = 'ThreadblockShape' + cnt_str
self.args[ThreadBlockShape_str] = helper.cvt_2_cutlass_shape(tile_mnk)
WarpShape_str = 'WarpShape' + cnt_str
tile_mnk[0] = warp_M_tile
self.args[WarpShape_str] = helper.cvt_2_cutlass_shape(tile_mnk)
cnt += 1
self.args['ElementD'] = helper.type_2_cutlass_type(self.fuse_gemm_info[self.b2b_num - 1]['C_tp'])
self.args['LayoutD'] = helper.type_2_cutlass_type(self.fuse_gemm_info[self.b2b_num - 1]['C_format'])
self.args['InstructionShape'] = helper.cvt_2_cutlass_shape(self.mma_shape)
self.args['OperatorClass'] = 'arch::OpClassTensorOp'
self.args['ArchTag'] = self.arch
self.args['ThreadblockSwizzle'] = 'threadblock::GemmBatchedIdentityThreadblockSwizzle'
for i in range(self.b2b_num):
self.args[helper.var_idx('Stages', i)] = "2"
self.args['AlignmentA'] = str(8)
self.args['AlignmentB'] = str(8)
self.args['SplitKSerial'] = 'false'
self.args['Operator'] = 'typename DefaultGemmConfiguration<OperatorClass_, ArchTag_, ElementA_, ElementB0_, ElementC0_, ElementAccumulator0_>::Operator'
self.args['IsBetaZero'] = 'false'
def gen_using_kernel(self):
code = "using B2bGemmKernel = typename kernel::DefaultB2bGemm<\n"
code += " " + "ElementA,\n"
code += " " + "LayoutA,\n"
for i in range(self.b2b_num):
code += " " + helper.var_idx("ElementB", i) + ",\n"
code += " " + helper.var_idx("LayoutB", i) + ",\n"
code += " " + helper.var_idx("ElementC", i) + ",\n"
code += " " + helper.var_idx("LayoutC", i) + ",\n"
code += " " + helper.var_idx("ElementAccumulator", i) + ",\n"
code += " " + helper.var_idx("EpilogueOutputOp", i) + ",\n"
code += " " + helper.var_idx("ThreadblockShape", i) + ",\n"
code += " " + helper.var_idx("WarpShape", i) + ",\n"
code += " " + "ElementD,\n"
code += " " + "LayoutD,\n"
code += " " + "InstructionShape,\n"
code += " " + "OperatorClass,\n"
code += " " + "ArchTag,\n"
code += " " + "ThreadblockSwizzle,\n"
for i in range(self.b2b_num):
code += " " + helper.var_idx("Stages", i) + ",\n"
code += " " + "AlignmentA,\n"
code += " " + "AlignmentB,\n"
code += " " + "SplitKSerial,\n"
code += " " + "Operator,\n"
code += " " + "IsBetaZero_\n"
code += ">::B2bGemmKernel;\n\n"
return code
def gen_args(self):
def gen_arg_member(b2b_num):
data_members = []
for i in range(b2b_num):
member_type = "GemmCoord"
member_name = "problem_size_" + str(i)
data_members.append((member_type, member_name))
member_type = "TensorRef<ElementA const, LayoutA>"
member_name = "ref_A0"
data_members.append((member_type, member_name))
for i in range(b2b_num):
member_type = "TensorRef<ElementB" + str(i) + " const, LayoutB" + str(i) +">"
member_name = "ref_B" + str(i)
data_members.append((member_type, member_name))
member_type = "TensorRef<ElementC" + str(i) + " const, LayoutC" + str(i) +">"
member_name = "ref_C" + str(i)
data_members.append((member_type, member_name))
member_type = "TensorRef<ElementD, LayoutD>"
member_name = helper.var_idx("ref_D", b2b_num - 1)
data_members.append((member_type, member_name))
for i in range(b2b_num):
member_type = "typename EpilogueOutputOp" + str(i) + "::Params"
member_name = "epilogue" + str(i)
data_members.append((member_type, member_name))
data_members.append(('int', 'batch_count'))
return data_members
def gen_arg_struct_default_ctor(struct_name, data_members, inital_param_num, inital_value):
constructs_code = gen_ir.indentation + "CUTLASS_HOST_DEVICE\n" + \
gen_ir.indentation + struct_name + " (): "
for i in range(inital_param_num):
final_param = ','
if i == inital_param_num - 1:
final_param = '{ }'
constructs_code += data_members[i][1] + inital_value + final_param
constructs_code += "\n"
return constructs_code
def gen_arg_struct_ctor(struct_name, data_members):
constructs_code = gen_ir.indentation + "CUTLASS_HOST_DEVICE\n" + \
gen_ir.indentation + struct_name + " (\n"
cnt = 0
param_num = len(data_members)
for param in data_members:
final = ',\n'
if cnt == param_num - 1:
final = '\n):\n'
constructs_code += gen_ir.indentation + param[0] + " " + param[1] + "_" + final
cnt += 1
cnt = 0
for param in data_members:
final = '),\n'
if cnt == param_num - 1:
final = ") { }\n"
constructs_code += gen_ir.indentation + param[1] + "(" + param[1] + "_" + final
cnt += 1
constructs_code += "\n"
return constructs_code
# (variable type, variable name)
struct_member = gen_arg_member(self.b2b_num)
self.arg_member = struct_member
codeBody = ""
for each_member in struct_member:
codeBody += gen_ir.indentation + each_member[0] + " " + each_member[1] + ";\n"
codeBody += gen_arg_struct_default_ctor("Arguments", struct_member, self.b2b_num, "(0,0,0)") + "\n"
codeBody += gen_arg_struct_ctor("Arguments", struct_member) + "\n"
struct_code = gen_ir.gen_struct("Arguments", codeBody)
return struct_code
def gen_func_constructs(self):
code = self.gen_class_name +"() {}"
return code
def gen_func_initialize(self):
code = "Status initialize(Arguments const &args, void *workspace = nullptr, cudaStream_t stream = nullptr) {\n" + \
"// Determine grid shape\n" + \
"ThreadblockSwizzle threadblock_swizzle;\n" + \
"cutlass::gemm::GemmCoord grid_shape = threadblock_swizzle.get_tiled_shape(\n" + \
" args.problem_size_0, \n" + \
" { ThreadblockShape0::kM, ThreadblockShape0::kN, ThreadblockShape0::kK },\n" + \
" args.batch_count);\n" + \
"// Initialize the Params structure\n" + \
"params_ = typename B2bGemmKernel::Params{\n"
for i in range(self.b2b_num):
code += helper.var_idx(" args.problem_size_", i) + ",\n"
code += " grid_shape,\n" + \
" args.ref_A0.non_const_ref(),\n"
for i in range(self.b2b_num):
code += helper.var_idx(" args.ref_B", i) + ".non_const_ref(),\n"
code += helper.var_idx(" args.ref_C", i) + ".non_const_ref(),\n"
code += helper.var_idx(" args.ref_D", self.b2b_num - 1) + ",\n"
for i in range(self.b2b_num):
code += helper.var_idx(" args.epilogue", i) + ",\n"
code += " args.batch_count\n"
code += "};\n" + \
"return Status::kSuccess;\n" + \
"}\n"
return code
def gen_func_run(self):
code = "Status run(cudaStream_t stream = nullptr) {\n" + \
"\n" + \
" ThreadblockSwizzle threadblock_swizzle;\n" + \
"\n" + \
" dim3 grid = threadblock_swizzle.get_grid_shape(params_.grid_tiled_shape);\n" + \
" dim3 block(B2bGemmKernel::kThreadCount, 1, 1);\n" + \
"\n" + \
" cudaError_t result;\n" + \
"\n" + \
" int smem_size = int(sizeof(typename B2bGemmKernel::SharedStorage));\n" + \
" if (smem_size >= (48 << 10)) {\n" + \
" result = cudaFuncSetAttribute(Kernel<B2bGemmKernel>, cudaFuncAttributeMaxDynamicSharedMemorySize, smem_size);\n" + \
"\n" + \
" if (result != cudaSuccess) {\n" + \
" return Status::kErrorInternal;\n" + \
" }\n" + \
"\n" + \
" result = cudaFuncSetAttribute(\n" + \
" Kernel<B2bGemmKernel>,\n" + \
" cudaFuncAttributePreferredSharedMemoryCarveout, 100);\n" + \
"\n" + \
" if (result != cudaSuccess) {\n" + \
" return Status::kErrorInternal;\n" + \
" }\n" + \
" }\n" + \
" cutlass::Kernel<B2bGemmKernel><<<grid, block, smem_size, stream>>>(params_);\n" + \
" result = cudaGetLastError();\n" + \
" return result == cudaSuccess ? Status::kSuccess : Status::kErrorInternal;\n" + \
" }\n"
return code
def gen_func_operator(self):
opeartor_with_arg_code = "Status operator()(\n" + \
" Arguments const &args,\n" + \
" void *workspace = nullptr,\n" + \
" cudaStream_t stream = nullptr) {\n" + \
" Status status = initialize(args, workspace);\n" + \
" \n" + \
" if (status == Status::kSuccess) {\n" + \
" status = run(stream);\n" + \
" }\n" + \
" return status;\n" + \
"}\n"
operator_code = "Status operator()(\n" + \
" cudaStream_t stream = nullptr) {\n" + \
" Status status = run(stream);\n" + \
" return status;\n" + \
"}\n"
return opeartor_with_arg_code + "\n" + operator_code
def gen_all_func(self):
return self.gen_using_kernel() + "\n" + \
self.gen_args() + "\n" + \
self.gen_func_constructs() + "\n" + \
self.gen_func_initialize() + "\n" + \
self.gen_func_run() + "\n" + \
self.gen_func_operator()
| cutlass-main | examples/44_multi_gemm_ir_and_codegen/ir_gen/gen_device.py |
#################################################################################################
#
# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
import gen_ir
import helper
import gen_threadblock as gen_tb
class gen_default_Gemm:
def __init__(self, template_param, gen_class_name, b2b_num, cutlass_deps_root, project_root):
self.gen_class_name = "B2bGemm"
self.template_param = template_param
self.b2b_num = b2b_num
self.cutlass_deps_root = cutlass_deps_root
self.project_root = project_root
def gen_B2bMma(self, specialized_template_args):
code = "using B2bMma = typename cutlass::gemm::threadblock::DefaultB2bMma<\n"
code += specialized_template_args
code += ">::ThreadblockB2bMma;\n"
# print(code)
return code
def gen_epilogue(self):
epilogue_code = ""
epilogue_code += helper.var_idx("static const int kPartitionsK", self.b2b_num - 1) + helper.var_idx(" = ThreadblockShape", self.b2b_num - 1) + helper.var_idx("::kK / WarpShape", self.b2b_num - 1) + "::kK;\n"
epilogue_code += "using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueTensorOp<\n"
epilogue_code += " " + helper.var_idx("ThreadblockShape", self.b2b_num - 1) + ",\n"
epilogue_code += " " + helper.var_idx("typename B2bMma::Operator", self.b2b_num - 1) + ",\n"
epilogue_code += " " + helper.var_idx("kPartitionsK", self.b2b_num - 1) + ",\n"
epilogue_code += " " + helper.var_idx("EpilogueOutputOp", self.b2b_num - 1) + ",\n"
epilogue_code += " " + helper.var_idx("EpilogueOutputOp", self.b2b_num - 1) + "::kCount\n"
epilogue_code += ">::Epilogue;\n"
epilogue_code += "using B2bGemmKernel = kernel::B2bGemm<B2bMma, Epilogue, ThreadblockSwizzle, SplitKSerial>;\n\n"
return epilogue_code
def gen_include_header(self):
code = '''
/* Auto Generated code - Do not edit.*/
#pragma once
#include \"{cutlass_dir}cutlass/cutlass.h\"
#include \"{cutlass_dir}cutlass/layout/matrix.h\"
#include \"{cutlass_dir}cutlass/numeric_types.h\"
#include \"{cutlass_dir}cutlass/epilogue/threadblock/epilogue.h\"
#include \"{cutlass_dir}cutlass/epilogue/thread/linear_combination.h\"
#include \"{cutlass_dir}cutlass/gemm/gemm.h\"
#include \"{cutlass_dir}cutlass/gemm/kernel/gemm_pipelined.h\"
#include \"{cutlass_dir}cutlass/gemm/threadblock/default_mma_core_sm75.h\"
#include \"{cutlass_dir}cutlass/gemm/threadblock/default_mma_core_sm70.h\"
#include \"{cutlass_dir}cutlass/gemm/threadblock/default_mma_core_sm80.h\"
#include \"{cutlass_dir}cutlass/gemm/threadblock/default_mma_core_simt.h\"
#include \"{cutlass_dir}cutlass/gemm/threadblock/threadblock_swizzle.h\"
#include \"{cutlass_dir}cutlass/epilogue/threadblock/default_epilogue_tensor_op.h\"
#include \"{cutlass_dir}cutlass/epilogue/threadblock/default_epilogue_volta_tensor_op.h\"
#include \"{cutlass_dir}cutlass/epilogue/threadblock/default_epilogue_simt.h\"
#include \"{cutlass_dir}cutlass/transform/threadblock/predicated_tile_iterator.h\"
#include \"../kernel/b2b_gemm.h\"
#include \"../threadblock/default_b2b_mma.h\"
'''.format(cutlass_dir=self.cutlass_deps_root)
return code
def gen_code(self):
gen_using = ''
# Generate default template struct
gen_code = gen_ir.gen_template_struct("Default" + self.gen_class_name, self.template_param,"", speicalized = None, set_default=False)
filter_list = []
filter_list.append(('Stages', 2))
filter_list.append(("OperatorClass", "arch::OpClassTensorOp"))
filter_list.append(("ArchTag", "arch::Sm75"))
for i in range(self.b2b_num):
filter_list.append((helper.var_idx("LayoutC", i), "layout::RowMajor"))
rtn_template_args, speicalized_template_args = gen_ir.filtered_param(self.template_param, filter_list, keep_= True)
B2bMma_code = self.gen_B2bMma(speicalized_template_args)
epilogue_and_rest_code = self.gen_epilogue()
gen_special_code = gen_ir.gen_template_struct("Default" + self.gen_class_name, rtn_template_args, B2bMma_code + epilogue_and_rest_code, speicalized = speicalized_template_args, set_default=False)
code = gen_ir.gen_namespace("cutlass", gen_ir.gen_namespace("gemm", gen_ir.gen_namespace("kernel", gen_code + gen_special_code)))
return self.gen_include_header() + code
class gen_Kernel:
def __init__(self, template_param, gen_class_name, b2b_num, cutlass_deps_root, project_root):
self.gen_class_name = "B2bGemm"
self.template_param = template_param
self.b2bnum = b2b_num
self.cutlass_deps_root = cutlass_deps_root
self.project_root = project_root
def gen_include_header(self):
code = '''
#pragma once
#include \"{cutlass_dir}cutlass/cutlass.h\"
#include \"{cutlass_dir}cutlass/gemm/gemm.h\"
#include \"{cutlass_dir}cutlass/matrix_coord.h\"\n'''.format(cutlass_dir=self.cutlass_deps_root)
return code
def gen_Params(self):
gen_param = ""
for i in range(self.b2bnum):
gen_param += " " + helper.var_idx("cutlass::gemm::GemmCoord problem_size_", i) + ";\n"
gen_param += " " + "cutlass::gemm::GemmCoord grid_tiled_shape;\n"
gen_param += " " + "typename B2bMma::IteratorA0::Params params_A0;\n"
gen_param += " " + "typename B2bMma::IteratorA0::TensorRef ref_A0;\n"
for i in range(self.b2bnum):
gen_param += " " + helper.var_idx("typename B2bMma::IteratorB", i) + helper.var_idx("::Params params_B", i) + ";\n"
gen_param += " " + helper.var_idx("typename B2bMma::IteratorB", i) + helper.var_idx("::TensorRef ref_B", i) + ";\n"
if i == self.b2bnum - 1:
gen_param += " " + helper.var_idx("typename Epilogue::OutputTileIterator::Params params_C", i) + ";\n"
gen_param += " " + helper.var_idx("typename Epilogue::OutputTileIterator::TensorRef ref_C", i) + ";\n"
else:
gen_param += " " + helper.var_idx("typename FusedAddBiasEpilogue", i) + helper.var_idx("::OutputTileIterator::Params params_C", i) + ";\n"
gen_param += " " + helper.var_idx("typename FusedAddBiasEpilogue", i) + helper.var_idx("::OutputTileIterator::TensorRef ref_C", i) + ";\n"
gen_param += " " + helper.var_idx("typename Epilogue::OutputTileIterator::Params params_D", self.b2bnum - 1) + ";\n"
gen_param += " " + helper.var_idx("typename Epilogue::OutputTileIterator::TensorRef ref_D", self.b2bnum - 1) + ";\n"
for i in range(self.b2bnum):
gen_param += " " + helper.var_idx("typename OutputOp", i) + helper.var_idx("::Params output_op_", i) + ";\n"
gen_param += " " + 'int batch_count' + ";\n"
gen_param += " " + 'int gemm_k_iterations_0' + ";\n"
return gen_param
def gen_Memberfunc(self):
code_default = "\nCUTLASS_HOST_DEVICE\n"
code_default += "Params()"
code_default += " { } \n\n"
code_construct = "\nCUTLASS_HOST_DEVICE\n"
code_construct += "Params(\n"
for i in range(self.b2bnum):
code_construct += " " + helper.var_idx("cutlass::gemm::GemmCoord const & problem_size_", i) + ",\n"
code_construct += " " + "cutlass::gemm::GemmCoord const & grid_tiled_shape,\n"
code_construct += " " + "typename B2bMma::IteratorA0::TensorRef ref_A0,\n"
for i in range(self.b2bnum):
code_construct += " " + helper.var_idx("typename B2bMma::IteratorB", i) + helper.var_idx("::TensorRef ref_B", i) + ",\n"
if i == self.b2bnum - 1:
code_construct += " " + helper.var_idx("typename Epilogue::OutputTileIterator::TensorRef ref_C", i) + ",\n"
else:
code_construct += " " + helper.var_idx("typename FusedAddBiasEpilogue", i) + helper.var_idx("::OutputTileIterator::TensorRef ref_C", i) + ",\n"
code_construct += " " + helper.var_idx("typename Epilogue::OutputTileIterator::TensorRef ref_D", self.b2bnum - 1) + ",\n"
for i in range(self.b2bnum):
code_construct += " " + helper.var_idx("typename OutputOp", i) + helper.var_idx("::Params output_op_", i) + helper.var_idx(" = typename OutputOp", i) + "::Params(),\n"
code_construct += " " + "int batch_count = 1\n"
code_construct += "):\n"
for i in range(self.b2bnum):
code_construct += " " + helper.var_idx("problem_size_", i) + helper.var_idx("(problem_size_", i) + "),\n"
code_construct += " " + "grid_tiled_shape(grid_tiled_shape),\n"
code_construct += " " + "params_A0(ref_A0.layout()),\n"
code_construct += " " + "ref_A0(ref_A0),\n"
for i in range(self.b2bnum):
code_construct += " " + helper.var_idx("params_B", i) + helper.var_idx("(ref_B", i) + ".layout()),\n"
code_construct += " " + helper.var_idx("ref_B", i) + helper.var_idx("(ref_B", i) + "),\n"
code_construct += " " + helper.var_idx("params_C", i) + helper.var_idx("(ref_C", i) + ".layout()),\n"
code_construct += " " + helper.var_idx("ref_C", i) + helper.var_idx("(ref_C", i) + "),\n"
code_construct += " " + helper.var_idx("params_D", self.b2bnum - 1) + helper.var_idx("(ref_D", self.b2bnum - 1) + ".layout()),\n"
code_construct += " " + helper.var_idx("ref_D", self.b2bnum - 1) + helper.var_idx("(ref_D", self.b2bnum - 1) + "),\n"
for i in range(self.b2bnum):
code_construct += " " + helper.var_idx("output_op_", i) + helper.var_idx("(output_op_", i) + "), \n"
code_construct += " " + "batch_count(batch_count) {\n"
code_construct += " " + helper.var_idx("gemm_k_iterations_", 0) + helper.var_idx(" = (problem_size_", 0) + helper.var_idx(".k() + B2bMma::Shape", 0) + helper.var_idx("::kK - 1) / B2bMma::Shape", 0) + "::kK;\n"
code_construct += "}\n"
return code_default + code_construct
def gen_using(self):
code_using = ""
for i in range(self.b2bnum - 1):
code_using += " " + helper.var_idx("using OutputOp", i) + helper.var_idx(" = typename B2bMma::OutputOp", i) + ";\n"
code_using += " " + helper.var_idx("using OutputOp", self.b2bnum - 1) + " = typename Epilogue::OutputOp;\n"
for i in range(self.b2bnum - 1):
code_using += " " + helper.var_idx("using FusedAddBiasEpilogue", i) + helper.var_idx(" = typename B2bMma::FusedAddBiasEpilogue", i) +";\n"
code_using += " " + "using WarpCount0 = typename B2bMma::WarpCount0;\n"
code_using += " " + "static int const kThreadCount = 32 * WarpCount0::kCount;\n"
code_using += gen_ir.gen_struct("Params", self.gen_Params() + self.gen_Memberfunc())
code_using += "union SharedStorage {\n"
code_using += " " + "typename B2bMma::B2bMmaSharedStorage main_loop;\n"
code_using += " " + "typename Epilogue::SharedStorage epilogue;\n"
code_using += "};\n"
return code_using
def gen_can_implement(self):
gen_code = ""
return gen_code
def gen_operator_and_constr(self):
ctr_code = "CUTLASS_HOST_DEVICE\n"
ctr_code += self.gen_class_name + "() { } \n\n"
operator_code = "CUTLASS_DEVICE\n"
operator_code += "void operator()(Params const ¶ms, SharedStorage &shared_storage) {\n"
operator_code += " " + "ThreadblockSwizzle threadblock_swizzle;\n"
operator_code += " " + "cutlass::gemm::GemmCoord threadblock_tile_offset = threadblock_swizzle.get_tile_offset(params.grid_tiled_shape);\n"
operator_code += " " + "int batch_idx = threadblock_tile_offset.k();\n"
operator_code += " " + "if (params.grid_tiled_shape.m() <= threadblock_tile_offset.m() ||\n"
operator_code += " " + "params.grid_tiled_shape.n() <= threadblock_tile_offset.n()) {\n"
operator_code += " " + " " + "return;\n"
operator_code += " " + "}\n"
operator_code += " " + "cutlass::MatrixCoord tb_offset_A0{\n"
operator_code += " " + " " + "threadblock_tile_offset.m() * B2bMma::Shape0::kM,\n"
operator_code += " " + " " + "0\n"
operator_code += " " + "};\n"
for i in range(self.b2bnum):
operator_code += " " + helper.var_idx("cutlass::MatrixCoord tb_offset_B", i) + "{\n"
operator_code += " " + " " + "0,\n"
operator_code += " " + " " + helper.var_idx("threadblock_tile_offset.n() * B2bMma::Shape", i) + "::kN\n"
operator_code += " " + "};\n"
operator_code += " " + "int thread_idx = threadIdx.x;\n\n"
operator_code += " " + "MatrixCoord threadblock_offset(\n"
operator_code += " " + " " + helper.var_idx("threadblock_tile_offset.m() * B2bMma::Shape", self.b2bnum - 1) + "::kM,\n"
operator_code += " " + " " + helper.var_idx("threadblock_tile_offset.n() * B2bMma::Shape", self.b2bnum - 1) + "::kN\n"
operator_code += " " + ");\n"
operator_code += " " + "typename B2bMma::IteratorA0 iterator_A0(\n"
operator_code += " " + " " + "params.params_A0,\n"
operator_code += " " + " " + "params.ref_A0.data(),\n"
operator_code += " " + " " + "params.problem_size_0.mk(),\n"
operator_code += " " + " " + "thread_idx,\n"
operator_code += " " + " " + "tb_offset_A0);\n"
operator_code += " " + "iterator_A0.add_pointer_offset(batch_idx * params.problem_size_0.m() * params.problem_size_0.k());\n\n"
for i in range (self.b2bnum):
operator_code += " " + helper.var_idx("typename B2bMma::IteratorB", i ) + helper.var_idx(" iterator_B", i) + "(\n"
operator_code += " " + " " + helper.var_idx("params.params_B", i) + ",\n"
operator_code += " " + " " + helper.var_idx("params.ref_B", i) + ".data(),\n"
operator_code += " " + " " + helper.var_idx("params.problem_size_", i) + ".kn(),\n"
operator_code += " " + " " + "thread_idx,\n"
operator_code += " " + " " + helper.var_idx("tb_offset_B", i) + ");\n"
operator_code += " " + helper.var_idx("iterator_B", i) + helper.var_idx(".add_pointer_offset(batch_idx * params.problem_size_", i) + helper.var_idx(".n() * params.problem_size_", i) + ".k());\n\n"
for i in range (self.b2bnum - 1):
operator_code += " " + helper.var_idx("typename FusedAddBiasEpilogue", i ) + helper.var_idx("::OutputTileIterator iterator_C", i) + "(\n"
operator_code += " " + " " + helper.var_idx("params.params_C", i) + ",\n"
operator_code += " " + " " + helper.var_idx("params.ref_C", i) + ".data(),\n"
operator_code += " " + " " + helper.var_idx("params.problem_size_" , i) + ".mn(),\n"
operator_code += " " + " " + "thread_idx,\n"
operator_code += " " + " " + "threadblock_offset" + ");\n"
operator_code += " " + helper.var_idx("int ref_C", i) + helper.var_idx("_stride = params.ref_C", i) + ".stride()[0];\n"
operator_code += " " + helper.var_idx("iterator_C", i) + helper.var_idx(".add_pointer_offset(batch_idx * params.problem_size_", i) + helper.var_idx(".n() * (ref_C", i) + helper.var_idx("_stride == 0 ? 1 : params.problem_size_", i) + ".m()));\n\n"
for i in range (self.b2bnum - 1):
operator_code += " " + helper.var_idx("FusedAddBiasEpilogue", i ) + helper.var_idx(" epilogue_", i ) + ";\n"
operator_code += " " + "int warp_idx = __shfl_sync(0x1f, threadIdx.x / 32, 0);\n"
operator_code += " " + "int lane_idx = threadIdx.x % 32;\n"
for i in range (self.b2bnum - 1):
operator_code += " " + helper.var_idx("OutputOp", i) + helper.var_idx(" output_op_", i) + helper.var_idx("(params.output_op_", i) + ");\n"
operator_code += " " + "B2bMma b2bMma(shared_storage.main_loop, thread_idx, warp_idx, lane_idx);\n"
operator_code += " " + "typename B2bMma::FragmentC0 src_accum;\n"
operator_code += " " + helper.var_idx("typename B2bMma::FragmentC", self.b2bnum - 1)+ " accumulators;\n"
operator_code += " " + "src_accum.clear();\n"
operator_code += " " + "accumulators.clear();\n"
operator_code += " " + "b2bMma(params.gemm_k_iterations_0, accumulators, iterator_A0, "
for i in range(self.b2bnum):
operator_code += helper.var_idx("iterator_B", i) + ", "
operator_code += "src_accum"
if self.b2bnum != 1:
operator_code += ", "
for i in range(self.b2bnum - 1):
operator_code += helper.var_idx("output_op_", i) + ", "
for i in range(self.b2bnum - 1):
operator_code += helper.var_idx("epilogue_", i) + ", "
for i in range(self.b2bnum - 1):
final = ", "
if i == self.b2bnum - 2:
final =""
operator_code += helper.var_idx("iterator_C", i) + final
operator_code += ");\n"
operator_code += " " + helper.var_idx("OutputOp", self.b2bnum - 1) + helper.var_idx(" output_op_", self.b2bnum - 1) + helper.var_idx("(params.output_op_", self.b2bnum - 1) + ");\n"
operator_code += " " + "threadblock_tile_offset = threadblock_swizzle.get_tile_offset(params.grid_tiled_shape);\n"
operator_code += " " + helper.var_idx("typename Epilogue::OutputTileIterator iterator_C", self.b2bnum - 1) + "(\n"
operator_code += " " + " " + helper.var_idx("params.params_C", self.b2bnum - 1) + ",\n"
operator_code += " " + " " + helper.var_idx("params.ref_C", self.b2bnum - 1) + ".data(),\n"
operator_code += " " + " " + helper.var_idx("params.problem_size_", self.b2bnum - 1) + ".mn(),\n"
operator_code += " " + " " + "thread_idx,\n"
operator_code += " " + " " + "threadblock_offset\n"
operator_code += " " + ");\n"
operator_code += " " + helper.var_idx("int ref_C", self.b2bnum - 1) + helper.var_idx("_stride = params.ref_C", self.b2bnum - 1) + ".stride()[0];\n"
operator_code += " " + helper.var_idx("iterator_C", self.b2bnum - 1) + helper.var_idx(".add_pointer_offset(batch_idx * params.problem_size_", self.b2bnum - 1) + helper.var_idx(".n() * (ref_C", self.b2bnum - 1) + helper.var_idx("_stride == 0 ? 1 : params.problem_size_", self.b2bnum - 1) + ".m()));\n\n"
operator_code += " " + helper.var_idx("typename Epilogue::OutputTileIterator iterator_D", self.b2bnum - 1) + "(\n"
operator_code += " " + " " + helper.var_idx("params.params_D", self.b2bnum - 1) + ",\n"
operator_code += " " + " " + helper.var_idx("params.ref_D", self.b2bnum - 1) + ".data(),\n"
operator_code += " " + " " + helper.var_idx("params.problem_size_", self.b2bnum - 1) + ".mn(),\n"
operator_code += " " + " " + "thread_idx,\n"
operator_code += " " + " " + "threadblock_offset\n"
operator_code += " " + ");\n"
operator_code += " " + helper.var_idx("iterator_D", self.b2bnum - 1) + helper.var_idx(".add_pointer_offset(batch_idx * params.problem_size_", self.b2bnum - 1) + helper.var_idx(".n() * params.problem_size_", self.b2bnum - 1) + ".m());\n\n"
operator_code += " " + "Epilogue epilogue(\n"
operator_code += " " + " " + "shared_storage.epilogue,\n"
operator_code += " " + " " + "thread_idx,\n"
operator_code += " " + " " + "warp_idx,\n"
operator_code += " " + " " + "lane_idx\n"
operator_code += " " + ");\n"
operator_code += " " + "epilogue("
operator_code += helper.var_idx("output_op_", self.b2bnum - 1) + ", "
operator_code += helper.var_idx("iterator_D", self.b2bnum - 1) + ", "
operator_code += "accumulators, "
operator_code += helper.var_idx("iterator_C", self.b2bnum - 1) + ");\n"
operator_code += "}\n"
return ctr_code + operator_code
def gen_include_header(self):
code = '''
#pragma once
#include \"{cutlass_dir}cutlass/cutlass.h\"
#include \"{cutlass_dir}cutlass/gemm/gemm.h\"
#include \"{cutlass_dir}cutlass/matrix_coord.h\"
#include \"{cutlass_dir}cutlass/semaphore.h\"
'''.format(cutlass_dir=self.cutlass_deps_root)
return code
def gen_code(self):
template_param = []
template_param.append(("typename", "B2bMma"))
template_param.append(("typename", "Epilogue"))
template_param.append(("typename", "ThreadblockSwizzle"))
template_param.append((bool, "SplitKSerial"))
code_body = ""
code_body += self.gen_using()
code_body += self.gen_operator_and_constr()
struct_code = gen_ir.gen_template_struct(self.gen_class_name, template_param, code_body)
code = self.gen_include_header()
code += gen_ir.gen_namespace("cutlass", gen_ir.gen_namespace("gemm", gen_ir.gen_namespace("kernel", struct_code)))
return self.gen_include_header() + code
class gen_kernel:
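    """Driver for kernel-level code generation: writes default_b2b_gemm.h and
    b2b_gemm.h, then triggers generation of the threadblock-level headers."""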
def __init__(self, template_param, gen_class_name, b2b_num, output_dir, cutlass_deps_root, project_root):
self.template_param = template_param
self.gen_class_name = "B2bGemm"
self.gen_kernel_name = gen_class_name + "Kernel"
        self.template_args = []
self.cutlass_deps_root = cutlass_deps_root
self.project_root = project_root
self.gen_default_b2b_gemm = gen_default_Gemm(template_param, gen_class_name, b2b_num, cutlass_deps_root, project_root)
        self.gen_Kernel = gen_Kernel(template_param, gen_class_name, b2b_num, cutlass_deps_root, project_root)
# Include gen_threadBlock
self.gen_threadBlock = gen_tb.gen_threadblock(template_param, gen_class_name, b2b_num, output_dir, cutlass_deps_root, project_root)
self.file_dir = output_dir + "/kernel/"
def gen_code(self, first_use_1stage):
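        """Write the kernel-level headers to <output_dir>/kernel/ and generate
        the threadblock-level code."""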
default_b2b_gemm = self.gen_default_b2b_gemm.gen_code()
print("[INFO]: Gen kernel code [default_b2b_gemm.h]output Dir: is ", self.file_dir)
with open(self.file_dir + "default_b2b_gemm.h", "w+") as f:
f.write(default_b2b_gemm)
        kernel = self.gen_Kernel.gen_code()
print("[INFO]: Gen kernel code [b2b_gemm.h]output Dir: is ", self.file_dir)
with open(self.file_dir + "b2b_gemm.h", "w+") as f:
f.write(kernel)
# Call code to gen threadblock
self.gen_threadBlock.gen_code(first_use_1stage)
| cutlass-main | examples/44_multi_gemm_ir_and_codegen/ir_gen/gen_kernel.py |
#################################################################################################
#
# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
import gen_ir
import helper
class gen_default_b2b_mma:
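    """Generates the DefaultB2bMma header: per-GEMM MmaCore definitions, tile
    and fragment iterators, fused bias/activation epilogues and the final
    threadblock-scoped B2bMma, specialized for row-major output."""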
    def __init__(self, template_param, gen_class_name, b2b_num, cutlass_deps_root, project_root):
self.gen_class_name = "DefaultB2bMma"
self.template_param = template_param
self.b2b_num = b2b_num
self.cutlass_deps_root = cutlass_deps_root
self.project_root = project_root
def gen_include_header(self):
code = '''
/* Auto Generated code - Do not edit.*/
#pragma once
#include \"{cutlass_dir}cutlass/cutlass.h\"
#include \"{cutlass_dir}cutlass/numeric_types.h\"
#include \"{cutlass_dir}cutlass/arch/arch.h\"
#include \"{cutlass_dir}cutlass/transform/threadblock/predicated_tile_iterator.h\"
#include \"{cutlass_dir}cutlass/transform/threadblock/predicated_tile_iterator_2dthreadtile.h\"
#include \"{cutlass_dir}cutlass/gemm/threadblock/default_mma_core_sm70.h\"
#include \"{cutlass_dir}cutlass/gemm/threadblock/default_mma_core_sm75.h\"
#include \"{cutlass_dir}cutlass/gemm/threadblock/default_mma_core_sm80.h\"
#include \"../threadblock/b2b_mma_pipelined.h\"
#include \"../../fixed_impl/epilogue/threadblock/fused_bias_act_epilogue.h\"
#include \"../../fixed_impl/epilogue/threadblock/default_bias_act_epilogue_tensor_op.h\"
#include \"../../fixed_impl/gemm/warp/mma_tensor_op_fragment_iterator_without_output_op.h\"
'''.format(cutlass_dir=self.cutlass_deps_root)
return code
def gen_using_MmaCore(self, stage):
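        """Emit one `using MmaCore<i> = DefaultMmaCore<...>` alias per GEMM in
        the fused chain."""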
threadBlockShape = "ThreadblockShape"
warpShape = "WarpShape"
        instructionShape = "InstructionShape"
Mma_typename = "typename cutlass::gemm::threadblock::DefaultMmaCore"
gen_code = ""
for i in range(self.b2b_num):
code_using = "using MmaCore" + str(i)
gen_code += code_using + " = " + gen_ir.gen_declare_template_struct(Mma_typename, \
                helper.var_idx(threadBlockShape, i), helper.var_idx(warpShape, i), instructionShape, \
"ElementA", "LayoutA", \
helper.var_idx("ElementB", i), helper.var_idx("LayoutB", i), \
helper.var_idx("ElementAccumulator", i), "layout::RowMajor", \
"OperatorClass", str(stage), "Operator")
return gen_code
def gen_using_FusedAddBiasEpilogue(self):
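        """Emit FusedAddBiasEpilogue<i> aliases for every GEMM except the last,
        which goes through the regular epilogue."""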
gen_code = ""
for i in range(self.b2b_num - 1):
code_using = helper.var_idx("using FusedAddBiasEpilogue", i)
epilogue_name = "typename cutlass::epilogue::threadblock::DefaultFusedBiasActEpilogueTensorOp"
template_args = helper.var_idx("<ThreadblockShape", i) + helper.var_idx(",typename MmaCore", i) + helper.var_idx("::MmaPolicy::Operator, 1, EpilogueOutputOp", i) + ", 2>::Epilogue"
gen_code += code_using + " = " + epilogue_name + template_args + ";\n"
return gen_code
def gen_using_Iterator(self):
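        """Emit predicated tile iterator aliases for operand A of the first GEMM
        and operand B of every GEMM."""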
code_using = "using IteratorA0"
iterator_typename = "cutlass::transform::threadblock::PredicatedTileIterator"
MmaCore = "MmaCore0"
matrix_shape = "cutlass::MatrixShape<" + MmaCore + "::Shape::kM, " + MmaCore + "::Shape::kK>"
iterator_map = "typename " + MmaCore + "::IteratorThreadMapA"
gen_code = code_using + " = " + gen_ir.gen_declare_template_struct(iterator_typename, \
matrix_shape, "ElementA", "LayoutA", "1", iterator_map, "AlignmentA_")
for i in range(self.b2b_num):
code_using = "using IteratorB" + str(i)
iterator_typename = "cutlass::transform::threadblock::PredicatedTileIterator"
MmaCore = "MmaCore" + str(i)
matrix_shape = "cutlass::MatrixShape<" + MmaCore + "::Shape::kK, " + MmaCore + "::Shape::kN>"
iterator_map = "typename " + MmaCore + "::IteratorThreadMapB"
gen_code += code_using + " = " + gen_ir.gen_declare_template_struct(iterator_typename, \
matrix_shape, helper.var_idx("ElementB", i), helper.var_idx("LayoutB", i), "0", iterator_map, "AlignmentB_")
return gen_code
def gen_fragment_iterator(self):
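        """Emit warp-level fragment iterators that feed the accumulator of
        GEMM i-1 in as the A operand of GEMM i."""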
gen_code = "using AccumulatorLayout = cutlass::layout::ColumnMajor;\n"
for i in range(1, self.b2b_num):
code_using = "using FragmentIteratorA" + str(i)
iterator_typename = "cutlass::gemm::warp::MmaTensorOpPureFragmentIterator"
curr_MmaCore = "MmaCore" + str(i)
prev_MmaCore = "MmaCore" + str(i - 1)
Matrix_shape_curr = "cutlass::MatrixShape<" + curr_MmaCore + "::WarpShape::kM, " + curr_MmaCore + "::InstructionShape::kK>"
Matrix_shape_prev = "cutlass::MatrixShape<" + prev_MmaCore + "::WarpShape::kM, " + prev_MmaCore + "::WarpShape::kN>"
Curr_shape_kK = curr_MmaCore + "::Shape::kK"
gen_code += code_using + " = " + gen_ir.gen_declare_template_struct(iterator_typename, \
Matrix_shape_curr, Matrix_shape_prev, Curr_shape_kK, \
helper.var_idx("ElementAccumulator", i-1), "ElementA", \
"AccumulatorLayout", "InstructionShape_", "true")
return gen_code
def gen_threadblockmma(self):
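        """Assemble the full B2bMmaPipelined template argument list (shapes,
        iterators, epilogues, policies and stage counts) and emit the
        ThreadblockB2bMma alias."""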
code_using = "using ThreadblockB2bMma"
iterator_typename = "cutlass::gemm::threadblock::B2bMmaPipelined"
MmaPipelined_param_Mma0_shape = "typename MmaCore0::Shape"
MmaPipelined_param_Mma0_iteratorA = "IteratorA0"
MmaPipelined_param_Mma0_smemIteratorA = "typename MmaCore0::SmemIteratorA"
MmaPipelined_param_Mma0_iteratorB = "IteratorB0"
MmaPipelined_param_Mma0_smemIteratorB = "typename MmaCore0::SmemIteratorB"
MmaPipelined_param_list = MmaPipelined_param_Mma0_shape + ", " + MmaPipelined_param_Mma0_iteratorA + ", " + MmaPipelined_param_Mma0_smemIteratorA + ", " + MmaPipelined_param_Mma0_iteratorB + ", " + MmaPipelined_param_Mma0_smemIteratorB + ", "
for i in range(1, self.b2b_num):
MmaPipelined_param_Mma_shape = "typename MmaCore" + str(i) + "::Shape"
MmaPipelined_param_Mma_iteratorA = "FragmentIteratorA" + str(i)
MmaPipelined_param_Mma_iteratorB = "IteratorB" + str(i)
MmaPipelined_param_Mma_smemIteratorB = "typename MmaCore" + str(i) + "::SmemIteratorB"
MmaPipelined_param_list += MmaPipelined_param_Mma_shape + ", " + MmaPipelined_param_Mma_iteratorA + ", " + MmaPipelined_param_Mma_iteratorB + ", " + MmaPipelined_param_Mma_smemIteratorB + ", "
MmaPipelined_param_list += "ElementAccumulator0, layout::RowMajor, "
for i in range(self.b2b_num - 1):
epilogue_name = "EpilogueOutputOp" + str(i)
MmaPipelined_param_list += epilogue_name + ", "
for i in range(self.b2b_num - 1):
epilogue_name = "FusedAddBiasEpilogue" + str(i)
MmaPipelined_param_list += epilogue_name + ", "
for i in range(self.b2b_num):
MmaPolicy = "typename MmaCore" + str(i) + "::MmaPolicy"
MmaPipelined_param_list += MmaPolicy + ", "
        for i in range(self.b2b_num):
            MmaStage = helper.var_idx("Stages", i)
            final = ", "
            if i == self.b2b_num - 1:
                final = ""
            MmaPipelined_param_list += MmaStage + final
gen_code = code_using + " = " + gen_ir.gen_declare_template_struct(iterator_typename, MmaPipelined_param_list)
return gen_code
def gen_code(self):
gen_using = ''
# Generate default template struct
gen_code = gen_ir.gen_template_struct(self.gen_class_name, self.template_param, "", speicalized = None, set_default=False)
# Generate specialized template struct
mmacore_codebody = self.gen_using_MmaCore(2)
iterator_codebody = self.gen_using_Iterator()
fragment_iterator_codebody = self.gen_fragment_iterator()
epilogue_iterator_codebody = self.gen_using_FusedAddBiasEpilogue()
threadBlockMma = self.gen_threadblockmma()
specialized_code = mmacore_codebody + iterator_codebody + fragment_iterator_codebody + epilogue_iterator_codebody + threadBlockMma
# Specialize layout C -> cutlass::layout::RowMajor
rtn_template_args, speicalized_template_args = gen_ir.filtered_param(self.template_param, [ ('LayoutD', "cutlass::layout::RowMajor")], keep_= True)
gen_speical_code = gen_ir.gen_template_struct(self.gen_class_name, rtn_template_args, specialized_code, speicalized = speicalized_template_args, set_default=False)
code = gen_ir.gen_namespace("cutlass", gen_ir.gen_namespace("gemm", gen_ir.gen_namespace("threadblock", gen_code + gen_speical_code)))
return self.gen_include_header() + code
class gen_b2b_mma_pipelined:
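    """Generates the B2bMmaPipelined threadblock class: a 1- or 2-stage
    software-pipelined mainloop for the first GEMM followed by fused mainloops
    whose A operands are read from the previous GEMM's accumulators."""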
def __init__(self, template_param, gen_class_name, b2b_num, cutlass_deps_root, project_root):
self.gen_class_name = "B2bMmaPipelined"
self.template_param = template_param
self.b2b_num = b2b_num
self.cutlass_deps_root = cutlass_deps_root
self.project_root = project_root
def gen_include_header(self):
code = '''
#pragma once
#include \"{cutlass_dir}cutlass/cutlass.h\"
#include \"{cutlass_dir}cutlass/array.h\"
#include \"{cutlass_dir}cutlass/aligned_buffer.h\"
#include \"{cutlass_dir}cutlass/numeric_conversion.h\"
#include \"{cutlass_dir}cutlass/numeric_types.h\"
#include \"{cutlass_dir}cutlass/matrix_shape.h\"
#include \"{cutlass_dir}cutlass/gemm/gemm.h\"
#include \"{cutlass_dir}cutlass/gemm/warp/mma_tensor_op_fragment_iterator.h\"
#include \"../threadblock/b2b_mma_base.h\"\n'''.format(cutlass_dir = self.cutlass_deps_root)
return code
def gen_using(self):
code_using = "using FragmentA0 = typename IteratorA0::Fragment;\n"
code_using += "using Base = B2bMmaBase<"
for i in range(self.b2b_num):
code_using += helper.var_idx("Shape", i) + "_, "
for i in range(self.b2b_num):
code_using += helper.var_idx("Policy", i) + "_, "
for i in range(self.b2b_num):
code_using += helper.var_idx("Stage", i) + "_, "
code_using = code_using[: -2] + ">;\n"
for i in range(self.b2b_num):
code_using += helper.var_idx("using FragmentB", i) + helper.var_idx(" = typename IteratorB", i) + "::Fragment;\n"
code_using += helper.var_idx("using FragmentC", i) + helper.var_idx(" = typename Policy", i) + "::Operator::FragmentC;\n"
code_using += helper.var_idx("using Operator", i) + helper.var_idx(" = typename Policy", i) + "::Operator;\n"
for i in range(self.b2b_num - 1):
code_using += helper.var_idx("using IteratorC", i) + helper.var_idx(" = typename FusedAddBiasEpilogue", i) + "::OutputTileIterator;\n"
code_using += "using ArchTag = typename Policy0::Operator::ArchTag;\n"
code_using += "static ComplexTransform const kTransformA0 = Operator0::kTransformA;\n"
for i in range(self.b2b_num):
code_using += helper.var_idx("static ComplexTransform const kTransformB", i) + helper.var_idx(" = Operator", i) + "::kTransformB;\n"
code_using += "private:\n"
code_using += "using WarpFragmentA0 = typename Operator0::FragmentA;\n"
code_using += "using WarpFragmentB0 = typename Operator0::FragmentB;\n"
for i in range(1, self.b2b_num):
code_using += helper.var_idx("using WarpFragmentA", i) + helper.var_idx(" = typename FragmentIteratorA", i) + "::Fragment;\n"
code_using += helper.var_idx("using WarpFragmentB", i) + helper.var_idx(" = typename Operator", i) + "::FragmentB;\n"
code_using += "protected:\n"
code_using += "SmemIteratorA0 smem_iterator_A_;\n"
for i in range(self.b2b_num):
code_using += helper.var_idx("SmemIteratorB", i) + helper.var_idx(" smem_iterator_B", i) + "_;\n"
return code_using
def gen_operator(self, first_use_1stage = False):
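        """Generate the body of operator(): the parameter list, the first GEMM's
        mainloop (1-stage or 2-stage variant) and the fused mainloops of the
        remaining GEMMs."""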
code = ""
def gen_operator_param(b2b_num):
param_code = ""
param_code += "int gemm_k_iterations_0,\n"
param_code += helper.var_idx("FragmentC", b2b_num-1) + helper.var_idx(" &accum", b2b_num-1) + ",\n"
param_code += "IteratorA0 iterator_A,\n"
for i in range(b2b_num):
param_code += helper.var_idx("IteratorB", i) + " " + helper.var_idx("iterator_B", i) + ",\n"
param_code += "FragmentC0 const &src_accum, \n"
for i in range(b2b_num - 1):
param_code += helper.var_idx("OutputOp", i) + " " + helper.var_idx("output_op_", i) + ",\n"
for i in range(b2b_num - 1):
param_code += helper.var_idx("FusedAddBiasEpilogue", i) + " " + helper.var_idx("epilogue_", i) + ",\n"
for i in range(b2b_num - 1):
param_code += helper.var_idx("IteratorC", i) + " " + helper.var_idx("iterator_C", i) + ",\n"
param_code += "TransformA0 transform_A0 = TransformA0(), \n"
for i in range(b2b_num):
final = "(),\n"
if i == b2b_num - 1:
final = "()\n"
param_code += helper.var_idx("TransformB", i) + " " + helper.var_idx("transform_B", i) + " = " +helper.var_idx("TransformB", i) + final
return param_code
def gen_first_gemm_1stage(b2b_num):
accu_code = " FragmentC0 accum0 = src_accum;\n"
if b2b_num == 1:
accu_code = " accum0 = src_accum;\n"
code ="\
\n\
FragmentA0 tb_frag_A;\n\
FragmentB0 tb_frag_B0;\n\
\n\
int smem_write_stage_idx = 1;\n\
\n\
tb_frag_A.clear();\n\
tb_frag_B0.clear();\n\
\n\
// The last kblock is loaded in the prolog\n\
iterator_A.load(tb_frag_A);\n\
iterator_B0.load(tb_frag_B0);\n\
\n\
++iterator_A;\n\
++iterator_B0;\n\
\n\
WarpFragmentA0 warp_frag_A0;\n\
WarpFragmentB0 warp_frag_B0;\n\
\n\
Operator0 warp_mma0;\n\
\n\
// Avoid reading out of bounds\n\
if (gemm_k_iterations_0 <= 1) {\n\
iterator_A.clear_mask();\n\
iterator_B0.clear_mask();\n\
}\n\
\n\
// Issue loads during the first warp-level matrix multiply-add *AFTER* issuing \n\
// shared memory loads (which have the tightest latency requirement).\n\
\n\
//\n\
// Mainloop\n\
//\n\
\n\
// Note: The main loop does not support Base::WarpGemmIterations == 2.\n\
CUTLASS_GEMM_LOOP\n\
for (; gemm_k_iterations_0 > 0; --gemm_k_iterations_0) {\n\
\n\
this->smem_iterator_A_.store(tb_frag_A);\n\
this->smem_iterator_B0_.store(tb_frag_B0);\n\
\n\
__syncthreads();\n\
//\n\
// Loop over GEMM K dimension\n\
//\n\
\n\
CUTLASS_PRAGMA_UNROLL\n\
for (int warp_mma_k = 0; warp_mma_k < Base::kWarpGemmIterations0; ++warp_mma_k) {\n\
\n\
// Load warp-level tiles from shared memory, wrapping to k offset if this is the last group\n\
// as the case may be.\n\
\n\
this->warp_tile_iterator_A0_.set_kgroup_index(warp_mma_k % Base::kWarpGemmIterations0);\n\
this->warp_tile_iterator_B0_.set_kgroup_index(warp_mma_k % Base::kWarpGemmIterations0);\n\
\n\
this->warp_tile_iterator_A0_.load(warp_frag_A0);\n\
this->warp_tile_iterator_B0_.load(warp_frag_B0);\n\
\n\
++this->warp_tile_iterator_A0_;\n\
++this->warp_tile_iterator_B0_;\n\
\n\
warp_mma0(accum0, warp_frag_A0, warp_frag_B0, accum0);\n\
}\n\
this->warp_tile_iterator_A0_.add_tile_offset({0, -Policy0::kPartitionsK * Base::kWarpGemmIterations0});\n\
this->warp_tile_iterator_B0_.add_tile_offset({-Policy0::kPartitionsK * Base::kWarpGemmIterations0, 0});\n\
\n\
__syncthreads();\n\
iterator_A.load(tb_frag_A);\n\
iterator_B0.load(tb_frag_B0);\n\
\n\
++iterator_A;\n\
++iterator_B0;\n\
\n\
if(gemm_k_iterations_0 <= 2) {\n\
iterator_A.clear_mask();\n\
iterator_B0.clear_mask();\n\
}\n\
}\n"
return accu_code + code
def gen_first_gemm_2stage(b2b_num):
accu_code = " FragmentC0 accum0 = src_accum;\n"
if b2b_num == 1:
accu_code = " accum0 = src_accum;\n"
code ="\
\n\
FragmentA0 tb_frag_A;\n\
FragmentB0 tb_frag_B0;\n\
\n\
tb_frag_A.clear();\n\
tb_frag_B0.clear();\n\
\n\
// The last kblock is loaded in the prolog\n\
iterator_A.load(tb_frag_A);\n\
iterator_B0.load(tb_frag_B0);\n\
\n\
++iterator_A;\n\
++iterator_B0;\n\
\n\
this->smem_iterator_A_.store(tb_frag_A);\n\
this->smem_iterator_B0_.store(tb_frag_B0);\n\
\n\
++this->smem_iterator_A_;\n\
++this->smem_iterator_B0_;\n\
\n\
__syncthreads();\n\
\n\
// Pair of fragments used to overlap shared memory loads and math instructions\n\
WarpFragmentA0 warp_frag_A0[2];\n\
WarpFragmentB0 warp_frag_B0[2];\n\
\n\
this->warp_tile_iterator_A0_.set_kgroup_index(0);\n\
this->warp_tile_iterator_B0_.set_kgroup_index(0);\n\
\n\
this->warp_tile_iterator_A0_.load(warp_frag_A0[0]);\n\
this->warp_tile_iterator_B0_.load(warp_frag_B0[0]);\n\
\n\
++this->warp_tile_iterator_A0_;\n\
++this->warp_tile_iterator_B0_;\n\
\n\
Operator0 warp_mma0;\n\
\n\
int smem_write_stage_idx = 1;\n\
\n\
// Avoid reading out of bounds\n\
if (gemm_k_iterations_0 <= 1) {\n\
iterator_A.clear_mask();\n\
iterator_B0.clear_mask();\n\
}\n\
\n\
// Issue loads during the first warp-level matrix multiply-add *AFTER* issuing \n\
// shared memory loads (which have the tightest latency requirement).\n\
iterator_A.load(tb_frag_A);\n\
\n\
//\n\
// Mainloop\n\
//\n\
\n\
// Note: The main loop does not support Base::WarpGemmIterations == 2.\n\
CUTLASS_GEMM_LOOP\n\
for (; gemm_k_iterations_0 > 0; --gemm_k_iterations_0) {\n\
\n\
//\n\
// Loop over GEMM K dimension\n\
//\n\
\n\
CUTLASS_PRAGMA_UNROLL\n\
for (int warp_mma_k = 0; warp_mma_k < Base::kWarpGemmIterations0; ++warp_mma_k) {\n\
\n\
// Load warp-level tiles from shared memory, wrapping to k offset if this is the last group\n\
// as the case may be.\n\
\n\
if (warp_mma_k == Base::kWarpGemmIterations0 - 1) {\n\
\n\
// Write fragments to shared memory\n\
this->smem_iterator_A_.store(tb_frag_A);\n\
\n\
this->smem_iterator_B0_.store(tb_frag_B0);\n\
\n\
__syncthreads();\n\
\n\
// Issue loads during the first warp-level matrix multiply-add *AFTER* issuing \n\
// shared memory loads (which have the tightest latency requirement).\n\
iterator_A.load(tb_frag_A);\n\
\n\
++this->smem_iterator_B0_;\n\
++this->smem_iterator_A_;\n\
\n\
\n\
// Add negative offsets to return iterators to the 'start' of the circular buffer in shared memory\n\
if (smem_write_stage_idx == 1) {\n\
this->smem_iterator_A_.add_tile_offset({0, -Base::Stage0});\n\
this->smem_iterator_B0_.add_tile_offset({-Base::Stage0, 0});\n\
}\n\
else {\n\
this->warp_tile_iterator_A0_.add_tile_offset(\n\
{0, -Base::Stage0 * Policy0::kPartitionsK * Base::kWarpGemmIterations0});\n\
this->warp_tile_iterator_B0_.add_tile_offset(\n\
{-Base::Stage0 * Policy0::kPartitionsK * Base::kWarpGemmIterations0,\n\
0});\n\
}\n\
\n\
smem_write_stage_idx ^= 1;\n\
}\n\
\n\
this->warp_tile_iterator_A0_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations0);\n\
this->warp_tile_iterator_B0_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations0);\n\
\n\
this->warp_tile_iterator_A0_.load(warp_frag_A0[(warp_mma_k + 1) % 2]);\n\
this->warp_tile_iterator_B0_.load(warp_frag_B0[(warp_mma_k + 1) % 2]);\n\
\n\
++this->warp_tile_iterator_A0_;\n\
++this->warp_tile_iterator_B0_;\n\
\n\
if (warp_mma_k == 0) {\n\
\n\
iterator_B0.load(tb_frag_B0);\n\
\n\
++iterator_A;\n\
++iterator_B0;\n\
\n\
// Avoid reading out of bounds if this was the last loop iteration\n\
if (gemm_k_iterations_0 <= 2) {\n\
iterator_A.clear_mask();\n\
iterator_B0.clear_mask();\n\
}\n\
}\n\
\n\
warp_mma0(accum0, warp_frag_A0[warp_mma_k % 2], warp_frag_B0[warp_mma_k % 2], accum0);\n\
}\n\
}\n"
return accu_code + code
def gen_other_gemms_2stage(b2b_num):
code = ""
            def gemm_template(id):
code = "// " + str(id + 1) + " Gemm"
code += " /// Iterator to load a warp-scoped tile of A1 operand from intermediate accumulator tile\n"
code += " " + helper.var_idx("FragmentC", id - 1) + helper.var_idx(" after_epilogue_accu", id - 1) + ";\n"
code += " " + helper.var_idx("epilogue_", id - 1) + helper.var_idx("(output_op_", id - 1) + helper.var_idx(", accum", id - 1) \
+ helper.var_idx(", after_epilogue_accu", id - 1) + helper.var_idx(", iterator_C", id - 1) +");\n"
# FragmentIteratorA1 warp_tile_iterator_A1_(accum0);
code += " " + helper.var_idx("FragmentIteratorA", id) + helper.var_idx(" warp_tile_iterator_A", id) +"_(" + helper.var_idx("after_epilogue_accu", id - 1) + ");\n"
# FragmentB1 tb_frag_B1;
code += " " + helper.var_idx("FragmentB", id) + " " + helper.var_idx("tb_frag_B", id) + ";\n"
# tb_frag_B1.clear();
code += " " + helper.var_idx("tb_frag_B", id) + ".clear();\n"
# iterator_B1.load(tb_frag_B1);
code += " " + helper.var_idx("iterator_B", id) + ".load(" + helper.var_idx("tb_frag_B", id) + ");\n"
# ++iterator_B1;
code += " " + "++" + helper.var_idx("iterator_B", id) + ";\n"
# this->smem_iterator_B1_.store(tb_frag_B1);
code += " " + helper.var_idx("this->smem_iterator_B", id) + "_.store(" + helper.var_idx("tb_frag_B", id) + ");\n"
# ++this->smem_iterator_B1_;
code += " " + helper.var_idx("++this->smem_iterator_B", id) + "_;\n"
# __syncthreads();
code += " " + "__syncthreads();\n"
# WarpFragmentA1 warp_frag_A1[2];
code += " " + helper.var_idx("WarpFragmentA", id) + helper.var_idx(" warp_frag_A", id) + "[2];\n"
# WarpFragmentB1 warp_frag_B1[2];
code += " " + helper.var_idx("WarpFragmentB", id) + helper.var_idx(" warp_frag_B", id) + "[2];\n"
# this->warp_tile_iterator_B1_.set_kgroup_index(0);
code += " " + helper.var_idx("this->warp_tile_iterator_B", id) + "_.set_kgroup_index(0);\n"
# warp_tile_iterator_A1_.load(warp_frag_A1[0], output_op_0);
code += " " + helper.var_idx("warp_tile_iterator_A", id) + helper.var_idx("_.load(warp_frag_A", id) + "[0]);\n"
# this->warp_tile_iterator_B1_.load(warp_frag_B1[0]);
code += " " + helper.var_idx("this->warp_tile_iterator_B", id) + helper.var_idx("_.load(warp_frag_B", id) + "[0]);\n"
# ++warp_tile_iterator_A1_;
code += " " + helper.var_idx("++warp_tile_iterator_A", id) + "_;\n"
# ++this->warp_tile_iterator_B1_;
code += " " + helper.var_idx("++this->warp_tile_iterator_B", id) + "_;\n"
# Operator1 warp_mma1;
code += " " + helper.var_idx("Operator", id) + " " + helper.var_idx("warp_mma", id) + ";\n"
# smem_write_stage_idx = 1;
code += " " + "smem_write_stage_idx = 1;\n"
# int gemm_k_iterations_1 = FragmentIteratorA1::Policy::kIterations / Base::kWarpGemmIterations1;
code += " " + helper.var_idx("int gemm_k_iterations_", id) + " = " + helper.var_idx("FragmentIteratorA", id) + helper.var_idx("::Policy::kIterations / Base::kWarpGemmIterations", id) +";\n"
# if (gemm_k_iterations_1 <= 1) {
# iterator_B1.clear_mask();
# }
code += " " + "if (" + helper.var_idx("gemm_k_iterations_", id) + " <= 1 ){\n" \
+ " " + " " + helper.var_idx("iterator_B", id) + ".clear_mask();\n" \
+ " " +"}\n"
# CUTLASS_PRAGMA_UNROLL
code += " " + "CUTLASS_PRAGMA_UNROLL\n"
# for (; gemm_k_iterations_1 > 0; --gemm_k_iterations_1) {
code += " " + helper.var_idx("for (; gemm_k_iterations_", id) + helper.var_idx(" > 0; --gemm_k_iterations_", id) + ") {\n"
# CUTLASS_PRAGMA_UNROLL
code += " " + " " + "CUTLASS_PRAGMA_UNROLL\n"
# for (int warp_mma_k = 0; warp_mma_k < Base::kWarpGemmIterations1; ++warp_mma_k) {
code += " " + " " + helper.var_idx("for (int warp_mma_k = 0; warp_mma_k < Base::kWarpGemmIterations", id) + "; ++warp_mma_k) {\n"
# if (warp_mma_k == Base::kWarpGemmIterations1 - 1) {
code += " " + " " + " " + helper.var_idx("if (warp_mma_k == Base::kWarpGemmIterations", id) + " - 1) {\n"
# this->smem_iterator_B1_.store(tb_frag_B1);
code += " " + " " + " " + " " + helper.var_idx(" this->smem_iterator_B", id) + helper.var_idx("_.store(tb_frag_B", id) + ");\n"
# __syncthreads();
code += " " + " " + " " + " " + "__syncthreads();\n"
# ++smem_iterator_B1_;
code += " " + " " + " " + " " + helper.var_idx(" ++smem_iterator_B", id) + "_;\n"
# if (smem_write_stage_idx == 1) {
# smem_iterator_B1_.add_tile_offset({-Base::Stage, 0});
# }
code += " " + " " + " " + " " + "if ( smem_write_stage_idx == 1 ) {\n" \
+ " " + " " + " " + " " + " " + helper.var_idx("smem_iterator_B", id) + helper.var_idx("_.add_tile_offset({-Base::Stage", i) + ", 0});\n" \
+ " " + " " + " " + " " +"}\n"
# else {
# this->warp_tile_iterator_B1_.add_tile_offset(
# {-Base::Stage * Policy1::kPartitionsK *
# Base::kWarpGemmIterations1,
# 0});
# }
code += " " + " " + " " + " " + "else {\n" \
+ " " + " " + " " + " " + " " + helper.var_idx("this->warp_tile_iterator_B", id) + "_.add_tile_offset(\n" \
+ " " + " " + " " + " " + " " + helper.var_idx("{-Base::Stage", id) + helper.var_idx(" * Policy", id) + "::kPartitionsK *\n" \
+ " " + " " + " " + " " + " " + helper.var_idx("Base::kWarpGemmIterations", id) + ",\n" \
+ " " + " " + " " + " " + " " + "0});\n" \
+ " " + " " + " " + " " + "}\n"
# smem_write_stage_idx ^= 1;
# }
code += " " + " " + " " + " " + "smem_write_stage_idx ^= 1;\n" \
+ " " + " " + " " + "}\n"
# this->warp_tile_iterator_B1_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations1);
code += " " + " " + " " + helper.var_idx("this->warp_tile_iterator_B", id) + helper.var_idx("_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations", id) + ");\n"
# warp_tile_iterator_A1_.load(warp_frag_A1[(warp_mma_k + 1) % 2], output_op_0);
code += " " + " " + " " + helper.var_idx("warp_tile_iterator_A", id) + helper.var_idx("_.load(warp_frag_A", id) + "[(warp_mma_k + 1) % 2]);\n"
# this->warp_tile_iterator_B1_.load(warp_frag_B1[(warp_mma_k + 1) % 2]);
code += " " + " " + " " + helper.var_idx("this->warp_tile_iterator_B", id) + helper.var_idx("_.load(warp_frag_B", id) + "[(warp_mma_k + 1) % 2]);\n"
# ++warp_tile_iterator_A1_;
code += " " + " " + " " + helper.var_idx("++warp_tile_iterator_A", id) + "_;\n"
# ++this->warp_tile_iterator_B1_;
code += " " + " " + " " + helper.var_idx("++this->warp_tile_iterator_B", id) + "_;\n"
# if (warp_mma_k == 0) {
# iterator_B1.load(tb_frag_B1);
# ++iterator_B1;
# if (gemm_k_iterations_1 <= 2) {
# iterator_B1.clear_mask();
# }
# }
code += " " + " " + " " + " if (warp_mma_k == 0) {\n" \
+ " " + " " + " " + " " + helper.var_idx("iterator_B", id) + helper.var_idx(".load(tb_frag_B", id) + ");\n" \
+ " " + " " + " " + " " + helper.var_idx("++iterator_B", id) +";\n" \
+ " " + " " + " " + " " + helper.var_idx("if (gemm_k_iterations_", id) +" <= 2) {\n" \
+ " " + " " + " " + " " + " " + helper.var_idx("iterator_B", id) + ".clear_mask();\n" \
+ " " + " " + " " + " " + "}\n" \
+ " " + " " + " " + "}\n"
# warp_mma1(accum, warp_frag_A1[warp_mma_k % 2], warp_frag_B1[warp_mma_k % 2], accum);
# }
# }
code += " " + " " + " " + helper.var_idx("warp_mma", id) + helper.var_idx("(accum", id) + helper.var_idx(", warp_frag_A", id) + helper.var_idx("[warp_mma_k % 2], warp_frag_B", id) + helper.var_idx("[warp_mma_k % 2], accum", id) + ");\n" \
+ " " + " " + "}\n" \
+ " " + "}\n\n\n"
return code
for i in range (1, b2b_num):
clear_accu = ""
if i != b2b_num - 1:
clear_accu = " " + helper.var_idx("FragmentC", i) + helper.var_idx(" accum", i) +";\n"
clear_accu += " " + helper.var_idx("accum", i) +".clear();\n"
                code += clear_accu + gemm_template(i)
return code
operator_code = " CUTLASS_DEVICE\n\
void operator()(\n " + gen_operator_param(self.b2b_num) + ") {\n"
if first_use_1stage:
operator_code += gen_first_gemm_1stage(self.b2b_num)
else:
operator_code += gen_first_gemm_2stage(self.b2b_num)
operator_code += gen_other_gemms_2stage(self.b2b_num) + "}\n"
return operator_code
def gen_construct_func(self):
name = self.gen_class_name
func_code = "CUTLASS_DEVICE\n"
func_code += name + "(\n" \
+ " " + "typename Base::B2bMmaSharedStorage &shared_storage,\n" \
+ " " + "int thread_idx,\n" \
+ " " + "int warp_idx,\n" \
+ " " + "int lane_idx\n" \
+ "):\n"
func_code += " " + "Base(shared_storage, thread_idx, warp_idx, lane_idx),\n" \
+ " " + "smem_iterator_A_(shared_storage.sharedStorage0.operand_A_ref(), thread_idx),\n"
for i in range(self.b2b_num):
final = ",\n"
if i == self.b2b_num - 1:
final = " {\n"
func_code += helper.var_idx("smem_iterator_B", i) + helper.var_idx("_(shared_storage.sharedStorage", i) +".operand_B_ref(), thread_idx)" + final
func_code += " " + "int warp_idx_mn = warp_idx % (Base::WarpCount0::kM * Base::WarpCount0::kN);\n"
func_code += " " + "int warp_idx_k = warp_idx / (Base::WarpCount0::kM * Base::WarpCount0::kN);\n"
func_code += " " + "int warp_idx_m = warp_idx_mn % Base::WarpCount0::kM;\n"
func_code += " " + "int warp_idx_n = warp_idx_mn / Base::WarpCount0::kM;\n"
for i in range(self.b2b_num):
func_code += " " + helper.var_idx("int tile_offset_k", i) + helper.var_idx(" = Base::kWarpGemmIterations", i) + " * warp_idx_k;\n"
func_code += " " + "this->warp_tile_iterator_A0_.add_tile_offset({warp_idx_m, tile_offset_k0});\n"
for i in range(self.b2b_num):
func_code += " " + helper.var_idx("this->warp_tile_iterator_B", i) + helper.var_idx("_.add_tile_offset({tile_offset_k", i) + ", warp_idx_n});\n"
func_code += "}\n"
return func_code
def gen_member_func(self, first_use_1stage):
code = "public:\n"
code += self.gen_operator(first_use_1stage)
code += self.gen_construct_func()
return code
def gen_code(self, first_use_1stage):
def gen_template_args(b2b_num):
template_param = []
template_param.append(("typename", "Shape0"))
template_param.append(("typename", "IteratorA0"))
template_param.append(("typename", "SmemIteratorA0"))
template_param.append(("typename", "IteratorB0"))
template_param.append(("typename", "SmemIteratorB0"))
for i in range(1, b2b_num):
template_param.append(("typename", helper.var_idx("Shape", i)))
template_param.append(("typename", helper.var_idx("FragmentIteratorA", i)))
template_param.append(("typename", helper.var_idx("IteratorB", i)))
template_param.append(("typename", helper.var_idx("SmemIteratorB", i)))
template_param.append(("typename", "ElementC"))
template_param.append(("typename", "LayoutC"))
for i in range(0, b2b_num - 1):
template_param.append(("typename", helper.var_idx("OutputOp", i)))
for i in range(0, b2b_num - 1):
template_param.append(("typename", helper.var_idx("FusedAddBiasEpilogue", i)))
for i in range(0, b2b_num):
template_param.append(("typename", helper.var_idx("Policy", i)))
for i in range(0, b2b_num):
template_param.append((int, helper.var_idx("Stage", i)))
template_param.append(("typename","TransformA0", "NumericArrayConverter<typename SmemIteratorA0_::Element, typename IteratorA0_::Element, IteratorA0_::Fragment::kElements>"))
for i in range(0, b2b_num):
cvtr = helper.var_idx("NumericArrayConverter<typename SmemIteratorB", i) + helper.var_idx("_::Element, typename IteratorB", i) + helper.var_idx("_::Element, IteratorB", i) + "_::Fragment::kElements>"
template_param.append(("typename", helper.var_idx("TransformB", i), cvtr))
template_param.append(("typename", "Enable", "bool"))
return template_param
template_param = gen_template_args(self.b2b_num)
inheritance_code = "public B2bMmaBase<"
for i in range(self.b2b_num):
inheritance_code += helper.var_idx("Shape", i) + "_, "
for i in range(self.b2b_num):
inheritance_code += helper.var_idx("Policy", i) + "_, "
for i in range(self.b2b_num - 1):
inheritance_code += helper.var_idx("Stage", i) + "_, "
inheritance_code += helper.var_idx("Stage", self.b2b_num - 1) + "_"
inheritance_code += ">"
code_body = ""
using_code= self.gen_using()
func_code = self.gen_member_func(first_use_1stage)
code_body = using_code + func_code
class_code = gen_ir.gen_template_class(self.gen_class_name, template_param, code_body, inheritance_code = inheritance_code)
code = self.gen_include_header()
code += gen_ir.gen_namespace("cutlass", gen_ir.gen_namespace("gemm", gen_ir.gen_namespace("threadblock", class_code)))
# print(code)
return code
class gen_b2b_mma_base:
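    """Generates B2bMmaBase: per-GEMM shared-storage definitions, the warp tile
    iterators and the constructor shared by the pipelined B2b mainloops."""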
def __init__(self, template_param, gen_class_name, b2b_num, cutlass_deps_root, project_root):
self.gen_class_name = gen_class_name
self.template_param = template_param
self.b2b_num = b2b_num
self.cutlass_deps_root = cutlass_deps_root
self.project_root = project_root
def gen_include_header(self):
code = '''
#pragma once
#include \"{cutlass_dirs}cutlass/aligned_buffer.h\"
#include \"{cutlass_dirs}cutlass/arch/memory.h\"
#include \"{cutlass_dirs}cutlass/array.h\"
#include \"{cutlass_dirs}cutlass/cutlass.h\"
#include \"{cutlass_dirs}cutlass/gemm/gemm.h\"
#include \"{cutlass_dirs}cutlass/matrix_shape.h\"
#include \"{cutlass_dirs}cutlass/numeric_types.h\"\n'''.format(cutlass_dirs=self.cutlass_deps_root)
return code
def gen_shared_storage(self):
code = \
" template< \n\
typename Shape_,\n\
typename Policy_,\n\
int ThisStage_\n\
>\n\
class SharedStorage {\n\
public:\n\
using Shape = Shape_;\n\
using Policy = Policy_;\n\
static int const ThisStage = ThisStage_;\n\
using Operator = typename Policy::Operator;\n\
\
using TensorRefA = TensorRef<typename Operator::ElementA, typename Operator::LayoutA>;\n\
\
/// Tensor reference to the B operand \n\
using TensorRefB = TensorRef<typename Operator::ElementB, typename Operator::LayoutB>;\n\
\n\
/// Shape of the A matrix operand in shared memory \n\
using ShapeA = MatrixShape<Shape::kM + Policy::SmemPaddingA::kRow,\n\
Shape::kK * ThisStage +\n\
Policy::SmemPaddingA::kColumn>;\n\
\n\
/// Shape of the B matrix operand in shared memory\n\
using ShapeB =\n\
MatrixShape<Shape::kK * ThisStage + Policy::SmemPaddingB::kRow,\n\
Shape::kN + Policy::SmemPaddingB::kColumn>;\n\
\n\
public:\n\
\n\
/// Buffer for A operand\n\
AlignedBuffer<typename Operator::ElementA, ShapeA::kCount> operand_A;\n\
\n\
/// Buffer for B operand\n\
AlignedBuffer<typename Operator::ElementB, ShapeB::kCount> operand_B;\n\
\n\
public:\n\
\n\
/// Returns a layout object for the A matrix\n\
CUTLASS_DEVICE\n\
static typename Operator::LayoutA LayoutA() {\n\
return Operator::LayoutA::packed({ShapeA::kRow, ShapeA::kColumn});\n\
}\n\
\n\
/// Returns a layout object for the B matrix\n\
CUTLASS_HOST_DEVICE\n\
static typename Operator::LayoutB LayoutB() {\n\
return Operator::LayoutB::packed({ShapeB::kRow, ShapeB::kColumn});\n\
}\n\
\n\
/// Returns a TensorRef to the A operand\n\
CUTLASS_HOST_DEVICE\n\
TensorRefA operand_A_ref() {\n\
return TensorRefA{operand_A.data(), LayoutA()};\n\
}\n\
\n\
/// Returns a TensorRef to the B operand\n\
CUTLASS_HOST_DEVICE\n\
TensorRefB operand_B_ref() {\n\
return TensorRefB{operand_B.data(), LayoutB()};\n\
}\n\
CUTLASS_HOST_DEVICE\n\
void * get_B_Shared_ptr() {\n\
return operand_B.data();\n\
}\n\
};\n"
return code
def gen_using_and_misc(self, b2b_num):
code_using = ""
for i in range(b2b_num):
code_using += "using Operator" +str(i) + " = typename Policy" + str(i) +"::Operator;\n"
for i in range(b2b_num):
code_using += "using WarpGemm" +str(i) + " = typename Policy" + str(i) +"::Operator::Shape;\n"
for i in range(b2b_num):
code_using += "using WarpCount" +str(i) + " = GemmShape<" + helper.var_idx("Shape", i) +"::kM / " + helper.var_idx("WarpGemm", i) +"::kM, "\
+ helper.var_idx("Shape", i) +"::kN / " + helper.var_idx("WarpGemm", i) +"::kN, "\
+ helper.var_idx("Shape", i) +"::kK / " + helper.var_idx("WarpGemm", i) +"::kK>;\n"
code_misc = ""
for i in range(b2b_num):
code_misc += "static int const " + helper.var_idx("kWarpGemmIterations", i) + " = (" + helper.var_idx("WarpGemm", i) + "::kK / " + helper.var_idx("Operator", i) +"::Policy::MmaShape::kK);\n"
code = code_using + code_misc + self.gen_shared_storage()
for i in range(b2b_num):
code += "using " + helper.var_idx("SharedStorage", i) + " = SharedStorage<" + helper.var_idx("Shape", i) + ", " + helper.var_idx("Policy", i) +", " + helper.var_idx("Stage", i) + ">;\n"
def gen_union_shared_storage(b2b_num):
code = ""
for i in range(b2b_num):
code += " " +helper.var_idx("SharedStorage", i) + " " + helper.var_idx("sharedStorage", i) +";\n"
return code
code += "union B2bMmaSharedStorage {\n" + gen_union_shared_storage(self.b2b_num) + "};\n"
for i in range(b2b_num - 1):
code += helper.var_idx("void * C", i) + "_smm_ptr;\n"
return code
def gen_protected(self):
code = "\nprotected:\n"
code += "typename Operator0::IteratorA warp_tile_iterator_A0_;\n"
for i in range(self.b2b_num):
code += "typename Operator" +str(i) + "::IteratorB" +" warp_tile_iterator_B" + str(i) + "_;\n"
return code
def gen_public_member(self):
code = "\npublic:\n"
code += "CUTLASS_DEVICE\n"
code += \
"B2bMmaBase(\n" + \
" B2bMmaSharedStorage & shared_storage,\n" + \
" int thread_idx,\n" + \
" int warp_idx,\n" + \
" int lane_idx\n" + \
"):\n" + \
" warp_tile_iterator_A0_(shared_storage.sharedStorage0.operand_A_ref(), lane_idx),\n"
for i in range(self.b2b_num):
final = ",\n"
if i == self.b2b_num-1:
final = "\n"
iterator = " warp_tile_iterator_B" + str(i) + "_"
shared_storage = "shared_storage.sharedStorage" + str(i) + ".operand_B_ref()"
code += iterator + "(" + shared_storage + ", lane_idx)" + final
code += "{\n"
for i in range(self.b2b_num - 1):
code += helper.var_idx(" C", i) + helper.var_idx("_smm_ptr = shared_storage.sharedStorage", i) + ".get_B_Shared_ptr();\n"
code += "}\n"
return code
def gen_code(self):
        template_arg = []
        for i in range(self.b2b_num):
            template_arg.append(("typename", helper.var_idx("Shape", i)))
        for i in range(self.b2b_num):
            template_arg.append(("typename", helper.var_idx("Policy", i)))
        for i in range(self.b2b_num):
            template_arg.append((int, helper.var_idx("Stage", i)))
        code_body = self.gen_using_and_misc(self.b2b_num)
        code_body += self.gen_protected()
        code_body += self.gen_public_member()
        class_code = gen_ir.gen_template_class("B2bMmaBase", template_arg, code_body)
code = self.gen_include_header() + gen_ir.gen_namespace("cutlass", gen_ir.gen_namespace("gemm", gen_ir.gen_namespace("threadblock", class_code)))
return code
class gen_threadblock:
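    """Writes the threadblock-level headers b2b_mma_base.h, b2b_mma_pipelined.h
    and default_b2b_mma.h into <output_dir>/threadblock/."""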
def __init__(self, template_param, gen_class_name, b2b_num, output_dir, cutlass_deps_root, project_root):
self.gen_class_name = gen_class_name
self.template_param = template_param
self.b2b_num = b2b_num
self.file_dir = output_dir + "/threadblock/"
self.cutlass_deps_root = cutlass_deps_root
self.project_root = project_root
self.gen_b2b_mma_base = gen_b2b_mma_base(template_param, gen_class_name, b2b_num, cutlass_deps_root, project_root)
        self.gen_b2b_mma_pipelined = gen_b2b_mma_pipelined(template_param, gen_class_name, b2b_num, cutlass_deps_root, project_root)
self.gen_default_b2b_mma = gen_default_b2b_mma(template_param, gen_class_name, b2b_num, cutlass_deps_root, project_root)
def gen_code(self, first_use_1stage):
base_code = self.gen_b2b_mma_base.gen_code()
print("[INFO]: Gen kernel code [b2b_mma_base.h]output Dir: is ", self.file_dir)
with open(self.file_dir + "b2b_mma_base.h", "w+") as f:
f.write(base_code)
pipeline_code = self.gen_b2b_mma_pipelined.gen_code(first_use_1stage = first_use_1stage)
print("[INFO]: Gen kernel code [b2b_mma_pipelined.h]output Dir: is ", self.file_dir)
with open(self.file_dir + "b2b_mma_pipelined.h", "w+") as f:
f.write(pipeline_code)
default_code = self.gen_default_b2b_mma.gen_code()
print("[INFO]: Gen kernel code [default_b2b_mma.h]output Dir: is ", self.file_dir)
with open(self.file_dir + "default_b2b_mma.h", "w+") as f:
f.write(default_code)
| cutlass-main | examples/44_multi_gemm_ir_and_codegen/ir_gen/gen_threadblock.py |
#################################################################################################
#
# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
import ast
fuse_gemm_info = [
{
'epilogue': {
'tp': 'LeakyRelu', #'CustomizedLeaky_RELU'
'bias': {'addbias': False, 'bias_tp': 'mat'},
'args': [('float', 'leaky_alpha', 1.3), ],
'func': '''
y = max(leaky_alpha * x, x)
y = y * x
'''
}
},
]
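# Prototype for analyzing the user-supplied epilogue expression (the 'func'
# string above) with Python's ast module; intended as a starting point for
# generating customized epilogue code.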
class AnalysisNodeVisitor(ast.NodeVisitor):
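    """Read-only visitor that prints the node types and fields encountered
    while walking a parsed epilogue expression."""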
def visit_Import(self,node):
ast.NodeVisitor.generic_visit(self, node)
def visit_ImportFrom(self,node):
ast.NodeVisitor.generic_visit(self, node)
def visit_Assign(self,node):
print('Node type: Assign and fields: ', node._fields)
# print('Node type: Assign and targets value: ', node.targets, node.value)
ast.NodeVisitor.generic_visit(self, node)
def visit_BinOp(self, node):
print('Node type: BinOp and fields: ', node._fields)
print('node op: ', type(node.op).__name__)
ast.NodeVisitor.generic_visit(self, node)
def visit_Expr(self, node):
print('Node type: Expr and fields: ', node._fields)
ast.NodeVisitor.generic_visit(self, node)
def visit_Num(self,node):
print('Node type: Num and fields: ', node._fields)
print('Node type: Num: ', node.n)
def visit_Name(self,node):
print('Node type: Name and fields: ', node._fields)
        print('Node type: Name, ctx and id: ', type(node.ctx).__name__, node.id)
ast.NodeVisitor.generic_visit(self, node)
def visit_Str(self, node):
print('Node type: Str and fields: ', node._fields)
class CodeVisitor(ast.NodeVisitor):
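    """Example rewriting visitor: replaces '+' with '-' in binary ops and
    prepends a logging print() call to every visited function definition."""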
def visit_BinOp(self, node):
if isinstance(node.op, ast.Add):
node.op = ast.Sub()
self.generic_visit(node)
def visit_Assign(self, node):
print('Assign %s' % node.value)
self.generic_visit(node)
def visit_Name(self, node):
print("Name:", node.id)
self.generic_visit(node)
    def visit_FunctionDef(self, node):
        print('Function Name: %s' % node.name)
        self.generic_visit(node)
        # Prepend a logging statement to the visited function body.
        # ast.Print only exists in Python 2, so build an equivalent print() call.
        func_log_stmt = ast.Expr(
            value=ast.Call(
                func=ast.Name(id='print', ctx=ast.Load()),
                args=[ast.Constant(value='calling func: %s' % node.name)],
                keywords=[],
            ),
        )
        ast.fix_missing_locations(ast.copy_location(func_log_stmt, node))
        node.body.insert(0, func_log_stmt)
visitor = AnalysisNodeVisitor()
code = \
'''
a=max(leaky_alpha * x, x +1)
'''
visitor.visit(ast.parse(code))
| cutlass-main | examples/44_multi_gemm_ir_and_codegen/ir_gen/gen_customized_epilogue.py |
#################################################################################################
#
# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
class gen_build_sys:
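    """Emits a CMakeLists.txt for the generated multi-GEMM sample, pointing the
    build at the CUTLASS headers under cutlass_deps_dir."""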
def __init__(self, cutlass_deps_dir, output_dir = "../"):
self.output_dir = output_dir
self.cutlass_deps_dir = cutlass_deps_dir
def gen_top(self):
code = ""
code += '''\
# Auto Generated code - Do not edit.
cmake_minimum_required(VERSION 3.8)
project(CUTLASS_MULTI_GEMMS LANGUAGES CXX CUDA)
find_package(CUDAToolkit)
set(CUDA_PATH ${{CUDA_TOOLKIT_ROOT_DIR}})
set(CUTLASS_PATH \"{cutlass_deps_dir}/include\")
set(CUTLASS_UTIL_PATH \"{cutlass_deps_dir}/tools/util/include\")
list(APPEND CMAKE_MODULE_PATH ${{CUDAToolkit_LIBRARY_DIR}})
'''.format(cutlass_deps_dir=self.cutlass_deps_dir)
code += '''\
set(GPU_ARCHS \"\" CACHE STRING
\"List of GPU architectures (semicolon-separated) to be compiled for.\")
if(\"${GPU_ARCHS}\" STREQUAL \"\")
set(GPU_ARCHS \"70\")
endif()
foreach(arch ${GPU_ARCHS})
set(CMAKE_CUDA_FLAGS \"${CMAKE_CUDA_FLAGS} -gencode arch=compute_${arch},code=sm_${arch}\")
if(SM STREQUAL 70 OR SM STREQUAL 75)
set(CMAKE_C_FLAGS \"${CMAKE_C_FLAGS} -DWMMA\")
set(CMAKE_CXX_FLAGS \"${CMAKE_CXX_FLAGS} -DWMMA\")
set(CMAKE_CUDA_FLAGS \"${CMAKE_CUDA_FLAGS} -DWMMA\")
endif()
endforeach()
set(CMAKE_C_FLAGS \"${CMAKE_C_FLAGS}\")
set(CMAKE_CXX_FLAGS \"${CMAKE_CXX_FLAGS}\")
set(CMAKE_CUDA_FLAGS \"${CMAKE_CUDA_FLAGS} -Xcompiler -Wall\")
set(CMAKE_C_FLAGS_DEBUG \"${CMAKE_C_FLAGS_DEBUG} -Wall -O0\")
set(CMAKE_CXX_FLAGS_DEBUG \"${CMAKE_CXX_FLAGS_DEBUG} -Wall -O0\")
set(CMAKE_CUDA_FLAGS_DEBUG \"${CMAKE_CUDA_FLAGS_DEBUG} -O0 -G -Xcompiler -Wall\")
set(CMAKE_CXX_STANDARD 11)
set(CMAKE_CXX_STANDARD_REQUIRED ON)
if(CMAKE_CXX_STANDARD STREQUAL \"11\")
set(CMAKE_CUDA_FLAGS \"${CMAKE_CUDA_FLAGS} --expt-extended-lambda\")
set(CMAKE_CUDA_FLAGS \"${CMAKE_CUDA_FLAGS} --expt-relaxed-constexpr\")
endif()
set(CMAKE_CXX_FLAGS \"${CMAKE_CXX_FLAGS} -g -O3\")
set(CMAKE_CUDA_FLAGS \"${CMAKE_CUDA_FLAGS} -Xcompiler -O3\")
set(CMAKE_CUDA_FLAGS \"${CMAKE_CUDA_FLAGS} -Xcompiler=-fno-strict-aliasing\")
set(COMMON_HEADER_DIRS
${PROJECT_SOURCE_DIR}
${CUDAToolkit_INCLUDE_DIRS}
)
set(COMMON_LIB_DIRS
${CUDAToolkit_LIBRARY_DIR}
)
list(APPEND COMMON_HEADER_DIRS ${CUTLASS_PATH})
list(APPEND COMMON_HEADER_DIRS ${CUTLASS_UTIL_PATH})
'''
code += '''\
include_directories(
${COMMON_HEADER_DIRS}
)
link_directories(
${COMMON_LIB_DIRS}
)
add_definitions(-D_GLIBCXX_USE_CXX11_ABI=0)
add_definitions(-DGOOGLE_CUDA=1)
add_executable(sample
sample/sample.cu
one_api.cu
)
target_link_libraries(sample PRIVATE
-lcudart
-lnvToolsExt
${CMAKE_THREAD_LIBS_INIT}
)
if(NOT DEFINED LIB_INSTALL_PATH)
set(LIB_INSTALL_PATH ${CMAKE_CURRENT_BINARY_DIR})
endif()
'''
return code
def gen_code(self):
top_code = self.gen_top()
with open(self.output_dir + "CMakeLists.txt", "w") as f:
f.write(top_code)
| cutlass-main | examples/44_multi_gemm_ir_and_codegen/ir_gen/gen_cmake.py |
#################################################################################################
#
# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
import helper
import gen_ir as ir
import gen_turing_and_volta as gen_basic
class gen_verify:
def __init__(self, fuse_gemm_info, gen_class_name, user_header_file, output_dir = "../"):
self.fuse_gemm_info = fuse_gemm_info
self.name = gen_class_name + "_verify"
self.b2b_num = len(fuse_gemm_info)
self.params = []
self.user_header_file = ""
for header in user_header_file:
self.user_header_file += "#include \"" + header + "\"\n"
self.separate_cutlass = gen_basic.gen_volta_turing_fuse_act_impl(fuse_gemm_info, gen_class_name, user_header_file, output_dir)
self.gen_params()
self.output_dir = output_dir
def gen_code(self):
code = ""
code += self.user_header_file
code += self.separate_cutlass.gen_using(False) #False -> Turing, True -> Volta
code_body = ""
for i in range(self.b2b_num):
code_body += " " + helper.var_idx("Gemm", i) + helper.var_idx(" gemm_op_", i) + ";\n"
code_body += " " + helper.var_idx("gemm_op_", i) + helper.var_idx(".initialize(Arguments_", i) + ", nullptr);\n"
code_body += self.separate_cutlass.gen_run()
code += ir.gen_func(self.name, self.params, code_body)
helper.write_2_headfile("cutlass_verify.h", self.output_dir, code)
def gen_params(self):
for i in range(self.b2b_num):
self.params.append(
(
helper.var_idx("typename Gemm", i)+ "::Arguments",
helper.var_idx("Arguments_", i)
)
)
    def get_params(self, declaration = True):
        code = ""
        if declaration:
            for param in self.params:
                code += param[0] + " " + param[1] + ";\n"
        return code
    def gen_initialize(self):
        initialize_code = self.separate_cutlass.gen_initialize()
        code = ir.gen_func("initialize", [], initialize_code)
        return code
| cutlass-main | examples/44_multi_gemm_ir_and_codegen/ir_gen/gen_verify.py |
#################################################################################################
#
# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
def type_2_cutlass_type(input_type = "fp16"):
# float point type
if input_type == "fp32":
return "float"
if input_type == "bf16":
return "cutlass::bfloat16_t"
if input_type == "fp16":
return "cutlass::half_t"
# integer type
if(input_type == "int32"):
return "int32_t"
if(input_type == "int8"):
return "int8_t"
if input_type == 'Row':
return 'cutlass::layout::RowMajor'
if input_type == 'Col':
return 'cutlass::layout::ColumnMajor'
def cvt_2_cutlass_shape(gemm_shape):
# gemm shape
if len(gemm_shape) == 3:
val = "cutlass::gemm::GemmShape<" \
+ str(gemm_shape[0]) + ", " \
+ str(gemm_shape[1]) + ", " \
+ str(gemm_shape[2]) + ">"
return val
def write_2_headfile(filename, file_dir, string):
with open(file_dir + filename, 'w') as f:
f.write("/* Auto Generated code - Do not edit.*/\n\n\n#pragma once\n" + string)
def var_idx(variable, index):
    return variable + str(index)
def list_2_string(input_list):
rtn_string = ""
cnt = 0
for element in input_list:
final = ", \n"
if cnt == len(input_list) - 1:
final = "\n"
cnt += 1
rtn_string += str(element) + final
return rtn_string
def get_epilogue_info(layer_info):
return layer_info['epilogue']
def get_epilogue_tp(layer_info):
epilogue_info = get_epilogue_info(layer_info)
return epilogue_info['tp']
def get_epilogue_add_bias_or_not(layer_info):
epilogue_info = get_epilogue_info(layer_info)
return epilogue_info['bias']['addbias']
def get_epilogue_add_bias_tp(layer_info):
epilogue_info = get_epilogue_info(layer_info)
return epilogue_info['bias']['bias_tp']
def get_epilogue_args(layer_info):
epilogue_info = get_epilogue_info(layer_info)
return epilogue_info['args']
def get_epilogue_bias_shape(layer_info):
bias_tp = get_epilogue_add_bias_tp(layer_info).lower()
mn_shape = layer_info['mnk'][:-1]
if bias_tp == 'mat':
mn_shape[0] = 'M'
return mn_shape
elif bias_tp == 'vec':
mn_shape[0] = 1
return mn_shape
else:
assert(0)
def get_epilogue_bias_ldm(layer_info):
bias_tp = get_epilogue_add_bias_tp(layer_info).lower()
mn_shape = layer_info['mnk'][:-1]
c_layout = layer_info['C_format'].lower()
if c_layout != 'row':
assert(0)
if bias_tp == 'mat':
return mn_shape[1]
elif bias_tp == 'vec':
return 0
else:
assert(0)
def get_epilogue_compute_tp(layer_info):
return layer_info['Acc_tp']
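# Minimal usage sketch of the helpers above. layer_info is a hypothetical
# per-GEMM description; only the keys read by these accessors are filled in.
if __name__ == "__main__":
    layer_info = {
        'mnk': [64, 256, 128],
        'Acc_tp': 'fp32',
        'C_format': 'Row',
        'epilogue': {
            'tp': 'relu',
            'bias': {'addbias': True, 'bias_tp': 'Vec'},
            'args': [],
        },
    }
    print(type_2_cutlass_type('fp16'))          # cutlass::half_t
    print(cvt_2_cutlass_shape([128, 128, 32]))  # cutlass::gemm::GemmShape<128, 128, 32>
    print(var_idx("problem_size_", 0))          # problem_size_0
    print(get_epilogue_bias_shape(layer_info))  # [1, 256]
    print(get_epilogue_bias_ldm(layer_info))    # 0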
| cutlass-main | examples/44_multi_gemm_ir_and_codegen/ir_gen/helper.py |
#################################################################################################
#
# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
import helper
import gen_ir as ir
class gen_turing_impl:
def __init__(self,fuse_gemm_info, gen_class_name, user_header_file, output_dir = "../"):
self.fuse_gemm_info = fuse_gemm_info
self.class_name = gen_class_name
self.gen_class_name = gen_class_name + "_turing_impl"
self.user_header_file = ""
for header in user_header_file:
self.user_header_file += "#include \"" + header + "\"\n"
self.output_dir = output_dir
self.b2b_num = len(fuse_gemm_info)
self.gen_turing_unfused = gen_volta_turing_fuse_act_impl(fuse_gemm_info, gen_class_name, user_header_file, output_dir)
def gen_using(self):
code_using = "using b2b_gemm = typename cutlass::gemm::device::" + self.class_name + "<cutlass::half_t>;"
return code_using + "\n"
def gen_initialize(self):
code = ""
for i in range(self.b2b_num):
code_this = ""
code_this += helper.var_idx(helper.type_2_cutlass_type(self.fuse_gemm_info[i]['Acc_tp']) + " alpha", i) + " = " + helper.type_2_cutlass_type(self.fuse_gemm_info[i]['Acc_tp']) + "(1);\n"
beta = "(1)"
if helper.get_epilogue_add_bias_or_not(self.fuse_gemm_info[i]) is False:
beta = "(0)"
code_this += helper.var_idx(helper.type_2_cutlass_type(self.fuse_gemm_info[i]['Acc_tp']) + " beta", i) + " = " + helper.type_2_cutlass_type(self.fuse_gemm_info[i]['Acc_tp']) + beta + ";\n"
k_str = str(self.fuse_gemm_info[i]['mnk'][2])
if i == 0:
k_str = "K0"
code_this += helper.var_idx("cutlass::gemm::GemmCoord problem_size_", i) + "(M, " + str(self.fuse_gemm_info[i]['mnk'][1]) + ", " + k_str + ");\n"
code += code_this
code += "typename b2b_gemm::Arguments arguments{\n"
for i in range(self.b2b_num):
code += " " + helper.var_idx("problem_size_", i) + ",\n"
code += " " + "{reinterpret_cast<" + helper.type_2_cutlass_type(self.fuse_gemm_info[i]['A_tp']) + "*>(" + helper.var_idx("A", 0) + "), " + helper.var_idx("problem_size_", 0) + ".k()},\n"
for i in range(self.b2b_num):
ldmB = str(self.fuse_gemm_info[i]['mnk'][2])
if i == 0:
ldmB = "K0"
            if self.fuse_gemm_info[i]['B_format'] == 'Row':
ldmB = str(self.fuse_gemm_info[i]['mnk'][1])
ldmC = str(helper.get_epilogue_bias_ldm(self.fuse_gemm_info[i]))
code += " " + "{reinterpret_cast<" + helper.type_2_cutlass_type(self.fuse_gemm_info[i]['B_tp']) + "*>(" + helper.var_idx("B", i) + "), " + ldmB + "},\n"
code += " " + "{reinterpret_cast<" + helper.type_2_cutlass_type(self.fuse_gemm_info[i]['C_tp']) + "*>(" + helper.var_idx("C", i) + "), " + ldmC + "},\n"
code += " " + "{reinterpret_cast<" + helper.type_2_cutlass_type(self.fuse_gemm_info[i]['C_tp']) + "*>(" + helper.var_idx("D", self.b2b_num -1) + "), " + helper.var_idx("problem_size_", self.b2b_num - 1) + ".n()},\n"
for i in range(self.b2b_num):
code += " " + "{ " + helper.var_idx("alpha", i) + ", " + helper.var_idx("beta", i)
for epilogue_arg in helper.get_epilogue_args(self.fuse_gemm_info[i]):
arg_name = helper.var_idx("Epilogue", i) + "_" + epilogue_arg[1]
code += ", " + helper.type_2_cutlass_type(self.fuse_gemm_info[i]['Acc_tp']) + "(" + str(arg_name) + ")"
code += "},\n"
code += " " + "Batch};\n\n"
code += " " "b2b_gemm gemm_op;\n"
code += " " + "gemm_op.initialize(arguments);\n"
return code + "\n"
def gen_run(self):
code = " " + "gemm_op(stream);\n"
return code
def gen_wrapper(self):
code_body = ""
arg_lists = []
arg_lists.append(["int", "M"])
arg_lists.append(["int", "K0"])
arg_lists.append(["int", "Batch"])
arg_lists.append(["void*", helper.var_idx("A", 0)])
for i in range(self.b2b_num):
arg_lists.append(["void*", helper.var_idx("B", i)])
arg_lists.append(["void*", helper.var_idx("C", i)])
arg_lists.append(["void*", helper.var_idx("D", i)])
epilogue_args = helper.get_epilogue_args(self.fuse_gemm_info[i])
acc_tp = helper.get_epilogue_compute_tp(self.fuse_gemm_info[i])
for arg in epilogue_args:
arg_tp = arg[0]
arg_name = helper.var_idx("Epilogue", i) + "_" + arg[1]
arg_lists.append([arg_tp, arg_name])
if self.b2b_num == 1:
code_body += self.gen_turing_unfused.gen_using(False) #False -> Turing, True -> Volta
code_body += self.gen_turing_unfused.gen_initialize()
code_body += self.gen_turing_unfused.gen_run()
else:
code_body += self.gen_using()
code_body += self.gen_initialize()
code_body += self.gen_run()
code = ir.gen_func(self.gen_class_name, arg_lists, code_body)
return code
def gen_code(self):
code = self.gen_wrapper()
helper.write_2_headfile("turing_impl.h", self.output_dir, self.user_header_file + "\n" + code)
class gen_volta_turing_fuse_act_impl:
def __init__(self, fuse_gemm_info, gen_class_name, user_header_file, output_dir = "../"):
self.fuse_gemm_info = fuse_gemm_info
self.gen_class_name = gen_class_name + "_volta_impl"
self.user_header_file = ""
for header in user_header_file:
self.user_header_file += "#include \"" + header + "\"\n"
self.output_dir = output_dir
self.b2b_num = len(fuse_gemm_info)
def perf_tiling(self, layer_mnk):
mnk = layer_mnk[:]
block_tile = mnk[:]
block_tile[2] = 32 # force the K tile to be 32
# M tile gen
block_tile[0] = 32
# N tile gen
if mnk[1] > 128:
block_tile[1] = 256
elif mnk[1] > 64:
block_tile[1] = 128
elif mnk[1] > 32:
block_tile[1] = 64
else :
block_tile[1] = 32
warp_tile = block_tile[:]
if block_tile[1] == 256:
warp_tile[1] = 64
elif block_tile[1] == 128:
warp_tile[1] = 32
elif block_tile[1] == 64:
warp_tile[1] = 32
else :
warp_tile[1] = 32
warp_tile[0] = 32
return block_tile, warp_tile
def process_epilogue(self, epilogue_tp, n, C_tp, Acc_tp):
epilogue_setted_type = epilogue_tp
cutlass_epilogue_name = "LinearCombinationRelu"
if epilogue_setted_type.lower() == 'leakyrelu':
cutlass_epilogue_name = "LinearCombinationLeakyRelu"
elif epilogue_setted_type.lower() == 'identity':
cutlass_epilogue_name = "LinearCombination"
        n_mod_8 = n % 8
N_align_elements = 1
if n_mod_8 == 0:
N_align_elements = 8
elif n_mod_8 == 4:
N_align_elements = 4
elif n_mod_8 == 2 or n_mod_8 == 6:
N_align_elements = 2
epilogue_str = "cutlass::epilogue::thread::" + cutlass_epilogue_name+ "<" + C_tp + ", " + str(N_align_elements) + ", " + Acc_tp + ", " + Acc_tp + ">"
return epilogue_str
def gen_using(self, volta = True):
code_using = ""
volta_arch = "cutlass::arch::Sm70"
volta_tc = "cutlass::gemm::GemmShape<8, 8, 4>"
turing_arch = "cutlass::arch::Sm75"
turing_tc = "cutlass::gemm::GemmShape<16, 8, 8>"
arch = ""
tc = ""
if volta:
arch = volta_arch
tc = volta_tc
else:
arch = turing_arch
tc = turing_tc
for i in range(self.b2b_num):
k = self.fuse_gemm_info[i]['mnk'][2]
            k_mod_8 = k % 8
ab_ldm = 1
if k_mod_8 == 0:
ab_ldm = 8
elif k_mod_8 == 4:
ab_ldm = 4
elif k_mod_8 == 2 or k_mod_8 == 6:
ab_ldm = 2
block_tile, warp_tile = self.perf_tiling(self.fuse_gemm_info[i]['mnk'])
this_gemm_config = helper.var_idx("using Gemm", i) + " = cutlass::gemm::device::GemmBatched<\n"
this_gemm_config += " " + helper.type_2_cutlass_type(self.fuse_gemm_info[i]['A_tp']) + ",\n"
this_gemm_config += " " + helper.type_2_cutlass_type(self.fuse_gemm_info[i]['A_format']) + ",\n"
this_gemm_config += " " + helper.type_2_cutlass_type(self.fuse_gemm_info[i]['B_tp']) + ",\n"
this_gemm_config += " " + helper.type_2_cutlass_type(self.fuse_gemm_info[i]['B_format']) + ",\n"
this_gemm_config += " " + helper.type_2_cutlass_type(self.fuse_gemm_info[i]['C_tp']) + ",\n"
this_gemm_config += " " + helper.type_2_cutlass_type(self.fuse_gemm_info[i]['C_format']) + ",\n"
this_gemm_config += " " + helper.type_2_cutlass_type(self.fuse_gemm_info[i]['Acc_tp']) + ",\n"
this_gemm_config += " " + "cutlass::arch::OpClassTensorOp,\n"
this_gemm_config += " " + arch + ",\n"
this_gemm_config += " " + "cutlass::gemm::GemmShape<" + str(block_tile[0]) + ", " + str(block_tile[1]) + ", " + str(block_tile[2]) + ">,\n"
this_gemm_config += " " + "cutlass::gemm::GemmShape<" + str(warp_tile[0]) + ", " + str(warp_tile[1]) + ", " + str(warp_tile[2]) + ">,\n"
this_gemm_config += " " + tc + ",\n"
this_gemm_config += " " + self.process_epilogue(helper.get_epilogue_tp(self.fuse_gemm_info[i]), self.fuse_gemm_info[i]['mnk'][1], helper.type_2_cutlass_type(self.fuse_gemm_info[i]['C_tp']), helper.type_2_cutlass_type(self.fuse_gemm_info[i]['Acc_tp'])) + ",\n"
this_gemm_config += " " + "cutlass::gemm::threadblock::GemmBatchedIdentityThreadblockSwizzle,\n"
this_gemm_config += " " + "2,\n"
this_gemm_config += " " + str(ab_ldm) + ",\n"
this_gemm_config += " " + str(ab_ldm) + ">;\n"
code_using += this_gemm_config + "\n"
return code_using + "\n"
def gen_initialize(self):
code = ""
for i in range(self.b2b_num):
code_this = ""
N_str = str(self.fuse_gemm_info[i]['mnk'][1])
code_this += helper.var_idx(helper.type_2_cutlass_type(self.fuse_gemm_info[i]['Acc_tp']) + " alpha", i) + " = " + helper.type_2_cutlass_type(self.fuse_gemm_info[i]['Acc_tp']) + "(1);\n"
beta = "(1)"
if helper.get_epilogue_add_bias_or_not( self.fuse_gemm_info[i]) is False:
beta = "(0)"
code_this += helper.var_idx(helper.type_2_cutlass_type(self.fuse_gemm_info[i]['Acc_tp']) + " beta", i) + " = " + helper.type_2_cutlass_type(self.fuse_gemm_info[i]['Acc_tp']) + beta + ";\n"
k_str = str(self.fuse_gemm_info[i]['mnk'][2])
if i == 0:
k_str = "K0"
code_this += helper.var_idx("cutlass::gemm::GemmCoord problem_size_", i) + "(M, " + str(self.fuse_gemm_info[i]['mnk'][1]) + ", " + k_str + ");\n"
code_this += helper.var_idx("typename Gemm", i) + helper.var_idx("::Arguments arguments_", i) + "{\n"
code_this += " " + helper.var_idx("problem_size_", i) + ",\n"
ldmA = k_str
ldmB = k_str
ldmC = str(self.fuse_gemm_info[i]['mnk'][1])
ldmBias = str(helper.get_epilogue_bias_ldm(self.fuse_gemm_info[i]))
            if self.fuse_gemm_info[i]['A_format'] == 'Col':
                ldmA = "M"
            if self.fuse_gemm_info[i]['B_format'] == 'Row':
                ldmB = str(self.fuse_gemm_info[i]['mnk'][1])
            if self.fuse_gemm_info[i]['C_format'] == 'Col':
                ldmC = "M"
if i == 0:
code_this += " " + "{reinterpret_cast<" + helper.type_2_cutlass_type(self.fuse_gemm_info[i]['A_tp']) + "*>(" + helper.var_idx("A", i) + "), " + ldmA + "}, " + "M * " + ldmA + ",\n"
else:
code_this += " " + "{reinterpret_cast<" + helper.type_2_cutlass_type(self.fuse_gemm_info[i]['A_tp']) + "*>(" + helper.var_idx("D", i - 1) + "), " + ldmA + "}, " + "M * " + ldmA + ",\n"
code_this += " " + "{reinterpret_cast<" + helper.type_2_cutlass_type(self.fuse_gemm_info[i]['B_tp']) + "*>(" + helper.var_idx("B", i) + "), " + ldmB + "}, " + N_str + " * " + ldmB + ",\n"
M_bias = str(helper.get_epilogue_bias_shape(self.fuse_gemm_info[i])[0])
code_this += " " + "{reinterpret_cast<" + helper.type_2_cutlass_type(self.fuse_gemm_info[i]['C_tp']) + "*>(" + helper.var_idx("C", i) + "), " + ldmBias + "}, " + M_bias + " * " + N_str + ",\n"
code_this += " " + "{reinterpret_cast<" + helper.type_2_cutlass_type(self.fuse_gemm_info[i]['C_tp']) + "*>(" + helper.var_idx("D", i) + "), " + ldmC + "}, " + "M * " + ldmC + ",\n"
code_this += " " + "{ " + helper.var_idx("alpha", i) + ", " + helper.var_idx("beta", i)
for epilogue_arg in helper.get_epilogue_args(self.fuse_gemm_info[i]):
arg_name = helper.var_idx("Epilogue", i) + "_" + epilogue_arg[1]
code_this += ", " + helper.type_2_cutlass_type(self.fuse_gemm_info[i]['Acc_tp']) + "(" + str(arg_name) + ")"
code_this += " },\n"
code_this += " " + "Batch};\n"
code_this += " " + helper.var_idx("Gemm", i) + helper.var_idx(" gemm_op_", i) + ";\n"
code_this += " " + helper.var_idx("gemm_op_", i) + helper.var_idx(".initialize(arguments_", i) + ", nullptr);\n"
code += code_this + "\n"
return code + "\n"
def gen_run(self):
code = ""
for i in range(self.b2b_num):
code_this = ""
code_this += " " + helper.var_idx("gemm_op_", i) + "(stream);\n"
code += code_this
return code
def gen_wrapper(self):
code_body = ""
arg_lists = []
arg_lists.append(["int", "M"])
arg_lists.append(["int", "K0"])
arg_lists.append(["int", "Batch"])
arg_lists.append(["void*", helper.var_idx("A", 0)])
for i in range(self.b2b_num):
arg_lists.append(["void*", helper.var_idx("B", i)])
arg_lists.append(["void*", helper.var_idx("C", i)])
arg_lists.append(["void*", helper.var_idx("D", i)])
epilogue_args = helper.get_epilogue_args(self.fuse_gemm_info[i])
acc_tp = helper.get_epilogue_compute_tp(self.fuse_gemm_info[i])
for arg in epilogue_args:
arg_tp = arg[0]
arg_name = helper.var_idx("Epilogue", i) + "_" + arg[1]
arg_lists.append([arg_tp, arg_name])
code_body += self.gen_using()
code_body += self.gen_initialize()
code_body += self.gen_run()
code = ir.gen_func(self.gen_class_name, arg_lists, code_body)
return code
def gen_code(self):
code = self.gen_wrapper()
helper.write_2_headfile("volta_impl.h", self.output_dir, self.user_header_file + "\n" + code)
class gen_one_API:
def __init__(self, fuse_gemm_info, gen_class_name, user_header_file, output_dir = "../"):
self.fuse_gemm_info = fuse_gemm_info
self.gen_class_name = gen_class_name
self.user_header_file = ""
for header in user_header_file:
self.user_header_file += "#include \"" + header + "\"\n"
self.output_dir = output_dir
self.b2b_num = len(fuse_gemm_info)
self.gen_volta = gen_volta_turing_fuse_act_impl(fuse_gemm_info, gen_class_name, user_header_file, output_dir)
self.gen_turing = gen_turing_impl(fuse_gemm_info, gen_class_name, user_header_file, output_dir)
def gen_CUTLASS_irrelevant_API(self):
code = ""
code += "#include <cuda_runtime.h>\n"
code += "#include <assert.h>\n"
param_name = "Fused" + str(self.b2b_num) + "xGemm_"
for i in range(self.b2b_num):
param_name += str(self.fuse_gemm_info[i]['mnk'][1]) + "_"
param_name += "Params"
params = ""
params += " " + "int M;\n"
params += " " + "int K0;\n"
params += " " + "int Batch;\n"
params += " " + "const void* A0;\n"
for i in range(self.b2b_num):
params += " " + "const void* " + helper.var_idx("B", i) + ";\n"
params += " " + "const void* " + helper.var_idx("C", i) + ";\n"
epilogue_args = helper.get_epilogue_args(self.fuse_gemm_info[i])
acc_tp = helper.get_epilogue_compute_tp(self.fuse_gemm_info[i])
for arg in epilogue_args:
arg_tp = arg[0]
arg_name = helper.var_idx("Epilogue", i) + "_" + arg[1]
params += " " + arg_tp + " " + arg_name + ";\n"
params += " " + "void* " + helper.var_idx("D", i) + ";\n"
code += ir.gen_struct(param_name, params)
code += "using Param = " + param_name + ";\n"
code += "void one_api( const Param & param, int sm, cudaStream_t stream);\n"
return code
def gen_one_api(self):
code = ""
code += "/* Auto Generated code - Do not edit.*/\n"
code += "#include \"cutlass_irrelevant.h\"\n"
code += "#include \"api.h\"\n"
code += "void one_api( const Param & param, int sm, cudaStream_t stream) {\n"
code += " " + "if (sm == 70) \n"
code += " " + " " + self.gen_class_name + "_volta_impl(param.M, param.K0, param.Batch, const_cast<void*>(param.A0), "
for i in range(self.b2b_num):
code += helper.var_idx("const_cast<void*>(param.B", i) + "), "
code += helper.var_idx("const_cast<void*>(param.C", i) + "), "
code += helper.var_idx("param.D", i) + ", "
epilogue_args = helper.get_epilogue_args(self.fuse_gemm_info[i])
for arg in epilogue_args:
arg_name = helper.var_idx("Epilogue", i) + "_" + arg[1]
code += "param." + arg_name + ", "
code += "stream);\n"
code += " " + "else if(sm >= 75) \n"
code += " " + " " + self.gen_class_name + "_turing_impl(param.M, param.K0, param.Batch, const_cast<void*>(param.A0), "
for i in range(self.b2b_num):
code += helper.var_idx("const_cast<void*>(param.B", i) + "), "
code += helper.var_idx("const_cast<void*>(param.C", i) + "), "
code += helper.var_idx("param.D", i) + ", "
epilogue_args = helper.get_epilogue_args(self.fuse_gemm_info[i])
for arg in epilogue_args:
arg_name = helper.var_idx("Epilogue", i) + "_" + arg[1]
code += "param." + arg_name + ", "
code += "stream);\n"
code += " " + "else assert(0);\n"
code += "}\n"
return code
def gen_code(self):
turing_code = self.gen_turing.gen_wrapper()
volta_code = self.gen_volta.gen_wrapper()
cutlass_irrelevant_code = self.gen_CUTLASS_irrelevant_API()
one_api_code = self.gen_one_api()
with open(self.output_dir + "one_api.cu", "w+") as f:
f.write(one_api_code)
helper.write_2_headfile("cutlass_irrelevant.h", self.output_dir, cutlass_irrelevant_code)
helper.write_2_headfile("api.h", self.output_dir, self.user_header_file + "\n" + turing_code + volta_code)
| cutlass-main | examples/44_multi_gemm_ir_and_codegen/ir_gen/gen_turing_and_volta.py |
#################################################################################################
#
# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
import helper
import gen_ir as ir
class gen_test:
def __init__(self, fuse_gemm_info, gen_class_name, user_header_file, output_dir = "../"):
self.fuse_gemm_info = fuse_gemm_info
self.gen_class_name = gen_class_name
self.user_header_file = user_header_file
self.sample_dir = output_dir
self.b2b_num = len(fuse_gemm_info)
def gen_cpp_sample(self):
code = "/* Auto Generated code - Do not edit.*/\n"
code += "#include <stdio.h> \n"
code += "#include \"cutlass/gemm/device/gemm_batched.h\" \n"
code += "#include \"cutlass/cutlass.h\" \n"
code += "#include \"../cutlass_irrelevant.h\" \n"
code += "#include \"../cutlass_verify.h\" \n"
code += "#include \"leaky_bias.h\" \n"
code += "#include \"utils.h\" \n"
code += "int main(int args, char * argv[]) {\n"
code += " " + "int M = atoi(argv[1]);\n"
code += " " + "int K0 = " + str(self.fuse_gemm_info[0]['mnk'][0]) + ";\n"
code += " " + "if(args == 3);\n"
code += " " + " " + "K0 = atoi(argv[2]);\n"
code += " " + "int B = 1;\n"
code += " " + "if(args == 4);\n"
code += " " + " " + "B = atoi(argv[3]);\n"
code += " " + "srand(1234UL);\n"
code += " " + "int device_id = 0;\n"
code += " " + "cudaGetDevice(&device_id);\n"
code += " " + "cudaDeviceProp prop;\n"
code += " " + "cudaGetDeviceProperties(&prop, device_id);\n"
code += " " + "int sm = prop.major *10 + prop.minor;\n"
code += "using ElementCompute = cutlass::half_t;\n"
for i in range(self.b2b_num):
code += " " + helper.var_idx("ElementCompute alpha", i) + " = ElementCompute(1);\n"
addbias = helper.get_epilogue_add_bias_or_not( self.fuse_gemm_info[i])
if addbias:
code += " " + helper.var_idx("ElementCompute beta", i) + " = ElementCompute(1);\n"
else:
code += " " + helper.var_idx("ElementCompute beta", i) + " = ElementCompute(0);\n"
code += " " + "size_t flops = 0;\n"
for i in range(self.b2b_num):
m = self.fuse_gemm_info[i]['mnk'][0]
n = self.fuse_gemm_info[i]['mnk'][1]
k = self.fuse_gemm_info[i]['mnk'][2]
bias_shape = helper.get_epilogue_bias_shape(self.fuse_gemm_info[i])
this_k = "K0"
if (i > 0):
this_k = str(k)
code += " " + "flops += size_t(2) * size_t(M) * size_t(B) * " + "size_t(" + str(n) + ") * size_t(" + this_k + ");\n"
code += " " + helper.var_idx("cutlass::gemm::GemmCoord problem_size_", i) + "(" + "M" + ", " + str(n) + ", " + this_k + ");\n"
code += " " + helper.var_idx("memory_unit<cutlass::half_t> Mat_A", i) + helper.var_idx("(B * problem_size_", i) + helper.var_idx(".m() * problem_size_", i) + ".k());\n"
code += " " + helper.var_idx("memory_unit<cutlass::half_t> Mat_B", i) + helper.var_idx("(B * problem_size_", i) + helper.var_idx(".n() * problem_size_", i) + ".k());\n"
code += " " + helper.var_idx("memory_unit<cutlass::half_t> Mat_C", i) + "(B * " + str(bias_shape[0]) + " * " + str(bias_shape[1]) + ");\n"
code += " " + helper.var_idx("memory_unit<cutlass::half_t> Mat_D_cutlass_ref", i) + helper.var_idx("(B * problem_size_", i) + helper.var_idx(".m() * problem_size_", i) + ".n());\n"
code += " " + helper.var_idx("Mat_A", i) + ".init();\n"
code += " " + helper.var_idx("Mat_B", i) + ".init();\n"
code += " " + helper.var_idx("Mat_C", i) + ".init();\n"
code += " " + helper.var_idx("memory_unit<cutlass::half_t> Mat_D", self.b2b_num - 1) + helper.var_idx("(B * problem_size_", i) + helper.var_idx(".m() * problem_size_",self.b2b_num - 1) + ".n());\n"
params = []
params.append("M")
params.append("B")
params.append("Mat_A0.device_ptr")
for i in range(self.b2b_num):
params.append(helper.var_idx("Mat_B", i) + ".device_ptr")
params.append(helper.var_idx("Mat_C", i) + ".device_ptr")
if i != self.b2b_num-1:
params.append(helper.var_idx("Mat_D_cutlass_ref", i) + ".device_ptr")
params.append(helper.var_idx("Mat_D", self.b2b_num - 1) + ".device_ptr")
code += " " + "Param arguments = {\n"
code += " " + " " + "M,\n"
code += " " + " " + "K0,\n"
code += " " + " " + "B,\n"
code += " " + " " + "reinterpret_cast<const void*>(Mat_A0.device_ptr),\n"
cnt = 1
for i in range(self.b2b_num):
bias_flag = helper.get_epilogue_add_bias_or_not( self.fuse_gemm_info[i])
code += " " + " " + "reinterpret_cast<const void*>(" + helper.var_idx("Mat_B", i) + ".device_ptr" + "),\n"
cnt += 1
if bias_flag:
code += " " + " " + "reinterpret_cast<const void*>(" + helper.var_idx("Mat_C", i) + ".device_ptr" + "),\n"
cnt += 1
else:
code += " " + " " + "reinterpret_cast<const void*>(NULL),\n"
epilogue_args = helper.get_epilogue_args(self.fuse_gemm_info[i])
acc_tp = helper.get_epilogue_compute_tp(self.fuse_gemm_info[i])
for arg in epilogue_args:
arg_value = str(arg[2])
code += " " + " " + helper.type_2_cutlass_type(acc_tp) + "(" + arg_value + "),\n"
if i != self.b2b_num - 1:
code += " " + " " + "reinterpret_cast<void*>(" + helper.var_idx("Mat_D_cutlass_ref", i) + ".device_ptr" + "),\n"
else:
code += " " + " " + "reinterpret_cast<void*>(" + helper.var_idx("Mat_D", i) + ".device_ptr" + ")};\n"
code += " " + "TI(FUSED_CUTLASS);\n"
code += " " + "for(int i = 0; i < 100; i++){\n"
code += " " + " " + "one_api(arguments, sm, NULL);\n"
code += " " + "}\n"
code += " " + "TO(FUSED_CUTLASS, \"FUSED_CUTLASS\", 100);\n"
code += "\n"
for i in range(self.b2b_num):
code_this = ""
N_str = str(self.fuse_gemm_info[i]['mnk'][1])
code_this += " " + helper.var_idx("typename Gemm", i) + helper.var_idx("::Arguments arguments_", i) + "{\n"
code_this += " " + " " + helper.var_idx("problem_size_", i) + ",\n"
ldmA = str(self.fuse_gemm_info[i]['mnk'][2])
if i == 0:
ldmA = "K0"
ldmB = str(self.fuse_gemm_info[i]['mnk'][2])
if i == 0:
ldmB = "K0"
ldmC = str(self.fuse_gemm_info[i]['mnk'][1])
ldmBias = str(helper.get_epilogue_bias_ldm(self.fuse_gemm_info[i]))
            if self.fuse_gemm_info[i]['A_format'] == 'Col':
                ldmA = "M"
            if self.fuse_gemm_info[i]['B_format'] == 'Row':
                ldmB = str(self.fuse_gemm_info[i]['mnk'][1])
            if self.fuse_gemm_info[i]['C_format'] == 'Col':
                ldmC = "M"
if i == 0:
code_this += " " + " " + "{reinterpret_cast<" + helper.type_2_cutlass_type(self.fuse_gemm_info[i]['A_tp']) + "*>(" + helper.var_idx("Mat_A", i) + ".device_ptr), " + ldmA + "}, " + "M * " + ldmA + ",\n"
else:
code_this += " " + " " + "{reinterpret_cast<" + helper.type_2_cutlass_type(self.fuse_gemm_info[i]['A_tp']) + "*>(" + helper.var_idx("Mat_D_cutlass_ref", i - 1) + ".device_ptr), " + ldmA + "}, " + "M * " + ldmA + ",\n"
code_this += " " + " " + "{reinterpret_cast<" + helper.type_2_cutlass_type(self.fuse_gemm_info[i]['B_tp']) + "*>(" + helper.var_idx("Mat_B", i) + ".device_ptr), " + ldmB + "}, " + N_str + " * " + ldmB + ",\n"
M_bias = str(helper.get_epilogue_bias_shape(self.fuse_gemm_info[i])[0])
code_this += " " + " " + "{reinterpret_cast<" + helper.type_2_cutlass_type(self.fuse_gemm_info[i]['C_tp']) + "*>(" + helper.var_idx("Mat_C", i) + ".device_ptr), " + ldmBias + "}, " + M_bias + " * " + N_str + ",\n"
code_this += " " + " " + "{reinterpret_cast<" + helper.type_2_cutlass_type(self.fuse_gemm_info[i]['C_tp']) + "*>(" + helper.var_idx("Mat_D_cutlass_ref", i) + ".device_ptr), " + ldmC + "}, " + "M * " + ldmC + ",\n"
code_this += " " + " " + "{ " + helper.var_idx("alpha", i) + ", " + helper.var_idx("beta", i)
for epilogue_arg in helper.get_epilogue_args(self.fuse_gemm_info[i]):
arg_value = str(epilogue_arg[2])
code_this += ", " + helper.type_2_cutlass_type(self.fuse_gemm_info[i]['Acc_tp']) + "(" + str(arg_value) + ")"
code_this += " " + " },\n"
code_this += " " + " " + "B};\n"
code += code_this
code += " " + "TI(UNFUSED_CUTLASS);\n"
code += " " + "for(int i = 0; i < 100; i++){\n"
code += " " + " " + self.gen_class_name + "_verify(\n"
for i in range(self.b2b_num):
code += " " + " " + " " + helper.var_idx("arguments_", i) + ",\n"
code += " " + " " + " " + "NULL);\n"
code += " " + "}\n"
code += " " + "TO(UNFUSED_CUTLASS, \"UNFUSED_CUTLASS\", 100);\n"
code += " " + helper.var_idx("Mat_D_cutlass_ref", self.b2b_num - 1) + ".d2h();\n"
code += " " + helper.var_idx("Mat_D", self.b2b_num - 1) + ".d2h();\n"
code += " " + helper.var_idx("check_result(Mat_D_cutlass_ref", self.b2b_num - 1) + helper.var_idx(".host_ptr, Mat_D", self.b2b_num - 1) \
+ helper.var_idx(".host_ptr, Mat_D", self.b2b_num - 1) + ".elements);\n"
code += "\n\n}\n"
with open(self.sample_dir + "sample.cu", "w+") as f:
f.write(code)
| cutlass-main | examples/44_multi_gemm_ir_and_codegen/ir_gen/gen_sample.py |
#################################################################################################
#
# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
import os
class replace_fix_impl:
def __init__(self, src_dir, dst_dir, cutlass_deps_root):
self.src_dir = src_dir
self.dst_dir = dst_dir
self.cutlass_deps_root = cutlass_deps_root
def gen_code(self):
for sub_dir in os.walk(self.src_dir):
files_in_sub_dir = sub_dir[2]
src_dirs = sub_dir[0]
output_dirs = self.dst_dir + sub_dir[0][len(self.src_dir):]
if not os.path.exists(output_dirs):
os.mkdir(output_dirs)
for f in files_in_sub_dir:
with open(src_dirs +"/" + f, 'r') as current_file:
output_lines = []
lines = current_file.readlines()
for line in lines:
if(len(line) >= len("#include \"cutlass") and line[:len("#include \"cutlass")] == "#include \"cutlass"):
new_line = "#include \"" + self.cutlass_deps_root + line[len("#include \""):]
# print(new_line)
output_lines.append(new_line)
else:
output_lines.append(line)
with open(output_dirs + "/" + f, "w+") as dest_file:
dest_file.writelines(output_lines)
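# Minimal usage sketch; the three directories are placeholders. gen_code() walks
# src_dir, rewrites '#include "cutlass...' lines to start with cutlass_deps_root,
# and mirrors the tree under dst_dir, so it is left commented out here.
if __name__ == "__main__":
    fixer = replace_fix_impl("./fixed_impls", "./generated", "path/to/cutlass_deps/")
    # fixer.gen_code()  # uncomment once src_dir and dst_dir exist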
| cutlass-main | examples/44_multi_gemm_ir_and_codegen/ir_gen/replace_fix_impl_header.py |
#################################################################################################
#
# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
import helper
indentation = " "
def append_word(word):
code = ""
code += word
code += " "
return code
def gen_namespace(namespace, codeBody):
code_gen = "namespace " + namespace + " {\n"
code_gen += codeBody
code_gen += "} // namespace " + namespace + "\n"
return code_gen
def gen_expression(type, lval, rval = None):
code_gen = ""
code_gen += append_word(type)
code_gen += append_word(lval)
if rval is not None:
code_gen += append_word("=")
code_gen += append_word(rval)
return code_gen
def gen_class(name, codeBody, inheritance_code = None):
code_gen = ""
if inheritance_code is None:
code_gen = "class " + name + "{\n"
else:
code_gen = "class " + name + " : "+ inheritance_code + "{\n"
code_gen += codeBody
code_gen += "}; // class " + name + "\n"
return code_gen
def gen_struct(name, codeBody, specialized = None):
specialized_code = ""
if specialized is not None:
specialized_code = "<" + specialized + ">"
code_gen = "struct " + name + specialized_code + "{\n"
code_gen += codeBody
code_gen += "}; // struct " + name + "\n"
return code_gen
def gen_template_arg(arg_type, arg_name, default_val = None):
rval = None
if default_val is not None:
rval = str(default_val)
arg_typename = ""
if arg_type is int:
arg_typename = "int"
elif arg_type is bool:
arg_typename = "bool"
else:
arg_typename = "typename"
internal_arg_name = arg_name + "_"
code_gen = indentation
code_gen += gen_expression(arg_typename, internal_arg_name, rval)
return code_gen
def gen_template_args(args, set_default = True):
arg_len = len(args)
cnt = 1
code_gen = ""
for arg_tuple in args:
arg_type = arg_tuple[0]
arg_name = arg_tuple[1]
arg_default_val = None
if len(arg_tuple) == 3 and set_default:
arg_default_val = arg_tuple[2]
code_gen += gen_template_arg(arg_type, arg_name, arg_default_val)
if cnt != arg_len:
code_gen += ",\n"
cnt += 1
return code_gen
def gen_template_head(args, set_default = True):
code_gen = "template <\n"
code_gen += gen_template_args(args, set_default)
code_gen += ">\n"
return code_gen
def export_template_args(args):
code_gen = "public:\n"
for arg_tuple in args:
code_gen += indentation
arg_type = arg_tuple[0]
arg_name = arg_tuple[1]
internal_arg_name = arg_name + "_"
typename = ""
if arg_type is int:
typename = "static int const"
elif arg_type is bool:
typename = "static bool const"
else:
typename = "using"
code_gen += gen_expression(typename, arg_name, internal_arg_name)
code_gen += ";\n"
return code_gen
def gen_template_class(class_name, args, codeBody, set_default = True, inheritance_code = None):
code_gen = ""
code_gen += gen_template_head(args, set_default)
code_gen += gen_class(class_name, export_template_args(args) + codeBody, inheritance_code)
return code_gen
def gen_template_struct(struct_name, args, codeBody, speicalized = None, set_default = True, export_args = True):
code_gen = ""
code_gen += gen_template_head(args, set_default)
code = export_template_args(args) + codeBody
if export_args is False:
code = codeBody
code_gen += gen_struct(struct_name, code , speicalized)
return code_gen
def gen_declare_template_struct(name, *params):
code = name + "<"
cnt = 0
param_num = len(params)
for param in params:
final = ", "
if cnt == param_num - 1:
final = ""
code += param + final
cnt += 1
code += ">;\n"
return code
def filtered_param(params, name_and_value_pair, keep_ = False):
rtn_template_args = []
speicalized_template_args = []
for param in params:
param_name = ""
if len(param) >= 1:
param_name = param[1]
else:
param_name = param[0]
hit_flag = False
set_value = ""
for n_v_pair in name_and_value_pair:
filter_name = n_v_pair[0]
set_value = n_v_pair[1]
if param_name == (filter_name + "_") or param_name == filter_name :
hit_flag = True
break
if hit_flag is False:
rtn_template_args.append(param)
if hit_flag is True:
speicalized_template_args.append(set_value)
else:
if keep_ is True:
speicalized_template_args.append(param_name + "_")
else:
speicalized_template_args.append(param_name)
specialized_template_arg_str = helper.list_2_string(speicalized_template_args)
return rtn_template_args, specialized_template_arg_str
def gen_func(func_name, arg_lists, code_body, only_declare = False, with_cudaStream = True):
code = "void " + func_name + "(\n"
for arg in arg_lists:
arg_tp = arg[0]
arg_nm = arg[1]
code += " " + arg_tp + " " + arg_nm + ",\n"
code += "cudaStream_t stream)"
if only_declare :
return code
code += "{\n"
code += code_body + "\n"
code += "}\n"
return code
def indent_level(code, level = 0):
rtn_code = ""
for i in range(level):
rtn_code += " "
rtn_code += code
return rtn_code
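# Minimal usage sketch of the emitters above; "Epilogue", "run" and the argument
# names are arbitrary placeholders, and the emitted C++ text is only printed.
if __name__ == "__main__":
    template_args = [(str, "ElementA", "cutlass::half_t"), (int, "Stages", 2)]
    print(gen_template_struct("Epilogue", template_args, indentation + "// struct body\n"))
    print(gen_func("run", [["int", "M"], ["void*", "ptr_A"]], indentation + "// kernel launch goes here"))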
| cutlass-main | examples/44_multi_gemm_ir_and_codegen/ir_gen/gen_ir.py |
################################################################################
#
# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
################################################################################
"""
Basic example of using the CUTLASS Python interface to run a GEMM
"""
import argparse
import numpy as np
import sys
import cutlass_bindings
import cutlass.backend as pycutlass
from cutlass.backend import *
from cutlass.backend.utils.device import device_cc
parser = argparse.ArgumentParser(description="Launch a GEMM kernel from Python: 'D = alpha * A * B + beta * C'")
parser.add_argument("--m", default=128, type=int, help="M dimension of the GEMM")
parser.add_argument("--n", default=128, type=int, help="N dimension of the GEMM")
parser.add_argument("--k", default=128, type=int, help="K dimension of the GEMM")
parser.add_argument('--print_cuda', action="store_true", help="Print the underlying CUDA kernel")
try:
args = parser.parse_args()
except:
sys.exit(0)
# Check that the device is of a sufficient compute capability
cc = device_cc()
assert cc >= 70, "The CUTLASS Python GEMM example requires compute capability greater than or equal to 70."
alignment = 8
assert args.m % alignment == 0, "M dimension of size {} is not divisible by alignment of {}".format(args.m, alignment)
assert args.n % alignment == 0, "N dimension of size {} is not divisible by alignment of {}".format(args.n, alignment)
assert args.k % alignment == 0, "K dimension of size {} is not divisible by alignment of {}".format(args.k, alignment)
np.random.seed(0)
# Allocate a pool of device memory to be used by the kernel
pycutlass.get_memory_pool(init_pool_size=2**30, max_pool_size=2**32)
# Set the compiler to use to NVCC
pycutlass.compiler.nvcc()
# Set up A, B, C and accumulator
A = TensorDescription(cutlass_bindings.float16, cutlass_bindings.ColumnMajor, alignment)
B = TensorDescription(cutlass_bindings.float16, cutlass_bindings.RowMajor, alignment)
C = TensorDescription(cutlass_bindings.float32, cutlass_bindings.ColumnMajor, alignment)
element_acc = cutlass_bindings.float32
element_epilogue = cutlass_bindings.float32
# Select instruction shape based on the Tensor Core instructions supported
# by the device on which we are running
if cc == 70:
instruction_shape = [8, 8, 4]
elif cc == 75:
instruction_shape = [16, 8, 8]
else:
# Use CUTLASS kernels for CC 80 by default (e.g., for cases in which SM86 is used)
cc = 80
instruction_shape = [16, 8, 16]
math_inst = MathInstruction(
instruction_shape,
A.element, B.element, element_acc,
cutlass_bindings.OpClass.TensorOp,
MathOperation.multiply_add
)
tile_description = TileDescription(
[128, 128, 32], # Threadblock shape
2, # Number of stages
[2, 2, 1], # Number of warps within each dimension of the threadblock shape
math_inst
)
epilogue_functor = pycutlass.LinearCombination(C.element, C.alignment, element_acc, element_epilogue)
operation = GemmOperationUniversal(
arch=cc, tile_description=tile_description,
A=A, B=B, C=C,
epilogue_functor=epilogue_functor)
if args.print_cuda:
print(operation.rt_module.emit())
operations = [operation, ]
# Compile the operation
pycutlass.compiler.add_module(operations)
# Randomly initialize tensors
tensor_A = np.ceil(np.random.uniform(low=-8.5, high=7.5, size=(args.m * args.k,))).astype(np.float16)
tensor_B = np.ceil(np.random.uniform(low=-8.5, high=7.5, size=(args.k * args.n,))).astype(np.float16)
tensor_C = np.ceil(np.random.uniform(low=-8.5, high=7.5, size=(args.m * args.n,))).astype(np.float32)
tensor_D = np.zeros(shape=(args.m * args.n,)).astype(np.float32)
problem_size = cutlass_bindings.gemm.GemmCoord(args.m, args.n, args.k)
alpha = 1.
beta = 0.
arguments = GemmArguments(
operation=operation, problem_size=problem_size,
A=tensor_A, B=tensor_B, C=tensor_C, D=tensor_D,
output_op=operation.epilogue_type(alpha, beta))
# Run the operation
operation.run(arguments)
arguments.sync()
# Run the host reference module and compare to the CUTLASS result
reference = ReferenceModule(A, B, C)
tensor_D_ref = reference.run(tensor_A, tensor_B, tensor_C, problem_size, alpha, beta)
try:
assert np.array_equal(tensor_D, tensor_D_ref)
except:
assert np.allclose(tensor_D, tensor_D_ref, atol=1e-5)
print("Passed.")
| cutlass-main | examples/40_cutlass_py/gemm.py |
################################################################################
#
# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
################################################################################
"""
Basic example of using the CUTLASS Python interface to run a 2d convolution
"""
import argparse
import torch
import numpy as np
import sys
import cutlass_bindings
import cutlass.backend as pycutlass
from cutlass.backend import *
from cutlass.backend.utils.reference_model import Conv2dReferenceModule
from cutlass.backend.utils.device import device_cc
parser = argparse.ArgumentParser(
description=("Launch a 2d convolution kernel from Python. "
"See https://docs.nvidia.com/deeplearning/performance/dl-performance-convolutional/index.html#convo-intro for notation."))
parser.add_argument("--n", default=1, type=int, help="N dimension of the convolution")
parser.add_argument("--c", default=64, type=int, help="C dimension of the convolution")
parser.add_argument("--h", default=32, type=int, help="H dimension of the convolution")
parser.add_argument("--w", default=32, type=int, help="W dimension of the convolution")
parser.add_argument("--k", default=32, type=int, help="N dimension of the convolution")
parser.add_argument("--r", default=3, type=int, help="R dimension of the convolution")
parser.add_argument("--s", default=3, type=int, help="S dimension of the convolution")
parser.add_argument('--print_cuda', action="store_true", help="Print the underlying CUDA kernel")
try:
args = parser.parse_args()
except:
sys.exit(0)
# Check that the device is of a sufficient compute capability
cc = device_cc()
assert cc >= 70, "The CUTLASS Python Conv2d example requires compute capability greater than or equal to 70."
alignment = 1
np.random.seed(0)
# Allocate a pool of device memory to be used by the kernel
pycutlass.get_memory_pool(init_pool_size=2**30, max_pool_size=2**32)
# Set the compiler to use to NVCC
pycutlass.compiler.nvcc()
# Set up A, B, C and accumulator
A = TensorDescription(cutlass_bindings.float16, cutlass_bindings.TensorNHWC, alignment)
B = TensorDescription(cutlass_bindings.float16, cutlass_bindings.TensorNHWC, alignment)
C = TensorDescription(cutlass_bindings.float32, cutlass_bindings.TensorNHWC, alignment)
element_acc = cutlass_bindings.float32
element_epilogue = cutlass_bindings.float32
# Select instruction shape based on the Tensor Core instructions supported
# by the device on which we are running
if cc == 70:
instruction_shape = [8, 8, 4]
elif cc == 75:
instruction_shape = [16, 8, 8]
else:
# Use CUTLASS kernels for CC 80 by default (e.g., for cases in which SM86 is used)
cc = 80
instruction_shape = [16, 8, 16]
math_inst = MathInstruction(
instruction_shape,
A.element, B.element, element_acc,
cutlass_bindings.OpClass.TensorOp,
MathOperation.multiply_add
)
tile_description = TileDescription(
[128, 128, 32], # Threadblock shape
2, # Number of stages
[2, 2, 1], # Number of warps within each dimension of the threadblock shape
math_inst
)
epilogue_functor = pycutlass.LinearCombination(C.element, C.alignment, element_acc, element_epilogue)
operation = Conv2dOperation(
conv_kind=cutlass_bindings.conv.Operator.fprop,
iterator_algorithm=cutlass_bindings.conv.IteratorAlgorithm.optimized,
arch=cc, tile_description=tile_description,
A=A, B=B, C=C, stride_support=StrideSupport.Strided,
epilogue_functor=epilogue_functor
)
if args.print_cuda:
print(operation.rt_module.emit())
operations = [operation, ]
# Compile the operation
pycutlass.compiler.add_module(operations)
# Randomly initialize tensors
problem_size = cutlass_bindings.conv.Conv2dProblemSize(
    cutlass_bindings.Tensor4DCoord(args.n, args.h, args.w, args.c),
cutlass_bindings.Tensor4DCoord(args.k, args.r, args.s, args.c),
cutlass_bindings.Tensor4DCoord(0, 0, 0, 0), # Padding
cutlass_bindings.MatrixCoord(1, 1), # Strides
cutlass_bindings.MatrixCoord(1, 1), # Dilation
cutlass_bindings.conv.Mode.cross_correlation,
1, # Split k slices
1 # Groups
)
tensor_A_size = cutlass_bindings.conv.implicit_gemm_tensor_a_size(operation.conv_kind, problem_size)
tensor_B_size = cutlass_bindings.conv.implicit_gemm_tensor_b_size(operation.conv_kind, problem_size)
tensor_C_size = cutlass_bindings.conv.implicit_gemm_tensor_c_size(operation.conv_kind, problem_size)
tensor_A = torch.ceil(torch.empty(size=(tensor_A_size,), dtype=torch.float16, device="cuda").uniform_(-8.5, 7.5))
tensor_B = torch.ceil(torch.empty(size=(tensor_B_size,), dtype=torch.float16, device="cuda").uniform_(-8.5, 7.5))
tensor_C = torch.ceil(torch.empty(size=(tensor_C_size,), dtype=torch.float32, device="cuda").uniform_(-8.5, 7.5))
tensor_D = torch.ones(size=(tensor_C_size,), dtype=torch.float32, device="cuda")
alpha = 1.
beta = 0.
arguments = Conv2dArguments(
operation=operation, problem_size=problem_size,
A=tensor_A, B=tensor_B, C=tensor_C, D=tensor_D,
output_op=operation.epilogue_type(alpha, beta)
)
# Run the operation
operation.run(arguments)
arguments.sync()
# Run the host reference module and compare to the CUTLASS result
reference = Conv2dReferenceModule(A, B, C, operation.conv_kind)
tensor_D_ref = reference.run(tensor_A, tensor_B, tensor_C, problem_size, alpha, beta)
try:
assert torch.equal(tensor_D, tensor_D_ref)
except:
assert torch.allclose(tensor_D, tensor_D_ref, rtol=1e-2)
print("Passed.")
| cutlass-main | examples/40_cutlass_py/conv2d.py |
################################################################################
#
# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
################################################################################
"""
Basic example of using the CUTLASS Python interface to run a grouped GEMM
"""
import argparse
import numpy as np
import sys
import cutlass_bindings
import cutlass.backend as pycutlass
from cutlass.backend import *
from cutlass.backend.utils.device import device_cc
parser = argparse.ArgumentParser(description="Launch a grouped GEMM kernel from Python")
parser.add_argument('--print_cuda', action="store_true", help="Print the underlying CUDA kernel")
try:
args = parser.parse_args()
except:
sys.exit(0)
# Check that the device is of a sufficient compute capability
cc = device_cc()
assert cc >= 70, "The CUTLASS Python grouped GEMM example requires compute capability greater than or equal to 70."
np.random.seed(0)
# Allocate a pool of device memory to be used by the kernel
pycutlass.get_memory_pool(init_pool_size=2**30, max_pool_size=2**32)
# Set the compiler to NVCC
pycutlass.compiler.nvcc()
# Set up A, B, C and accumulator
alignment = 1
A = TensorDescription(cutlass_bindings.float16, cutlass_bindings.ColumnMajor, alignment)
B = TensorDescription(cutlass_bindings.float16, cutlass_bindings.RowMajor, alignment)
C = TensorDescription(cutlass_bindings.float32, cutlass_bindings.ColumnMajor, alignment)
element_acc = cutlass_bindings.float32
element_epilogue = cutlass_bindings.float32
# Select instruction shape based on the Tensor Core instructions supported
# by the device on which we are running
if cc == 70:
instruction_shape = [8, 8, 4]
elif cc == 75:
instruction_shape = [16, 8, 8]
else:
# Use CUTLASS kernels for CC 80 by default (e.g., for cases in which SM86 is used)
cc = 80
instruction_shape = [16, 8, 16]
math_inst = MathInstruction(
instruction_shape,
A.element, B.element, element_acc,
cutlass_bindings.OpClass.TensorOp,
MathOperation.multiply_add
)
tile_description = TileDescription(
[128, 128, 32], # Threadblock shape
2, # Number of stages
[2, 2, 1], # Number of warps within each dimension of the threadblock shape
math_inst
)
epilogue_functor = pycutlass.LinearCombination(C.element, C.alignment, element_acc, element_epilogue)
operation = GemmOperationGrouped(
arch=cc, tile_description=tile_description,
A=A, B=B, C=C,
epilogue_functor=epilogue_functor,
precompute_mode=SchedulerMode.Device)
if args.print_cuda:
print(operation.rt_module.emit())
operations = [operation, ]
# Compile the operation
pycutlass.compiler.add_module(operations)
# Initialize tensors for each problem in the group
problem_sizes = [
cutlass_bindings.gemm.GemmCoord(128, 128, 64),
cutlass_bindings.gemm.GemmCoord(512, 256, 128)
]
problem_count = len(problem_sizes)
alpha = 1.
beta = 0.
tensor_As = []
tensor_Bs = []
tensor_Cs = []
tensor_Ds = []
tensor_D_refs = []
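# Host reference module used to compute the expected result for each problem in the group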
reference = ReferenceModule(A, B, C)
for problem_size in problem_sizes:
# Randomly initialize tensors
m = problem_size.m()
n = problem_size.n()
k = problem_size.k()
tensor_A = np.ceil(np.random.uniform(low=-8.5, high=7.5, size=(m * k,))).astype(np.float16)
tensor_B = np.ceil(np.random.uniform(low=-8.5, high=7.5, size=(k * n,))).astype(np.float16)
tensor_C = np.ceil(np.random.uniform(low=-8.5, high=7.5, size=(m * n,))).astype(np.float32)
tensor_D = np.zeros(shape=(m * n,)).astype(np.float32)
tensor_As.append(tensor_A)
tensor_Bs.append(tensor_B)
tensor_Cs.append(tensor_C)
tensor_Ds.append(tensor_D)
# Run the reference GEMM
tensor_D_ref = reference.run(tensor_A, tensor_B, tensor_C, problem_size, alpha, beta)
tensor_D_refs.append(tensor_D_ref)
arguments = GemmGroupedArguments(
operation, problem_sizes, tensor_As, tensor_Bs, tensor_Cs, tensor_Ds,
output_op=operation.epilogue_type(alpha, beta)
)
# Run the operation
operation.run(arguments)
arguments.sync()
# Compare the CUTLASS result to the host reference result
for tensor_d, tensor_d_ref in zip(tensor_Ds, tensor_D_refs):
try:
assert np.array_equal(tensor_d, tensor_d_ref)
    except AssertionError:
assert np.allclose(tensor_d, tensor_d_ref, rtol=1e-5)
print("Passed.")
| cutlass-main | examples/40_cutlass_py/gemm_grouped.py |
################################################################################
#
# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
################################################################################
import numpy as np
import cutlass.backend as pycutlass
from cutlass.backend import *
from cutlass.backend.utils.device import device_cc
import cutlass_bindings
from bfloat16 import bfloat16
import sys
import argparse
# parse the arguments
parser = argparse.ArgumentParser(description="Launch CUTLASS GEMM kernels from Python: 'D = alpha * A * B + beta * C'")
# Operation description
# math instruction description
parser.add_argument("-i", "--instruction_shape",
default=[1, 1, 1], nargs=3, type=int,
help="This option describes the size of MMA op")
parser.add_argument("-ta", "--element_a", default="float32", type=str,
choices=['float64', 'float32', 'float16', 'bfloat16', 'int32', 'int8'],
help='Data type of elements in input tensor A')
parser.add_argument("-tb", "--element_b", default="float32", type=str,
choices=['float64', 'float32', 'float16', 'bfloat16', 'int32', 'int8'],
help='Data type of elements in input tensor B')
parser.add_argument("-tc", "--element_c", default="float32", type=str,
choices=['float64', 'float32', 'float16', 'bfloat16', 'int32', 'int8'],
help='Data type of elements in input tensor C and output tensor D')
parser.add_argument("-tacc", "--element_acc", default="float32", type=str,
choices=['float64', 'float32', 'float16', 'bfloat16', 'int32', 'int8'],
help='Data type of accumulator')
parser.add_argument('-m', "--math", default="multiply_add",
type=str, choices=["multiply_add", "multiply_add_fast_bf16", "multiply_add_fast_f32"], help="math instruction")
parser.add_argument('-op', "--opcode", default="Simt", type=str,
choices=["Simt", 'TensorOp'],
help="This option describes whether you want to use tensor \
cores (TensorOp) or regular SIMT cores (Simt) on GPU SM")
# tile description
parser.add_argument("-b", "--threadblock_shape",
default=[128, 128, 8], nargs=3, type=int,
                    help="This option describes the tile size a thread block will compute")
parser.add_argument("-s", "--stages", default=4,
type=int, help="Number of pipelines you want to use")
parser.add_argument("-w", "--warp_count", default=[4, 2, 1], nargs=3, type=int,
help="This option describes the number of warps along M, N, and K of the threadblock")
parser.add_argument("-cc", "--compute_capability", default=80,
type=int, help="This option describes CUDA SM architecture number")
# A
parser.add_argument('-la', "--layout_a", default="RowMajor", type=str, choices=[
"RowMajor", "ColumnMajor", "RowMajorInterleaved32", "ColumnMajorInterleaved32"],
help="Memory layout of input tensor A")
parser.add_argument('-aa', '--alignment_a', default=1,
                    type=int, help="Memory alignment of input tensor A")
# B
parser.add_argument('-lb', "--layout_b", default="RowMajor", type=str, choices=[
"RowMajor", "ColumnMajor", "RowMajorInterleaved32", "ColumnMajorInterleaved32"],
help="Memory layout of input tensor B")
parser.add_argument('-ab', '--alignment_b', default=1,
type=int, help="Memory alignment of input tensor B")
# C
parser.add_argument('-lc', "--layout_c", default="RowMajor", type=str, choices=[
"RowMajor", "ColumnMajor", "RowMajorInterleaved32", "ColumnMajorInterleaved32"],
help="Memory layout of input tensor C and output tensor D")
parser.add_argument('-ac', '--alignment_c', default=1,
type=int, help="Memory alignment of input tensor C and output tensor D")
# epilogue
parser.add_argument("-te", "--element_epilogue", default="float32", type=str,
choices=['float64', 'float32', 'float16', 'bfloat16'], help='Epilogue datatype')
parser.add_argument("-ep", "--epilogue_functor", default="LinearCombination",
type=str, choices=['LinearCombination', 'FastLinearCombinationClamp', 'LinearCombinationClamp'],
help="This option describes the epilogue part of the kernel")
parser.add_argument("-epv", "--epilogue_visitor", default=None,
type=str, choices=['RowReduction', 'ColumnReduction', 'RowBroadcast', 'ColumnBroadcast'], help="epilogue visitor for more complex epilogues")
# swizzling
parser.add_argument("-sw", "--swizzling_functor", default="IdentitySwizzle1", type=str, choices=[
"IdentitySwizzle1", "IdentitySwizzle2", "IdentitySwizzle4", "IdentitySwizzle8", "HorizontalSwizzle", "BatchedIdentitySwizzle"],
help="This option describes how thread blocks are scheduled on GPU")
# Argument
parser.add_argument("-p", "--problem_size",
default=[128, 128, 128], nargs=3, type=int,
help="GEMM problem size M, N, K")
parser.add_argument("-alpha", "--alpha", default=1.0, type=float,
help="Scaling factor of A * B")
parser.add_argument("-beta", "--beta", default=0.0, type=float,
help="Scaling factor of C")
parser.add_argument("-gm", "--gemm_mode", default="Gemm", type=str,
choices=["Gemm", "GemmSplitKParallel", "Batched", "Array"],
help="GEMM mode. Gemm is used for non-splitK or serial-splitK. \
GemmSplitKParallel is used for parallel splitK")
parser.add_argument('-k', '--split_k_slices', default=1,
type=int, help="Number of split-k partitions. (default 1)")
parser.add_argument('-bias', '--bias', action='store_true', help="C is bias vector")
parser.add_argument('-batch', '--batch', default=1, type=int, help="batch size for batched GEMM")
# Activation function
parser.add_argument("-activ", "--activation_function", default="identity",
choices=["identity", "relu", "leaky_relu", "tanh", "sigmoid", "silu", "hardswish", "gelu"], help="activation function")
parser.add_argument("-activ_arg", "--activation_args", default=[], nargs="+", type=float,
                    help="additional arguments for the activation function")
parser.add_argument('--print_cuda', action="store_true",
help="print the underlying CUDA kernel")
try:
args = parser.parse_args()
except:
sys.exit(0)
cc = device_cc()
if args.compute_capability != cc:
    raise Exception(("Parameter --compute_capability of {} "
                     "does not match the compute capability of the active device ({}).").format(args.compute_capability, cc))
pycutlass.get_memory_pool(init_pool_size=2**30, max_pool_size=2**32)
pycutlass.compiler.nvcc()
np.random.seed(0)
element_a = getattr(cutlass_bindings, args.element_a)
element_b = getattr(cutlass_bindings, args.element_b)
element_c = getattr(cutlass_bindings, args.element_c)
element_acc = getattr(cutlass_bindings, args.element_acc)
math_operation = getattr(MathOperation, args.math)
opclass = getattr(cutlass_bindings.OpClass, args.opcode)
math_inst = MathInstruction(
args.instruction_shape, element_a, element_b,
element_acc, opclass, math_operation
)
tile_description = TileDescription(
args.threadblock_shape, args.stages, args.warp_count,
math_inst
)
layout_a = getattr(cutlass_bindings, args.layout_a)
layout_b = getattr(cutlass_bindings, args.layout_b)
layout_c = getattr(cutlass_bindings, args.layout_c)
A = TensorDescription(
element_a, layout_a, args.alignment_a
)
B = TensorDescription(
element_b, layout_b, args.alignment_b
)
C = TensorDescription(
element_c, layout_c, args.alignment_c
)
element_epilogue = getattr(cutlass_bindings, args.element_epilogue)
if (args.activation_function == "identity"
or (args.gemm_mode == "GemmSplitKParallel" and args.split_k_slices > 1)):
#
epilogue_functor = getattr(pycutlass, args.epilogue_functor)(
C.element, C.alignment, math_inst.element_accumulator, element_epilogue)
else:
epilogue_functor = getattr(pycutlass, "LinearCombinationGeneric")(
getattr(pycutlass, args.activation_function)(element_epilogue),
C.element, C.alignment, math_inst.element_accumulator, element_epilogue)
swizzling_functor = getattr(cutlass_bindings, args.swizzling_functor)
visitor = args.epilogue_visitor is not None
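# Each EpilogueVisitTree subclass below expresses a fused epilogue as a Python function over the
# accumulator, the source tensor c, scalars, and (for the broadcast variants) a vector; the same
# object is reused further down as the host reference for the epilogue computation.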
if args.epilogue_visitor == "ColumnReduction":
class ColumnReduction_(EpilogueVisitTree):
def __call__(
self, accum: 'tensor', c: 'tensor',
alpha: 'scalar', beta: 'scalar'):
#
D = alpha * accum + beta * c
reduction = reduction_op(D, "column", "Add", args.threadblock_shape[0])
return D, reduction
epilogue_functor = ColumnReduction_(
epilogue_functor, tile_description, math_inst.element_accumulator,
C.alignment, element_epilogue, C.element)
epilogue_functor.initialize()
elif args.epilogue_visitor == "RowReduction":
class RowReduction_(EpilogueVisitTree):
def __call__(
self, accum: 'tensor', c: 'tensor',
alpha: 'scalar', beta: 'scalar'):
#
D = alpha * accum + tanh.numpy(beta * c)
reduction = reduction_op(D, "row", "Add", args.threadblock_shape[1])
return D, reduction
epilogue_functor = RowReduction_(
epilogue_functor, tile_description, math_inst.element_accumulator,
C.alignment, element_epilogue, C.element)
epilogue_functor.initialize()
elif args.epilogue_visitor == "RowBroadcast":
class RowBroadcast_(EpilogueVisitTree):
def __call__(
self, accum: 'tensor', c: 'tensor',
vector: 'row', alpha: 'scalar', beta: 'scalar'):
#
T = accum + vector
scale_T = alpha * T
Z = relu.numpy(scale_T + beta * c)
return Z, T
epilogue_functor = RowBroadcast_(
epilogue_functor, tile_description, math_inst.element_accumulator,
C.alignment, element_epilogue, C.element)
epilogue_functor.initialize()
elif args.epilogue_visitor == "ColumnBroadcast":
class ColumnBroadcast_(EpilogueVisitTree):
def __call__(
self, accum: 'tensor', c: 'tensor',
vector: 'column', alpha: 'scalar', beta: 'scalar'):
#
T = accum + vector
scale_T = leaky_relu.numpy(alpha * T, 0.2)
Z = scale_T + beta * c
return Z, T
epilogue_functor = ColumnBroadcast_(
epilogue_functor, tile_description, math_inst.element_accumulator,
C.alignment, element_epilogue, C.element)
epilogue_functor.initialize()
else:
    # No epilogue visitor was requested; keep the plain epilogue functor
    pass
operation = GemmOperationUniversal(
arch=args.compute_capability, tile_description=tile_description,
A=A, B=B, C=C,
epilogue_functor=epilogue_functor, swizzling_functor=swizzling_functor,
visitor=visitor
)
if args.print_cuda:
print(operation.rt_module.emit())
operations = [operation, ]
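# For parallel split-K, partial results are written to a workspace and combined by a separate
# reduction kernel, which also applies the activation function.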
if args.gemm_mode == "GemmSplitKParallel":
if (args.activation_function == "identity"):
epilogue_functor_reduction = getattr(pycutlass, args.epilogue_functor)(
C.element, C.alignment, math_inst.element_accumulator, element_epilogue)
else:
epilogue_functor_reduction = getattr(pycutlass, "LinearCombinationGeneric")(
getattr(pycutlass, args.activation_function)(element_epilogue),
C.element, C.alignment, math_inst.element_accumulator, element_epilogue)
reduction_operation = ReductionOperation(
shape=cutlass_bindings.MatrixCoord(4, 32 * C.alignment),
C=C, element_accumulator=element_acc,
element_compute=element_epilogue,
epilogue_functor=epilogue_functor_reduction,
count=C.alignment
)
operations.append(reduction_operation)
pycutlass.compiler.add_module(operations)
# User-provided inputs
problem_size = cutlass_bindings.gemm.GemmCoord(
args.problem_size[0], args.problem_size[1], args.problem_size[2])
tensor_a_size = args.batch * problem_size.m() * problem_size.k()
if args.element_a != "int8":
if args.element_a == "bfloat16":
tensor_A = np.ceil(
np.random.uniform(low=-8.5, high=7.5, size=(tensor_a_size,))
).astype(bfloat16)
else:
tensor_A = np.ceil(
np.random.uniform(low=-8.5, high=7.5, size=(tensor_a_size,))
).astype(getattr(np, args.element_a))
else:
tensor_A = np.random.uniform(
low=-2, high=2,size=(tensor_a_size,)
).astype(getattr(np, args.element_a))
tensor_b_size = args.batch * problem_size.k() * problem_size.n()
if args.element_b != "int8":
if args.element_b == "bfloat16":
tensor_B = np.ceil(
np.random.uniform(low=-8.5, high=7.5, size=(tensor_b_size,))
).astype(bfloat16)
else:
tensor_B = np.ceil(
np.random.uniform(low=-8.5, high=7.5, size=(tensor_b_size,))
).astype(getattr(np, args.element_b))
else:
tensor_B = np.random.uniform(
low=-2, high=2, size=(tensor_b_size,)
).astype(getattr(np, args.element_b))
if args.element_c != "int8":
if args.bias:
if args.layout_c == "RowMajor":
tensor_c_size = args.batch * problem_size.n()
elif args.layout_c == "ColumnMajor":
tensor_c_size = args.batch * problem_size.m()
else:
raise ValueError(args.layout_c)
else:
tensor_c_size = args.batch * problem_size.m() * problem_size.n()
if args.element_c == "bfloat16":
tensor_C = np.ceil(
np.random.uniform(low=-8.5, high=7.5, size=(tensor_c_size,))
).astype(bfloat16)
else:
tensor_C = np.ceil(
np.random.uniform(low=-8.5, high=7.5, size=(tensor_c_size,))
).astype(getattr(np, args.element_c))
else:
tensor_C = np.random.uniform(
low=-2, high=2, size=(args.batch * problem_size.m() * problem_size.n(),)
).astype(getattr(np, args.element_c))
tensor_D = np.zeros(
shape=(args.batch * problem_size.m() * problem_size.n(),)
).astype(getattr(np, args.element_c))
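# Build the epilogue arguments: visitor epilogues take named operands (e.g., D, c, reduction, vector),
# while the default epilogue takes alpha, beta, and any extra activation arguments.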
if args.epilogue_visitor == "RowReduction":
cta_n = args.threadblock_shape[1]
num_cta_n = (problem_size.n() + cta_n - 1) // cta_n
reduction = np.zeros(shape=(args.batch * problem_size.m() * num_cta_n,), dtype=getattr(np, args.element_c))
output_op = operation.epilogue_type(
D=tensor_D, alpha=args.alpha, beta=args.beta, c=tensor_C, reduction=reduction, problem_size=[problem_size.m(), problem_size.n()]
)
elif args.epilogue_visitor == "ColumnReduction":
cta_m = args.threadblock_shape[0]
num_cta_m = (problem_size.m() + cta_m - 1) // cta_m
reduction = np.zeros(shape=(args.batch * problem_size.n() * num_cta_m,), dtype=getattr(np, args.element_c))
output_op = operation.epilogue_type(
D=tensor_D, alpha=args.alpha, beta=args.beta, c=tensor_C, reduction=reduction, problem_size=[problem_size.m(), problem_size.n()]
)
elif args.epilogue_visitor == "RowBroadcast":
vector = np.ceil(
np.random.uniform(low=-8.5, high=7.5, size=(args.batch, 1, problem_size.n()))
).astype(getattr(np, args.element_c))
tensor_t = np.empty_like(tensor_D)
output_op = operation.epilogue_type(
c=tensor_C, vector=vector, alpha=args.alpha, beta=args.beta, Z=tensor_D, T=tensor_t, problem_size=[problem_size.m(), problem_size.n()]
)
elif args.epilogue_visitor == "ColumnBroadcast":
vector = np.ceil(
np.random.uniform(low=-8.5, high=7.5, size=(args.batch, problem_size.m(), 1))
).astype(getattr(np, args.element_c))
tensor_t = np.empty_like(tensor_D)
output_op = operation.epilogue_type(
c=tensor_C, vector=vector, alpha=args.alpha, beta=args.beta, Z=tensor_D, T=tensor_t, problem_size=[problem_size.m(), problem_size.n()]
)
else:
output_op = operation.epilogue_type(*([args.alpha, args.beta] + args.activation_args))
arguments = GemmArguments(
operation=operation, problem_size=problem_size,
A=tensor_A, B=tensor_B, C=tensor_C, D=tensor_D,
output_op=output_op,
gemm_mode=getattr(cutlass_bindings.gemm.Mode, args.gemm_mode),
split_k_slices=args.split_k_slices, batch=args.batch
)
if args.gemm_mode == "GemmSplitKParallel":
reduction_arguments = ReductionArguments(
operation=reduction_operation,
problem_size=[problem_size.m(), problem_size.n()],
partitions=args.split_k_slices, workspace=arguments.ptr_D,
destination=tensor_D, source=tensor_C,
output_op=reduction_operation.epilogue_type(*([args.alpha, args.beta] + args.activation_args)),
bias = arguments.bias
)
operation.run(arguments)
if args.gemm_mode == "GemmSplitKParallel":
reduction_operation.run(reduction_arguments)
reduction_arguments.sync()
else:
arguments.sync()
# run the host reference module
reference = ReferenceModule(A, B, C)
tensor_D_ref = reference.run(
tensor_A, tensor_B, tensor_C, problem_size, args.alpha, args.beta, args.bias, args.batch)
if args.epilogue_visitor in ["RowBroadcast", "ColumnBroadcast"]:
tensor_D_ref = (tensor_D_ref.reshape((args.batch, problem_size.m(), problem_size.n())) + vector).flatten()
tensor_D_ref = getattr(pycutlass, args.activation_function).numpy(*([tensor_D_ref,] + args.activation_args))
if args.epilogue_visitor in ["RowReduction", "ColumnReduction"]:
output_op.sync()
accum_ref = reference.run(
tensor_A, tensor_B, tensor_C, problem_size, 1.0, 0.0, args.bias, args.batch)
tensor_D_ref, reduction_ref = epilogue_functor(
accum_ref.reshape((args.batch, problem_size.m(), problem_size.n())),
tensor_C.reshape((args.batch, problem_size.m(), problem_size.n())),
args.alpha, args.beta
)
tensor_D_ref = tensor_D_ref.flatten()
reduction_ref = reduction_ref.flatten()
assert np.allclose(reduction_ref, reduction, atol=1e-2)
elif args.epilogue_visitor in ["RowBroadcast", "ColumnBroadcast"]:
output_op.sync()
accum_ref = reference.run(
tensor_A, tensor_B, tensor_C, problem_size, 1.0, 0.0, args.bias, args.batch)
tensor_D_ref, tensor_T_ref = epilogue_functor(
accum_ref.reshape((args.batch, problem_size.m(), problem_size.n())),
tensor_C.reshape((args.batch, problem_size.m(), problem_size.n())),
vector, args.alpha, args.beta)
tensor_D_ref = tensor_D_ref.flatten()
tensor_T_ref = tensor_T_ref.flatten()
assert np.array_equal(tensor_t, tensor_T_ref)
try:
assert np.array_equal(tensor_D, tensor_D_ref)
except AssertionError:
assert np.allclose(tensor_D, tensor_D_ref, atol=1e-5)
print("Passed.")
| cutlass-main | examples/40_cutlass_py/customizable/gemm.py |
################################################################################
#
# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
################################################################################
import numpy as np
import cutlass.backend as pycutlass
from cutlass.backend import *
from cutlass.backend.utils.device import device_cc
from cutlass.backend.conv2d_operation import *
from cutlass.backend.utils.reference_model import Conv2dReferenceModule
import cutlass_bindings
import sys
import torch
import torch.nn.functional as F
import argparse
# parse the arguments
parser = argparse.ArgumentParser(description="Launch CUTLASS convolution 2d kernels from Python")
# Operation description
# math instruction description
parser.add_argument("-i", "--instruction_shape",
default=[1, 1, 1], nargs=3, type=int,
help="This option describes the size of MMA op")
parser.add_argument("-ta", "--element_a", default="float32", type=str,
choices=['float64', 'float32', 'float16', 'bfloat16', 'int32', 'int8'],
help='Data type of elements in input tensor A')
parser.add_argument("-tb", "--element_b", default="float32", type=str,
choices=['float64', 'float32', 'float16', 'bfloat16', 'int32', 'int8'],
help='Data type of elements in input tensor B')
parser.add_argument("-tc", "--element_c", default="float32", type=str,
choices=['float64', 'float32', 'float16', 'bfloat16', 'int32', 'int8'],
help='Data type of elements in input tensor C and output tensor D')
parser.add_argument("-tacc", "--element_acc", default="float32", type=str,
choices=['float64', 'float32', 'float16', 'bfloat16', 'int32', 'int8'],
help='Data type of accumulator')
parser.add_argument('-m', "--math", default="multiply_add",
type=str, choices=["multiply_add", "multiply_add_fast_bf16", "multiply_add_fast_f32"], help="math instruction")
parser.add_argument('-op', "--opcode", default="Simt", type=str,
choices=["Simt", 'TensorOp'],
help='This option describes whether you want to use tensor \
cores (TensorOp) or regular SIMT cores (Simt) on GPU SM')
# tile description
parser.add_argument("-b", "--threadblock_shape",
default=[128, 128, 8], nargs=3, type=int,
                    help="This option describes the tile size a thread block will compute")
parser.add_argument("-s", "--stages", default=4,
type=int, help="Number of pipelines you want to use")
parser.add_argument("-w", "--warp_count", default=[
4, 2, 1], nargs=3, type=int,
help="This option describes the number of warps along M, N, and K of the threadblock")
parser.add_argument("-cc", "--compute_capability", default=80,
type=int, help="This option describes CUDA SM architecture number")
# A
parser.add_argument('-la', "--layout_a", default="TensorNHWC", type=str, choices=[
"TensorNHWC", "TensorNC32HW32"],
help="Memory layout of input tensor A")
parser.add_argument('-aa', '--alignment_a', default=1,
                    type=int, help="Memory alignment of input tensor A")
# B
parser.add_argument('-lb', "--layout_b", default="TensorNHWC", type=str, choices=[
"TensorNHWC", "TensorC32RSK32"],
help="Memory layout of input tensor B")
parser.add_argument('-ab', '--alignment_b', default=1,
type=int, help="Memory alignment of input tensor B")
# C
parser.add_argument('-lc', "--layout_c", default="TensorNHWC", type=str, choices=[
"TensorNHWC", "TensorNC32HW32"],
help="Memory layout of input tensor C and output tensor D")
parser.add_argument('-ac', '--alignment_c', default=1,
type=int, help="Memory alignment of input tensor C and output tensor D")
# epilogue
parser.add_argument("-te", "--element_epilogue", default="float32", type=str,
choices=['float64', 'float32', 'float16', 'bfloat16'],
help='Data type of computation in the epilogue')
parser.add_argument("-ep", "--epilogue_functor", default="LinearCombination",
type=str, choices=['LinearCombination', 'FastLinearCombinationClamp', 'LinearCombinationClamp'],
help="This option describes the epilogue part of the kernel")
# swizzling
parser.add_argument("-sw", "--swizzling_functor", default="IdentitySwizzle1", type=str, choices=[
"IdentitySwizzle1", "IdentitySwizzle2", "IdentitySwizzle4", "IdentitySwizzle8",
"HorizontalSwizzle", "StridedDgradIdentitySwizzle1", "StridedDgradIdentitySwizzle4",
"StridedDgradHorizontalSwizzle"],
help="This option describes how thread blocks are scheduled on GPU")
# conv related
parser.add_argument("-co", "--conv_kind", default="fprop", type=str, choices=['fprop', 'dgrad', 'wgrad'],
help="The type of convolution: forward propagation (fprop), \
gradient of activation (dgrad), gradient of weight (wgrad)")
parser.add_argument("-st", "--stride_support", default="Strided", type=str, choices=["Strided", "Unity"],
                    help="Stride support of the kernel: Strided allows arbitrary strides, Unity requires unit stride")
parser.add_argument("-ia", "--iterator_algorithm", default="analytic", type=str,
choices=["analytic", "optimized", "fixed_channels", "few_channels"],
help="This option describes iterator algorithm")
# arguments
parser.add_argument("-sm", "--split_k_mode", default="Serial", type=str, choices=["Serial", "Parallel"],
help="Split K Mode. Serial is used for non-splitK or serial-splitK.\
Parallel is used for parallel splitK.")
parser.add_argument('-k', '--split_k_slices', default=1,
type=int, help="Number of split-k partitions. (default 1)")
parser.add_argument("-nhwc", "--nhwc", nargs=4, type=int, help="input size (NHWC)")
parser.add_argument("-krsc", "--krsc", nargs=4, type=int, help="filter size (KRSC)")
parser.add_argument("-pad", "--pad", nargs=4, type=int, help="padding (pad_h, _, pad_w, _)")
parser.add_argument("-stride", "--stride", nargs=2, type=int, help="stride (stride_h, stride_w)")
parser.add_argument("-dilation", "--dilation", nargs=2, type=int, help="dilation (dilation_h, dilation_w)")
parser.add_argument("-alpha", "--alpha", default=1.0, type=float, help="alpha")
parser.add_argument("-beta", "--beta", default=0.0, type=float, help="beta")
parser.add_argument('-bias', '--bias', action='store_true', help="C is bias vector")
# Activation function
parser.add_argument("-activ", "--activation_function", default="identity",
choices=["identity", "relu", "leaky_relu", "tanh", "sigmoid", "silu", "hardswish", "gelu"], help="activation function")
parser.add_argument("-activ_arg", "--activation_args", default=[], nargs="+", type=float,
                    help="additional arguments for the activation function")
parser.add_argument('--print_cuda', action="store_true",
help="print the underlying CUDA kernel")
try:
args = parser.parse_args()
except:
sys.exit(0)
cc = device_cc()
if args.compute_capability != cc:
    raise Exception(("Parameter --compute_capability of {} "
                     "does not match the compute capability of the active device ({}).").format(args.compute_capability, cc))
pycutlass.get_memory_pool(init_pool_size=2**30, max_pool_size=2**32)
np.random.seed(0)
element_a = getattr(cutlass_bindings, args.element_a)
element_b = getattr(cutlass_bindings, args.element_b)
element_c = getattr(cutlass_bindings, args.element_c)
element_acc = getattr(cutlass_bindings, args.element_acc)
math_operation = getattr(MathOperation, args.math)
opclass = getattr(cutlass_bindings.OpClass, args.opcode)
math_inst = MathInstruction(
args.instruction_shape, element_a, element_b,
element_acc, opclass, math_operation
)
tile_description = TileDescription(
args.threadblock_shape, args.stages, args.warp_count,
math_inst
)
layout_a = getattr(cutlass_bindings, args.layout_a)
layout_b = getattr(cutlass_bindings, args.layout_b)
layout_c = getattr(cutlass_bindings, args.layout_c)
A = TensorDescription(
element_a, layout_a, args.alignment_a
)
B = TensorDescription(
element_b, layout_b, args.alignment_b
)
C = TensorDescription(
element_c, layout_c, args.alignment_c
)
element_epilogue = getattr(cutlass_bindings, args.element_epilogue)
if (args.activation_function == "identity"
or (args.split_k_mode == "Parallel" and args.split_k_slices > 1)):
#
epilogue_functor = getattr(pycutlass, args.epilogue_functor)(
C.element, C.alignment, math_inst.element_accumulator, element_epilogue)
else:
epilogue_functor = getattr(pycutlass, "LinearCombinationGeneric")(
getattr(pycutlass, args.activation_function)(element_epilogue),
C.element, C.alignment, math_inst.element_accumulator, element_epilogue)
iterator_algorithm = getattr(cutlass_bindings.conv.IteratorAlgorithm, args.iterator_algorithm)
swizzling_functor = getattr(cutlass_bindings, args.swizzling_functor)
stride_support = getattr(StrideSupport, args.stride_support)
conv_kind = getattr(cutlass_bindings.conv.Operator, args.conv_kind)
operation = Conv2dOperation(
conv_kind=conv_kind, iterator_algorithm=iterator_algorithm,
arch=args.compute_capability, tile_description=tile_description,
A=A, B=B, C=C, stride_support=stride_support,
epilogue_functor=epilogue_functor, swizzling_functor=swizzling_functor
)
if args.print_cuda:
print(operation.rt_module.emit())
operations = [operation,]
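# For parallel split-K, a separate reduction kernel combines the partial results and applies the
# activation function.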
if args.split_k_mode == "Parallel" and args.split_k_slices > 1:
if (args.activation_function == "identity"):
epilogue_functor_reduction = getattr(pycutlass, args.epilogue_functor)(
C.element, C.alignment, math_inst.element_accumulator, element_epilogue)
else:
epilogue_functor_reduction = getattr(pycutlass, "LinearCombinationGeneric")(
getattr(pycutlass, args.activation_function)(element_epilogue),
C.element, C.alignment, math_inst.element_accumulator, element_epilogue)
reduction_operation = ReductionOperation(
shape=cutlass_bindings.MatrixCoord(4, 32 * C.alignment),
C=C, element_accumulator=element_acc,
element_compute=element_epilogue,
epilogue_functor=epilogue_functor_reduction,
count=C.alignment
)
operations.append(reduction_operation)
pycutlass.compiler.add_module(operations)
problem_size = cutlass_bindings.conv.Conv2dProblemSize(
cutlass_bindings.Tensor4DCoord(args.nhwc[0], args.nhwc[1], args.nhwc[2], args.nhwc[3]),
cutlass_bindings.Tensor4DCoord(args.krsc[0], args.krsc[1], args.krsc[2], args.krsc[3]),
cutlass_bindings.Tensor4DCoord(args.pad[0], args.pad[1], args.pad[2], args.pad[3]),
cutlass_bindings.MatrixCoord(args.stride[0], args.stride[1]),
cutlass_bindings.MatrixCoord(args.dilation[0], args.dilation[1]),
cutlass_bindings.conv.Mode.cross_correlation,
args.split_k_slices, 1
)
# User-provided inputs
tensor_A_size = cutlass_bindings.conv.implicit_gemm_tensor_a_size(
conv_kind, problem_size
)
tensor_B_size = cutlass_bindings.conv.implicit_gemm_tensor_b_size(
conv_kind, problem_size
)
if args.bias:
tensor_C_size = cutlass_bindings.conv.implicit_gemm_tensor_c_extent(
conv_kind, problem_size
).at(3)
else:
tensor_C_size = cutlass_bindings.conv.implicit_gemm_tensor_c_size(
conv_kind, problem_size
)
tensor_D_size = cutlass_bindings.conv.implicit_gemm_tensor_c_size(
conv_kind, problem_size
)
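# Randomly initialize the operand tensors on the GPU; int8 operands use a narrower value range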
if args.element_a != "int8":
tensor_A = torch.ceil(torch.empty(size=(tensor_A_size,), dtype=getattr(torch, args.element_a), device="cuda").uniform_(-8.5, 7.5))
else:
tensor_A = torch.empty(size=(tensor_A_size,), dtype=getattr(torch, args.element_a), device="cuda").uniform_(-2, 2)
if args.element_b != "int8":
tensor_B = torch.ceil(torch.empty(size=(tensor_B_size,), dtype=getattr(torch, args.element_b), device="cuda").uniform_(-8.5, 7.5))
else:
tensor_B = torch.empty(size=(tensor_B_size,), dtype=getattr(torch, args.element_b), device="cuda").uniform_(-2, 2)
if args.element_c != "int8":
tensor_C = torch.ceil(torch.empty(size=(tensor_C_size,), dtype=getattr(torch, args.element_c), device="cuda").uniform_(-8.5, 7.5))
else:
tensor_C = torch.empty(size=(tensor_C_size,), dtype=getattr(torch, args.element_c), device="cuda").uniform_(-2, 2)
tensor_D = torch.ones(size=(tensor_D_size,), dtype=getattr(torch, args.element_c), device="cuda")
arguments = Conv2dArguments(
operation=operation, problem_size=problem_size, A=tensor_A,
B=tensor_B, C=tensor_C, D=tensor_D,
output_op = operation.epilogue_type(*([args.alpha, args.beta] + args.activation_args)),
split_k_mode=getattr(cutlass_bindings.conv.SplitKMode, args.split_k_mode),
split_k_slices=problem_size.split_k_slices
)
if args.split_k_mode == "Parallel" and args.split_k_slices > 1:
implicit_gemm_size = cutlass_bindings.conv.implicit_gemm_problem_size(conv_kind, arguments.problem_size)
reduction_arguments = ReductionArguments(
reduction_operation,
problem_size=[implicit_gemm_size.m(), implicit_gemm_size.n()],
partitions=problem_size.split_k_slices,
workspace=arguments.ptr_D,
destination=tensor_D,
source=tensor_C,
output_op = reduction_operation.epilogue_type(*([args.alpha, args.beta] + args.activation_args)),
bias = arguments.bias
)
operation.run(arguments)
if args.split_k_mode == "Parallel" and args.split_k_slices > 1:
reduction_operation.run(reduction_arguments)
reduction_arguments.sync()
else:
arguments.sync()
reference_model = Conv2dReferenceModule(A, B, C, conv_kind)
tensor_D_ref = reference_model.run(tensor_A, tensor_B, tensor_C, arguments.problem_size, args.alpha, args.beta, args.bias)
if (args.activation_function != "identity"):
tensor_D_ref = getattr(F, args.activation_function)(*([tensor_D_ref,] + args.activation_args))
try:
assert torch.equal(tensor_D, tensor_D_ref)
except AssertionError:
assert torch.allclose(tensor_D, tensor_D_ref, rtol=1e-2)
print("Passed.")
| cutlass-main | examples/40_cutlass_py/customizable/conv2d.py |
################################################################################
#
# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
################################################################################
import numpy as np
import cutlass.backend as pycutlass
from cutlass.backend import *
from cutlass.backend.utils.device import device_cc
import cutlass_bindings
from bfloat16 import bfloat16
import csv
import sys
import argparse
# parse the arguments
parser = argparse.ArgumentParser(
description="Launch CUTLASS GEMM Grouped kernels from Python")
# Operation description
# math instruction description
parser.add_argument("-i", "--instruction_shape",
default=[1, 1, 1], nargs=3, type=int,
help="This option describes the size of MMA op")
parser.add_argument("-ta", "--element_a", default="float32", type=str,
choices=['float64', 'float32', 'float16', 'bfloat16', 'int32', 'int8'],
help='Data type of elements in input tensor A')
parser.add_argument("-tb", "--element_b", default="float32", type=str,
choices=['float64', 'float32', 'float16', 'bfloat16', 'int32', 'int8'],
help='Data type of elements in input tensor B')
parser.add_argument("-tc", "--element_c", default="float32", type=str,
choices=['float64', 'float32', 'float16', 'bfloat16', 'int32', 'int8'],
help='Data type of elements in input tensor C and output tensor D')
parser.add_argument("-tacc", "--element_acc", default="float32", type=str,
choices=['float64', 'float32', 'float16', 'bfloat16', 'int32', 'int8'],
help='Data type of accumulator')
parser.add_argument('-m', "--math", default="multiply_add",
type=str, choices=["multiply_add", "multiply_add_fast_bf16", "multiply_add_fast_f32"], help="math instruction")
parser.add_argument('-op', "--opcode", default="Simt", type=str,
choices=["Simt", 'TensorOp'], help='This option describes whether you want to use tensor \
cores (TensorOp) or regular SIMT cores (Simt) on GPU SM')
# tile description
parser.add_argument("-b", "--threadblock_shape",
default=[128, 128, 8], nargs=3, type=int,
                    help="This option describes the tile size a thread block will compute")
parser.add_argument("-s", "--stages", default=4,
type=int, help="Number of pipelines you want to use")
parser.add_argument("-w", "--warp_count", default=[
4, 2, 1], nargs=3, type=int,
help="This option describes the number of warps along M, N, and K of the threadblock")
parser.add_argument("-cc", "--compute_capability", default=80,
type=int, help="This option describes CUDA SM architecture number")
# A
parser.add_argument('-la', "--layout_a", default="RowMajor", type=str, choices=[
"RowMajor", "ColumnMajor", "RowMajorInterleaved32", "ColumnMajorInterleaved32"],
help="Memory layout of input tensor A")
parser.add_argument('-aa', '--alignment_a', default=1,
type=int, help="Memory alignment of input tensor A")
# B
parser.add_argument('-lb', "--layout_b", default="RowMajor", type=str, choices=[
"RowMajor", "ColumnMajor", "RowMajorInterleaved32", "ColumnMajorInterleaved32"],
help="Memory layout of input tensor B")
parser.add_argument('-ab', '--alignment_b', default=1,
type=int, help="Memory alignment of input tensor B")
# C
parser.add_argument('-lc', "--layout_c", default="RowMajor", type=str, choices=[
"RowMajor", "ColumnMajor", "RowMajorInterleaved32", "ColumnMajorInterleaved32"],
help="Memory layout of input tensor C and output tensor D")
parser.add_argument('-ac', '--alignment_c', default=1,
type=int, help="Memory alignment of input tensor C and output tensor D")
# epilogue
parser.add_argument("-te", "--element_epilogue", default="float32", type=str,
choices=['float64', 'float32', 'float16', 'bfloat16'], help='Epilogue datatype')
parser.add_argument("-ep", "--epilogue_functor", default="LinearCombination",
type=str, choices=['LinearCombination', 'FastLinearCombinationClamp', 'LinearCombinationClamp'],
help="This option describes the epilogue part of the kernel")
# swizzling
parser.add_argument("-sw", "--swizzling_functor", default="IdentitySwizzle1", type=str, choices=[
"IdentitySwizzle1", "IdentitySwizzle2", "IdentitySwizzle4", "IdentitySwizzle8", "HorizontalSwizzle"],
help="This option describes how thread blocks are scheduled on GPU. \
NOTE: Threadblock swizzling is currently not supported by CUTLASS's grouped kernels. \
This parameter is passed in at present to match the APIs of other kernels. The parameter \
is unused within the kernel")
# precompute mode
parser.add_argument("-pm", "--precompute_mode",
default="Device", type=str, choices=["Host", "Device"],
                    help="Grouped GEMM scheduling on device only (Device) or using host precompute (Host)")
# arguments
parser.add_argument("-p", "--problem_size_dir", type=str, default="grouped_gemm_problem_size.csv",
                    help="path to the CSV file that contains the problem sizes")
parser.add_argument("-alpha", "--alpha", default=1.0, type=float, help="alpha")
parser.add_argument("-beta", "--beta", default=0.0, type=float, help="beta")
parser.add_argument('-bias', '--bias', action='store_true', help="C is bias vector")
# Activation function
parser.add_argument("-activ", "--activation_function", default="identity",
choices=["identity", "relu", "leaky_relu", "tanh", "sigmoid", "silu", "hardswish", "gelu"], help="activation function")
parser.add_argument("-activ_arg", "--activation_args", default=[], nargs="+", type=float,
                    help="additional arguments for the activation function")
parser.add_argument('--print_cuda', action="store_true",
help="print the underlying CUDA kernel")
try:
args = parser.parse_args()
except:
sys.exit(0)
cc = device_cc()
if args.compute_capability != cc:
    raise Exception(("Parameter --compute_capability of {} "
                     "does not match the compute capability of the active device ({}).").format(args.compute_capability, cc))
pycutlass.get_memory_pool(init_pool_size=2**30, max_pool_size=2**32)
np.random.seed(0)
element_a = getattr(cutlass_bindings, args.element_a)
element_b = getattr(cutlass_bindings, args.element_b)
element_c = getattr(cutlass_bindings, args.element_c)
element_acc = getattr(cutlass_bindings, args.element_acc)
math_operation = getattr(MathOperation, args.math)
opclass = getattr(cutlass_bindings.OpClass, args.opcode)
math_inst = MathInstruction(
args.instruction_shape, element_a, element_b,
element_acc, opclass, math_operation
)
tile_description = TileDescription(
args.threadblock_shape, args.stages, args.warp_count,
math_inst
)
layout_a = getattr(cutlass_bindings, args.layout_a)
layout_b = getattr(cutlass_bindings, args.layout_b)
layout_c = getattr(cutlass_bindings, args.layout_c)
A = TensorDescription(
element_a, layout_a, args.alignment_a
)
B = TensorDescription(
element_b, layout_b, args.alignment_b
)
C = TensorDescription(
element_c, layout_c, args.alignment_c
)
element_epilogue = getattr(cutlass_bindings, args.element_epilogue)
if args.activation_function == "identity":
epilogue_functor = getattr(pycutlass, args.epilogue_functor)(
C.element, C.alignment, math_inst.element_accumulator, element_epilogue)
else:
epilogue_functor = getattr(pycutlass, "LinearCombinationGeneric")(
getattr(pycutlass, args.activation_function)(element_epilogue),
C.element, C.alignment, math_inst.element_accumulator, element_epilogue)
swizzling_functor = getattr(cutlass_bindings, args.swizzling_functor)
precompute_mode = getattr(SchedulerMode, args.precompute_mode)
operation = GemmOperationGrouped(
arch=args.compute_capability, tile_description=tile_description,
A=A, B=B, C=C,
epilogue_functor=epilogue_functor, swizzling_functor=swizzling_functor,
precompute_mode=precompute_mode
)
if args.print_cuda:
print(operation.rt_module.emit())
pycutlass.compiler.add_module([operation, ])
reference_module = ReferenceModule(A, B, C)
# get problems
problem_sizes = []
with open(args.problem_size_dir) as csv_file:
reader = csv.reader(csv_file)
for row in reader:
problem_sizes.append(
cutlass_bindings.gemm.GemmCoord(int(row[0]), int(row[1]), int(row[2]))
)
problem_count = len(problem_sizes)
tensor_As = []
tensor_Bs = []
tensor_Cs = []
tensor_Ds = []
problem_sizes_coord = []
tensor_D_refs = []
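# For each problem in the group, randomly initialize operands and compute the host reference result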
for problem_size in problem_sizes:
if args.element_a != "int8":
if args.element_a == "bfloat16":
tensor_A = np.ceil(np.random.uniform(low=-8.5, high=7.5, size=(problem_size.m()
* problem_size.k(),))).astype(bfloat16)
else:
tensor_A = np.ceil(np.random.uniform(low=-8.5, high=7.5, size=(problem_size.m()
* problem_size.k(),))).astype(getattr(np, args.element_a))
else:
tensor_A = np.random.uniform(low=-2, high=2, size=(problem_size.m()
* problem_size.k(),)).astype(getattr(np, args.element_a))
if args.element_b != "int8":
if args.element_b == "bfloat16":
tensor_B = np.ceil(np.random.uniform(low=-8.5, high=7.5, size=(problem_size.k()
* problem_size.n(),))).astype(bfloat16)
else:
tensor_B = np.ceil(np.random.uniform(low=-8.5, high=7.5, size=(problem_size.k()
* problem_size.n(),))).astype(getattr(np, args.element_b))
else:
tensor_B = np.random.uniform(low=-2, high=2, size=(problem_size.k()
* problem_size.n(),)).astype(getattr(np, args.element_b))
if args.element_c != "int8":
if args.bias:
if args.layout_c == "RowMajor":
c_size = problem_size.n()
elif args.layout_c == "ColumnMajor":
c_size = problem_size.m()
else:
raise ValueError(args.layout_c)
else:
c_size = problem_size.m() * problem_size.n()
if args.element_c == "bfloat16":
tensor_C = np.ceil(
np.random.uniform(low=-8.5, high=7.5, size=(c_size,))
).astype(bfloat16)
else:
tensor_C = np.ceil(
np.random.uniform(low=-8.5, high=7.5, size=(c_size,))
).astype(getattr(np, args.element_c))
else:
tensor_C = np.random.uniform(
low=-2, high=2, size=(problem_size.m() * problem_size.n(),)
).astype(getattr(np, args.element_c))
tensor_D = np.zeros(
shape=(problem_size.m() * problem_size.n(),)
).astype(getattr(np, args.element_c))
tensor_As.append(tensor_A)
tensor_Bs.append(tensor_B)
tensor_Cs.append(tensor_C)
tensor_Ds.append(tensor_D)
tensor_D_ref = reference_module.run(
tensor_A, tensor_B, tensor_C, problem_size,
args.alpha, args.beta, args.bias)
tensor_D_ref = getattr(pycutlass, args.activation_function).numpy(*([tensor_D_ref,] + args.activation_args))
tensor_D_refs.append(tensor_D_ref)
problem_sizes_coord.append(problem_size)
arguments = GemmGroupedArguments(
operation, problem_sizes_coord, tensor_As, tensor_Bs, tensor_Cs, tensor_Ds,
output_op=operation.epilogue_type(*([args.alpha, args.beta] + args.activation_args))
)
operation.run(arguments)
arguments.sync()
for tensor_d, tensor_d_ref in zip(tensor_Ds, tensor_D_refs):
try:
assert np.array_equal(tensor_d, tensor_d_ref)
    except AssertionError:
assert np.allclose(tensor_d, tensor_d_ref, rtol=1e-5)
print("Passed.")
| cutlass-main | examples/40_cutlass_py/customizable/gemm_grouped.py |
from typing import List
import torch
import subprocess
import sys
import tempfile
import os
import numpy as np
TORCH_DTYPE_NAME = {
torch.float32: "f32",
torch.float16: "f16",
torch.bfloat16: "b16"
}
NAME_TORCH_DTYPE = {v: k for k, v in TORCH_DTYPE_NAME.items()}
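# Reinterpret a tensor's underlying storage as a different dtype (supports both pre-2.0 and 2.0+ PyTorch storage APIs)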
def _tensor_from_storage(tensor: torch.Tensor, dtype) -> torch.Tensor:
# PyTorch >= 2.0
if hasattr(tensor, 'untyped_storage'):
return torch.tensor([], dtype=dtype).set_(tensor.untyped_storage())
return torch.tensor([], dtype=dtype).set_(tensor.storage().untyped())
class PipedSubprocess:
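    """Runs a C++ example binary as a subprocess and exchanges tensors with it
    through a simple text protocol over stdin/stdout plus temporary files."""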
def __init__(self, binary: str) -> None:
self.binary = binary
self.tempdir_ctx = tempfile.TemporaryDirectory()
def __enter__(self) -> "PipedSubprocess":
self.subp = subprocess.Popen(self.binary, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=sys.stderr, text=True, bufsize=0)
self.tempdir = self.tempdir_ctx.__enter__()
self.file_counter = 0
return self
def __exit__(self, exc_type, exc_val, exc_tb) -> None:
self.tempdir_ctx.__exit__(exc_type, exc_val, exc_tb)
def temp_filename(self, suffix: str) -> str:
self.file_counter += 1
return os.path.join(self.tempdir, f"{self.file_counter}{suffix}")
def write(self, *args) -> None:
for a in args:
self.subp.stdin.write(str(a) + " ")
def writeTensor(self, tensor: torch.Tensor, name: str, stride_names: List[str]) -> None:
print(f"Py ->C++: {TORCH_DTYPE_NAME[tensor.dtype]}:{name}")
tensor_u8 = _tensor_from_storage(tensor, torch.uint8)
self.write("tensor_begin", f"{TORCH_DTYPE_NAME[tensor.dtype]}:{name}", tensor_u8.shape[0])
filename = self.temp_filename(f"{name}.tensor")
assert tensor.storage_offset() == 0
with open(filename, "wb+") as fd:
fd.write(bytes(tensor_u8.numpy()))
self.write("file", filename)
self.write("tensor_end")
for stride_name, stride_value in zip(stride_names, tensor.stride()):
self.write(stride_name, stride_value)
def readTensor(self, name, stride_name, shape) -> torch.Tensor:
tmpfile = self.temp_filename(f"{name}.tensor")
self.write("tmpfile", tmpfile)
self.readExpect("tensor_begin")
dtype_str, name = self.read().split(":")
print(f"C++->Py : {dtype_str}:{name}")
u8len = int(self.read())
dtype = NAME_TORCH_DTYPE[dtype_str]
self.readExpect("file")
self.readExpect(tmpfile)
with open(tmpfile, "rb") as fd:
data = fd.read(u8len)
# `np.array` is not strictly needed, but avoids a torch warning
tensor_u8 = torch.frombuffer(np.array(data), dtype=torch.uint8, count=u8len)
self.readExpect("tensor_end")
tensor = _tensor_from_storage(tensor_u8, dtype)
strides = []
for sn in stride_name:
self.readExpect(sn)
strides.append(int(self.read()))
        if len(strides) != len(shape):
strides.append(1)
assert len(strides) == len(shape), name
return torch.as_strided(tensor, shape, strides)
def readNamed(self, name: str):
self.readExpect(name)
return self.read()
def readExpect(self, what: str) -> None:
r = self.read()
if r != what:
raise ValueError(f"Read {r} but expected {what}")
def read(self):
read_all = []
# Skip initial whitespace
while True:
r = self.subp.stdout.read(1)
if r not in [' ', "\n"]:
read_all.append(r)
break
# Read data
while True:
r = self.subp.stdout.read(1)
if r in [' ', "\n"]:
break
read_all.append(r)
return ''.join(read_all)
| cutlass-main | examples/41_fused_multi_head_attention/piped_subprocess.py |
import argparse
import torch
import sys
import os
from piped_subprocess import PipedSubprocess, TORCH_DTYPE_NAME
import math
parser = argparse.ArgumentParser()
parser.add_argument("example_exe", type=str, help="Path to the 41_fused_multi_head_attention_backward executable")
args = parser.parse_args()
torch.manual_seed(0)
dtype = torch.float16
B, Mq, Mkv, H, K, Kv = 2, 1024, 1024, 5, 128, 128
causal = True
repeat_count = 100
ATOL = {
torch.float: 5e-4,
torch.half: 9.5e-2,
torch.bfloat16: 7e-1,
}[dtype]
RTOL = {
torch.float: 1e-4,
torch.half: 2e-2,
torch.bfloat16: 1e-1,
}[dtype]
assert not (causal and Mq < Mkv), "causal only supports seqlenK <= seqlenQ"
fmha_bw_binary = args.example_exe
if not os.path.isfile(fmha_bw_binary):
print(f"""No such file: `{fmha_bw_binary}`\nDid you forget to run "make 41_fused_multi_head_attention"?""")
sys.exit(1)
def create_lower_triangular_mask():
return torch.triu(torch.full( # type: ignore
[1, Mq, Mkv],
dtype=dtype,
fill_value=float("-inf"),
), diagonal=1)
def ref_mha_bmk(q, k, v, mask):
# Multi-head attention with inputs/outputs in BMK format
q = q.float()
k = k.float()
v = v.float()
q = q * (1 / q.shape[-1] ** 0.5)
attn = q @ k.transpose(-2, -1)
if mask is not None:
attn += mask
attn_max = attn.max(-1, True).values
attn_norm = (attn - attn_max).exp().sum(-1, True)
attn = attn.softmax(-1)
lse = attn_max + attn_norm.log()
lse = lse.squeeze(2)
return attn @ v, lse
def bmhk2bmk(t):
return t.permute((0, 2, 1, 3)).reshape(
[t.shape[0] * t.shape[2], t.shape[1], t.shape[3]]
)
def ref_mha_bmhk(q, k, v, mask):
# Multi-head attention with inputs/outputs in BMHK format
assert q.ndim == 4
out, lse = ref_mha_bmk(bmhk2bmk(q), bmhk2bmk(k), bmhk2bmk(v), mask=mask)
out = out.reshape([q.shape[0], q.shape[2], q.shape[1], v.shape[3]])
return out.permute((0, 2, 1, 3)), lse.reshape([q.shape[0], q.shape[2], q.shape[1]])
def ref_mha_bw_bmhk(q, k, v, mask, lse, out, grad_out, delta):
lse = lse[:, :, :q.shape[1]] #BMH, unpad Q dimension
delta = delta.reshape([-1, delta.shape[-1], 1])
# bmhk -> bmk
q, k, v, out, grad_out = [bmhk2bmk(x).float() for x in (q, k, v, out, grad_out)]
attn_T = k @ q.transpose(-2, -1)
if mask is not None:
attn_T += mask.transpose(-2, -1)
attn_T = attn_T * (1 / q.shape[-1] ** 0.5)
attn_T = attn_T - lse.reshape([-1, 1, lse.shape[-1]])
attn_T = attn_T.exp()
grad_v = attn_T @ grad_out
dov = grad_out @ v.transpose(-2, -1)
tmp = (dov - delta) * attn_T.transpose(-2, -1)
tmp = tmp / (q.shape[-1] ** 0.5)
grad_q = tmp @ k
grad_k = tmp.transpose(-2, -1) @ q
return [x.reshape([B, H, x.shape[1], x.shape[-1]]).permute([0, 2, 1, 3]) for x in [grad_q, grad_k, grad_v]]
print("initializing tensors...")
query = torch.randn([B, Mq, H, K], dtype=dtype)
key = 3 * torch.randn([B, Mkv, H, K], dtype=dtype)
value = 3 * torch.randn([B, Mkv, H, Kv], dtype=dtype)
mask = create_lower_triangular_mask() if causal else None
# let PyTorch compute gradients
query.requires_grad_(True)
key.requires_grad_(True)
value.requires_grad_(True)
print("computing fw...")
out, lse = ref_mha_bmhk(query, key, value, mask=mask)
out = out.to(dtype).contiguous()
grad_out = 3 * torch.randn([B, Mq, H, Kv], dtype=dtype)
print("computing bw with autograd...")
out.backward(grad_out)
scale = (1 / query.shape[-1] ** 0.5)
# Additional data needed by the kernel
delta = (grad_out.float() * out.float()).sum(-1).transpose(-2, -1).contiguous()
pad_amount = (32 - (lse.shape[2] % 32)) % 32
lse = torch.nn.functional.pad(lse, [0, pad_amount], value=math.inf)
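# The logsumexp rows are padded with +inf up to a multiple of 32 along the query
# dimension (presumably an alignment requirement of the kernel); the reference
# implementation slices this padding off again inside ref_mha_bw_bmhk.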
print("computing bw with reference implem...")
gQr, gKr, gVr = ref_mha_bw_bmhk(query, key, value, mask, lse, out, grad_out, delta)
with PipedSubprocess(fmha_bw_binary) as bw_kernel:
# Send kernel arguments
bw_kernel.write(
TORCH_DTYPE_NAME[query.dtype],
"scale", scale,
"head_dim", K,
"head_dim_value", Kv,
"num_queries", Mq,
"num_keys", Mkv,
"num_heads", H,
"custom_mask_type", (1 if causal else 0),
"num_batches", B,
"repeat_count", repeat_count,
"num_splits_key", (Mkv // 128),
)
bw_kernel.writeTensor(query, "query", ["q_strideB", "q_strideM", "q_strideH"])
bw_kernel.writeTensor(key, "key", ["k_strideB", "k_strideM", "k_strideH"])
bw_kernel.writeTensor(value, "value", ["v_strideB", "v_strideM", "v_strideH"])
bw_kernel.writeTensor(lse, "logsumexp", ["lse_strideB", "lse_strideH"])
bw_kernel.writeTensor(out, "output", ["o_strideB", "o_strideM", "o_strideH"])
bw_kernel.writeTensor(grad_out, "grad_output", ["gO_strideB", "gO_strideM", "gO_strideH"])
bw_kernel.writeTensor(delta, "delta", ["delta_strideB", "delta_strideH"])
if bw_kernel.read() != "OK":
print("Got unexpected output")
print(bw_kernel.subp.communicate()[0])
        sys.exit(1)  # non-zero exit: the handshake with the kernel binary failed
# Read kernel output
gQ = bw_kernel.readTensor("grad_query", ["gQ_strideB", "gQ_strideM", "gQ_strideH"], query.shape).float()
gK = bw_kernel.readTensor("grad_key", ["gK_strideB", "gK_strideM", "gK_strideH"], key.shape).float()
gV = bw_kernel.readTensor("grad_value", ["gV_strideB", "gV_strideM", "gV_strideH"], value.shape).float()
runtime_ms = float(bw_kernel.readNamed("runtime_ms"))
    float_ops = B * H * sum([
        # recompute attention: attn_T = K @ Q^T
        Mq * Mkv * K * 2,
        # grad_v = attn_T @ grad_out
        Mkv * Mq * Kv * 2,
        # dov = grad_out @ V^T
        Mq * Kv * Mkv * 2,
        # grad_q = tmp @ K  (tmp as in ref_mha_bw_bmhk)
        Mq * K * Mkv * 2,
        # grad_k = tmp^T @ Q
        Mq * K * Mkv * 2,
    ])
if causal:
float_ops //= 2
print(f"""
Fused multi-head attention - backward
batch_size={B}
num_queries={Mq}
num_keys={Mkv}
num_heads={H}
head_dim={K}
head_dim_value={Kv}
Correctness:
grad_query: {"PASS" if torch.allclose(gQ, gQr, rtol=RTOL, atol=ATOL) else "FAIL"} (delta: {(gQ - gQr).abs().max()})
grad_key: {"PASS" if torch.allclose(gK, gKr, rtol=RTOL, atol=ATOL) else "FAIL"} (delta: {(gK - gKr).abs().max()})
grad_value: {"PASS" if torch.allclose(gV, gVr, rtol=RTOL, atol=ATOL) else "FAIL"} (delta: {(gV - gVr).abs().max()})
(atol={ATOL} / rtol={RTOL})
Runtime: {runtime_ms}ms ({(float_ops / (1024 ** 4)) / (runtime_ms / 1000):.4f} TFlops)
""")
assert torch.allclose(query.grad.float(), gQr, rtol=RTOL, atol=ATOL), "Reference implementation does not match PyTorch autograd!"
assert torch.allclose(key.grad.float(), gKr, rtol=RTOL, atol=ATOL), "Reference implementation does not match PyTorch autograd!"
assert torch.allclose(value.grad.float(), gVr, rtol=RTOL, atol=ATOL), "Reference implementation does not match PyTorch autograd!"
| cutlass-main | examples/41_fused_multi_head_attention/fmha_backward_test.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: riva_nlp.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='riva_nlp.proto',
package='nvidia.riva.nlp',
syntax='proto3',
serialized_options=_b('\370\001\001'),
serialized_pb=_b('\n\x10riva_nlp.proto\x12\x11nvidia.riva.nlp\"D\n\x13NaturalQueryRequest\x12\r\n\x05query\x18\x01 \x01(\t\x12\r\n\x05top_n\x18\x02 \x01(\r\x12\x0f\n\x07\x63ontext\x18\x03 \x01(\t\"3\n\x12NaturalQueryResult\x12\x0e\n\x06\x61nswer\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\"N\n\x14NaturalQueryResponse\x12\x36\n\x07results\x18\x01 \x03(\x0b\x32%.nvidia.riva.nlp.NaturalQueryResult2n\n\tRivaNLP\x12\x61\n\x0cNaturalQuery\x12&.nvidia.riva.nlp.NaturalQueryRequest\x1a\'.nvidia.riva.nlp.NaturalQueryResponse\"\x00\x42\x03\xf8\x01\x01\x62\x06proto3')
)
_NATURALQUERYREQUEST = _descriptor.Descriptor(
name='NaturalQueryRequest',
full_name='nvidia.riva.nlp.NaturalQueryRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='query', full_name='nvidia.riva.nlp.NaturalQueryRequest.query', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='top_n', full_name='nvidia.riva.nlp.NaturalQueryRequest.top_n', index=1,
number=2, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='context', full_name='nvidia.riva.nlp.NaturalQueryRequest.context', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=39,
serialized_end=107,
)
_NATURALQUERYRESULT = _descriptor.Descriptor(
name='NaturalQueryResult',
full_name='nvidia.riva.nlp.NaturalQueryResult',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='answer', full_name='nvidia.riva.nlp.NaturalQueryResult.answer', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='score', full_name='nvidia.riva.nlp.NaturalQueryResult.score', index=1,
number=2, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=109,
serialized_end=160,
)
_NATURALQUERYRESPONSE = _descriptor.Descriptor(
name='NaturalQueryResponse',
full_name='nvidia.riva.nlp.NaturalQueryResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='results', full_name='nvidia.riva.nlp.NaturalQueryResponse.results', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=162,
serialized_end=240,
)
_NATURALQUERYRESPONSE.fields_by_name['results'].message_type = _NATURALQUERYRESULT
DESCRIPTOR.message_types_by_name['NaturalQueryRequest'] = _NATURALQUERYREQUEST
DESCRIPTOR.message_types_by_name['NaturalQueryResult'] = _NATURALQUERYRESULT
DESCRIPTOR.message_types_by_name['NaturalQueryResponse'] = _NATURALQUERYRESPONSE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
NaturalQueryRequest = _reflection.GeneratedProtocolMessageType('NaturalQueryRequest', (_message.Message,), dict(
DESCRIPTOR = _NATURALQUERYREQUEST,
__module__ = 'riva_nlp_pb2'
# @@protoc_insertion_point(class_scope:nvidia.riva.nlp.NaturalQueryRequest)
))
_sym_db.RegisterMessage(NaturalQueryRequest)
NaturalQueryResult = _reflection.GeneratedProtocolMessageType('NaturalQueryResult', (_message.Message,), dict(
DESCRIPTOR = _NATURALQUERYRESULT,
__module__ = 'riva_nlp_pb2'
# @@protoc_insertion_point(class_scope:nvidia.riva.nlp.NaturalQueryResult)
))
_sym_db.RegisterMessage(NaturalQueryResult)
NaturalQueryResponse = _reflection.GeneratedProtocolMessageType('NaturalQueryResponse', (_message.Message,), dict(
DESCRIPTOR = _NATURALQUERYRESPONSE,
__module__ = 'riva_nlp_pb2'
# @@protoc_insertion_point(class_scope:nvidia.riva.nlp.NaturalQueryResponse)
))
_sym_db.RegisterMessage(NaturalQueryResponse)
DESCRIPTOR._options = None
_RIVANLP = _descriptor.ServiceDescriptor(
name='RivaNLP',
full_name='nvidia.riva.nlp.RivaNLP',
file=DESCRIPTOR,
index=0,
serialized_options=None,
serialized_start=242,
serialized_end=352,
methods=[
_descriptor.MethodDescriptor(
name='NaturalQuery',
full_name='nvidia.riva.nlp.RivaNLP.NaturalQuery',
index=0,
containing_service=None,
input_type=_NATURALQUERYREQUEST,
output_type=_NATURALQUERYRESPONSE,
serialized_options=None,
),
])
_sym_db.RegisterServiceDescriptor(_RIVANLP)
DESCRIPTOR.services_by_name['RivaNLP'] = _RIVANLP
# @@protoc_insertion_point(module_scope)
| speechsquad-master | reference/qa/jarvis_nlp_pb2.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from concurrent import futures
from transformers import pipeline, AutoModelForQuestionAnswering, AutoTokenizer
import multiprocessing
import time
import math
import logging
import argparse
import grpc
import riva_nlp_pb2
import riva_nlp_pb2_grpc
def get_args():
    parser = argparse.ArgumentParser(description="Riva Question Answering server sample")
parser.add_argument("--listen", default="[::]:50052", type=str, help="Address to listen to")
parser.add_argument("--model-name", default="twmkn9/bert-base-uncased-squad2", type=str, help="pretrained HF model to use")
parser.add_argument("--model-cache", default="/data/models", type=str, help="path to location to store downloaded checkpoints")
return parser.parse_args()
class RivaNLPServicer(riva_nlp_pb2_grpc.RivaNLPServicer):
def __init__(self, model_name, cache=None):
tokenizer = AutoTokenizer.from_pretrained(model_name, cache_dir=cache)
model = AutoModelForQuestionAnswering.from_pretrained(model_name, cache_dir=cache)
self.model = pipeline('question-answering',
model=model, tokenizer=tokenizer)
print(f"Model loaded, serving: {model_name}")
def NaturalQuery(self, request, context):
"""NaturalQuery is a search function that enables querying one or more documents
or contexts with a query that is written in natural language.
"""
result = self.model({
'question': str(request.query),
'context': str(request.context)
}, handle_impossible_answer=True)
response = riva_nlp_pb2.NaturalQueryResponse()
response.results.append(riva_nlp_pb2.NaturalQueryResult(answer=result['answer'], score=result['score']))
return response
def serve(uri="[::]:50051", model="twmkn9/distilbert-base-uncased-squad2", model_cache=None):
server = grpc.server(futures.ThreadPoolExecutor(max_workers=multiprocessing.cpu_count()))
riva_nlp_pb2_grpc.add_RivaNLPServicer_to_server(
RivaNLPServicer(model, cache=model_cache), server)
    server.add_insecure_port(uri)
server.start()
server.wait_for_termination()
if __name__ == '__main__':
args = get_args()
logging.basicConfig()
serve(uri=args.listen, model=args.model_name, model_cache=args.model_cache)
| speechsquad-master | reference/qa/serve.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
import riva_nlp_pb2 as riva__nlp__pb2
class RivaNLPStub(object):
"""Riva NLP Services implement task-specific APIs for popular NLP tasks including
intent recognition (as well as slot filling), and entity extraction.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.NaturalQuery = channel.unary_unary(
'/nvidia.riva.nlp.RivaNLP/NaturalQuery',
request_serializer=riva__nlp__pb2.NaturalQueryRequest.SerializeToString,
response_deserializer=riva__nlp__pb2.NaturalQueryResponse.FromString,
)
class RivaNLPServicer(object):
"""Riva NLP Services implement task-specific APIs for popular NLP tasks including
intent recognition (as well as slot filling), and entity extraction.
"""
def NaturalQuery(self, request, context):
"""NaturalQuery is a search function that enables querying one or more documents
or contexts with a query that is written in natural language.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_RivaNLPServicer_to_server(servicer, server):
rpc_method_handlers = {
'NaturalQuery': grpc.unary_unary_rpc_method_handler(
servicer.NaturalQuery,
request_deserializer=riva__nlp__pb2.NaturalQueryRequest.FromString,
response_serializer=riva__nlp__pb2.NaturalQueryResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'nvidia.riva.nlp.RivaNLP', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
| speechsquad-master | reference/qa/jarvis_nlp_pb2_grpc.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import grpc
import time
import argparse
import fastcounter
import riva_nlp_pb2 as jnlp
import riva_nlp_pb2_grpc as jnlp_srv
def get_args():
parser = argparse.ArgumentParser(description="Riva Question Answering client sample")
parser.add_argument("--riva-uri", default="localhost:50052", type=str, help="URI to access Riva server")
parser.add_argument("--iterations", default=10, type=int, help="number of queries to make")
return parser.parse_args()
args = get_args()
grpc_server = args.riva_uri
channel = grpc.insecure_channel(grpc_server)
riva_nlp = jnlp_srv.RivaNLPStub(channel)
ok_counter = fastcounter.Counter()
bad_counter = fastcounter.Counter()
def process_response(call_future):
# print(call_future.exception())
# print(call_future.result())
if call_future.exception():
bad_counter.increment()
else:
ok_counter.increment()
def run(iterations):
req = jnlp.NaturalQueryRequest()
req.query = "who discovered coronavirus?"
test_context = """
Coronaviruses were first discovered in the 1930s when an acute respiratory infection of domesticated chickens was shown
to be caused by infectious bronchitis virus (IBV).[14] Arthur Schalk and M.C. Hawn described in 1931 a new respiratory
infection of chickens in North Dakota. The infection of new-born chicks was characterized by gasping and listlessness.
The chicks' mortality rate was 40–90%.[15] Fred Beaudette and Charles Hudson six years later successfully isolated and
cultivated the infectious bronchitis virus which caused the disease.[16] In the 1940s, two more animal coronaviruses,
mouse hepatitis virus (MHV) and transmissible gastroenteritis virus (TGEV), were isolated.[17] It was not realized at
the time that these three different viruses were related.[18]
Human coronaviruses were discovered in the 1960s.[19][20] They were isolated using two different methods in the United
Kingdom and the United States.[21] E.C. Kendall, Malcom Byone, and David Tyrrell working at the Common Cold Unit of the
British Medical Research Council in 1960 isolated from a boy a novel common cold virus B814.[22][23][24] The virus was
not able to be cultivated using standard techniques which had successfully cultivated rhinoviruses, adenoviruses and
other known common cold viruses. In 1965, Tyrrell and Byone successfully cultivated the novel virus by serially passing
it through organ culture of human embryonic trachea.[25] The new cultivating method was introduced to the lab by Bertil
Hoorn.[26] The isolated virus when intranasally inoculated into volunteers caused a cold and was inactivated by ether
which indicated it had a lipid envelope.[22][27] Around the same time, Dorothy Hamre[28] and John Procknow at the
University of Chicago isolated a novel cold virus 229E from medical students, which they grew in kidney tissue culture.
The novel virus 229E, like the virus strain B814, when inoculated into volunteers caused a cold and was inactivated by
ether.[29] """
req.context = test_context
for x in range(iterations):
resp_future = riva_nlp.NaturalQuery.future(req)
resp_future.add_done_callback(process_response)
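# The requests above are fire-and-forget futures: results are tallied in the
# done-callbacks, and the main block below busy-waits until every request has
# finished (OK or failed) before printing the totals.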
if __name__ == '__main__':
start_time = time.time()
    run(args.iterations)
    while (ok_counter.value + bad_counter.value) != args.iterations:
time.sleep(0.01)
print(f"total time: {time.time()-start_time}, ok: {ok_counter.value}, fail: {bad_counter.value}")
| speechsquad-master | reference/qa/test_qa.py |