# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
###########################################################################
# Example Sim Rigid Gyroscopic
#
# Demonstrates the Dzhanibekov effect, in which a rigid body spinning about
# its unstable intermediate axis tumbles in free space.
#
###########################################################################
import os
import warp as wp
import warp.sim
import warp.sim.render
wp.init()
class Example:
def __init__(self, stage):
self.sim_steps = 2000
self.sim_dt = 1.0 / 120.0
self.sim_time = 0.0
self.scale = 0.5
builder = wp.sim.ModelBuilder()
b = builder.add_body()
# axis shape
builder.add_shape_box(
pos=(0.3 * self.scale, 0.0, 0.0),
hx=0.25 * self.scale,
hy=0.1 * self.scale,
hz=0.1 * self.scale,
density=100.0,
body=b,
)
# tip shape
builder.add_shape_box(
pos=(0.0, 0.0, 0.0), hx=0.05 * self.scale, hy=0.2 * self.scale, hz=1.0 * self.scale, density=100.0, body=b
)
# initial spin
builder.body_qd[0] = (25.0, 0.01, 0.01, 0.0, 0.0, 0.0)
builder.gravity = 0.0
self.model = builder.finalize()
self.model.ground = False
self.integrator = wp.sim.SemiImplicitIntegrator()
self.state = self.model.state()
self.renderer = wp.sim.render.SimRenderer(self.model, stage, scaling=100.0)
def update(self):
with wp.ScopedTimer("simulate", active=True):
self.state.clear_forces()
self.state = self.integrator.simulate(self.model, self.state, self.state, self.sim_dt)
def render(self, is_live=False):
with wp.ScopedTimer("render", active=True):
time = 0.0 if is_live else self.sim_time
self.renderer.begin_frame(time)
self.renderer.render(self.state)
self.renderer.end_frame()
self.sim_time += self.sim_dt
if __name__ == "__main__":
stage_path = os.path.join(os.path.dirname(__file__), "outputs/example_sim_rigid_gyroscopic.usd")
example = Example(stage_path)
for i in range(example.sim_steps):
example.update()
example.render()
example.renderer.save()
# --- end of file: warp-main/examples/example_sim_rigid_gyroscopic.py ---
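
A headless variant of the example above is handy for quick numerical checks without writing USD output. The sketch below is illustrative, not part of the repo: it rebuilds the same spinning body and prints its final pose; reading the pose from state.body_q is an assumption based on wp.sim.State conventions.

import warp as wp
import warp.sim

wp.init()

# Rebuild the gyroscopic body without a renderer (mirrors Example.__init__ above, scale=0.5).
builder = wp.sim.ModelBuilder()
b = builder.add_body()
builder.add_shape_box(pos=(0.15, 0.0, 0.0), hx=0.125, hy=0.05, hz=0.05, density=100.0, body=b)
builder.add_shape_box(pos=(0.0, 0.0, 0.0), hx=0.025, hy=0.1, hz=0.5, density=100.0, body=b)
builder.body_qd[0] = (25.0, 0.01, 0.01, 0.0, 0.0, 0.0)  # spin mostly about the unstable axis
builder.gravity = 0.0

model = builder.finalize()
model.ground = False

integrator = wp.sim.SemiImplicitIntegrator()
state = model.state()

for _ in range(2000):
    state.clear_forces()
    state = integrator.simulate(model, state, state, 1.0 / 120.0)

print(state.body_q.numpy()[0])  # final pose: position (3 floats) + quaternion (4 floats)
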
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
###########################################################################
# Example Sim Neo-Hookean
#
# Shows a simulation of a Neo-Hookean FEM beam being twisted through a
# 180-degree rotation.
#
###########################################################################
import os
import math
import warp as wp
import warp.sim
import warp.sim.render
wp.init()
class Example:
def __init__(self, stage):
self.sim_width = 8
self.sim_height = 8
self.sim_fps = 60.0
self.sim_substeps = 64
self.sim_duration = 5.0
self.sim_frames = int(self.sim_duration * self.sim_fps)
self.sim_dt = (1.0 / self.sim_fps) / self.sim_substeps
self.sim_time = 0.0
self.sim_render = True
self.sim_iterations = 1
self.sim_relaxation = 1.0
self.lift_speed = 2.5 / self.sim_duration * 2.0 # from Smith et al.
self.rot_speed = math.pi / self.sim_duration
builder = wp.sim.ModelBuilder()
cell_dim = 15
cell_size = 2.0 / cell_dim
center = cell_size * cell_dim * 0.5
builder.add_soft_grid(
pos=(-center, 0.0, -center),
rot=wp.quat_identity(),
vel=(0.0, 0.0, 0.0),
dim_x=cell_dim,
dim_y=cell_dim,
dim_z=cell_dim,
cell_x=cell_size,
cell_y=cell_size,
cell_z=cell_size,
density=100.0,
fix_bottom=True,
fix_top=True,
k_mu=1000.0,
k_lambda=5000.0,
k_damp=0.0,
)
self.model = builder.finalize()
self.model.ground = False
self.model.gravity[1] = 0.0
self.integrator = wp.sim.SemiImplicitIntegrator()
self.rest = self.model.state()
self.rest_vol = (cell_size * cell_dim) ** 3
self.state_0 = self.model.state()
self.state_1 = self.model.state()
self.volume = wp.zeros(1, dtype=wp.float32)
self.renderer = wp.sim.render.SimRenderer(self.model, stage, scaling=20.0)
def update(self):
with wp.ScopedTimer("simulate"):
xform = wp.transform(
(0.0, self.lift_speed * self.sim_time, 0.0),
wp.quat_from_axis_angle((0.0, 1.0, 0.0), self.rot_speed * self.sim_time),
)
wp.launch(
kernel=self.twist_points,
dim=len(self.state_0.particle_q),
inputs=[self.rest.particle_q, self.state_0.particle_q, self.model.particle_mass, xform],
)
for s in range(self.sim_substeps):
self.state_0.clear_forces()
self.state_1.clear_forces()
self.integrator.simulate(self.model, self.state_0, self.state_1, self.sim_dt)
self.sim_time += self.sim_dt
# swap states
(self.state_0, self.state_1) = (self.state_1, self.state_0)
self.volume.zero_()
wp.launch(
kernel=self.compute_volume,
dim=self.model.tet_count,
inputs=[self.state_0.particle_q, self.model.tet_indices, self.volume],
)
def render(self, is_live=False):
with wp.ScopedTimer("render"):
time = 0.0 if is_live else self.sim_time
self.renderer.begin_frame(time)
self.renderer.render(self.state_0)
self.renderer.end_frame()
@wp.kernel
def twist_points(
rest: wp.array(dtype=wp.vec3), points: wp.array(dtype=wp.vec3), mass: wp.array(dtype=float), xform: wp.transform
):
tid = wp.tid()
r = rest[tid]
p = points[tid]
m = mass[tid]
# twist the top layer of particles in the beam
if m == 0 and p[1] != 0.0:
points[tid] = wp.transform_point(xform, r)
@wp.kernel
def compute_volume(points: wp.array(dtype=wp.vec3), indices: wp.array2d(dtype=int), volume: wp.array(dtype=float)):
tid = wp.tid()
i = indices[tid, 0]
j = indices[tid, 1]
k = indices[tid, 2]
l = indices[tid, 3]
x0 = points[i]
x1 = points[j]
x2 = points[k]
x3 = points[l]
x10 = x1 - x0
x20 = x2 - x0
x30 = x3 - x0
v = wp.dot(x10, wp.cross(x20, x30)) / 6.0
wp.atomic_add(volume, 0, v)
if __name__ == "__main__":
stage_path = os.path.join(os.path.dirname(__file__), "outputs/example_sim_neo_hookean.usd")
example = Example(stage_path)
for i in range(example.sim_frames):
example.update()
example.render()
example.renderer.save()
# --- end of file: warp-main/examples/example_sim_neo_hookean.py ---
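
Since update() re-accumulates the deformed tetrahedral volume into self.volume every frame, a driver loop can track how well the Neo-Hookean material preserves volume during the twist. A minimal sketch, assuming stage_path is set up as in the __main__ block above:

example = Example(stage_path)
for i in range(example.sim_frames):
    example.update()
    example.render()
    vol = example.volume.numpy()[0]        # summed tet volumes from compute_volume
    drift = vol / example.rest_vol - 1.0   # relative deviation from the rest volume
    print(f"frame {i:3d}: volume drift {drift:+.3%}")
example.renderer.save()
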
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
###########################################################################
# Example Sim SDF Shape
#
# Shows how to set up a particle-based granular material colliding against
# signed-distance-field (SDF) shapes using the wp.sim.ModelBuilder().
#
###########################################################################
import os
import warp as wp
import warp.sim
import warp.sim.render
import math
import numpy as np
wp.init()
class Example:
def __init__(self, stage):
self.frame_dt = 1.0 / 60
self.frame_count = 400
self.sim_substeps = 64
self.sim_dt = self.frame_dt / self.sim_substeps
self.sim_steps = self.frame_count * self.sim_substeps
self.sim_time = 0.0
self.radius = 0.1
builder = wp.sim.ModelBuilder()
builder.default_particle_radius = self.radius
builder.add_particle_grid(
dim_x=16,
dim_y=32,
dim_z=16,
cell_x=self.radius * 2.0,
cell_y=self.radius * 2.0,
cell_z=self.radius * 2.0,
pos=(0.0, 20.0, 0.0),
rot=wp.quat_identity(),
vel=(2.0, 0.0, 0.0),
mass=0.1,
jitter=self.radius * 0.1,
)
rock_vdb = wp.Volume.load_from_nvdb(
open(os.path.join(os.path.dirname(__file__), "assets/rocks.nvdb"), "rb").read())
rock_sdf = wp.sim.SDF(rock_vdb)
s = builder.add_shape_sdf(
ke=1.0e4,
kd=1000.0,
kf=1000.0,
mu=0.5,
sdf=rock_sdf,
body=-1,
pos=(0.0, 0.0, 0.0),
rot=wp.quat_from_axis_angle((1.0, 0.0, 0.0), -0.5 * math.pi),
scale=(0.01, 0.01, 0.01))
mins = np.array([-3.0, -3.0, -3.0])
voxel_size = 0.2
maxs = np.array([3.0, 3.0, 3.0])
nums = np.ceil((maxs - mins) / (voxel_size)).astype(dtype=int)
center = np.array([0.0, 0.0, 0.0])
rad = 2.5
sphere_sdf_np = np.zeros(tuple(nums))
for x in range(nums[0]):
for y in range(nums[1]):
for z in range(nums[2]):
pos = mins + voxel_size * np.array([x, y, z])
dis = np.linalg.norm(pos - center)
sphere_sdf_np[x, y, z] = dis - rad
sphere_vdb = wp.Volume.load_from_numpy(sphere_sdf_np, mins, voxel_size, rad + 3.0 * voxel_size)
sphere_sdf = wp.sim.SDF(sphere_vdb)
self.sphere_pos = (3.0, 15.0, 0.0)
self.sphere_scale = 1.0
self.sphere_radius = rad
s = builder.add_shape_sdf(
ke=1.0e4,
kd=1000.0,
kf=1000.0,
mu=0.5,
sdf=sphere_sdf,
body=-1,
pos=self.sphere_pos,
scale=(self.sphere_scale, self.sphere_scale, self.sphere_scale))
self.model = builder.finalize()
self.model.particle_kf = 25.0
self.model.soft_contact_kd = 100.0
self.model.soft_contact_kf *= 2.0
self.state_0 = self.model.state()
self.state_1 = self.model.state()
self.integrator = wp.sim.SemiImplicitIntegrator()
self.renderer = wp.sim.render.SimRenderer(self.model, stage, scaling=20.0)
def update(self):
with wp.ScopedTimer("simulate", active=True):
self.model.particle_grid.build(self.state_0.particle_q, self.radius * 2.0)
for s in range(self.sim_substeps):
self.state_0.clear_forces()
wp.sim.collide(self.model, self.state_0)
self.integrator.simulate(self.model, self.state_0, self.state_1, self.sim_dt)
# swap states
(self.state_0, self.state_1) = (self.state_1, self.state_0)
def render(self, is_live=False):
with wp.ScopedTimer("render", active=True):
time = 0.0 if is_live else self.sim_time
self.renderer.begin_frame(time)
# Note the extra wp.quat_from_axis_angle((1.0, 0.0, 0.0), math.pi) is because .usd is oriented differently from .nvdb
self.renderer.render_ref(
name="collision",
path=os.path.join(os.path.dirname(__file__), "assets/rocks.usd"),
pos=(0.0, 0.0, 0.0),
rot=wp.quat_from_axis_angle((1.0, 0.0, 0.0), -0.5 * math.pi) *
wp.quat_from_axis_angle((1.0, 0.0, 0.0), math.pi),
scale=(0.01, 0.01, 0.01),
)
self.renderer.render_sphere(name="sphere", pos=self.sphere_pos,
radius=self.sphere_scale * self.sphere_radius, rot=(0.0, 0.0, 0.0, 1.0))
self.renderer.render(self.state_0)
self.renderer.end_frame()
self.sim_time += self.frame_dt
if __name__ == "__main__":
stage_path = os.path.join(os.path.dirname(__file__), "outputs/example_sim_sdf_shape.usd")
example = Example(stage_path)
for i in range(example.frame_count):
example.update()
example.render()
example.renderer.save()
# --- end of file: warp-main/examples/example_sim_sdf_shape.py ---
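
The triple loop that fills sphere_sdf_np above is easy to vectorize; this sketch builds the same dense sphere SDF with NumPy broadcasting, using the same mins, voxel_size, and extents (identical values, much faster construction):

import numpy as np

mins = np.array([-3.0, -3.0, -3.0])
maxs = np.array([3.0, 3.0, 3.0])
voxel_size = 0.2
rad = 2.5

nums = np.ceil((maxs - mins) / voxel_size).astype(int)
ii, jj, kk = np.meshgrid(*(np.arange(n) for n in nums), indexing="ij")
pos = mins + voxel_size * np.stack((ii, jj, kk), axis=-1)  # sample positions, shape (nx, ny, nz, 3)
sphere_sdf_np = np.linalg.norm(pos, axis=-1) - rad         # signed distance to a sphere at the origin
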
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
###########################################################################
# Example Marching Cubes
#
# Shows how to use the built-in marching cubes functionality to extract
# the iso-surface from a density field.
#
###########################################################################
import warp as wp
import warp.render
import os
import math
wp.init()
# signed sphere
@wp.func
def sdf_sphere(p: wp.vec3, r: float):
return wp.length(p) - r
# signed box
@wp.func
def sdf_box(upper: wp.vec3, p: wp.vec3):
qx = wp.abs(p[0]) - upper[0]
qy = wp.abs(p[1]) - upper[1]
qz = wp.abs(p[2]) - upper[2]
e = wp.vec3(wp.max(qx, 0.0), wp.max(qy, 0.0), wp.max(qz, 0.0))
return wp.length(e) + wp.min(wp.max(qx, wp.max(qy, qz)), 0.0)
# union
@wp.func
def op_union(d1: float, d2: float):
return wp.min(d1, d2)
@wp.func
def op_smooth_union(d1: float, d2: float, k: float):
# a = wp.pow(d1, k)
# b = wp.pow(d2, k)
# return wp.pow((a*b)/(a+b), 1.0/k)
a = d1
b = d2
h = wp.clamp(0.5 + 0.5 * (b - a) / k, 0.0, 1.0)
return wp.lerp(b, a, h) - k * h * (1.0 - h)
# subtraction
@wp.func
def op_subtract(d1: float, d2: float):
return wp.max(-d1, d2)
# intersection
@wp.func
def op_intersect(d1: float, d2: float):
return wp.max(d1, d2)
@wp.kernel
def make_field(field: wp.array3d(dtype=float), center: wp.vec3, radius: float, time: float):
i, j, k = wp.tid()
p = wp.vec3(float(i), float(j), float(k))
rng = wp.rand_init(42)
noise = wp.noise(rng, wp.vec4(float(i) + 0.5, float(j) + 0.5, float(k) + 0.5, time) * 0.25)
sphere = 2.0 * noise + wp.length(p - center) - radius
box = sdf_box(wp.vec3(16.0, 48.0, 16.0), p - center)
d = op_smooth_union(sphere, box, 4.0)
field[i, j, k] = d
class Example:
def __init__(self, stage):
self.dim = 128
self.max_verts = 10**6
self.max_tris = 10**6
self.time = 0.0
self.field = wp.zeros(shape=(self.dim, self.dim, self.dim), dtype=float)
self.iso = wp.MarchingCubes(
nx=self.dim, ny=self.dim, nz=self.dim, max_verts=self.max_verts, max_tris=self.max_tris
)
self.renderer = wp.render.UsdRenderer(stage)
def update(self):
pass
def render(self, is_live=False):
with wp.ScopedTimer("Update Field"):
wp.launch(
make_field,
dim=self.field.shape,
inputs=[self.field, wp.vec3(self.dim / 2, self.dim / 2, self.dim / 2), self.dim / 4, self.time],
)
with wp.ScopedTimer("Surface Extraction"):
self.iso.surface(field=self.field, threshold=math.sin(self.time) * self.dim / 8)
with wp.ScopedTimer("Render"):
self.renderer.begin_frame(self.time)
self.renderer.render_mesh("surface", self.iso.verts.numpy(), self.iso.indices.numpy(), update_topology=True)
self.renderer.end_frame()
self.time += 1.0 / 60.0
if __name__ == "__main__":
stage_path = os.path.join(os.path.dirname(__file__), "outputs/example_marching_cubes.usd")
example = Example(stage_path)
for i in range(240):
example.update()
example.render()
example.renderer.save()
# --- end of file: warp-main/examples/example_marching_cubes.py ---
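
A stripped-down use of wp.MarchingCubes, extracting a static sphere iso-surface with no animation or rendering. A sketch; like the example above it assumes a CUDA device, since the marching cubes implementation runs on the GPU:

import warp as wp

wp.init()

dim = 64

@wp.kernel
def sphere_field(field: wp.array3d(dtype=float), center: wp.vec3, radius: float):
    i, j, k = wp.tid()
    p = wp.vec3(float(i), float(j), float(k))
    field[i, j, k] = wp.length(p - center) - radius  # signed distance to the sphere

field = wp.zeros(shape=(dim, dim, dim), dtype=float)
wp.launch(sphere_field, dim=field.shape, inputs=[field, wp.vec3(dim / 2, dim / 2, dim / 2), dim / 4.0])

iso = wp.MarchingCubes(nx=dim, ny=dim, nz=dim, max_verts=10**6, max_tris=10**6)
iso.surface(field=field, threshold=0.0)
print(f"{iso.verts.shape[0]} vertices, {iso.indices.shape[0] // 3} triangles")
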
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
###########################################################################
# Example Sim Particle Chain
#
# Shows how to set up a simple chain of particles connected by springs
# using wp.sim.ModelBuilder().
#
###########################################################################
import os
import math
import warp as wp
import warp.sim
import warp.sim.render
wp.init()
class Example:
def __init__(self, stage):
self.sim_width = 64
self.sim_height = 32
self.sim_fps = 60.0
self.sim_substeps = 10
self.sim_duration = 5.0
self.sim_frames = int(self.sim_duration * self.sim_fps)
self.sim_dt = (1.0 / self.sim_fps) / self.sim_substeps
self.sim_time = 0.0
builder = wp.sim.ModelBuilder()
# anchor
builder.add_particle((0.0, 1.0, 0.0), (0.0, 0.0, 0.0), 0.0)
# chain
for i in range(1, 10):
radius = math.sqrt(i) * 0.2
mass = math.pi * radius * radius * radius
builder.add_particle((i, 1.0, 0.0), (0.0, 0.0, 0.0), mass, radius=radius)
builder.add_spring(i - 1, i, 1.0e6, 0.0, 0)
self.model = builder.finalize()
self.model.ground = False
self.integrator = wp.sim.XPBDIntegrator()
self.state_0 = self.model.state()
self.state_1 = self.model.state()
self.renderer = wp.sim.render.SimRenderer(self.model, stage, scaling=15.0)
def update(self):
with wp.ScopedTimer("simulate"):
for s in range(self.sim_substeps):
self.state_0.clear_forces()
self.state_1.clear_forces()
self.integrator.simulate(self.model, self.state_0, self.state_1, self.sim_dt)
self.sim_time += self.sim_dt
# swap states
(self.state_0, self.state_1) = (self.state_1, self.state_0)
def render(self, is_live=False):
with wp.ScopedTimer("render"):
time = 0.0 if is_live else self.sim_time
self.renderer.begin_frame(time)
self.renderer.render(self.state_0)
self.renderer.end_frame()
if __name__ == "__main__":
stage_path = os.path.join(os.path.dirname(__file__), "outputs/example_sim_particle_chain.usd")
example = Example(stage_path)
for i in range(example.sim_frames):
example.update()
example.render()
example.renderer.save()
# --- end of file: warp-main/examples/example_sim_particle_chain.py ---
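
The substep loop in update() is Warp's usual double-buffered integration pattern; distilled into a reusable helper it looks like this (a sketch, not repo API):

def step_frame(model, integrator, state_0, state_1, substeps, dt):
    """Advance one rendered frame using double-buffered simulation states."""
    for _ in range(substeps):
        state_0.clear_forces()
        state_1.clear_forces()
        integrator.simulate(model, state_0, state_1, dt)
        # ping-pong: this substep's output becomes the next substep's input
        state_0, state_1 = state_1, state_0
    return state_0, state_1

Called once per frame as state_0, state_1 = step_frame(...), this matches the XPBD example above as well as the semi-implicit examples elsewhere in this collection.
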
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#############################################################################
# Example Differentiable Ray Tracer
#
# Shows how to use the built-in wp.Mesh data structure and wp.mesh_query_ray()
# function to implement a basic differentiable ray tracer.
#
##############################################################################
import matplotlib.pyplot as plt
import matplotlib.image as img
import matplotlib.animation as animation
from pxr import Usd, UsdGeom
import warp as wp
import numpy as np
import os
import math
wp.init()
class RenderMode:
"""Rendering modes
grayscale: lambertian shading from multiple directional lights
texture: 2D texture map
normal_map: mesh normal computed from interpolated vertex normals
"""
grayscale = 0
texture = 1
normal_map = 2
@wp.struct
class RenderMesh:
"""Mesh to be ray traced.
Assumes a triangle mesh as input.
Per-vertex normals are computed with compute_vertex_normals()
"""
id: wp.uint64
vertices: wp.array(dtype=wp.vec3)
indices: wp.array(dtype=int)
tex_coords: wp.array(dtype=wp.vec2)
tex_indices: wp.array(dtype=int)
vertex_normals: wp.array(dtype=wp.vec3)
pos: wp.array(dtype=wp.vec3)
rot: wp.array(dtype=wp.quat)
@wp.struct
class Camera:
"""Basic camera for ray tracing"""
horizontal: float
vertical: float
aspect: float
e: float
tan: float
pos: wp.vec3
rot: wp.quat
@wp.struct
class DirectionalLights:
"""Stores arrays of directional light directions and intensities."""
dirs: wp.array(dtype=wp.vec3)
intensities: wp.array(dtype=float)
num_lights: int
class Example:
"""A basic differentiable ray tracer
Non-differentiable variables:
camera.horizontal: camera horizontal aperture size
camera.vertical: camera vertical aperture size
camera.aspect: camera aspect ratio
camera.e: focal length
camera.pos: camera displacement
camera.rot: camera rotation (quaternion)
pix_width: final image width in pixels
pix_height: final image height in pixels
num_samples: anti-aliasing level; rays per pixel along each axis is pow(2, num_samples)
directional_lights: characterized by intensity (scalar) and direction (vec3)
render_mesh.indices: mesh vertex indices
render_mesh.tex_indices: texture indices
Differentiable variables:
render_mesh.pos: parent transform displacement
render_mesh.rot: parent transform rotation (quaternion)
render_mesh.vertices: mesh vertex positions
render_mesh.vertex_normals: mesh vertex normals
render_mesh.tex_coords: 2D texture coordinates
"""
def __init__(self):
cam_pos = wp.vec3(0.0, 0.75, 7.0)
cam_rot = wp.quat(0.0, 0.0, 0.0, 1.0)
horizontal_aperture = 36.0
vertical_aperture = 20.25
aspect = horizontal_aperture / vertical_aperture
focal_length = 50.0
self.height = 1024
self.width = int(aspect * self.height)
self.num_pixels = self.width * self.height
asset_stage = Usd.Stage.Open(os.path.join(os.path.dirname(__file__), "assets/bunny.usd"))
mesh_geom = UsdGeom.Mesh(asset_stage.GetPrimAtPath("/bunny/bunny"))
points = np.array(mesh_geom.GetPointsAttr().Get())
indices = np.array(mesh_geom.GetFaceVertexIndicesAttr().Get())
num_points = points.shape[0]
num_faces = int(indices.shape[0] / 3)
# manufacture texture coordinates + indices for this asset
distance = np.linalg.norm(points, axis=1)
radius = np.max(distance)
distance = distance / radius
tex_coords = np.stack((distance, distance), axis=1)
tex_indices = indices
# manufacture texture for this asset
x = np.arange(256.0)
xx, yy = np.meshgrid(x, x)
zz = np.zeros_like(xx)
texture_host = np.stack((xx, yy, zz), axis=2) / 255.0
# set anti-aliasing
self.num_samples = 1
# set render mode
self.render_mode = RenderMode.texture
# set training iterations
self.train_rate = 3.0e-8
self.train_iters = 300
self.period = 10
# storage for training animation
self.images = np.zeros((self.height, self.width, 3, int(self.train_iters / self.period)))
with wp.ScopedDevice(device="cuda:0"):
# construct RenderMesh
self.render_mesh = RenderMesh()
self.mesh = wp.Mesh(
points=wp.array(points, dtype=wp.vec3, requires_grad=True), indices=wp.array(indices, dtype=int)
)
self.render_mesh.id = self.mesh.id
self.render_mesh.vertices = self.mesh.points
self.render_mesh.indices = self.mesh.indices
self.render_mesh.tex_coords = wp.array(tex_coords, dtype=wp.vec2, requires_grad=True)
self.render_mesh.tex_indices = wp.array(tex_indices, dtype=int)
self.normal_sums = wp.zeros(num_points, dtype=wp.vec3, requires_grad=True)
self.render_mesh.vertex_normals = wp.zeros(num_points, dtype=wp.vec3, requires_grad=True)
self.render_mesh.pos = wp.zeros(1, dtype=wp.vec3, requires_grad=True)
self.render_mesh.rot = wp.array(np.array([0.0, 0.0, 0.0, 1.0]), dtype=wp.quat, requires_grad=True)
# compute vertex normals
wp.launch(
kernel=Example.vertex_normal_sum_kernel,
dim=num_faces,
inputs=[self.render_mesh.vertices, self.render_mesh.indices, self.normal_sums],
)
wp.launch(
kernel=Example.normalize_kernel,
dim=num_points,
inputs=[self.normal_sums, self.render_mesh.vertex_normals],
)
# construct camera
self.camera = Camera()
self.camera.horizontal = horizontal_aperture
self.camera.vertical = vertical_aperture
self.camera.aspect = aspect
self.camera.e = focal_length
self.camera.tan = vertical_aperture / (2.0 * focal_length)
self.camera.pos = cam_pos
self.camera.rot = cam_rot
# construct texture
self.texture = wp.array2d(texture_host, dtype=wp.vec3, requires_grad=True)
# construct lights
self.lights = DirectionalLights()
self.lights.dirs = wp.array(np.array([[1.0, 0.0, 0.0], [0.0, 0.0, 1.0]]), dtype=wp.vec3, requires_grad=True)
self.lights.intensities = wp.array(np.array([2.0, 0.2]), dtype=float, requires_grad=True)
self.lights.num_lights = 2
# construct rays
self.rays_width = self.width * pow(2, self.num_samples)
self.rays_height = self.height * pow(2, self.num_samples)
self.num_rays = self.rays_width * self.rays_height
self.rays = wp.zeros(self.num_rays, dtype=wp.vec3, requires_grad=True)
# construct pixels
self.pixels = wp.zeros(self.num_pixels, dtype=wp.vec3, requires_grad=True)
self.target_pixels = wp.zeros(self.num_pixels, dtype=wp.vec3)
# loss array
self.loss = wp.zeros(1, dtype=float, requires_grad=True)
def update(self):
pass
def render(self, is_live=False):
with wp.ScopedDevice("cuda:0"):
# raycast
wp.launch(
kernel=Example.draw_kernel,
dim=self.num_rays,
inputs=[
self.render_mesh,
self.camera,
self.texture,
self.rays_width,
self.rays_height,
self.rays,
self.lights,
self.render_mode,
],
)
# downsample
wp.launch(
kernel=Example.downsample_kernel,
dim=self.num_pixels,
inputs=[self.rays, self.pixels, self.rays_width, pow(2, self.num_samples)],
)
@wp.kernel
def vertex_normal_sum_kernel(
verts: wp.array(dtype=wp.vec3), indices: wp.array(dtype=int), normal_sums: wp.array(dtype=wp.vec3)
):
tid = wp.tid()
i = indices[tid * 3]
j = indices[tid * 3 + 1]
k = indices[tid * 3 + 2]
a = verts[i]
b = verts[j]
c = verts[k]
ab = b - a
ac = c - a
area_normal = wp.cross(ab, ac)
wp.atomic_add(normal_sums, i, area_normal)
wp.atomic_add(normal_sums, j, area_normal)
wp.atomic_add(normal_sums, k, area_normal)
@wp.kernel
def normalize_kernel(
normal_sums: wp.array(dtype=wp.vec3),
vertex_normals: wp.array(dtype=wp.vec3),
):
tid = wp.tid()
vertex_normals[tid] = wp.normalize(normal_sums[tid])
@wp.func
def texture_interpolation(tex_interp: wp.vec2, texture: wp.array2d(dtype=wp.vec3)):
tex_width = texture.shape[1]
tex_height = texture.shape[0]
tex = wp.vec2(tex_interp[0] * float(tex_width - 1), (1.0 - tex_interp[1]) * float(tex_height - 1))
x0 = int(tex[0])
x1 = x0 + 1
alpha_x = tex[0] - float(x0)
y0 = int(tex[1])
y1 = y0 + 1
alpha_y = tex[1] - float(y0)
c00 = texture[y0, x0]
c10 = texture[y0, x1]
c01 = texture[y1, x0]
c11 = texture[y1, x1]
lower = (1.0 - alpha_x) * c00 + alpha_x * c10
upper = (1.0 - alpha_x) * c01 + alpha_x * c11
color = (1.0 - alpha_y) * lower + alpha_y * upper
return color
@wp.kernel
def draw_kernel(
mesh: RenderMesh,
camera: Camera,
texture: wp.array2d(dtype=wp.vec3),
rays_width: int,
rays_height: int,
rays: wp.array(dtype=wp.vec3),
lights: DirectionalLights,
mode: int,
):
tid = wp.tid()
x = tid % rays_width
y = rays_height - tid // rays_width
sx = 2.0 * float(x) / float(rays_width) - 1.0
sy = 2.0 * float(y) / float(rays_height) - 1.0
# compute view ray in world space
ro_world = camera.pos
rd_world = wp.normalize(
wp.quat_rotate(camera.rot, wp.vec3(sx * camera.tan * camera.aspect, sy * camera.tan, -1.0))
)
# compute view ray in mesh space
inv = wp.transform_inverse(wp.transform(mesh.pos[0], mesh.rot[0]))
ro = wp.transform_point(inv, ro_world)
rd = wp.transform_vector(inv, rd_world)
t = float(0.0)
ur = float(0.0)
vr = float(0.0)
sign = float(0.0)
n = wp.vec3()
f = int(0)
color = wp.vec3(0.0, 0.0, 0.0)
if wp.mesh_query_ray(mesh.id, ro, rd, 1.0e6, t, ur, vr, sign, n, f):
i = mesh.indices[f * 3]
j = mesh.indices[f * 3 + 1]
k = mesh.indices[f * 3 + 2]
a = mesh.vertices[i]
b = mesh.vertices[j]
c = mesh.vertices[k]
p = wp.mesh_eval_position(mesh.id, f, ur, vr)
# barycentric coordinates
tri_area = wp.length(wp.cross(b - a, c - a))
w = wp.length(wp.cross(b - a, p - a)) / tri_area
v = wp.length(wp.cross(p - a, c - a)) / tri_area
u = 1.0 - w - v
a_n = mesh.vertex_normals[i]
b_n = mesh.vertex_normals[j]
c_n = mesh.vertex_normals[k]
# vertex normal interpolation
normal = u * a_n + v * b_n + w * c_n
if mode == 0 or mode == 1:
if mode == 0: # grayscale
color = wp.vec3(1.0)
elif mode == 1: # texture interpolation
tex_a = mesh.tex_coords[mesh.tex_indices[f * 3]]
tex_b = mesh.tex_coords[mesh.tex_indices[f * 3 + 1]]
tex_c = mesh.tex_coords[mesh.tex_indices[f * 3 + 2]]
tex = u * tex_a + v * tex_b + w * tex_c
color = Example.texture_interpolation(tex, texture)
# lambertian directional lighting
lambert = float(0.0)
for i in range(lights.num_lights):
dir = wp.transform_vector(inv, lights.dirs[i])
val = lights.intensities[i] * wp.dot(normal, dir)
if val < 0.0:
val = 0.0
lambert = lambert + val
color = lambert * color
elif mode == 2: # normal map
color = normal * 0.5 + wp.vec3(0.5, 0.5, 0.5)
if color[0] > 1.0:
color = wp.vec3(1.0, color[1], color[2])
if color[1] > 1.0:
color = wp.vec3(color[0], 1.0, color[2])
if color[2] > 1.0:
color = wp.vec3(color[0], color[1], 1.0)
rays[tid] = color
@wp.kernel
def downsample_kernel(
rays: wp.array(dtype=wp.vec3), pixels: wp.array(dtype=wp.vec3), rays_width: int, num_samples: int
):
tid = wp.tid()
pixels_width = rays_width / num_samples
px = tid % pixels_width
py = tid // pixels_width
start_idx = py * num_samples * rays_width + px * num_samples
color = wp.vec3(0.0, 0.0, 0.0)
for i in range(0, num_samples):
for j in range(0, num_samples):
ray = rays[start_idx + i * rays_width + j]
color = wp.vec3(color[0] + ray[0], color[1] + ray[1], color[2] + ray[2])
num_samples_sq = float(num_samples * num_samples)
color = wp.vec3(color[0] / num_samples_sq, color[1] / num_samples_sq, color[2] / num_samples_sq)
pixels[tid] = color
@wp.kernel
def loss_kernel(
pixels: wp.array(dtype=wp.vec3), target_pixels: wp.array(dtype=wp.vec3), loss: wp.array(dtype=float)
):
tid = wp.tid()
pixel = pixels[tid]
target_pixel = target_pixels[tid]
diff = target_pixel - pixel
# pseudo Huber loss
delta = 1.0
x = delta * delta * (wp.sqrt(1.0 + (diff[0] / delta) * (diff[0] / delta)) - 1.0)
y = delta * delta * (wp.sqrt(1.0 + (diff[1] / delta) * (diff[1] / delta)) - 1.0)
z = delta * delta * (wp.sqrt(1.0 + (diff[2] / delta) * (diff[2] / delta)) - 1.0)
sum = x + y + z
wp.atomic_add(loss, 0, sum)
@wp.kernel
def step_kernel(x: wp.array(dtype=wp.quat), grad: wp.array(dtype=wp.quat), alpha: float):
tid = wp.tid()
# projected gradient descent
x[tid] = wp.normalize(wp.sub(x[tid], wp.mul(grad[tid], alpha)))
def compute_loss(self):
self.render()
wp.launch(self.loss_kernel, dim=self.num_pixels, inputs=[self.pixels, self.target_pixels, self.loss])
def train_graph(self):
with wp.ScopedDevice("cuda:0"):
# capture graph
wp.capture_begin()
tape = wp.Tape()
with tape:
self.compute_loss()
tape.backward(self.loss)
self.graph = wp.capture_end()
# train
image_counter = 0
for i in range(self.train_iters):
wp.capture_launch(self.graph)
rot_grad = tape.gradients[self.render_mesh.rot]
wp.launch(Example.step_kernel, dim=1, inputs=[self.render_mesh.rot, rot_grad, self.train_rate])
if i % self.period == 0:
print(f"Iter: {i} Loss: {self.loss}")
self.images[:, :, :, image_counter] = self.get_image()
image_counter += 1
tape.zero()
self.loss.zero_()
def get_image(self):
return self.pixels.numpy().reshape((self.height, self.width, 3))
def get_animation(self):
fig, ax = plt.subplots()
plt.axis("off")
plt.subplots_adjust(top=1, bottom=0, right=1, left=0, hspace=0, wspace=0)
plt.margins(0, 0)
frames = []
for i in range(self.images.shape[3]):
frame = ax.imshow(self.images[:, :, :, i], animated=True)
frames.append([frame])
ani = animation.ArtistAnimation(fig, frames, interval=50, blit=True, repeat_delay=1000)
return ani
if __name__ == "__main__":
output_dir = os.path.join(os.path.dirname(__file__), "outputs")
example = Example()
# render target rotation
example.render()
with wp.ScopedDevice(device="cuda:0"):
wp.copy(example.target_pixels, example.pixels)
target_image = example.get_image()
img.imsave(output_dir + "/target_image.png", target_image)
# offset mesh rotation
with wp.ScopedDevice(device="cuda:0"):
example.render_mesh.rot = wp.array(
np.array(
[0.0, (math.sqrt(3) - 1) / (2.0 * math.sqrt(2.0)), 0.0, (math.sqrt(3) + 1) / (2.0 * math.sqrt(2.0))]
),
dtype=wp.quat,
requires_grad=True,
)
# recover target rotation
example.train_graph()
final_image = example.get_image()
img.imsave(output_dir + "/final_image.png", final_image)
video = example.get_animation()
video.save(output_dir + "/animation.gif", dpi=300, writer=animation.PillowWriter(fps=15))
# --- end of file: warp-main/examples/example_diffray.py ---
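
Stripped of CUDA graph capture, the optimization loop in train_graph() reduces to the standard wp.Tape record/backward/step cycle. A sketch of the ungraphed equivalent, assuming example has been constructed and the target image rendered as in __main__:

for i in range(example.train_iters):
    tape = wp.Tape()
    with tape:
        example.compute_loss()       # forward pass: ray trace + loss_kernel
    tape.backward(example.loss)      # adjoint pass populates tape.gradients
    rot_grad = tape.gradients[example.render_mesh.rot]
    wp.launch(Example.step_kernel, dim=1,
              inputs=[example.render_mesh.rot, rot_grad, example.train_rate])
    if i % example.period == 0:
        print(f"Iter: {i} Loss: {example.loss.numpy()[0]}")
    tape.zero()
    example.loss.zero_()
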
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
###########################################################################
# Example Sim Cartpole
#
# Shows how to set up a simulation of a rigid-body cartpole articulation
# from a URDF using the wp.sim.ModelBuilder().
# Note this example does not include a trained policy.
#
###########################################################################
import os
import math
import numpy as np
import warp as wp
import warp.sim
import warp.sim.render
wp.init()
class Example:
frame_dt = 1.0 / 60.0
episode_duration = 20.0 # seconds
episode_frames = int(episode_duration / frame_dt)
sim_substeps = 10
sim_dt = frame_dt / sim_substeps
sim_steps = int(episode_duration / sim_dt)
sim_time = 0.0
def __init__(self, stage=None, render=True, num_envs=1):
builder = wp.sim.ModelBuilder()
self.enable_rendering = render
self.num_envs = num_envs
articulation_builder = wp.sim.ModelBuilder()
wp.sim.parse_urdf(
os.path.join(os.path.dirname(__file__), "assets/cartpole.urdf"),
articulation_builder,
xform=wp.transform(np.zeros(3), wp.quat_from_axis_angle((1.0, 0.0, 0.0), -math.pi * 0.5)),
floating=False,
density=100,
armature=0.1,
stiffness=0.0,
damping=0.0,
shape_ke=1.0e4,
shape_kd=1.0e2,
shape_kf=1.0e2,
shape_mu=1.0,
limit_ke=1.0e4,
limit_kd=1.0e1,
enable_self_collisions=False,
)
builder = wp.sim.ModelBuilder()
for i in range(num_envs):
builder.add_builder(
articulation_builder, xform=wp.transform(np.array((i * 2.0, 4.0, 0.0)), wp.quat_identity())
)
# joint initial positions
builder.joint_q[-3:] = [0.0, 0.3, 0.0]
builder.joint_target[:3] = [0.0, 0.0, 0.0]
# finalize model
self.model = builder.finalize()
self.model.ground = False
self.model.joint_attach_ke = 1600.0
self.model.joint_attach_kd = 20.0
self.integrator = wp.sim.SemiImplicitIntegrator()
# -----------------------
# set up Usd renderer
self.renderer = None
if render:
self.renderer = wp.sim.render.SimRenderer(self.model, stage, scaling=15.0)
def update(self):
for _ in range(self.sim_substeps):
self.state.clear_forces()
self.state = self.integrator.simulate(self.model, self.state, self.state, self.sim_dt)
def render(self, is_live=False):
time = 0.0 if is_live else self.sim_time
self.renderer.begin_frame(time)
self.renderer.render(self.state)
self.renderer.end_frame()
def run(self, render=True):
# ---------------
# run simulation
self.sim_time = 0.0
self.state = self.model.state()
wp.sim.eval_fk(self.model, self.model.joint_q, self.model.joint_qd, None, self.state)
profiler = {}
# create update graph
wp.capture_begin()
# simulate
self.update()
graph = wp.capture_end()
# simulate
with wp.ScopedTimer("simulate", detailed=False, print=False, active=True, dict=profiler):
for f in range(0, self.episode_frames):
with wp.ScopedTimer("simulate", active=True):
wp.capture_launch(graph)
self.sim_time += self.frame_dt
if self.enable_rendering:
with wp.ScopedTimer("render", active=True):
self.render()
self.renderer.save()
wp.synchronize()
avg_time = np.array(profiler["simulate"]).mean() / self.episode_frames
avg_steps_second = 1000.0 * float(self.num_envs) / avg_time
print(f"envs: {self.num_envs} steps/second {avg_steps_second} avg_time {avg_time}")
return 1000.0 * float(self.num_envs) / avg_time
profile = False
if profile:
env_count = 2
env_times = []
env_size = []
for i in range(15):
robot = Example(render=False, num_envs=env_count)
steps_per_second = robot.run()
env_size.append(env_count)
env_times.append(steps_per_second)
env_count *= 2
# dump times
for i in range(len(env_times)):
print(f"envs: {env_size[i]} steps/second: {env_times[i]}")
# plot
import matplotlib.pyplot as plt
plt.figure(1)
plt.plot(env_size, env_times)
plt.xscale("log")
plt.xlabel("Number of Envs")
plt.yscale("log")
plt.ylabel("Steps/Second")
plt.show()
else:
stage = os.path.join(os.path.dirname(__file__), "outputs/example_sim_cartpole.usd")
robot = Example(stage, render=True, num_envs=10)
robot.run()
# --- end of file: warp-main/examples/example_sim_cartpole.py ---
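
The capture/replay idiom in run() is independent of the cartpole model; here is a minimal self-contained sketch of the same CUDA graph pattern around an arbitrary kernel:

import warp as wp

wp.init()

@wp.kernel
def inc(a: wp.array(dtype=float)):
    tid = wp.tid()
    a[tid] = a[tid] + 1.0

with wp.ScopedDevice("cuda:0"):
    a = wp.zeros(1024, dtype=float)

    # record: launches between begin/end are captured into the graph, not executed
    wp.capture_begin()
    for _ in range(10):
        wp.launch(inc, dim=a.size, inputs=[a])
    graph = wp.capture_end()

    # replay: each capture_launch re-runs all ten recorded launches in one submission
    for _ in range(100):
        wp.capture_launch(graph)

    wp.synchronize()
    print(a.numpy()[0])  # 10 launches x 100 replays -> 1000.0
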
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import numpy as np
import warp as wp
import gc
@wp.kernel
def inc_kernel(a: wp.array(dtype=float)):
tid = wp.tid()
a[tid] = a[tid] + 1.0
@wp.kernel
def dec_kernel(a: wp.array(dtype=float)):
tid = wp.tid()
a[tid] = a[tid] - 1.0
def test_allocs(n, device, do_sync=False):
arrs = [None] * n
with wp.ScopedTimer("allocs"):
for i in range(n):
arrs[i] = wp.zeros(1, device=device)
if do_sync:
wp.synchronize()
return arrs
def test_allocs_v2(n, device, do_sync=False):
arrs = [None] * n
with wp.ScopedTimer("allocs_v2"), wp.ScopedDevice(device):
for i in range(n):
arrs[i] = wp.zeros(1)
if do_sync:
wp.synchronize()
return arrs
def test_launches(n, device, do_sync=False):
arr = wp.zeros(1, dtype=wp.float32, device=device)
wp.synchronize()
with wp.ScopedTimer("launches"):
for _ in range(n):
wp.launch(inc_kernel, dim=arr.size, inputs=[arr], device=device)
wp.launch(dec_kernel, dim=arr.size, inputs=[arr], device=device)
if do_sync:
wp.synchronize()
def test_launches_v2(n, device, do_sync=False):
arr = wp.zeros(1, dtype=wp.float32, device=device)
wp.synchronize()
with wp.ScopedTimer("launches_v2"), wp.ScopedDevice(device):
for _ in range(n):
wp.launch(inc_kernel, dim=arr.size, inputs=[arr])
wp.launch(dec_kernel, dim=arr.size, inputs=[arr])
if do_sync:
wp.synchronize()
def test_copies(n, do_sync=False):
a = wp.zeros(1, dtype=wp.float32, device="cpu")
b = wp.zeros(1, dtype=wp.float32, device="cuda")
c = wp.zeros(1, dtype=wp.float32, device="cuda")
wp.synchronize()
with wp.ScopedTimer("copies"):
for _ in range(n):
wp.copy(b, a)
wp.copy(c, b)
wp.copy(a, c)
if do_sync:
wp.synchronize()
def test_graphs(n, device, do_sync=False):
arr = wp.zeros(1, dtype=wp.float32, device=device)
wp.synchronize()
wp.capture_begin()
wp.launch(inc_kernel, dim=arr.size, inputs=[arr], device=device)
wp.launch(dec_kernel, dim=arr.size, inputs=[arr], device=device)
graph = wp.capture_end()
wp.synchronize()
with wp.ScopedTimer("graphs"):
for _ in range(n):
wp.capture_launch(graph)
if do_sync:
wp.synchronize()
wp.init()
wp.force_load()
device = "cuda"
n = 100000
# make sure the context gets fully initialized now
_a = wp.zeros(1, device=device)
wp.launch(inc_kernel, dim=_a.size, inputs=[_a], device=device)
wp.synchronize()
gc.collect()
test_allocs(n, device)
wp.synchronize()
gc.collect()
test_allocs_v2(n, device)
wp.synchronize()
gc.collect()
test_launches(n, device)
wp.synchronize()
gc.collect()
test_launches_v2(n, device)
wp.synchronize()
gc.collect()
test_copies(n)
wp.synchronize()
gc.collect()
test_graphs(n, device)
wp.synchronize()
gc.collect()
# ========= profiling ==========#
# import cProfile
# cProfile.run('test_allocs(n, device)')
# from pyinstrument import Profiler
# profiler = Profiler()
# profiler.start()
# #arrs = test_allocs(n, device)
# test_launches(n, device)
# #test_copies(n)
# profiler.stop()
# print(profiler.output_text(show_all=True))
# --- end of file: warp-main/examples/benchmark_api.py ---
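
wp.ScopedTimer can also aggregate samples into a dict, as the cartpole example does via dict=profiler. A sketch of wrapping one of the benchmarks above to report a mean over several repetitions, assuming the definitions from benchmark_api.py are in scope:

import numpy as np

profiler = {}
for _ in range(5):
    with wp.ScopedTimer("allocs_total", print=False, dict=profiler):
        test_allocs(n, device, do_sync=True)
    gc.collect()
print("allocs mean: %.3f ms" % np.array(profiler["allocs_total"]).mean())
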
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
###########################################################################
# Example Sim Rigid Kinematics
#
# Tests rigid-body forward and backward kinematics through the
# wp.sim.eval_ik() and wp.sim.eval_fk() functions.
#
###########################################################################
import os
import math
import numpy as np
import warp as wp
import warp.sim
import warp.sim.render
wp.init()
class Robot:
frame_dt = 1.0 / 60.0
episode_duration = 2.0 # seconds
episode_frames = int(episode_duration / frame_dt)
sim_substeps = 10
sim_dt = frame_dt / sim_substeps
sim_steps = int(episode_duration / sim_dt)
sim_time = 0.0
render_time = 0.0
def __init__(self, render=True, num_envs=1, device=None):
builder = wp.sim.ModelBuilder()
self.render = render
self.num_envs = num_envs
for i in range(num_envs):
wp.sim.parse_mjcf(
os.path.join(os.path.dirname(__file__), "assets/nv_ant.xml"),
builder,
stiffness=0.0,
damping=1.0,
armature=0.1,
contact_ke=1.0e4,
contact_kd=1.0e2,
contact_kf=1.0e2,
contact_mu=0.75,
limit_ke=1.0e3,
limit_kd=1.0e1,
)
coord_count = 15
dof_count = 14
coord_start = i * coord_count
dof_start = i * dof_count
# base
builder.joint_q[coord_start : coord_start + 3] = [i * 2.0, 0.70, 0.0]
builder.joint_q[coord_start + 3 : coord_start + 7] = wp.quat_from_axis_angle(
(1.0, 0.0, 0.0), -math.pi * 0.5
)
# joints
builder.joint_q[coord_start + 7 : coord_start + coord_count] = [0.0, 1.0, 0.0, -1.0, 0.0, -1.0, 0.0, 1.0]
builder.joint_qd[dof_start + 6 : dof_start + dof_count] = [1.0, 1.0, 1.0, -1.0, 1.0, -1.0, 1.0, 1.0]
# finalize model
self.model = builder.finalize(device)
self.model.ground = True
self.model.joint_attach_ke *= 16.0
self.model.joint_attach_kd *= 4.0
self.integrator = wp.sim.SemiImplicitIntegrator()
# -----------------------
# set up Usd renderer
if self.render:
self.renderer = wp.sim.render.SimRenderer(
self.model,
os.path.join(os.path.dirname(__file__), "outputs/example_sim_rigid_kinematics.usd"),
scaling=50.0,
)
def run(self):
# ---------------
# run simulation
self.sim_time = 0.0
self.state = self.model.state()
# save a copy of joint values
q_fk = self.model.joint_q.numpy()
qd_fk = self.model.joint_qd.numpy()
wp.sim.eval_fk(self.model, self.model.joint_q, self.model.joint_qd, None, self.state)
q_ik = wp.zeros_like(self.model.joint_q)
qd_ik = wp.zeros_like(self.model.joint_qd)
wp.sim.eval_ik(self.model, self.state, q_ik, qd_ik)
q_err = q_fk - q_ik.numpy()
qd_err = qd_fk - qd_ik.numpy()
if self.render:
self.renderer.begin_frame(self.render_time)
self.renderer.render(self.state)
self.renderer.end_frame()
self.renderer.save()
print(q_err)
print(qd_err)
assert np.abs(q_err).max() < 1.0e-6
assert np.abs(qd_err).max() < 1.0e-6
robot = Robot(render=False, num_envs=1)
robot.run()
# --- end of file: warp-main/examples/example_sim_rigid_kinematics.py ---
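
The FK -> IK round trip above generalizes to any articulation: evaluate forward kinematics into a state, run inverse kinematics back into fresh coordinate arrays, and compare. Condensed into a sketch, assuming model is any finalized wp.sim model:

import numpy as np
import warp as wp

state = model.state()
wp.sim.eval_fk(model, model.joint_q, model.joint_qd, None, state)  # joint coords -> body transforms

q_ik = wp.zeros_like(model.joint_q)
qd_ik = wp.zeros_like(model.joint_qd)
wp.sim.eval_ik(model, state, q_ik, qd_ik)                          # body transforms -> joint coords

assert np.abs(model.joint_q.numpy() - q_ik.numpy()).max() < 1.0e-6
assert np.abs(model.joint_qd.numpy() - qd_ik.numpy()).max() < 1.0e-6
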
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
###########################################################################
# Example DEM
#
# Shows how to implement a DEM particle simulation with cohesion between
# particles. Neighbors are found using the wp.HashGrid class and the
# wp.hash_grid_query() / wp.hash_grid_query_next() kernel functions.
#
###########################################################################
import numpy as np
import warp as wp
import warp.render
import os
wp.init()
@wp.func
def contact_force(n: wp.vec3, v: wp.vec3, c: float, k_n: float, k_d: float, k_f: float, k_mu: float):
vn = wp.dot(n, v)
jn = c * k_n
jd = min(vn, 0.0) * k_d
# contact force
fn = jn + jd
# friction force
vt = v - n * vn
vs = wp.length(vt)
if vs > 0.0:
vt = vt / vs
# Coulomb condition
ft = wp.min(vs * k_f, k_mu * wp.abs(fn))
# total force
return -n * fn - vt * ft
@wp.kernel
def apply_forces(
grid: wp.uint64,
particle_x: wp.array(dtype=wp.vec3),
particle_v: wp.array(dtype=wp.vec3),
particle_f: wp.array(dtype=wp.vec3),
radius: float,
k_contact: float,
k_damp: float,
k_friction: float,
k_mu: float,
):
tid = wp.tid()
# order threads by cell
i = wp.hash_grid_point_id(grid, tid)
x = particle_x[i]
v = particle_v[i]
f = wp.vec3()
# ground contact
n = wp.vec3(0.0, 1.0, 0.0)
c = wp.dot(n, x)
cohesion_ground = 0.02
cohesion_particle = 0.0075
if c < cohesion_ground:
f = f + contact_force(n, v, c, k_contact, k_damp, 100.0, 0.5)
# particle contact
neighbors = wp.hash_grid_query(grid, x, radius * 5.0)
for index in neighbors:
if index != i:
# compute distance to point
n = x - particle_x[index]
d = wp.length(n)
err = d - radius * 2.0
if err <= cohesion_particle:
n = n / d
vrel = v - particle_v[index]
f = f + contact_force(n, vrel, err, k_contact, k_damp, k_friction, k_mu)
particle_f[i] = f
@wp.kernel
def integrate(
x: wp.array(dtype=wp.vec3),
v: wp.array(dtype=wp.vec3),
f: wp.array(dtype=wp.vec3),
gravity: wp.vec3,
dt: float,
inv_mass: float,
):
tid = wp.tid()
v_new = v[tid] + f[tid] * inv_mass * dt + gravity * dt
x_new = x[tid] + v_new * dt
v[tid] = v_new
x[tid] = x_new
class Example:
def __init__(self, stage):
self.frame_dt = 1.0 / 60
self.frame_count = 400
self.sim_substeps = 64
self.sim_dt = self.frame_dt / self.sim_substeps
self.sim_steps = self.frame_count * self.sim_substeps
self.sim_time = 0.0
self.point_radius = 0.1
self.k_contact = 8000.0
self.k_damp = 2.0
self.k_friction = 1.0
self.k_mu = 100000.0 # for cohesive materials
self.inv_mass = 64.0
self.renderer = wp.render.UsdRenderer(stage)
self.renderer.render_ground()
self.grid = wp.HashGrid(128, 128, 128)
self.grid_cell_size = self.point_radius * 5.0
self.points = self.particle_grid(32, 128, 32, (0.0, 0.3, 0.0), self.point_radius, 0.1)
self.x = wp.array(self.points, dtype=wp.vec3)
self.v = wp.array(np.ones([len(self.x), 3]) * np.array([0.0, 0.0, 10.0]), dtype=wp.vec3)
self.f = wp.zeros_like(self.v)
self.use_graph = wp.get_device().is_cuda
if self.use_graph:
wp.capture_begin()
for s in range(self.sim_substeps):
with wp.ScopedTimer("forces", active=False):
wp.launch(
kernel=apply_forces,
dim=len(self.x),
inputs=[
self.grid.id,
self.x,
self.v,
self.f,
self.point_radius,
self.k_contact,
self.k_damp,
self.k_friction,
self.k_mu,
],
)
wp.launch(
kernel=integrate,
dim=len(self.x),
inputs=[self.x, self.v, self.f, (0.0, -9.8, 0.0), self.sim_dt, self.inv_mass],
)
self.graph = wp.capture_end()
def update(self):
with wp.ScopedTimer("simulate", active=True):
if self.use_graph:
with wp.ScopedTimer("grid build", active=False):
self.grid.build(self.x, self.grid_cell_size)
with wp.ScopedTimer("solve", active=False):
wp.capture_launch(self.graph)
else:
with wp.ScopedTimer("grid build", active=False):
self.grid.build(self.x, self.point_radius)
with wp.ScopedTimer("solve", active=False):
for s in range(self.sim_substeps):
wp.launch(
kernel=apply_forces,
dim=len(self.x),
inputs=[
self.grid.id,
self.x,
self.v,
self.f,
self.point_radius,
self.k_contact,
self.k_damp,
self.k_friction,
self.k_mu,
],
)
wp.launch(
kernel=integrate,
dim=len(self.x),
inputs=[self.x, self.v, self.f, (0.0, -9.8, 0.0), self.sim_dt, self.inv_mass],
)
wp.synchronize()
def render(self, is_live=False):
with wp.ScopedTimer("render", active=True):
time = 0.0 if is_live else self.sim_time
self.renderer.begin_frame(time)
self.renderer.render_points(points=self.x.numpy(), radius=self.point_radius, name="points")
self.renderer.end_frame()
self.sim_time += self.frame_dt
# creates a grid of particles
def particle_grid(self, dim_x, dim_y, dim_z, lower, radius, jitter):
points = np.meshgrid(np.linspace(0, dim_x, dim_x), np.linspace(0, dim_y, dim_y), np.linspace(0, dim_z, dim_z))
points_t = np.array((points[0], points[1], points[2])).T * radius * 2.0 + np.array(lower)
points_t = points_t + np.random.rand(*points_t.shape) * radius * jitter
return points_t.reshape((-1, 3))
if __name__ == "__main__":
stage_path = os.path.join(os.path.dirname(__file__), "outputs/example_dem.usd")
example = Example(stage_path)
for i in range(example.frame_count):
example.update()
example.render()
example.renderer.save()
# --- end of file: warp-main/examples/example_dem.py ---
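
The hash-grid query in apply_forces can be isolated from the force model. The sketch below only counts neighbors within a radius, using the same wp.HashGrid calls that appear above:

import numpy as np
import warp as wp

wp.init()

@wp.kernel
def count_neighbors(
    grid: wp.uint64, points: wp.array(dtype=wp.vec3), radius: float, counts: wp.array(dtype=int)
):
    tid = wp.tid()
    # order threads by cell so nearby particles are processed together
    i = wp.hash_grid_point_id(grid, tid)
    x = points[i]
    n = int(0)
    neighbors = wp.hash_grid_query(grid, x, radius)
    for index in neighbors:
        if index != i:
            if wp.length(x - points[index]) < radius:
                n = n + 1
    counts[i] = n

points = wp.array(np.random.rand(1024, 3).astype(np.float32), dtype=wp.vec3)
grid = wp.HashGrid(64, 64, 64)
grid.build(points, 0.1)  # cell size should match the largest query radius

counts = wp.zeros(len(points), dtype=int)
wp.launch(count_neighbors, dim=len(points), inputs=[grid.id, points, 0.1, counts])
print("mean neighbor count:", counts.numpy().mean())
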
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
###########################################################################
# Example Sim Rigid Contact
#
# Shows how to set up free rigid bodies with different shape types falling
# and colliding against each other and the ground using wp.sim.ModelBuilder().
#
###########################################################################
import os
import math
import numpy as np
import warp as wp
import warp.sim
import warp.sim.render
from pxr import UsdGeom, Usd
wp.init()
class Example:
frame_dt = 1.0 / 60.0
episode_duration = 20.0 # seconds
episode_frames = int(episode_duration / frame_dt)
sim_substeps = 10
sim_dt = frame_dt / sim_substeps
sim_steps = int(episode_duration / sim_dt)
sim_time = 0.0
def __init__(self, stage=None, render=True):
builder = wp.sim.ModelBuilder()
self.enable_rendering = render
self.num_bodies = 8
self.scale = 0.8
self.ke = 1.0e5
self.kd = 250.0
self.kf = 500.0
# boxes
for i in range(self.num_bodies):
b = builder.add_body(origin=wp.transform((i, 1.0, 0.0), wp.quat_identity()))
s = builder.add_shape_box(
pos=(0.0, 0.0, 0.0),
hx=0.5 * self.scale,
hy=0.2 * self.scale,
hz=0.2 * self.scale,
body=i,
ke=self.ke,
kd=self.kd,
kf=self.kf,
)
# spheres
for i in range(self.num_bodies):
b = builder.add_body(origin=wp.transform((i, 1.0, 2.0), wp.quat_identity()))
s = builder.add_shape_sphere(
pos=(0.0, 0.0, 0.0), radius=0.25 * self.scale, body=b, ke=self.ke, kd=self.kd, kf=self.kf
)
# capsules
for i in range(self.num_bodies):
b = builder.add_body(origin=wp.transform((i, 1.0, 6.0), wp.quat_identity()))
s = builder.add_shape_capsule(
pos=(0.0, 0.0, 0.0),
radius=0.25 * self.scale,
half_height=self.scale * 0.5,
up_axis=0,
body=b,
ke=self.ke,
kd=self.kd,
kf=self.kf,
)
# initial spin
for i in range(len(builder.body_qd)):
builder.body_qd[i] = (0.0, 2.0, 10.0, 0.0, 0.0, 0.0)
# meshes
bunny = self.load_mesh(os.path.join(os.path.dirname(__file__), "assets/bunny.usd"), "/bunny/bunny")
for i in range(self.num_bodies):
b = builder.add_body(
origin=wp.transform(
(i * 0.5 * self.scale, 1.0 + i * 1.7 * self.scale, 4.0 + i * 0.5 * self.scale),
wp.quat_from_axis_angle((0.0, 1.0, 0.0), math.pi * 0.1 * i),
)
)
s = builder.add_shape_mesh(
body=b,
mesh=bunny,
pos=(0.0, 0.0, 0.0),
scale=(self.scale, self.scale, self.scale),
ke=self.ke,
kd=self.kd,
kf=self.kf,
density=1e3,
)
# finalize model
self.model = builder.finalize()
self.model.ground = True
self.integrator = wp.sim.SemiImplicitIntegrator()
# -----------------------
# set up OpenGL renderer
if self.enable_rendering:
self.renderer = wp.sim.render.SimRendererOpenGL(self.model, stage, scaling=0.5)
def load_mesh(self, filename, path):
asset_stage = Usd.Stage.Open(filename)
mesh_geom = UsdGeom.Mesh(asset_stage.GetPrimAtPath(path))
points = np.array(mesh_geom.GetPointsAttr().Get())
indices = np.array(mesh_geom.GetFaceVertexIndicesAttr().Get()).flatten()
return wp.sim.Mesh(points, indices)
def update(self):
for _ in range(self.sim_substeps):
self.state_0.clear_forces()
wp.sim.collide(self.model, self.state_0)
self.integrator.simulate(self.model, self.state_0, self.state_1, self.sim_dt)
self.state_0, self.state_1 = self.state_1, self.state_0
def render(self, is_live=False):
time = 0.0 if is_live else self.sim_time
self.renderer.begin_frame(time)
self.renderer.render(self.state_0)
self.renderer.end_frame()
def run(self, render=True):
# ---------------
# run simulation
self.sim_time = 0.0
self.state_0 = self.model.state()
self.state_1 = self.model.state()
wp.sim.eval_fk(self.model, self.model.joint_q, self.model.joint_qd, None, self.state_0)
profiler = {}
# create update graph
wp.capture_begin()
# simulate
self.update()
graph = wp.capture_end()
# simulate
with wp.ScopedTimer("simulate", detailed=False, print=False, active=True, dict=profiler):
for f in range(0, self.episode_frames):
with wp.ScopedTimer("simulate", active=True):
wp.capture_launch(graph)
self.sim_time += self.frame_dt
if self.enable_rendering:
with wp.ScopedTimer("render", active=True):
self.render()
wp.synchronize()
stage = os.path.join(os.path.dirname(__file__), "outputs/example_sim_rigid_contact.usd")
robot = Example(stage, render=True)
robot.run()
# --- end of file: warp-main/examples/example_sim_rigid_contact.py ---
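
The load_mesh helper above is self-contained; a stand-alone sketch of the same USD to wp.sim.Mesh path, assuming the bunny asset ships next to the script as in the example:

import os
import numpy as np
from pxr import Usd, UsdGeom
import warp as wp
import warp.sim

usd_path = os.path.join(os.path.dirname(__file__), "assets/bunny.usd")
asset_stage = Usd.Stage.Open(usd_path)
mesh_geom = UsdGeom.Mesh(asset_stage.GetPrimAtPath("/bunny/bunny"))

points = np.array(mesh_geom.GetPointsAttr().Get())                        # (N, 3) vertex positions
indices = np.array(mesh_geom.GetFaceVertexIndicesAttr().Get()).flatten()  # flat triangle index list
bunny = wp.sim.Mesh(points, indices)  # ready for ModelBuilder.add_shape_mesh
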
"""
This example simulates a convection-diffusion PDE using FVM with upwind transport
D phi / dt - nu Div f = 0
f = grad phi
"""
import argparse
import warp as wp
from warp.fem.types import *
from warp.fem.geometry import Grid2D, Trimesh2D
from warp.fem.field import make_test, make_trial
from warp.fem.space import make_polynomial_space
from warp.fem.quadrature import RegularQuadrature
from warp.fem.domain import Cells, Sides
from warp.fem.integrate import integrate, interpolate
from warp.fem.operator import integrand, jump, average, normal
from warp.sparse import bsr_mm, bsr_axpy, bsr_transposed
from bsr_utils import bsr_to_scipy, invert_diagonal_bsr_mass_matrix
from plot_utils import plot_surface
from mesh_utils import gen_trimesh
from example_convection_diffusion import initial_condition, velocity, inertia_form
from scipy.sparse.linalg import factorized
import matplotlib.pyplot as plt
import matplotlib.animation as animation
@integrand
def vel_mass_form(
s: Sample,
u: Field,
v: Field,
):
return wp.dot(v(s), u(s))
@integrand
def half_diffusion_form(
s: Sample,
domain: Domain,
psi: Field,
u: Field,
):
return jump(psi, s) * wp.dot(average(u, s), normal(domain, s))
@integrand
def upwind_transport_form(s: Sample, domain: Domain, phi: Field, psi: Field, ang_vel: float):
pos = domain(s)
vel = velocity(pos, ang_vel)
vel_n = wp.dot(vel, normal(domain, s))
return jump(psi, s) * (average(phi, s) * vel_n + 0.5 * jump(phi, s) * wp.abs(vel_n))
if __name__ == "__main__":
wp.init()
wp.set_module_options({"enable_backward": False})
parser = argparse.ArgumentParser()
parser.add_argument("--resolution", type=int, default=50)
parser.add_argument("--n_frames", type=int, default=250)
parser.add_argument("--viscosity", type=float, default=0.001)
parser.add_argument("--ang_vel", type=float, default=1.0)
parser.add_argument("--tri_mesh", action="store_true", help="Use a triangular mesh")
args = parser.parse_args()
res = args.resolution
dt = 1.0 / (args.ang_vel * res)
if args.tri_mesh:
positions, tri_vidx = gen_trimesh(res=vec2i(res))
geo = Trimesh2D(tri_vertex_indices=tri_vidx, positions=positions)
else:
geo = Grid2D(res=vec2i(res))
domain = Cells(geometry=geo)
sides = Sides(geo)
quadrature = RegularQuadrature(domain=domain, order=2)
scalar_space = make_polynomial_space(geo, degree=0)
# Initial condition
phi0 = scalar_space.make_field()
interpolate(initial_condition, dest=phi0)
# Inertia matrix
test = make_test(space=scalar_space, domain=domain)
trial = make_trial(space=scalar_space, domain=domain)
matrix_inertia = integrate(
inertia_form,
quadrature=quadrature,
fields={"phi": trial, "psi": test},
values={"dt": dt},
)
# Upwind transport term
side_test = make_test(space=scalar_space, domain=sides)
side_trial = make_trial(space=scalar_space, domain=sides)
matrix_transport = integrate(
upwind_transport_form,
fields={"phi": side_trial, "psi": side_test},
values={"ang_vel": args.ang_vel},
)
# Diffusion bilinear form
# Since we have piecewise constant elements, we cannot use the classical diffusion form
# Instead we assemble the matrix B M^-1 B^T, with B associated to the form psi div(u)
# and the diagonal matrix M to the velocity mass form u.v
velocity_space = make_polynomial_space(geo, degree=0, dtype=wp.vec2)
side_trial_vel = make_trial(space=velocity_space, domain=sides)
matrix_half_diffusion = integrate(
half_diffusion_form,
fields={"psi": side_test, "u": side_trial_vel},
)
# Diagonal velocity mass matrix
test_vel = make_test(space=velocity_space, domain=domain)
trial_vel = make_trial(space=velocity_space, domain=domain)
inv_vel_mass_matrix = integrate(vel_mass_form, domain=domain, fields={"u": trial_vel, "v": test_vel}, nodal=True)
invert_diagonal_bsr_mass_matrix(inv_vel_mass_matrix)
# Assemble system matrix
matrix = matrix_inertia
# matrix += matrix_transport
bsr_axpy(x=matrix_transport, y=matrix)
# matrix += nu * B M^-1 B^T
bsr_mm(
x=bsr_mm(matrix_half_diffusion, inv_vel_mass_matrix),
y=bsr_transposed(matrix_half_diffusion),
z=matrix,
alpha=args.viscosity,
beta=1.0,
)
matrix_solve = factorized(bsr_to_scipy(matrix))
results = [phi0.dof_values.numpy()]
phik = phi0
for k in range(args.n_frames):
# right-hand-side -- standard inertia
rhs = integrate(
inertia_form,
quadrature=quadrature,
fields={"phi": phik, "psi": test},
values={"dt": dt},
)
# Solve using Scipy
x = matrix_solve(rhs.numpy().flatten())
phik.dof_values = x
results.append(x)
colormesh = plot_surface(phi0)
ax = colormesh.axes
def animate(i):
ax.clear()
phik.dof_values = results[i]
return plot_surface(phik, axes=ax)
anim = animation.FuncAnimation(
ax.figure,
animate,
interval=30,
blit=False,
frames=len(results),
)
plt.show()
# --- end of file: warp-main/examples/fem/example_convection_diffusion_dg0.py ---
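
Because the assembled system matrix above is constant in time, the example factorizes it once with SciPy and then performs only a back-substitution per frame. The pattern, distilled (a sketch: assemble_rhs is a hypothetical stand-in for the per-frame integrate(inertia_form, ...) call, while matrix, phik, and the frame count come from the script above):

from scipy.sparse.linalg import factorized

solve = factorized(bsr_to_scipy(matrix))    # one sparse LU factorization up front
for k in range(n_frames):
    rhs = assemble_rhs(phik)                # hypothetical: inertia form of the current field
    phik.dof_values = solve(rhs.flatten())  # per frame: back-substitution only
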
from typing import Union, Any, Tuple
import warp as wp
import warp.types
from warp.sparse import BsrMatrix, bsr_zeros, bsr_get_diag, bsr_mv
from warp.utils import array_inner
try:
from scipy.sparse import csr_array, bsr_array
except ImportError:
# Workaround for older SciPy versions
try:
from scipy.sparse import csr_matrix as csr_array, bsr_matrix as bsr_array
except ImportError:
pass
def bsr_to_scipy(matrix: BsrMatrix) -> "bsr_array":
if matrix.block_shape == (1, 1):
return csr_array(
(
matrix.values.numpy().flatten()[: matrix.nnz],
matrix.columns.numpy()[: matrix.nnz],
matrix.offsets.numpy(),
),
shape=matrix.shape,
)
return bsr_array(
(
matrix.values.numpy().reshape((matrix.values.shape[0], *matrix.block_shape))[: matrix.nnz],
matrix.columns.numpy()[: matrix.nnz],
matrix.offsets.numpy(),
),
shape=matrix.shape,
)
def scipy_to_bsr(sp: Union["bsr_array", "csr_array"], device=None, dtype=None) -> BsrMatrix:
if dtype is None:
dtype = warp.types.np_dtype_to_warp_type[sp.dtype]
sp.sort_indices()
if isinstance(sp, csr_array):
matrix = bsr_zeros(sp.shape[0], sp.shape[1], dtype, device=device)
else:
block_shape = sp.blocksize
block_type = wp.types.matrix(shape=block_shape, dtype=dtype)
matrix = bsr_zeros(sp.shape[0] // block_shape[0], sp.shape[1] // block_shape[1], block_type, device=device)
matrix.nnz = sp.nnz
matrix.values = wp.array(sp.data.flatten(), dtype=matrix.values.dtype, device=device)
matrix.columns = wp.array(sp.indices, dtype=matrix.columns.dtype, device=device)
matrix.offsets = wp.array(sp.indptr, dtype=matrix.offsets.dtype, device=device)
return matrix
@wp.kernel
def _bsr_cg_kernel_1(
rs_old: wp.array(dtype=Any),
p_Ap: wp.array(dtype=Any),
x: wp.array(dtype=Any),
r: wp.array(dtype=Any),
p: wp.array(dtype=Any),
Ap: wp.array(dtype=Any),
):
i = wp.tid()
alpha = rs_old[0] / p_Ap[0]
x[i] = x[i] + alpha * p[i]
r[i] = r[i] - alpha * Ap[i]
@wp.kernel
def _bsr_cg_kernel_2(
tol: Any,
rs_old: wp.array(dtype=Any),
rs_new: wp.array(dtype=Any),
z: wp.array(dtype=Any),
p: wp.array(dtype=Any),
):
# p = r + (rsnew / rsold) * p;
i = wp.tid()
    if rs_new[0] > tol:
        beta = rs_new[0] / rs_old[0]
    else:
        # typed zero, so the kernel stays generic over the scalar dtype
        beta = rs_new[0] - rs_new[0]
p[i] = z[i] + beta * p[i]
@wp.kernel
def _bsr_cg_solve_block_diag_precond_kernel(
diag: wp.array(dtype=Any),
r: wp.array(dtype=Any),
z: wp.array(dtype=Any),
):
i = wp.tid()
d = wp.get_diag(diag[i])
if wp.dot(d, d) == 0.0:
z[i] = r[i]
else:
d_abs = wp.max(d, -d)
z[i] = wp.cw_div(r[i], d_abs)
@wp.kernel
def _bsr_cg_solve_scalar_diag_precond_kernel(
diag: wp.array(dtype=Any),
r: wp.array(dtype=Any),
z: wp.array(dtype=Any),
):
i = wp.tid()
d = diag[i]
if d == 0.0:
z[i] = r[i]
else:
z[i] = r[i] / wp.abs(d)
def bsr_cg(
A: BsrMatrix,
x: wp.array,
b: wp.array,
max_iters: int = 0,
tol: float = 0.0001,
check_every=10,
use_diag_precond=True,
mv_routine=bsr_mv,
device=None,
) -> Tuple[float, int]:
"""Solves the linear system A x = b using the Conjugate Gradient method, optionally with diagonal preconditioning
Args:
A: system left-hand side
x: result vector and initial guess
b: system right-hand-side
        max_iters: maximum number of iterations to perform before aborting. If set to zero, defaults to the system size.
        tol: relative tolerance under which to stop the solve; the residual norm is compared against ``tol`` squared times the system size
        check_every: number of iterations between evaluations of the current residual norm against the tolerance
        use_diag_precond: whether to use diagonal preconditioning
        mv_routine: matrix-vector multiplication routine to use for multiplications with ``A``
        device: Warp device to use for the computation
Returns:
Tuple (residual norm, iteration count)
"""
if max_iters == 0:
max_iters = A.shape[0]
if device is None:
device = A.values.device
scalar_dtype = A.scalar_type
r = wp.zeros_like(b)
p = wp.zeros_like(b)
Ap = wp.zeros_like(b)
if use_diag_precond:
A_diag = bsr_get_diag(A)
z = wp.zeros_like(b)
if A.block_shape == (1, 1):
precond_kernel = _bsr_cg_solve_scalar_diag_precond_kernel
else:
precond_kernel = _bsr_cg_solve_block_diag_precond_kernel
else:
z = r
rz_old = wp.empty(n=1, dtype=scalar_dtype, device=device)
rz_new = wp.empty(n=1, dtype=scalar_dtype, device=device)
p_Ap = wp.empty(n=1, dtype=scalar_dtype, device=device)
# r = b - A * x;
r.assign(b)
mv_routine(A, x, r, alpha=-1.0, beta=1.0)
# z = M^-1 r
if use_diag_precond:
wp.launch(kernel=precond_kernel, dim=A.nrow, device=device, inputs=[A_diag, r, z])
# p = z;
p.assign(z)
# rsold = r' * z;
array_inner(r, z, out=rz_old)
tol_sq = tol * tol * A.shape[0]
err = rz_old.numpy()[0]
end_iter = 0
if err > tol_sq:
end_iter = max_iters
for i in range(max_iters):
# Ap = A * p;
mv_routine(A, p, Ap)
array_inner(p, Ap, out=p_Ap)
wp.launch(kernel=_bsr_cg_kernel_1, dim=A.nrow, device=device, inputs=[rz_old, p_Ap, x, r, p, Ap])
# z = M^-1 r
if use_diag_precond:
wp.launch(kernel=precond_kernel, dim=A.nrow, device=device, inputs=[A_diag, r, z])
# rznew = r' * z;
array_inner(r, z, out=rz_new)
if ((i + 1) % check_every) == 0:
err = rz_new.numpy()[0]
print(f"At iteration {i} error = \t {err} \t tol: {tol_sq}")
if err <= tol_sq:
end_iter = i
break
wp.launch(kernel=_bsr_cg_kernel_2, dim=A.nrow, device=device, inputs=[tol_sq, rz_old, rz_new, z, p])
# swap buffers
            rz_tmp = rz_old
            rz_old = rz_new
            rz_new = rz_tmp
err = rz_old.numpy()[0]
print(f"Terminated after {end_iter} iterations with error = \t {err}")
return err, end_iter
def invert_diagonal_bsr_mass_matrix(A: BsrMatrix):
"""Inverts each block of a block-diagonal mass matrix"""
wp.launch(kernel=_block_diagonal_mass_invert, dim=A.nrow, inputs=[A.values], device=A.values.device)
@wp.kernel
def _block_diagonal_mass_invert(values: wp.array(dtype=Any)):
i = wp.tid()
values[i] = values[i] / wp.ddot(values[i], values[i])
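

if __name__ == "__main__":
    # Minimal smoke test for the helpers above -- a sketch, not part of the
    # library. Assumes SciPy is installed and a default Warp device is available.
    import numpy as np

    wp.init()

    # Round-trip a small SPD matrix through SciPy, then solve A x = b with CG
    sp = csr_array(np.array([[4.0, 1.0], [1.0, 3.0]]))
    A = scipy_to_bsr(sp)
    assert np.allclose(bsr_to_scipy(A).toarray(), sp.toarray())

    b = wp.array([1.0, 2.0], dtype=wp.float64, device=A.values.device)
    x = wp.zeros_like(b)
    bsr_cg(A, x=x, b=b, check_every=1)
    assert np.allclose(sp @ x.numpy(), b.numpy(), atol=1.0e-4)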
| warp-main | examples/fem/bsr_utils.py |
"""
This example computes a 3D weakly-compressible Stokes flow around a moving object, including:
- defining active cells from a mask, and restricting the computation domain to those
- utilizing the PicQuadrature to integrate over unstructured particles
"""
import warp as wp
import numpy as np
from warp.fem.types import *
from warp.fem.geometry import Grid3D, ExplicitGeometryPartition
from warp.fem.field import make_test, make_trial
from warp.fem.space import make_polynomial_space, make_space_partition
from warp.fem.domain import Cells
from warp.fem.integrate import integrate, interpolate
from warp.fem.operator import integrand, D, div
from warp.fem.quadrature import PicQuadrature
from warp.fem.utils import array_axpy
from warp.sparse import bsr_mv
from plot_utils import plot_3d_scatter, plot_3d_velocities
from bsr_utils import bsr_cg
from example_stokes_transfer import inverse_array_kernel
from warp.utils import array_cast
from warp.sparse import bsr_transposed, bsr_mm, bsr_axpy
import matplotlib.pyplot as plt
@integrand
def vel_from_particles_form(s: Sample, particle_vel: wp.array(dtype=wp.vec3), v: Field):
vel = particle_vel[s.qp_index]
return wp.dot(vel, v(s))
@integrand
def viscosity_form(s: Sample, u: Field, v: Field, nu: float):
return nu * wp.ddot(D(u, s), D(v, s))
@integrand
def mass_form(
s: Sample,
u: Field,
v: Field,
):
return wp.dot(u(s), v(s))
@integrand
def scalar_mass_form(
s: Sample,
p: Field,
q: Field,
):
return p(s) * q(s)
@integrand
def div_form(
s: Sample,
u: Field,
q: Field,
):
return q(s) * div(u, s)
@integrand
def cell_activity(s: Sample, domain: Domain, c1: wp.vec3, c2: wp.vec3, radius: float):
pos = domain(s)
if wp.length(pos - c1) < radius:
return 0.0
if wp.length(pos - c2) < radius:
return 0.0
return 1.0
if __name__ == "__main__":
wp.init()
wp.set_module_options({"enable_backward": False})
res = 20
geo = Grid3D(
res=vec3i(res, res, res),
bounds_lo=wp.vec3(0.0, 0.0, 0.0),
bounds_hi=wp.vec3(1.0, 1.0, 1.0),
)
vel = 1.0
viscosity = 100.0
compliance = 0.01
bd_strength = 100000.0
# Generate particles defining the transfer displacement
circle_radius = 0.15
c1_center = np.array([0.25, 0.5, 0.5])
c2_center = np.array([0.75, 0.5, 0.5])
particles_per_side = int(4 * circle_radius * res)
particles_x = np.linspace(-circle_radius, circle_radius, particles_per_side)
cube_particles = np.array([[px, py, pz] for px in particles_x for py in particles_x for pz in particles_x])
particles_per_circle = particles_per_side**3
n_particles = 2 * particles_per_circle
particles = np.empty((n_particles, 3), dtype=float)
particles[:particles_per_circle, :] = cube_particles + c1_center
particles[particles_per_circle:, :] = cube_particles + c2_center
particle_areas = np.ones(n_particles) * circle_radius * circle_radius / (res * res)
particle_velocities = np.zeros_like(particles)
particle_velocities[:particles_per_circle, 0] = vel
particle_velocities[particles_per_circle:, 0] = -vel
particles = wp.array(particles, dtype=wp.vec3)
particle_areas = wp.array(particle_areas, dtype=float)
particle_velocities = wp.array(particle_velocities, dtype=wp.vec3)
# Disable cells that are interior to the circles
cell_space = make_polynomial_space(geo, degree=0)
activity = cell_space.make_field()
interpolate(
cell_activity,
dest=activity,
values={"c1": c1_center, "c2": c2_center, "radius": circle_radius - 1.0 / res},
)
active_partition = ExplicitGeometryPartition(geo, wp.array(activity.dof_values.numpy(), dtype=int))
print("Active cells:", active_partition.cell_count())
# Function spaces -- Q1 for vel, Q0 for pressure
u_space = make_polynomial_space(geo, degree=1, dtype=wp.vec3)
p_space = make_polynomial_space(geo, degree=0)
active_space_partition = make_space_partition(space=u_space, geometry_partition=active_partition)
active_p_space_partition = make_space_partition(space=p_space, geometry_partition=active_partition)
domain = Cells(geometry=active_partition)
pic_quadrature = PicQuadrature(domain, particles, particle_areas)
# Boundary condition on particles
u_test = make_test(space=u_space, space_partition=active_space_partition, domain=domain)
u_trial = make_trial(space=u_space, space_partition=active_space_partition, domain=domain)
u_rhs = integrate(
vel_from_particles_form,
quadrature=pic_quadrature,
fields={"v": u_test},
values={"particle_vel": particle_velocities},
output_dtype=wp.vec3d
)
u_bd_matrix = integrate(mass_form, quadrature=pic_quadrature, fields={"u": u_trial, "v": u_test})
# Viscosity
u_visc_matrix = integrate(
viscosity_form,
fields={"u": u_trial, "v": u_test},
values={"nu": viscosity}
)
# Pressure-velocity coupling
p_test = make_test(space=p_space, space_partition=active_p_space_partition, domain=domain)
p_trial = make_trial(space=p_space, space_partition=active_p_space_partition, domain=domain)
div_matrix = integrate(div_form, fields={"u": u_trial, "q": p_test})
inv_p_mass_matrix = integrate(scalar_mass_form, fields={"p": p_trial, "q": p_test})
wp.launch(
kernel=inverse_array_kernel,
dim=inv_p_mass_matrix.values.shape,
device=inv_p_mass_matrix.values.device,
inputs=[inv_p_mass_matrix.values],
)
# Assemble linear system
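    # Weakly-compressible pressure elimination: substituting p = -(1/compliance) * M_p^{-1} B u
    # yields the velocity-only system
    #   (K_visc + bd_strength * M_bd + (1/compliance) * B^T M_p^{-1} B) u = bd_strength * u_rhs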
u_matrix = u_visc_matrix
bsr_axpy(u_bd_matrix, u_matrix, alpha=bd_strength)
div_matrix_t = bsr_transposed(div_matrix)
gradient_matrix = bsr_mm(div_matrix_t, inv_p_mass_matrix)
bsr_mm(gradient_matrix, div_matrix, u_matrix, alpha=1.0 / compliance, beta=1.0)
array_axpy(u_rhs, u_rhs, alpha=0.0, beta=bd_strength)
# Solve for displacement
u_res = wp.zeros_like(u_rhs)
bsr_cg(u_matrix, x=u_res, b=u_rhs)
# Recompute pressure
p_res = wp.zeros(n=active_p_space_partition.node_count(), dtype=wp.float64)
p_tmp = wp.empty_like(p_res)
bsr_mv(A=div_matrix, x=u_res, y=p_tmp)
bsr_mv(A=inv_p_mass_matrix, x=p_tmp, y=p_res, alpha=-1)
# Display result
u_field = u_space.make_field()
p_field = p_space.make_field()
u_nodes = wp.indexedarray(u_field.dof_values, indices=active_space_partition.space_node_indices())
p_nodes = wp.indexedarray(p_field.dof_values, indices=active_p_space_partition.space_node_indices())
array_cast(in_array=u_res, out_array=u_nodes)
array_cast(in_array=p_res, out_array=p_nodes)
plot_3d_scatter(p_field)
plot_3d_velocities(u_field)
plt.show()
| warp-main | examples/fem/example_stokes_transfer_3d.py |
"""
This example simulates a convection-diffusion PDE using Discontinuous Galerkin
with upwind transport and Symmetric Interior Penalty
D phi / dt - nu d2 phi / dx^2 = 0
"""
import argparse
import warp as wp
from warp.fem.types import *
from warp.fem.geometry import Grid2D, Trimesh2D
from warp.fem.field import make_test, make_trial
from warp.fem.space import make_polynomial_space
from warp.fem.quadrature import RegularQuadrature
from warp.fem.domain import Cells, Sides
from warp.fem.integrate import integrate, interpolate
from warp.fem.polynomial import Polynomial
from warp.fem.operator import (
grad,
integrand,
jump,
average,
normal,
grad_average,
measure_ratio,
degree,
)
from warp.sparse import bsr_axpy
from bsr_utils import bsr_to_scipy
from plot_utils import plot_surface
from mesh_utils import gen_trimesh
from example_convection_diffusion import (
initial_condition,
velocity,
inertia_form,
diffusion_form,
)
from scipy.sparse.linalg import factorized
import matplotlib.pyplot as plt
import matplotlib.animation as animation
# Standard transport term, on cells' interior
@integrand
def transport_form(s: Sample, domain: Domain, phi: Field, psi: Field, ang_vel: float):
pos = domain(s)
vel = velocity(pos, ang_vel)
return psi(s) * wp.dot(grad(phi, s), vel)
# Upwind flux, on cell sides
@integrand
def upwind_transport_form(s: Sample, domain: Domain, phi: Field, psi: Field, ang_vel: float):
pos = domain(s)
vel = velocity(pos, ang_vel)
vel_n = wp.dot(vel, normal(domain, s))
return jump(phi, s) * (-average(psi, s) * vel_n + 0.5 * jump(psi, s) * wp.abs(vel_n))
# Symmetric Interior Penalty diffusion term (see Di Pietro and Ern 2012)
@integrand
def sip_diffusion_form(
s: Sample,
domain: Domain,
psi: Field,
phi: Field,
):
nor = normal(domain, s)
penalty = measure_ratio(domain, s) * float(degree(psi) * degree(phi))
return penalty * jump(phi, s) * jump(psi, s) - (
wp.dot(grad_average(phi, s), nor) * jump(psi, s) + wp.dot(grad_average(psi, s), nor) * jump(phi, s)
)
@integrand
def identity(s: Sample, phi: Field):
return phi(s)
if __name__ == "__main__":
wp.init()
wp.set_module_options({"enable_backward": False})
parser = argparse.ArgumentParser()
parser.add_argument("--resolution", type=int, default=50)
parser.add_argument("--degree", type=int, default=2)
parser.add_argument("--n_frames", type=int, default=100)
parser.add_argument("--viscosity", type=float, default=0.001)
parser.add_argument("--ang_vel", type=float, default=1.0)
parser.add_argument("--tri_mesh", action="store_true", help="Use a triangular mesh")
args = parser.parse_args()
res = args.resolution
dt = 1.0 / (args.ang_vel * res)
if args.tri_mesh:
positions, tri_vidx = gen_trimesh(res=vec2i(res))
geo = Trimesh2D(tri_vertex_indices=tri_vidx, positions=positions)
else:
geo = Grid2D(res=vec2i(res))
domain = Cells(geometry=geo)
sides = Sides(geo)
quadrature = RegularQuadrature(domain=domain, order=2 * args.degree)
scalar_space = make_polynomial_space(
geo,
discontinuous=True,
degree=args.degree,
family=Polynomial.GAUSS_LEGENDRE,
)
# Right-hand-side
phi0 = scalar_space.make_field()
interpolate(initial_condition, dest=phi0)
test = make_test(space=scalar_space, domain=domain)
trial = make_trial(space=scalar_space, domain=domain)
side_test = make_test(space=scalar_space, domain=sides)
side_trial = make_trial(space=scalar_space, domain=sides)
matrix_inertia = integrate(
inertia_form,
quadrature=quadrature,
fields={"phi": trial, "psi": test},
values={"dt": dt},
)
matrix_transport = integrate(
transport_form,
fields={"phi": trial, "psi": test},
values={"ang_vel": args.ang_vel},
)
bsr_axpy(
integrate(
upwind_transport_form,
fields={"phi": side_trial, "psi": side_test},
values={"ang_vel": args.ang_vel},
),
y=matrix_transport,
)
matrix_diffusion = integrate(
diffusion_form,
fields={"u": trial, "v": test},
)
bsr_axpy(
integrate(
sip_diffusion_form,
fields={"phi": side_trial, "psi": side_test},
),
y=matrix_diffusion,
)
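    # Final system per step:
    #   (M / dt + T + nu * K) phi^{k+1} = M phi^k / dt
    # with T the transport matrix (volume + upwind flux terms) and K the
    # diffusion matrix (volume + SIP terms)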
matrix = matrix_inertia
bsr_axpy(x=matrix_transport, y=matrix)
bsr_axpy(x=matrix_diffusion, y=matrix, alpha=args.viscosity)
matrix_solve = factorized(bsr_to_scipy(matrix))
results = [phi0.dof_values.numpy()]
phik = phi0
for k in range(args.n_frames):
rhs = integrate(
inertia_form,
quadrature=quadrature,
fields={"phi": phik, "psi": test},
values={"dt": dt},
)
# Solve using Scipy
x = matrix_solve(rhs.numpy().flatten())
phik.dof_values = x
results.append(x)
colormesh = plot_surface(phi0)
ax = colormesh.axes
# Convert to continuous for visualization
viz_space = make_polynomial_space(geo, degree=args.degree)
phi_viz = viz_space.make_field()
def animate(i):
ax.clear()
phik.dof_values = results[i]
interpolate(identity, fields={"phi": phik}, dest=phi_viz)
return plot_surface(phi_viz, axes=ax)
anim = animation.FuncAnimation(
ax.figure,
animate,
interval=30,
blit=False,
frames=len(results),
)
plt.show()
| warp-main | examples/fem/example_convection_diffusion_dg.py |
"""
This example illustrates using domain decomposition to solve a diffusion PDE over multiple devices
"""
from typing import Tuple
import warp as wp
from warp.fem.types import *
from warp.fem.geometry import Grid2D, LinearGeometryPartition
from warp.fem.space import make_polynomial_space, make_space_partition
from warp.fem.field import make_test, make_trial
from warp.fem.domain import Cells, BoundarySides
from warp.fem.integrate import integrate
from warp.fem.operator import integrand
from warp.sparse import bsr_axpy, bsr_mv
from warp.utils import array_cast
from example_diffusion import linear_form, diffusion_form
from bsr_utils import bsr_cg
from plot_utils import plot_grid_surface
import matplotlib.pyplot as plt
@integrand
def mass_form(
s: Sample,
u: Field,
v: Field,
):
return u(s) * v(s)
@wp.kernel
def scal_kernel(a: wp.array(dtype=wp.float64), alpha: wp.float64):
a[wp.tid()] = a[wp.tid()] * alpha
@wp.kernel
def sum_kernel(a: wp.indexedarray(dtype=wp.float64), b: wp.array(dtype=wp.float64)):
a[wp.tid()] = a[wp.tid()] + b[wp.tid()]
def sum_vecs(vecs, indices, sum: wp.array, tmp: wp.array):
for v, idx in zip(vecs, indices):
wp.copy(dest=tmp, src=v)
idx_sum = wp.indexedarray(sum, idx)
wp.launch(kernel=sum_kernel, dim=idx.shape, device=sum.device, inputs=[idx_sum, tmp])
return sum
class DistributedSystem:
device = None
scalar_type: type
tmp_buf: wp.array
nrow: int
    shape: Tuple[int, int]
rank_data = None
def mv_routine(A: DistributedSystem, x: wp.array, y: wp.array, alpha=1.0, beta=0.0):
"""Distributed matrix-vector multiplication routine, for example purposes"""
tmp = A.tmp_buf
wp.launch(kernel=scal_kernel, dim=y.shape, device=y.device, inputs=[y, wp.float64(beta)])
stream = wp.get_stream()
for mat_i, x_i, y_i, idx in zip(*A.rank_data):
# WAR copy with indexed array requiring matching shape
tmp_i = wp.array(
ptr=tmp.ptr, device=tmp.device, capacity=tmp.capacity, dtype=tmp.dtype, shape=idx.shape, owner=False
)
# Compress rhs on rank 0
x_idx = wp.indexedarray(x, idx)
wp.copy(dest=tmp_i, src=x_idx, count=idx.size, stream=stream)
# Send to rank i
wp.copy(dest=x_i, src=tmp_i, count=idx.size, stream=stream)
with wp.ScopedDevice(x_i.device):
wp.wait_stream(stream)
bsr_mv(A=mat_i, x=x_i, y=y_i, alpha=alpha, beta=0.0)
wp.wait_stream(wp.get_stream(x_i.device))
# Back to rank 0 for sum
wp.copy(dest=tmp_i, src=y_i, count=idx.size, stream=stream)
y_idx = wp.indexedarray(y, idx)
wp.launch(kernel=sum_kernel, dim=idx.shape, device=y_idx.device, inputs=[y_idx, tmp_i], stream=stream)
if __name__ == "__main__":
wp.init()
wp.set_module_options({"enable_backward": False})
geo = Grid2D(res=vec2i(25))
bd_weight = 100.0
scalar_space = make_polynomial_space(geo, degree=3)
devices = wp.get_cuda_devices()
rhs_vecs = []
res_vecs = []
matrices = []
indices = []
main_device = devices[0]
# Build local system for each device
for k, device in enumerate(devices):
with wp.ScopedDevice(device):
# Construct the partition corresponding to the k'th device
geo_partition = LinearGeometryPartition(geo, k, len(devices))
space_partition = make_space_partition(scalar_space, geo_partition)
domain = Cells(geometry=geo_partition)
# Right-hand-side
test = make_test(space_partition=space_partition, domain=domain)
rhs = integrate(linear_form, fields={"v": test})
# Weakly-imposed boundary conditions on all sides
boundary = BoundarySides(geometry=geo_partition)
bd_test = make_test(space_partition=space_partition, domain=boundary)
bd_trial = make_trial(space_partition=space_partition, domain=boundary)
bd_matrix = integrate(mass_form, fields={"u": bd_trial, "v": bd_test})
# Diffusion form
trial = make_trial(space_partition=space_partition, domain=domain)
matrix = integrate(diffusion_form, fields={"u": trial, "v": test}, values={"nu": 1.0})
bsr_axpy(y=matrix, x=bd_matrix, alpha=bd_weight)
rhs_vecs.append(rhs)
res_vecs.append(wp.empty_like(rhs))
matrices.append(matrix)
indices.append(space_partition.space_node_indices().to(main_device))
# Global rhs as sum of all local rhs
glob_rhs = wp.zeros(n=scalar_space.node_count(), dtype=wp.float64, device=main_device)
tmp = wp.empty_like(glob_rhs)
sum_vecs(rhs_vecs, indices, glob_rhs, tmp)
# Distributed CG
glob_res = wp.zeros_like(glob_rhs)
A = DistributedSystem()
    A.device = main_device
A.scalar_type = glob_rhs.dtype
A.nrow = scalar_space.node_count()
A.shape = (A.nrow, A.nrow)
A.tmp_buf = tmp
A.rank_data = (matrices, rhs_vecs, res_vecs, indices)
with wp.ScopedDevice(main_device):
bsr_cg(A, x=glob_res, b=glob_rhs, use_diag_precond=False, mv_routine=mv_routine, device=main_device)
scalar_field = scalar_space.make_field()
array_cast(in_array=glob_res, out_array=scalar_field.dof_values)
plot_grid_surface(scalar_field)
plt.show()
| warp-main | examples/fem/example_diffusion_mgpu.py |
| warp-main | examples/fem/__init__.py
import os
import math
from typing import Any
import warp as wp
import numpy as np
import warp.sim.render
from warp.sim import Model, State
from warp.fem.geometry import Grid3D
from warp.fem.domain import Cells, BoundarySides
from warp.fem.space import make_polynomial_space
from warp.fem.quadrature import PicQuadrature
from warp.fem.field import make_test, make_trial
from warp.fem.types import vec3i, Field, Sample, Domain
from warp.fem.integrate import integrate
from warp.fem.operator import integrand, lookup, normal, grad, at_node, div
from warp.fem.dirichlet import normalize_dirichlet_projector
from warp.sparse import bsr_mv, bsr_copy, bsr_mm, bsr_transposed, BsrMatrix
from bsr_utils import bsr_cg
@integrand
def integrate_fraction(s: Sample, phi: Field):
return phi(s)
@integrand
def integrate_velocity(
s: Sample,
domain: Domain,
u: Field,
velocities: wp.array(dtype=wp.vec3),
velocity_gradients: wp.array(dtype=wp.mat33),
dt: float,
gravity: wp.vec3,
):
"""Transfer particle velocities to grid"""
node_offset = domain(at_node(u, s)) - domain(s)
vel_apic = velocities[s.qp_index] + velocity_gradients[s.qp_index] * node_offset
vel_adv = vel_apic + dt * gravity
return wp.dot(u(s), vel_adv)
@integrand
def update_particles(
s: Sample,
domain: Domain,
grid_vel: Field,
dt: float,
pos: wp.array(dtype=wp.vec3),
pos_prev: wp.array(dtype=wp.vec3),
vel: wp.array(dtype=wp.vec3),
vel_grad: wp.array(dtype=wp.mat33),
):
"""Read particle velocity from grid and advect positions"""
vel[s.qp_index] = grid_vel(s)
vel_grad[s.qp_index] = grad(grid_vel, s)
pos_adv = pos_prev[s.qp_index] + dt * vel[s.qp_index]
# Project onto domain
pos_proj = domain(lookup(domain, pos_adv))
pos[s.qp_index] = pos_proj
return 0.0
@integrand
def velocity_boundary_projector_form(s: Sample, domain: Domain, u: Field, v: Field):
"""Projector for velocity-Dirichlet boundary conditions"""
n = normal(domain, s)
if n[1] > 0.0:
# Neuman on top
return 0.0
# Free-slip on other sides
return wp.dot(u(s), n) * wp.dot(v(s), n)
@integrand
def divergence_form(s: Sample, u: Field, psi: Field):
return div(u, s) * psi(s)
@wp.kernel
def invert_volume_kernel(values: wp.array(dtype=float)):
i = wp.tid()
m = values[i]
if m <= 1.0e-8:
values[i] = 0.0
else:
values[i] = 1.0 / m
@wp.kernel
def scalar_vector_multiply(
alpha: wp.array(dtype=float),
x: wp.array(dtype=wp.vec3),
y: wp.array(dtype=wp.vec3),
):
i = wp.tid()
y[i] = alpha[i] * x[i]
@wp.kernel
def scale_transposed_divergence_mat(
tr_divergence_mat_offsets: wp.array(dtype=int),
tr_divergence_mat_values: wp.array(dtype=wp.mat(shape=(3, 1), dtype=float)),
inv_fraction_int: wp.array(dtype=float),
):
u_i = wp.tid()
block_beg = tr_divergence_mat_offsets[u_i]
block_end = tr_divergence_mat_offsets[u_i + 1]
for b in range(block_beg, block_end):
tr_divergence_mat_values[b] = tr_divergence_mat_values[b] * inv_fraction_int[u_i]
def solve_incompressibility(
divergence_mat: BsrMatrix,
inv_volume,
pressure,
velocity,
):
"""Solve for divergence-free velocity delta:
delta_velocity = inv_volume * transpose(divergence_mat) * pressure
divergence_mat * (velocity + delta_velocity) = 0
"""
# Build transposed gradient matrix, scale with inverse fraction
transposed_divergence_mat = bsr_transposed(divergence_mat)
wp.launch(
kernel=scale_transposed_divergence_mat,
dim=inv_volume.shape[0],
inputs=[
transposed_divergence_mat.offsets,
transposed_divergence_mat.values,
inv_volume,
],
)
# For simplicity, assemble schur complement and solve with CG
schur = bsr_mm(divergence_mat, transposed_divergence_mat)
rhs = wp.zeros_like(pressure)
bsr_mv(A=divergence_mat, x=velocity, y=rhs, alpha=-1.0, beta=0.0)
bsr_cg(schur, b=rhs, x=pressure)
# Apply pressure to velocity
bsr_mv(A=transposed_divergence_mat, x=pressure, y=velocity, alpha=1.0, beta=1.0)
class Example:
def __init__(self, stage):
self.frame_dt = 1.0 / 60
self.frame_count = 1000
self.sim_substeps = 1
self.sim_dt = self.frame_dt / self.sim_substeps
self.sim_steps = self.frame_count * self.sim_substeps
self.sim_time = 0.0
# grid dimensions and particle emission
grid_res = np.array([32, 64, 16], dtype=int)
particle_fill_frac = np.array([0.5, 0.5, 1.0])
grid_lo = wp.vec3(0.0)
grid_hi = wp.vec3(50, 100, 25)
grid_cell_size = np.array(grid_hi - grid_lo) / grid_res
grid_cell_volume = np.prod(grid_cell_size)
PARTICLES_PER_CELL_DIM = 3
self.radius = np.max(grid_cell_size) / (2 * PARTICLES_PER_CELL_DIM)
particle_grid_res = np.array(particle_fill_frac * grid_res * PARTICLES_PER_CELL_DIM, dtype=int)
particle_grid_offset = self.radius * np.ones(3)
np.random.seed(0)
builder = wp.sim.ModelBuilder()
builder.add_particle_grid(
dim_x=particle_grid_res[0],
dim_y=particle_grid_res[1],
dim_z=particle_grid_res[2],
cell_x=self.radius * 2.0,
cell_y=self.radius * 2.0,
cell_z=self.radius * 2.0,
pos=(0.0, 0.0, 0.0) + particle_grid_offset,
rot=wp.quat_identity(),
vel=(0.0, 0.0, 0.0),
mass=grid_cell_volume / PARTICLES_PER_CELL_DIM**3,
jitter=self.radius * 1.0,
radius_mean=self.radius,
)
self.grid = Grid3D(vec3i(grid_res), grid_lo, grid_hi)
# Function spaces
self.velocity_space = make_polynomial_space(self.grid, dtype=wp.vec3, degree=1)
self.fraction_space = make_polynomial_space(self.grid, dtype=float, degree=1)
self.strain_space = make_polynomial_space(
self.grid,
dtype=float,
degree=0,
)
self.pressure_field = self.strain_space.make_field()
self.velocity_field = self.velocity_space.make_field()
# Test and trial functions
self.domain = Cells(self.grid)
self.velocity_test = make_test(self.velocity_space, domain=self.domain)
self.velocity_trial = make_trial(self.velocity_space, domain=self.domain)
self.fraction_test = make_test(self.fraction_space, domain=self.domain)
self.strain_test = make_test(self.strain_space, domain=self.domain)
self.strain_trial = make_trial(self.strain_space, domain=self.domain)
        # Enforce the velocity Dirichlet boundary conditions strongly ("hard" BCs):
        # build a projector for the velocity left- and right-hand sides
boundary = BoundarySides(self.grid)
u_bd_test = make_test(space=self.velocity_space, domain=boundary)
u_bd_trial = make_trial(space=self.velocity_space, domain=boundary)
u_bd_projector = integrate(
velocity_boundary_projector_form, fields={"u": u_bd_trial, "v": u_bd_test}, nodal=True, output_dtype=float
)
normalize_dirichlet_projector(u_bd_projector)
self.vel_bd_projector = u_bd_projector
# Warp.sim model
self.model: Model = builder.finalize()
print("Particle count:", self.model.particle_count)
self.state_0: State = self.model.state()
self.state_0.particle_qd_grad = wp.zeros(shape=(self.model.particle_count), dtype=wp.mat33)
self.state_1: State = self.model.state()
self.state_1.particle_qd_grad = wp.zeros(shape=(self.model.particle_count), dtype=wp.mat33)
self.renderer = wp.sim.render.SimRenderer(self.model, stage, scaling=20.0)
def update(self, frame_index):
with wp.ScopedTimer(f"simulate frame {frame_index}", active=True):
for s in range(self.sim_substeps):
# Bin particles to grid cells
pic = PicQuadrature(
domain=Cells(self.grid), positions=self.state_0.particle_q, measures=self.model.particle_mass
)
# Inverse volume fraction
fraction_test = make_test(space=self.fraction_space, domain=pic.domain)
inv_volume = integrate(
integrate_fraction,
quadrature=pic,
fields={"phi": fraction_test},
accumulate_dtype=float,
)
wp.launch(kernel=invert_volume_kernel, dim=inv_volume.shape, inputs=[inv_volume])
# Velocity right-hand side
velocity_int = integrate(
integrate_velocity,
quadrature=pic,
fields={"u": self.velocity_test},
values={
"velocities": self.state_0.particle_qd,
"velocity_gradients": self.state_0.particle_qd_grad,
"dt": self.sim_dt,
"gravity": self.model.gravity,
},
accumulate_dtype=float,
output_dtype=wp.vec3,
)
# Compute constraint-free velocity
wp.launch(
kernel=scalar_vector_multiply,
dim=inv_volume.shape[0],
inputs=[inv_volume, velocity_int, self.velocity_field.dof_values],
)
# Apply velocity boundary conditions:
# velocity -= vel_bd_projector * velocity
wp.copy(src=self.velocity_field.dof_values, dest=velocity_int)
bsr_mv(A=self.vel_bd_projector, x=velocity_int, y=self.velocity_field.dof_values, alpha=-1.0, beta=1.0)
# Divergence matrix
divergence_mat = integrate(
divergence_form,
quadrature=pic,
fields={"u": self.velocity_trial, "psi": self.strain_test},
accumulate_dtype=float,
output_dtype=float,
)
# Project matrix to enforce boundary conditions
divergence_mat_tmp = bsr_copy(divergence_mat)
bsr_mm(alpha=-1.0, x=divergence_mat_tmp, y=self.vel_bd_projector, z=divergence_mat, beta=1.0)
# Solve unilateral incompressibility
solve_incompressibility(
divergence_mat,
inv_volume,
self.pressure_field.dof_values,
self.velocity_field.dof_values,
)
# (A)PIC advection
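                # Note: integrate() is used here purely for its side effects;
                # update_particles writes the advected positions and velocities
                # into the state arrays and returns 0.0, so the integral value
                # is discarded.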
integrate(
update_particles,
quadrature=pic,
values={
"pos": self.state_1.particle_q,
"pos_prev": self.state_0.particle_q,
"vel": self.state_1.particle_qd,
"vel_grad": self.state_1.particle_qd_grad,
"dt": self.sim_dt,
},
fields={"grid_vel": self.velocity_field},
)
# swap states
(self.state_0, self.state_1) = (self.state_1, self.state_0)
def render(self, is_live=False):
with wp.ScopedTimer("render", active=True):
time = 0.0 if is_live else self.sim_time
self.renderer.begin_frame(time)
self.renderer.render(self.state_0)
self.renderer.end_frame()
self.sim_time += self.frame_dt
if __name__ == "__main__":
wp.set_module_options({"enable_backward": False})
wp.init()
stage_path = os.path.join(os.path.dirname(__file__), "outputs/example_sim_apic.usd")
example = Example(stage_path)
for i in range(example.frame_count):
example.update(i)
example.render()
example.renderer.save()
| warp-main | examples/fem/example_apic_fluid.py |
"""
This example computes a 2D weakly-compressible Stokes flow around a moving object, including:
- defining active cells from a mask, and restricting the computation domain to those
- utilizing the PicQuadrature to integrate over unstructured particles
"""
import math
import warp as wp
import numpy as np
from warp.fem.types import *
from warp.fem.geometry import Grid2D, ExplicitGeometryPartition
from warp.fem.field import make_test, make_trial
from warp.fem.space import make_polynomial_space, make_space_partition
from warp.fem.domain import Cells
from warp.fem.integrate import integrate, interpolate
from warp.fem.operator import integrand, D, div
from warp.fem.quadrature import PicQuadrature
from warp.fem.utils import array_axpy
from warp.utils import array_cast
from warp.sparse import bsr_transposed, bsr_mm, bsr_axpy
from plot_utils import plot_grid_streamlines, plot_grid_surface
from bsr_utils import bsr_cg, bsr_mv
import matplotlib.pyplot as plt
@integrand
def vel_from_particles_form(s: Sample, particle_vel: wp.array(dtype=wp.vec2), v: Field):
vel = particle_vel[s.qp_index]
return wp.dot(vel, v(s))
@integrand
def viscosity_form(s: Sample, u: Field, v: Field, nu: float):
return nu * wp.ddot(D(u, s), D(v, s))
@integrand
def mass_form(
s: Sample,
u: Field,
v: Field,
):
return wp.dot(u(s), v(s))
@integrand
def scalar_mass_form(
s: Sample,
p: Field,
q: Field,
):
return p(s) * q(s)
@integrand
def div_form(
s: Sample,
u: Field,
q: Field,
):
return q(s) * div(u, s)
@integrand
def cell_activity(s: Sample, domain: Domain, c1: wp.vec2, c2: wp.vec2, radius: float):
pos = domain(s)
if wp.length(pos - c1) < radius:
return 0.0
if wp.length(pos - c2) < radius:
return 0.0
return 1.0
@wp.kernel
def inverse_array_kernel(m: wp.array(dtype=wp.float64)):
m[wp.tid()] = wp.float64(1.0) / m[wp.tid()]
if __name__ == "__main__":
wp.init()
wp.set_module_options({"enable_backward": False})
res = 50
cell_size = 1.0 / res
geo = Grid2D(res=vec2i(res))
vel = 1.0
viscosity = 100.0
compliance = 0.01
bd_strength = 100000.0
# Generate particles defining the transfer displacement
circle_radius = 0.15
c1_center = wp.vec2(0.25, 0.5)
c2_center = wp.vec2(0.75, 0.5)
particles_per_circle = int(2.0 * math.pi * circle_radius * res)
angles = np.linspace(0, 2.0 * math.pi, particles_per_circle, endpoint=False)
n_particles = 2 * particles_per_circle
particles = np.empty((n_particles, 2), dtype=float)
particles[:particles_per_circle, 0] = c1_center[0] + circle_radius * np.cos(angles)
particles[:particles_per_circle, 1] = c1_center[1] + circle_radius * np.sin(angles)
particles[particles_per_circle:, 0] = c2_center[0] + circle_radius * np.cos(angles)
particles[particles_per_circle:, 1] = c2_center[1] + circle_radius * np.sin(angles)
particle_areas = np.ones(n_particles) * cell_size**2
particle_velocities = np.zeros_like(particles)
particle_velocities[:particles_per_circle, 0] = vel
particle_velocities[particles_per_circle:, 0] = -vel
particles = wp.array(particles, dtype=wp.vec2)
particle_areas = wp.array(particle_areas, dtype=float)
particle_velocities = wp.array(particle_velocities, dtype=wp.vec2)
# Disable cells that are interior to the circles
cell_space = make_polynomial_space(geo, degree=0)
activity = cell_space.make_field()
interpolate(
cell_activity,
dest=activity,
values={"c1": c1_center, "c2": c2_center, "radius": circle_radius - cell_size},
)
active_partition = ExplicitGeometryPartition(geo, wp.array(activity.dof_values.numpy(), dtype=int))
print("Active cells:", active_partition.cell_count())
# Function spaces -- Q1 for vel, Q0 for pressure
u_space = make_polynomial_space(geo, degree=1, dtype=wp.vec2)
p_space = make_polynomial_space(geo, degree=0)
active_space_partition = make_space_partition(space=u_space, geometry_partition=active_partition)
active_p_space_partition = make_space_partition(space=p_space, geometry_partition=active_partition)
domain = Cells(geometry=active_partition)
pic_quadrature = PicQuadrature(domain, particles, particle_areas)
# Boundary condition on particles
u_test = make_test(space=u_space, space_partition=active_space_partition, domain=domain)
u_trial = make_trial(space=u_space, space_partition=active_space_partition, domain=domain)
u_rhs = integrate(
vel_from_particles_form,
quadrature=pic_quadrature,
fields={"v": u_test},
values={"particle_vel": particle_velocities},
output_dtype=wp.vec2d
)
u_bd_matrix = integrate(mass_form, quadrature=pic_quadrature, fields={"u": u_trial, "v": u_test})
# Viscosity
u_visc_matrix = integrate(
viscosity_form,
fields={"u": u_trial, "v": u_test},
values={"nu": viscosity},
)
# Pressure-velocity coupling
p_test = make_test(space=p_space, space_partition=active_p_space_partition, domain=domain)
p_trial = make_trial(space=p_space, space_partition=active_p_space_partition, domain=domain)
div_matrix = integrate(div_form, fields={"u": u_trial, "q": p_test})
inv_p_mass_matrix = integrate(scalar_mass_form, fields={"p": p_trial, "q": p_test})
wp.launch(
kernel=inverse_array_kernel,
dim=inv_p_mass_matrix.values.shape,
device=inv_p_mass_matrix.values.device,
inputs=[inv_p_mass_matrix.values],
)
# Assemble linear system
u_matrix = u_visc_matrix
bsr_axpy(u_bd_matrix, u_matrix, alpha=bd_strength)
div_matrix_t = bsr_transposed(div_matrix)
gradient_matrix = bsr_mm(div_matrix_t, inv_p_mass_matrix)
bsr_mm(gradient_matrix, div_matrix, u_matrix, alpha=1.0 / compliance, beta=1.0)
array_axpy(u_rhs, u_rhs, alpha=0.0, beta=bd_strength)
# Solve for displacement
u_res = wp.zeros_like(u_rhs)
bsr_cg(u_matrix, x=u_res, b=u_rhs)
# Recompute pressure
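    # p = -M_p^{-1} B u (up to the 1/compliance factor used in the assembly above)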
p_res = wp.zeros(n=active_p_space_partition.node_count(), dtype=wp.float64)
p_tmp = wp.empty_like(p_res)
bsr_mv(A=div_matrix, x=u_res, y=p_tmp)
bsr_mv(A=inv_p_mass_matrix, x=p_tmp, y=p_res, alpha=-1)
# Display result
u_field = u_space.make_field()
p_field = p_space.make_field()
u_nodes = wp.indexedarray(u_field.dof_values, indices=active_space_partition.space_node_indices())
p_nodes = wp.indexedarray(p_field.dof_values, indices=active_p_space_partition.space_node_indices())
array_cast(in_array=u_res, out_array=u_nodes)
array_cast(in_array=p_res, out_array=p_nodes)
plot_grid_surface(p_field)
plot_grid_streamlines(u_field)
plt.show()
| warp-main | examples/fem/example_stokes_transfer.py |
""" This example illustrates using Mixed FEM to solve a 2D linear elasticity problem
Div[ E: D(u) ] = 0
with Dirichlet boundary conditions on horizontal sides, and E the elasticity rank-4 tensor
"""
import argparse
import warp as wp
import numpy as np
from warp.fem.types import *
from warp.fem.geometry import Grid2D, Trimesh2D
from warp.fem.field import make_test, make_trial
from warp.fem.space import make_polynomial_space, SymmetricTensorMapper
from warp.fem.domain import Cells, BoundarySides
from warp.fem.integrate import integrate
from warp.fem.operator import normal, integrand, D
from warp.fem.dirichlet import project_linear_system
from warp.sparse import bsr_transposed, bsr_mm
from plot_utils import plot_velocities
from bsr_utils import bsr_cg, invert_diagonal_bsr_mass_matrix
from mesh_utils import gen_trimesh
import matplotlib.pyplot as plt
@wp.func
def compute_stress(tau: wp.mat22, E: wp.mat33):
"""Strain to stress computation"""
tau_sym = wp.vec3(tau[0, 0], tau[1, 1], tau[0, 1] + tau[1, 0])
sig_sym = E * tau_sym
return wp.mat22(sig_sym[0], 0.5 * sig_sym[2], 0.5 * sig_sym[2], sig_sym[1])
@integrand
def symmetric_grad_form(
s: Sample,
u: Field,
tau: Field,
):
"""D(u) : tau"""
return wp.ddot(tau(s), D(u, s))
@integrand
def stress_form(s: Sample, u: Field, tau: Field, E: wp.mat33):
"""(E : D(u)) : tau"""
return wp.ddot(tau(s), compute_stress(D(u, s), E))
@integrand
def horizontal_boundary_projector_form(
s: Sample,
domain: Domain,
u: Field,
v: Field,
):
# non zero on horizontal boundary of domain only
nor = normal(domain, s)
return wp.dot(u(s), v(s)) * wp.abs(nor[1])
@integrand
def horizontal_displacement_form(
s: Sample,
domain: Domain,
v: Field,
displacement: float,
):
# opposed to normal on horizontal boundary of domain only
nor = normal(domain, s)
return -wp.abs(nor[1]) * displacement * wp.dot(nor, v(s))
@integrand
def tensor_mass_form(
s: Sample,
sig: Field,
tau: Field,
):
return wp.ddot(tau(s), sig(s))
if __name__ == "__main__":
wp.init()
wp.set_module_options({"enable_backward": False})
parser = argparse.ArgumentParser()
parser.add_argument("--resolution", type=int, default=25)
parser.add_argument("--degree", type=int, default=2)
parser.add_argument("--displacement", type=float, default=0.1)
parser.add_argument("--young_modulus", type=float, default=1.0)
parser.add_argument("--poisson_ratio", type=float, default=0.5)
parser.add_argument("--tri_mesh", action="store_true", help="Use a triangular mesh")
args = parser.parse_args()
if args.tri_mesh:
positions, tri_vidx = gen_trimesh(res=vec2i(args.resolution))
geo = Trimesh2D(tri_vertex_indices=tri_vidx, positions=positions)
else:
geo = Grid2D(res=vec2i(args.resolution))
boundary = BoundarySides(geo)
# Strain-stress matrix
young = args.young_modulus
poisson = args.poisson_ratio
elasticity_mat = wp.mat33(
young
/ (1.0 - poisson * poisson)
* np.array(
[[1.0, poisson, 0.0], [poisson, 1.0, 0.0], [0.0, 0.0, (2.0 * (1.0 + poisson)) * (1.0 - poisson * poisson)]]
)
)
domain = Cells(geometry=geo)
# Function spaces -- Q_k for displacement, Q_{k-1}d for stress
u_space = make_polynomial_space(geo, degree=args.degree, dtype=wp.vec2)
# Store stress degrees of freedom as symmetric tensors (3 dof) rather than full 2x2 matrices
tau_space = make_polynomial_space(
geo, degree=args.degree - 1, discontinuous=True, dof_mapper=SymmetricTensorMapper(wp.mat22)
)
# Displacement boundary conditions
u_bd_test = make_test(space=u_space, domain=boundary)
u_bd_trial = make_trial(space=u_space, domain=boundary)
u_bd_rhs = integrate(
horizontal_displacement_form,
fields={"v": u_bd_test},
values={"displacement": args.displacement},
nodal=True,
output_dtype=wp.vec2d,
)
u_bd_matrix = integrate(horizontal_boundary_projector_form, fields={"u": u_bd_trial, "v": u_bd_test}, nodal=True)
# Stress/velocity coupling
u_trial = make_trial(space=u_space, domain=domain)
tau_test = make_test(space=tau_space, domain=domain)
tau_trial = make_trial(space=tau_space, domain=domain)
sym_grad_matrix = integrate(symmetric_grad_form, fields={"u": u_trial, "tau": tau_test})
stress_matrix = integrate(stress_form, fields={"u": u_trial, "tau": tau_test}, values={"E": elasticity_mat})
# Compute inverse of the (block-diagonal) tau mass matrix
tau_inv_mass_matrix = integrate(tensor_mass_form, fields={"sig": tau_trial, "tau": tau_test}, nodal=True)
invert_diagonal_bsr_mass_matrix(tau_inv_mass_matrix)
# Assemble system matrix
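    # Static condensation of the stress unknowns: with sig = M_tau^{-1} S u, the
    # displacement-only system matrix is G^T M_tau^{-1} S, where G is the
    # symmetric-gradient form and S the stress form.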
u_matrix = bsr_mm(bsr_transposed(sym_grad_matrix), bsr_mm(tau_inv_mass_matrix, stress_matrix))
# Enforce boundary conditions
u_rhs = wp.zeros_like(u_bd_rhs)
project_linear_system(u_matrix, u_rhs, u_bd_matrix, u_bd_rhs)
x = wp.zeros_like(u_rhs)
bsr_cg(u_matrix, b=u_rhs, x=x, tol=1.0e-16)
# Extract result
u_field = u_space.make_field()
    u_field.dof_values = x
plot_velocities(u_field)
plt.show()
| warp-main | examples/fem/example_mixed_elasticity.py |
"""
This example solves a 2D Stokes flow problem
 -nu Div D(u) + grad p = 0
Div u = 0
with (soft) velocity-Dirichlet boundary conditions
"""
import argparse
import warp as wp
import numpy as np
from warp.fem.types import *
from warp.fem.geometry import Grid2D, Trimesh2D
from warp.fem.field import make_test, make_trial, make_restriction
from warp.fem.space import make_polynomial_space
from warp.fem.domain import Cells, BoundarySides
from warp.fem.integrate import integrate, interpolate
from warp.fem.operator import normal, integrand, D, div
from plot_utils import plot_velocities, plot_surface
from bsr_utils import bsr_to_scipy
from mesh_utils import gen_trimesh
from scipy.sparse import bmat
from scipy.sparse.linalg import spsolve
import matplotlib.pyplot as plt
@integrand
def constant_form(val: wp.vec2):
return val
@integrand
def viscosity_form(s: Sample, u: Field, v: Field, nu: float):
return nu * wp.ddot(D(u, s), D(v, s))
@integrand
def top_mass_form(
s: Sample,
domain: Domain,
u: Field,
v: Field,
):
# non zero on top boundary of domain only
nor = normal(domain, s)
return wp.dot(u(s), v(s)) * wp.max(0.0, nor[1])
@integrand
def mass_form(
s: Sample,
u: Field,
v: Field,
):
return wp.dot(u(s), v(s))
@integrand
def div_form(
s: Sample,
u: Field,
q: Field,
):
return q(s) * div(u, s)
if __name__ == "__main__":
wp.init()
wp.set_module_options({"enable_backward": False})
parser = argparse.ArgumentParser()
parser.add_argument("--resolution", type=int, default=50)
parser.add_argument("--degree", type=int, default=2)
parser.add_argument("--top_velocity", type=float, default=1.0)
parser.add_argument("--viscosity", type=float, default=1.0)
parser.add_argument("--boundary_strength", type=float, default=100.0)
parser.add_argument("--tri_mesh", action="store_true", help="Use a triangular mesh")
args = parser.parse_args()
top_velocity = wp.vec2(args.top_velocity, 0.0)
if args.tri_mesh:
positions, tri_vidx = gen_trimesh(res=vec2i(args.resolution))
geo = Trimesh2D(tri_vertex_indices=tri_vidx, positions=positions)
else:
geo = Grid2D(res=vec2i(args.resolution))
domain = Cells(geometry=geo)
boundary = BoundarySides(geo)
# Function spaces -- Q_d for vel, Q_{d-1} for pressure
u_space = make_polynomial_space(geo, degree=args.degree, dtype=wp.vec2)
p_space = make_polynomial_space(geo, degree=args.degree-1)
# Interpolate initial condition on boundary (mostly for testing)
f = u_space.make_field()
f_boundary = make_restriction(f, domain=boundary)
interpolate(constant_form, dest=f_boundary, values={"val": top_velocity})
# Viscosity
u_test = make_test(space=u_space, domain=domain)
u_trial = make_trial(space=u_space, domain=domain)
u_visc_matrix = integrate(
viscosity_form,
fields={"u": u_trial, "v": u_test},
values={"nu": args.viscosity},
)
# Weak velocity boundary conditions
u_bd_test = make_test(space=u_space, domain=boundary)
u_bd_trial = make_trial(space=u_space, domain=boundary)
u_rhs = integrate(top_mass_form, fields={"u": f.trace(), "v": u_bd_test})
u_bd_matrix = integrate(mass_form, fields={"u": u_bd_trial, "v": u_bd_test})
# Pressure-velocity coupling
p_test = make_test(space=p_space, domain=domain)
div_matrix = integrate(div_form, fields={"u": u_trial, "q": p_test})
# Solve with scipy
# Assemble saddle-point system with velocity, pressure, and zero-average-pressure constraint
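    # The assembled blocks are
    #   [ K + s*M_bd   B^T   0 ] [u]   [s * u_rhs]
    #   [ B            0     1 ] [p] = [    0    ]
    #   [ 0            1^T   0 ] [l]   [    0    ]
    # with s the boundary strength and l a Lagrange multiplier enforcing zero average pressure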
u_rhs = u_rhs.numpy() * args.boundary_strength
u_matrix = bsr_to_scipy(u_visc_matrix) + args.boundary_strength * bsr_to_scipy(u_bd_matrix)
div_matrix = bsr_to_scipy(div_matrix)
ones = np.ones(shape=(p_space.node_count(), 1), dtype=float)
saddle_system = bmat(
[
[u_matrix, div_matrix.transpose(), None],
[div_matrix, None, ones],
[None, ones.transpose(), None],
],
format="csr",
)
saddle_rhs = np.zeros(saddle_system.shape[0])
u_slice = slice(0, 2 * u_space.node_count())
p_slice = slice(
2 * u_space.node_count(), 2 * u_space.node_count() + p_space.node_count()
)
saddle_rhs[u_slice] = u_rhs.flatten()
x = spsolve(saddle_system, saddle_rhs)
# Extract result
u_field = u_space.make_field()
p_field = p_space.make_field()
u_field.dof_values = x[u_slice].reshape((-1, 2))
p_field.dof_values = x[p_slice]
plot_surface(p_field)
plot_velocities(u_field)
plt.show()
| warp-main | examples/fem/example_stokes.py |
import numpy as np
def plot_grid_surface(field, axes=None):
import matplotlib.pyplot as plt
from matplotlib import cm
if axes is None:
fig, axes = plt.subplots(subplot_kw={"projection": "3d"})
node_positions = field.space.node_positions()
# Make data.
X = node_positions[0]
Y = node_positions[1]
Z = field.dof_values.numpy().reshape(X.shape)
# Plot the surface.
return axes.plot_surface(X, Y, Z, cmap=cm.coolwarm, linewidth=0, antialiased=False)
def plot_tri_surface(field, axes=None):
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib.tri.triangulation import Triangulation
if axes is None:
fig, axes = plt.subplots(subplot_kw={"projection": "3d"})
node_positions = field.space.node_positions()
triangulation = Triangulation(
x=node_positions[0], y=node_positions[1], triangles=field.space.node_triangulation()
)
Z = field.dof_values.numpy()
# Plot the surface.
return axes.plot_trisurf(triangulation, Z, cmap=cm.coolwarm, linewidth=0, antialiased=False)
def plot_scatter_surface(field, axes=None):
import matplotlib.pyplot as plt
from matplotlib import cm
if axes is None:
fig, axes = plt.subplots(subplot_kw={"projection": "3d"})
X, Y = field.space.node_positions()
# Make data.
Z = field.dof_values.numpy().reshape(X.shape)
# Plot the surface.
return axes.scatter(X, Y, Z, c=Z, cmap=cm.coolwarm)
def plot_surface(field, axes=None):
if hasattr(field.space, "node_triangulation"):
return plot_tri_surface(field, axes)
    else:
        try:
            return plot_grid_surface(field, axes)
        except Exception:
            # fall back to a scatter plot if the nodes are not grid-shaped
            return plot_scatter_surface(field, axes)
def plot_grid_color(field, axes=None):
import matplotlib.pyplot as plt
from matplotlib import cm
if axes is None:
fig, axes = plt.subplots()
node_positions = field.space.node_positions()
# Make data.
X = node_positions[0]
Y = node_positions[1]
Z = field.dof_values.numpy().reshape(X.shape)
# Plot the surface.
return axes.pcolormesh(X, Y, Z, cmap=cm.coolwarm)
def plot_velocities(field, axes=None):
import matplotlib.pyplot as plt
if axes is None:
fig, axes = plt.subplots()
node_positions = field.space.node_positions()
# Make data.
X = node_positions[0]
Y = node_positions[1]
vel = field.dof_values.numpy()
u = np.ascontiguousarray(vel[:, 0])
v = np.ascontiguousarray(vel[:, 1])
u = u.reshape(X.shape)
v = v.reshape(X.shape)
return axes.quiver(X, Y, u, v)
def plot_grid_streamlines(field, axes=None):
import matplotlib.pyplot as plt
if axes is None:
fig, axes = plt.subplots()
node_positions = field.space.node_positions()
# Make data.
X = node_positions[0][:, 0]
Y = node_positions[1][0, :]
vel = field.dof_values.numpy()
u = np.ascontiguousarray(vel[:, 0])
v = np.ascontiguousarray(vel[:, 1])
u = np.transpose(u.reshape(node_positions[0].shape))
v = np.transpose(v.reshape(node_positions[0].shape))
splot = axes.streamplot(X, Y, u, v, density=2)
splot.axes = axes
return splot
def plot_3d_scatter(field, axes=None):
import matplotlib.pyplot as plt
from matplotlib import cm
if axes is None:
fig, axes = plt.subplots(subplot_kw={"projection": "3d"})
X, Y, Z = field.space.node_positions()
# Make data.
f = field.dof_values.numpy().reshape(X.shape)
# Plot the surface.
return axes.scatter(X, Y, Z, c=f, cmap=cm.coolwarm)
def plot_3d_velocities(field, axes=None):
import matplotlib.pyplot as plt
if axes is None:
fig, axes = plt.subplots(subplot_kw={"projection": "3d"})
X, Y, Z = field.space.node_positions()
vel = field.dof_values.numpy()
u = np.ascontiguousarray(vel[:, 0])
v = np.ascontiguousarray(vel[:, 1])
w = np.ascontiguousarray(vel[:, 2])
u = u.reshape(X.shape)
v = v.reshape(X.shape)
w = w.reshape(X.shape)
return axes.quiver(X, Y, Z, u, v, w, length=1.0 / X.shape[0], normalize=False)
| warp-main | examples/fem/plot_utils.py |
"""
This example simulates a convection-diffusion PDE using semi-Lagrangian advection
D phi / dt - nu d2 phi / dx^2 = 0
"""
import argparse
import warp as wp
from warp.fem.types import *
from warp.fem.geometry import Grid2D, Trimesh2D
from warp.fem.field import make_test, make_trial
from warp.fem.space import make_polynomial_space
from warp.fem.domain import Cells
from warp.fem.integrate import integrate, interpolate
from warp.fem.operator import grad, integrand, lookup
from bsr_utils import bsr_to_scipy
from plot_utils import plot_surface
from mesh_utils import gen_trimesh
from scipy.sparse.linalg import factorized
import matplotlib.pyplot as plt
import matplotlib.animation as animation
@integrand
def initial_condition(domain: Domain, s: Sample):
pos = domain(s)
if pos[0] > 0.4 and pos[0] < 0.6 and pos[1] > 0.2 and pos[1] < 0.8:
return 1.0
return 0.0
@wp.func
def velocity(pos: wp.vec2, ang_vel: float):
center = wp.vec2(0.5, 0.5)
offset = pos - center
return wp.vec2(offset[1], -offset[0]) * ang_vel
@integrand
def inertia_form(s: Sample, phi: Field, psi: Field, dt: float):
return phi(s) * psi(s) / dt
@integrand
def transported_inertia_form(s: Sample, domain: Domain, phi: Field, psi: Field, ang_vel: float, dt: float):
pos = domain(s)
vel = velocity(pos, ang_vel)
# semi-Lagrangian advection; evaluate phi upstream
conv_pos = pos - vel * dt
    # the lookup operator constructs a Sample from a world position;
    # the optional last argument provides an initial guess for the lookup
conv_phi = phi(lookup(domain, conv_pos, s))
return conv_phi * psi(s) / dt
@integrand
def diffusion_form(
s: Sample,
u: Field,
v: Field,
):
return wp.dot(
grad(u, s),
grad(v, s),
)
@integrand
def diffusion_and_inertia_form(s: Sample, phi: Field, psi: Field, dt: float, nu: float):
return inertia_form(s, phi, psi, dt) + nu * diffusion_form(s, phi, psi)
if __name__ == "__main__":
wp.init()
wp.set_module_options({"enable_backward": False})
parser = argparse.ArgumentParser()
parser.add_argument("--resolution", type=int, default=50)
parser.add_argument("--degree", type=int, default=2)
parser.add_argument("--n_frames", type=int, default=250)
parser.add_argument("--viscosity", type=float, default=0.001)
parser.add_argument("--ang_vel", type=float, default=1.0)
parser.add_argument("--tri_mesh", action="store_true", help="Use a triangular mesh")
args = parser.parse_args()
res = args.resolution
dt = 1.0 / (args.ang_vel * res)
if args.tri_mesh:
positions, tri_vidx = gen_trimesh(res=vec2i(res))
geo = Trimesh2D(tri_vertex_indices=tri_vidx, positions=positions)
else:
geo = Grid2D(res=vec2i(res))
domain = Cells(geometry=geo)
scalar_space = make_polynomial_space(geo, degree=args.degree)
quadrature = None
# Initial condition
phi0 = scalar_space.make_field()
interpolate(initial_condition, dest=phi0)
# Assemble and factorize diffusion and inertia matrix
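    # The scheme is implicit in diffusion and explicit (semi-Lagrangian) in advection:
    #   (M / dt + nu K) phi^{k+1} = M_adv phi^k / dt
    # where M_adv evaluates phi^k at the upstream positions (transported_inertia_form)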
test = make_test(space=scalar_space, domain=domain)
trial = make_trial(space=scalar_space, domain=domain)
matrix = integrate(
diffusion_and_inertia_form,
quadrature=quadrature,
fields={"phi": trial, "psi": test},
values={"nu": args.viscosity, "dt": dt},
)
matrix_solve = factorized(bsr_to_scipy(matrix))
results = [phi0.dof_values.numpy()]
phik = phi0
for k in range(args.n_frames):
# right-hand-side -- advected inertia
rhs = integrate(
transported_inertia_form,
quadrature=quadrature,
fields={"phi": phik, "psi": test},
values={"ang_vel": args.ang_vel, "dt": dt},
)
# Solve using Scipy
x = matrix_solve(rhs.numpy().flatten())
phik.dof_values = x
results.append(x)
colormesh = plot_surface(phi0)
ax = colormesh.axes
def animate(i):
ax.clear()
phik.dof_values = results[i]
return plot_surface(phik, axes=ax)
anim = animation.FuncAnimation(
ax.figure,
animate,
interval=30,
blit=False,
frames=len(results),
)
plt.show()
| warp-main | examples/fem/example_convection_diffusion.py |
"""
This example solves a 3d diffusion problem:
 -nu Div grad(u) = 1
with homogeneous Neumann conditions on horizontal sides and homogeneous Dirichlet boundary conditions on the other sides.
"""
import argparse
import warp as wp
import numpy as np
from warp.fem.types import *
from warp.fem.geometry import Grid3D, Tetmesh
from warp.fem.space import make_polynomial_space
from warp.fem.field import make_test, make_trial
from warp.fem.domain import Cells, BoundarySides
from warp.fem.integrate import integrate
from warp.fem.operator import normal, integrand
from warp.fem.dirichlet import project_linear_system
from warp.sparse import bsr_axpy
from plot_utils import plot_3d_scatter
from bsr_utils import bsr_cg
from mesh_utils import gen_tetmesh
from example_diffusion import diffusion_form, linear_form
import matplotlib.pyplot as plt
@integrand
def vert_boundary_projector_form(
s: Sample,
domain: Domain,
u: Field,
v: Field,
):
# Non-zero mass on vertical sides only
w = 1.0 - wp.abs(normal(domain, s)[1])
return w * u(s) * v(s)
if __name__ == "__main__":
wp.init()
wp.set_module_options({"enable_backward": False})
parser = argparse.ArgumentParser()
parser.add_argument("--resolution", type=int, default=10)
parser.add_argument("--degree", type=int, default=2)
parser.add_argument("--viscosity", type=float, default=2.0)
parser.add_argument("--boundary_compliance", type=float, default=0, help="Dirichlet boundary condition compliance")
parser.add_argument("--tet_mesh", action="store_true", help="Use a tetrahedral mesh")
args = parser.parse_args()
res = vec3i(args.resolution, args.resolution // 2, args.resolution * 2)
if args.tet_mesh:
pos, tet_vtx_indices = gen_tetmesh(
res=res,
bounds_lo=wp.vec3(0.0, 0.0, 0.0),
bounds_hi=wp.vec3(1.0, 0.5, 2.0),
)
geo = Tetmesh(tet_vtx_indices, pos)
else:
geo = Grid3D(
res=res,
bounds_lo=wp.vec3(0.0, 0.0, 0.0),
bounds_hi=wp.vec3(1.0, 0.5, 2.0),
)
# Domain and function spaces
domain = Cells(geometry=geo)
scalar_space = make_polynomial_space(geo, degree=args.degree)
# Right-hand-side
test = make_test(space=scalar_space, domain=domain)
rhs = integrate(linear_form, fields={"v": test})
# Weakly-imposed boundary conditions on Y sides
with wp.ScopedTimer("Integrate"):
boundary = BoundarySides(geo)
bd_test = make_test(space=scalar_space, domain=boundary)
bd_trial = make_trial(space=scalar_space, domain=boundary)
bd_matrix = integrate(vert_boundary_projector_form, fields={"u": bd_trial, "v": bd_test}, nodal=True)
# Diffusion form
trial = make_trial(space=scalar_space, domain=domain)
matrix = integrate(diffusion_form, fields={"u": trial, "v": test}, values={"nu": args.viscosity})
if args.boundary_compliance == 0.0:
# Hard BC: project linear system
bd_rhs = wp.zeros_like(rhs)
project_linear_system(matrix, rhs, bd_matrix, bd_rhs)
else:
        # Weak BC: add together diffusion and boundary condition matrices
        boundary_strength = 1.0 / args.boundary_compliance
        bsr_axpy(x=bd_matrix, y=matrix, alpha=boundary_strength, beta=1)
with wp.ScopedTimer("CG solve"):
x = wp.zeros_like(rhs)
bsr_cg(matrix, b=rhs, x=x)
scalar_field = scalar_space.make_field()
scalar_field.dof_values = x
plot_3d_scatter(scalar_field)
plt.show()
| warp-main | examples/fem/example_diffusion_3d.py |
"""
This example solves a 2d diffusion problem:
 -nu Div grad(u) = 1
with Dirichlet boundary conditions on vertical edges and homogeneous Neumann on horizontal edges.
"""
import argparse
import warp as wp
from warp.fem.types import *
from warp.fem.geometry import Grid2D, Trimesh2D
from warp.fem.space import make_polynomial_space
from warp.fem.field import make_test, make_trial
from warp.fem.domain import Cells, BoundarySides
from warp.fem.integrate import integrate
from warp.fem.operator import grad, normal, integrand
from warp.fem.dirichlet import project_linear_system
from warp.sparse import bsr_axpy
from warp.fem.utils import array_axpy
from plot_utils import plot_surface
from bsr_utils import bsr_cg
from mesh_utils import gen_trimesh
import matplotlib.pyplot as plt
@integrand
def linear_form(
s: Sample,
v: Field,
):
"""Linear form with constant slope 1 -- forcing term of our problem"""
return v(s)
@integrand
def diffusion_form(s: Sample, u: Field, v: Field, nu: float):
"""Diffusion bilinear form with constant coefficient ``nu``"""
return nu * wp.dot(
grad(u, s),
grad(v, s),
)
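# Together, linear_form and diffusion_form assemble the weak form of the
# strong problem -nu Laplacian(u) = 1: find u such that for all tests v,
#     integral( nu * grad(u) . grad(v) ) = integral( v )
# with the boundary terms handled separately below.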
@integrand
def y_boundary_value_form(s: Sample, domain: Domain, v: Field, val: float):
"""Linear form with coefficient val on vertical edges, zero elsewhere"""
nor = normal(domain, s)
return val * v(s) * wp.abs(nor[0])
@integrand
def y_boundary_projector_form(
s: Sample,
domain: Domain,
u: Field,
v: Field,
):
"""
Bilinear boundary condition projector form, non-zero on vertical edges only.
"""
    # Reuse the above linear form implementation by evaluating one of the participating fields and passing it as a regular scalar argument.
return y_boundary_value_form(s, domain, v, u(s))
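# With nodal integration, each boundary node sample s contributes u(s) * v(s)
# on vertical edges and 0 on horizontal ones, so the assembled matrix acts as
# a projector selecting exactly the constrained boundary nodes.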
if __name__ == "__main__":
wp.init()
wp.set_module_options({"enable_backward": False})
parser = argparse.ArgumentParser()
parser.add_argument("--resolution", type=int, default=50)
parser.add_argument("--degree", type=int, default=2)
parser.add_argument("--viscosity", type=float, default=2.0)
parser.add_argument("--boundary_value", type=float, default=5.0)
parser.add_argument("--boundary_compliance", type=float, default=0, help="Dirichlet boundary condition compliance")
parser.add_argument("--tri_mesh", action="store_true", help="Use a triangular mesh")
args = parser.parse_args()
# Grid or triangle mesh geometry
if args.tri_mesh:
positions, tri_vidx = gen_trimesh(res=vec2i(args.resolution))
geo = Trimesh2D(tri_vertex_indices=tri_vidx, positions=positions)
else:
geo = Grid2D(res=vec2i(args.resolution))
# Domain and function spaces
domain = Cells(geometry=geo)
scalar_space = make_polynomial_space(geo, degree=args.degree)
# Right-hand-side (forcing term)
test = make_test(space=scalar_space, domain=domain)
rhs = integrate(linear_form, fields={"v": test})
# Diffusion form
trial = make_trial(space=scalar_space, domain=domain)
matrix = integrate(diffusion_form, fields={"u": trial, "v": test}, values={"nu": args.viscosity})
# Boundary conditions on Y sides
# Use nodal integration so that boundary conditions are specified on each node independently
boundary = BoundarySides(geo)
bd_test = make_test(space=scalar_space, domain=boundary)
bd_trial = make_trial(space=scalar_space, domain=boundary)
bd_matrix = integrate(y_boundary_projector_form, fields={"u": bd_trial, "v": bd_test}, nodal=True)
bd_rhs = integrate(y_boundary_value_form, fields={"v": bd_test}, values={"val": args.boundary_value}, nodal=True)
# Assemble linear system
if args.boundary_compliance == 0.0:
# Hard BC: project linear system
project_linear_system(matrix, rhs, bd_matrix, bd_rhs)
else:
        # Weak BC: add together diffusion and boundary condition matrices
boundary_strength = 1.0 / args.boundary_compliance
bsr_axpy(x=bd_matrix, y=matrix, alpha=boundary_strength, beta=1)
array_axpy(x=bd_rhs, y=rhs, alpha=boundary_strength, beta=1)
# Solve linear system using Conjugate Gradient
x = wp.zeros_like(rhs)
bsr_cg(matrix, b=rhs, x=x)
    # Assign system result to a discrete field
scalar_field = scalar_space.make_field()
scalar_field.dof_values = x
# Visualize it with matplotlib
plot_surface(scalar_field)
plt.show()
| warp-main | examples/fem/example_diffusion.py |
import numpy as np
import warp as wp
def gen_trimesh(res, bounds_lo: wp.vec2 = wp.vec2(0.0), bounds_hi: wp.vec2 = wp.vec2(1.0)):
"""Constructs a triangular mesh by diving each cell of a dense 2D grid into two triangles
Args:
res: Resolution of the grid along each dimension
bounds_lo: Position of the lower bound of the axis-aligned grid
bounds_up: Position of the upper bound of the axis-aligned grid
Returns:
Tuple of ndarrays: (Vertex positions, Triangle vertex indices)
"""
Nx = res[0]
Ny = res[1]
x = np.linspace(bounds_lo[0], bounds_hi[0], Nx + 1)
y = np.linspace(bounds_lo[1], bounds_hi[1], Ny + 1)
positions = np.transpose(np.meshgrid(x, y, indexing="ij"), axes=(1, 2, 0)).reshape(-1, 2)
cx, cy = np.meshgrid(np.arange(Nx, dtype=int), np.arange(Ny, dtype=int), indexing="ij")
vidx = np.transpose(
np.array(
[
(Ny + 1) * cx + cy,
(Ny + 1) * (cx + 1) + cy,
(Ny + 1) * (cx + 1) + (cy + 1),
(Ny + 1) * cx + cy,
(Ny + 1) * (cx + 1) + (cy + 1),
(Ny + 1) * (cx) + (cy + 1),
]
)
).reshape((-1, 3))
return wp.array(positions, dtype=wp.vec2), wp.array(vidx, dtype=int)
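# Example (sketch): a 2x2 grid over the unit square yields (2+1)*(2+1) = 9
# vertices and 2*2*2 = 8 triangles:
#   pos, tri_vidx = gen_trimesh(res=(2, 2))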
def gen_tetmesh(res, bounds_lo: wp.vec3 = wp.vec3(0.0), bounds_hi: wp.vec3 = wp.vec3(1.0)):
"""Constructs a tetrahedral mesh by diving each cell of a dense 3D grid into five tetrahedrons
Args:
res: Resolution of the grid along each dimension
bounds_lo: Position of the lower bound of the axis-aligned grid
bounds_up: Position of the upper bound of the axis-aligned grid
Returns:
Tuple of ndarrays: (Vertex positions, Tetrahedron vertex indices)
"""
Nx = res[0]
Ny = res[1]
Nz = res[2]
x = np.linspace(bounds_lo[0], bounds_hi[0], Nx + 1)
y = np.linspace(bounds_lo[1], bounds_hi[1], Ny + 1)
z = np.linspace(bounds_lo[2], bounds_hi[2], Nz + 1)
positions = np.transpose(np.meshgrid(x, y, z, indexing="ij"), axes=(1, 2, 3, 0)).reshape(-1, 3)
# Global node indices for each cell
cx, cy, cz = np.meshgrid(
np.arange(Nx, dtype=int), np.arange(Ny, dtype=int), np.arange(Nz, dtype=int), indexing="ij"
)
grid_vidx = np.array(
[
(Ny + 1) * (Nz + 1) * cx + (Nz + 1) * cy + cz,
(Ny + 1) * (Nz + 1) * cx + (Nz + 1) * cy + cz + 1,
(Ny + 1) * (Nz + 1) * cx + (Nz + 1) * (cy + 1) + cz,
(Ny + 1) * (Nz + 1) * cx + (Nz + 1) * (cy + 1) + cz + 1,
(Ny + 1) * (Nz + 1) * (cx + 1) + (Nz + 1) * cy + cz,
(Ny + 1) * (Nz + 1) * (cx + 1) + (Nz + 1) * cy + cz + 1,
(Ny + 1) * (Nz + 1) * (cx + 1) + (Nz + 1) * (cy + 1) + cz,
(Ny + 1) * (Nz + 1) * (cx + 1) + (Nz + 1) * (cy + 1) + cz + 1,
]
)
# decompose grid cells into 5 tets
tet_vidx = np.array(
[
[0, 1, 2, 4],
[3, 2, 1, 7],
[5, 1, 7, 4],
[6, 7, 4, 2],
[4, 1, 2, 7],
]
)
# Convert to 3d index coordinates
vidx_coords = np.array(
[
[0, 0, 0],
[0, 0, 1],
[0, 1, 0],
[0, 1, 1],
[1, 0, 0],
[1, 0, 1],
[1, 1, 0],
[1, 1, 1],
]
)
tet_coords = vidx_coords[tet_vidx]
# Symmetry bits for each cell
ox, oy, oz = np.meshgrid(
np.arange(Nx, dtype=int) % 2, np.arange(Ny, dtype=int) % 2, np.arange(Nz, dtype=int) % 2, indexing="ij"
)
tet_coords = np.broadcast_to(tet_coords, shape=(*ox.shape, *tet_coords.shape))
# Flip coordinates according to symmetry
ox_bk = np.broadcast_to(ox.reshape(*ox.shape, 1, 1), tet_coords.shape[:-1])
oy_bk = np.broadcast_to(oy.reshape(*oy.shape, 1, 1), tet_coords.shape[:-1])
oz_bk = np.broadcast_to(oz.reshape(*oz.shape, 1, 1), tet_coords.shape[:-1])
tet_coords_x = tet_coords[..., 0] ^ ox_bk
tet_coords_y = tet_coords[..., 1] ^ oy_bk
tet_coords_z = tet_coords[..., 2] ^ oz_bk
# Back to local vertex indices
corner_indices = 4 * tet_coords_x + 2 * tet_coords_y + tet_coords_z
# Now go from cell-local to global node indices
# There must be a nicer way than this, but for example purposes this works
corner_indices = corner_indices.reshape(-1, 4)
grid_vidx = grid_vidx.reshape((8, -1, 1))
grid_vidx = np.broadcast_to(grid_vidx, shape=(8, grid_vidx.shape[1], 5))
grid_vidx = grid_vidx.reshape((8, -1))
node_indices = np.arange(corner_indices.shape[0])
tet_grid_vidx = np.transpose(
[
grid_vidx[corner_indices[:, 0], node_indices],
grid_vidx[corner_indices[:, 1], node_indices],
grid_vidx[corner_indices[:, 2], node_indices],
grid_vidx[corner_indices[:, 3], node_indices],
]
)
return wp.array(positions, dtype=wp.vec3), wp.array(tet_grid_vidx, dtype=int)
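# Example (sketch): a 2x2x2 grid over the unit cube yields (2+1)**3 = 27
# vertices and 5*2*2*2 = 40 tetrahedra; the per-cell symmetry flips above are
# what make the five-tet decompositions of neighboring cells share faces.
#   pos, tet_vidx = gen_tetmesh(res=(2, 2, 2))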
| warp-main | examples/fem/mesh_utils.py |
"""
This example solves a 2D Navier-Stokes flow problem
 Du/dt - Div(2 nu D(u)) + grad p = 0
Div u = 0
with (hard) velocity-Dirichlet boundary conditions
and using semi-Lagrangian advection
"""
import argparse
import warp as wp
import numpy as np
from warp.fem.types import *
from warp.fem.geometry import Grid2D, Trimesh2D
from warp.fem.field import make_test, make_trial
from warp.fem.space import make_polynomial_space
from warp.fem.quadrature import RegularQuadrature
from warp.fem.domain import Cells, BoundarySides
from warp.fem.integrate import integrate
from warp.fem.operator import integrand, D, div, lookup
from warp.fem.dirichlet import project_linear_system, normalize_dirichlet_projector
from warp.fem.utils import array_axpy
from warp.sparse import bsr_mm, bsr_mv, bsr_copy
from bsr_utils import bsr_to_scipy
from plot_utils import plot_grid_streamlines, plot_velocities
from mesh_utils import gen_trimesh
from scipy.sparse import bmat
from scipy.sparse.linalg import factorized
import matplotlib.pyplot as plt
import matplotlib.animation as animation
@integrand
def u_boundary_value(s: Sample, domain: Domain, v: Field, top_vel: float):
# Horizontal velocity on top of domain, zero elsewhere
if domain(s)[1] == 1.0:
return wp.dot(wp.vec2f(top_vel, 0.0), v(s))
return wp.dot(wp.vec2f(0.0, 0.0), v(s))
@integrand
def mass_form(
s: Sample,
u: Field,
v: Field,
):
return wp.dot(u(s), v(s))
@integrand
def inertia_form(s: Sample, u: Field, v: Field, dt: float):
return mass_form(s, u, v) / dt
@integrand
def viscosity_form(s: Sample, u: Field, v: Field, nu: float):
return 2.0 * nu * wp.ddot(D(u, s), D(v, s))
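# D(u) is the symmetric part of the velocity gradient (rate-of-strain tensor);
# 2 nu D(u):D(v) is the weak counterpart of the viscous term -Div(2 nu D(u)).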
@integrand
def viscosity_and_inertia_form(s: Sample, u: Field, v: Field, dt: float, nu: float):
return inertia_form(s, u, v, dt) + viscosity_form(s, u, v, nu)
@integrand
def transported_inertia_form(s: Sample, domain: Domain, u: Field, v: Field, dt: float):
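    # Semi-Lagrangian advection: backtrace the characteristic from the sample
    # point with a midpoint (two-stage) lookup, then form the inertia term
    # from the velocity at the departure point.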
pos = domain(s)
vel = u(s)
conv_pos = pos - 0.5 * vel * dt
conv_s = lookup(domain, conv_pos, s)
conv_vel = u(conv_s)
conv_pos = conv_pos - 0.5 * conv_vel * dt
conv_vel = u(lookup(domain, conv_pos, conv_s))
return wp.dot(conv_vel, v(s)) / dt
@integrand
def div_form(
s: Sample,
u: Field,
q: Field,
):
return -q(s) * div(u, s)
if __name__ == "__main__":
wp.init()
wp.set_module_options({"enable_backward": False})
parser = argparse.ArgumentParser()
parser.add_argument("--resolution", type=int, default=25)
parser.add_argument("--degree", type=int, default=2)
parser.add_argument("--n_frames", type=int, default=1000)
parser.add_argument("--top_velocity", type=float, default=1.0)
parser.add_argument("--Re", type=float, default=1000.0)
parser.add_argument("--tri_mesh", action="store_true", help="Use a triangular mesh")
args = parser.parse_args()
if args.tri_mesh:
positions, tri_vidx = gen_trimesh(res=vec2i(args.resolution))
geo = Trimesh2D(tri_vertex_indices=tri_vidx, positions=positions)
else:
geo = Grid2D(res=vec2i(args.resolution))
boundary = BoundarySides(geo)
viscosity = args.top_velocity / args.Re
dt = 1.0 / args.resolution
domain = Cells(geometry=geo)
# Functions spaces: Q(d)-Q(d-1)
u_degree = args.degree
u_space = make_polynomial_space(geo, degree=u_degree, dtype=wp.vec2)
p_space = make_polynomial_space(geo, degree=u_degree - 1)
quadrature = RegularQuadrature(domain=domain, order=2 * u_degree)
# Viscosity and inertia
u_test = make_test(space=u_space, domain=domain)
u_trial = make_trial(space=u_space, domain=domain)
u_matrix = integrate(
viscosity_and_inertia_form,
fields={"u": u_trial, "v": u_test},
values={"nu": viscosity, "dt": dt},
)
# Pressure-velocity coupling
p_test = make_test(space=p_space, domain=domain)
div_matrix = integrate(div_form, fields={"u": u_trial, "q": p_test})
# Enforcing the Dirichlet boundary condition the hard way;
# build projector for velocity left- and right-hand-sides
u_bd_test = make_test(space=u_space, domain=boundary)
u_bd_trial = make_trial(space=u_space, domain=boundary)
u_bd_projector = integrate(mass_form, fields={"u": u_bd_trial, "v": u_bd_test}, nodal=True)
u_bd_value = integrate(
u_boundary_value,
fields={"v": u_bd_test},
values={"top_vel": args.top_velocity},
nodal=True,
output_dtype=wp.vec2d,
)
normalize_dirichlet_projector(u_bd_projector, u_bd_value)
u_bd_rhs = wp.zeros_like(u_bd_value)
project_linear_system(u_matrix, u_bd_rhs, u_bd_projector, u_bd_value, normalize_projector=False)
# div_bd_rhs = div_matrix * u_bd_rhs
div_bd_rhs = wp.zeros(shape=(div_matrix.nrow,), dtype=div_matrix.scalar_type)
bsr_mv(div_matrix, u_bd_rhs, y=div_bd_rhs)
# div_matrix = div_matrix - div_matrix * bd_projector
bsr_mm(x=bsr_copy(div_matrix), y=u_bd_projector, z=div_matrix, alpha=-1.0, beta=1.0)
# Assemble saddle system with Scipy
div_matrix = bsr_to_scipy(div_matrix)
u_matrix = bsr_to_scipy(u_matrix)
div_bd_rhs = div_bd_rhs.numpy()
ones = np.ones(shape=(p_space.node_count(), 1), dtype=float)
saddle_system = bmat(
[
[u_matrix, div_matrix.transpose(), None],
[div_matrix, None, ones],
[None, ones.transpose(), None],
],
)
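    # The extra row/column of ones pins the pressure null space: with pure
    # velocity-Dirichlet boundaries the pressure is only determined up to a
    # constant, so a scalar Lagrange multiplier enforces sum(p) = 0.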
with wp.ScopedTimer("LU factorization"):
solve_saddle = factorized(saddle_system)
u_k = u_space.make_field()
u_rhs = wp.zeros_like(u_bd_rhs)
results = [u_k.dof_values.numpy()]
for k in range(args.n_frames):
print("Solving step", k)
u_inertia_rhs = integrate(
transported_inertia_form,
quadrature=quadrature,
fields={"u": u_k, "v": u_test},
values={"dt": dt},
output_dtype=wp.vec2d,
)
# u_rhs = (I - P) * u_inertia_rhs + u_bd_rhs
bsr_mv(u_bd_projector, u_inertia_rhs, y=u_rhs, alpha=-1.0, beta=0.0)
array_axpy(x=u_inertia_rhs, y=u_rhs, alpha=1.0, beta=1.0)
array_axpy(x=u_bd_rhs, y=u_rhs, alpha=1.0, beta=1.0)
# Assemble scipy saddle system rhs
saddle_rhs = np.zeros(saddle_system.shape[0])
u_slice = slice(0, 2 * u_space.node_count())
p_slice = slice(2 * u_space.node_count(), 2 * u_space.node_count() + p_space.node_count())
saddle_rhs[u_slice] = u_rhs.numpy().flatten()
saddle_rhs[p_slice] = div_bd_rhs
x = solve_saddle(saddle_rhs)
# Extract result
x_u = x[u_slice].reshape((-1, 2))
results.append(x_u)
u_k.dof_values = x_u
# p_field.dof_values = x[p_slice]
if isinstance(geo, Grid2D):
plot_grid_streamlines(u_k)
quiver = plot_velocities(u_k)
ax = quiver.axes
def animate(i):
ax.clear()
u_k.dof_values = results[i]
return plot_velocities(u_k, axes=ax)
anim = animation.FuncAnimation(
ax.figure,
animate,
interval=30,
blit=False,
frames=len(results),
)
plt.show()
| warp-main | examples/fem/example_navier_stokes.py |
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
###########################################################################
# Cartpole environment
#
# Shows how to set up a simulation of a rigid-body cartpole articulation
# from a URDF using the Environment class.
# Note this example does not include a trained policy.
#
###########################################################################
import os
import math
import warp as wp
import warp.sim
from environment import Environment, run_env
class CartpoleEnvironment(Environment):
sim_name = "env_cartpole"
env_offset = (2.0, 0.0, 2.0)
opengl_render_settings = dict(scaling=3.0)
usd_render_settings = dict(scaling=100.0)
sim_substeps_euler = 32
sim_substeps_xpbd = 5
activate_ground_plane = False
show_joints = True
def create_articulation(self, builder):
wp.sim.parse_urdf(
os.path.join(os.path.dirname(__file__), "../assets/cartpole.urdf"),
builder,
xform=wp.transform((0.0, 0.0, 0.0), wp.quat_from_axis_angle((1.0, 0.0, 0.0), -math.pi * 0.5)),
floating=False,
armature=0.1,
stiffness=0.0,
damping=0.0,
shape_ke=1.0e4,
shape_kd=1.0e2,
shape_kf=1.0e2,
shape_mu=1.0,
limit_ke=1.0e4,
limit_kd=1.0e1,
enable_self_collisions=False,
)
# joint initial positions
builder.joint_q[-3:] = [0.0, 0.3, 0.0]
builder.joint_target[:3] = [0.0, 0.0, 0.0]
if __name__ == "__main__":
run_env(CartpoleEnvironment)
| warp-main | examples/env/env_cartpole.py |
| warp-main | examples/env/__init__.py |
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import warp as wp
import warp.sim
import warp.sim.render
import argparse
import os
import numpy as np
from enum import Enum
from typing import Tuple
wp.init()
class RenderMode(Enum):
NONE = "none"
OPENGL = "opengl"
USD = "usd"
def __str__(self):
return self.value
class IntegratorType(Enum):
EULER = "euler"
XPBD = "xpbd"
def __str__(self):
return self.value
def compute_env_offsets(num_envs, env_offset=(5.0, 0.0, 5.0), up_axis="Y"):
# compute positional offsets per environment
env_offset = np.array(env_offset)
nonzeros = np.nonzero(env_offset)[0]
num_dim = nonzeros.shape[0]
if num_dim > 0:
side_length = int(np.ceil(num_envs ** (1.0 / num_dim)))
env_offsets = []
else:
env_offsets = np.zeros((num_envs, 3))
if num_dim == 1:
for i in range(num_envs):
env_offsets.append(i * env_offset)
elif num_dim == 2:
for i in range(num_envs):
d0 = i // side_length
d1 = i % side_length
offset = np.zeros(3)
offset[nonzeros[0]] = d0 * env_offset[nonzeros[0]]
offset[nonzeros[1]] = d1 * env_offset[nonzeros[1]]
env_offsets.append(offset)
elif num_dim == 3:
for i in range(num_envs):
d0 = i // (side_length * side_length)
d1 = (i // side_length) % side_length
d2 = i % side_length
offset = np.zeros(3)
offset[0] = d0 * env_offset[0]
offset[1] = d1 * env_offset[1]
offset[2] = d2 * env_offset[2]
env_offsets.append(offset)
env_offsets = np.array(env_offsets)
min_offsets = np.min(env_offsets, axis=0)
correction = min_offsets + (np.max(env_offsets, axis=0) - min_offsets) / 2.0
if isinstance(up_axis, str):
up_axis = "XYZ".index(up_axis.upper())
correction[up_axis] = 0.0 # ensure the envs are not shifted below the ground plane
env_offsets -= correction
return env_offsets
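# Example (sketch): 4 environments spaced 2 units apart in the XZ plane get
# arranged on a 2x2 grid and re-centered around the origin (up axis untouched):
#   compute_env_offsets(4, env_offset=(2.0, 0.0, 2.0), up_axis="Y")
#   # -> offsets (-1,0,-1), (-1,0,1), (1,0,-1), (1,0,1)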
class Environment:
sim_name: str = "Environment"
frame_dt = 1.0 / 60.0
episode_duration = 5.0 # seconds
# whether to play the simulation indefinitely when using the OpenGL renderer
continuous_opengl_render: bool = True
sim_substeps_euler: int = 16
sim_substeps_xpbd: int = 5
euler_settings = dict()
xpbd_settings = dict()
render_mode: RenderMode = RenderMode.OPENGL
opengl_render_settings = dict()
usd_render_settings = dict(scaling=10.0)
show_rigid_contact_points = False
contact_points_radius = 1e-3
show_joints = False
# whether OpenGLRenderer should render each environment in a separate tile
use_tiled_rendering = False
# whether to apply model.joint_q, joint_qd to bodies before simulating
eval_fk: bool = True
profile: bool = False
use_graph_capture: bool = wp.get_preferred_device().is_cuda
num_envs: int = 100
activate_ground_plane: bool = True
integrator_type: IntegratorType = IntegratorType.XPBD
up_axis: str = "Y"
gravity: float = -9.81
env_offset: Tuple[float, float, float] = (1.0, 0.0, 1.0)
# stiffness and damping for joint attachment dynamics used by Euler
joint_attach_ke: float = 32000.0
joint_attach_kd: float = 50.0
# distance threshold at which contacts are generated
rigid_contact_margin: float = 0.05
# whether each environment should have its own collision group
# to avoid collisions between environments
separate_collision_group_per_env: bool = True
plot_body_coords: bool = False
plot_joint_coords: bool = False
requires_grad: bool = False
# control-related definitions, to be updated by derived classes
control_dim: int = 0
def __init__(self):
self.parser = argparse.ArgumentParser()
self.parser.add_argument(
"--integrator",
help="Type of integrator",
type=IntegratorType,
choices=list(IntegratorType),
default=self.integrator_type.value,
)
self.parser.add_argument(
"--visualizer",
help="Type of renderer",
type=RenderMode,
choices=list(RenderMode),
default=self.render_mode.value,
)
self.parser.add_argument(
"--num_envs", help="Number of environments to simulate", type=int, default=self.num_envs
)
self.parser.add_argument("--profile", help="Enable profiling", type=bool, default=self.profile)
def parse_args(self):
args = self.parser.parse_args()
self.integrator_type = args.integrator
self.render_mode = args.visualizer
self.num_envs = args.num_envs
self.profile = args.profile
def init(self):
if self.integrator_type == IntegratorType.EULER:
self.sim_substeps = self.sim_substeps_euler
elif self.integrator_type == IntegratorType.XPBD:
self.sim_substeps = self.sim_substeps_xpbd
self.episode_frames = int(self.episode_duration / self.frame_dt)
self.sim_dt = self.frame_dt / self.sim_substeps
self.sim_steps = int(self.episode_duration / self.sim_dt)
if self.use_tiled_rendering and self.render_mode == RenderMode.OPENGL:
# no environment offset when using tiled rendering
self.env_offset = (0.0, 0.0, 0.0)
builder = wp.sim.ModelBuilder()
builder.rigid_contact_margin = self.rigid_contact_margin
try:
articulation_builder = wp.sim.ModelBuilder()
self.create_articulation(articulation_builder)
env_offsets = compute_env_offsets(self.num_envs, self.env_offset, self.up_axis)
for i in range(self.num_envs):
xform = wp.transform(env_offsets[i], wp.quat_identity())
builder.add_builder(
articulation_builder, xform, separate_collision_group=self.separate_collision_group_per_env
)
self.bodies_per_env = len(articulation_builder.body_q)
except NotImplementedError:
# custom simulation setup where something other than an articulation is used
self.setup(builder)
self.bodies_per_env = len(builder.body_q)
self.model = builder.finalize()
self.device = self.model.device
if not self.device.is_cuda:
self.use_graph_capture = False
self.model.ground = self.activate_ground_plane
self.model.joint_attach_ke = self.joint_attach_ke
self.model.joint_attach_kd = self.joint_attach_kd
# set up current and next state to be used by the integrator
self.state_0 = None
self.state_1 = None
if self.integrator_type == IntegratorType.EULER:
self.integrator = wp.sim.SemiImplicitIntegrator(**self.euler_settings)
elif self.integrator_type == IntegratorType.XPBD:
self.integrator = wp.sim.XPBDIntegrator(**self.xpbd_settings)
self.renderer = None
if self.profile:
self.render_mode = RenderMode.NONE
if self.render_mode == RenderMode.OPENGL:
self.renderer = wp.sim.render.SimRendererOpenGL(
self.model,
self.sim_name,
up_axis=self.up_axis,
show_rigid_contact_points=self.show_rigid_contact_points,
contact_points_radius=self.contact_points_radius,
show_joints=self.show_joints,
**self.opengl_render_settings)
if self.use_tiled_rendering and self.num_envs > 1:
floor_id = self.model.shape_count - 1
# all shapes except the floor
instance_ids = np.arange(floor_id, dtype=np.int32).tolist()
shapes_per_env = floor_id // self.num_envs
additional_instances = []
if self.activate_ground_plane:
additional_instances.append(floor_id)
self.renderer.setup_tiled_rendering(
instances=[
instance_ids[i * shapes_per_env : (i + 1) * shapes_per_env] + additional_instances
for i in range(self.num_envs)
]
)
elif self.render_mode == RenderMode.USD:
filename = os.path.join(os.path.dirname(__file__), "..", "outputs", self.sim_name + ".usd")
self.renderer = wp.sim.render.SimRendererUsd(
self.model,
filename,
up_axis=self.up_axis,
show_rigid_contact_points=self.show_rigid_contact_points,
**self.usd_render_settings,
)
def create_articulation(self, builder):
raise NotImplementedError
def setup(self, builder):
pass
def customize_model(self, model):
pass
def before_simulate(self):
pass
def after_simulate(self):
pass
def custom_update(self):
pass
@property
def state(self):
# shortcut to current state
return self.state_0
def update(self):
for i in range(self.sim_substeps):
self.state_0.clear_forces()
self.custom_update()
wp.sim.collide(self.model, self.state_0)
self.integrator.simulate(self.model, self.state_0, self.state_1, self.sim_dt)
self.state_0, self.state_1 = self.state_1, self.state_0
def render(self, state=None):
if self.renderer is not None:
with wp.ScopedTimer("render", False):
self.render_time += self.frame_dt
self.renderer.begin_frame(self.render_time)
# render state 1 (swapped with state 0 just before)
self.renderer.render(state or self.state_1)
self.renderer.end_frame()
def run(self):
# ---------------
# run simulation
self.sim_time = 0.0
self.render_time = 0.0
self.state_0 = self.model.state()
self.state_1 = self.model.state()
if self.eval_fk:
wp.sim.eval_fk(self.model, self.model.joint_q, self.model.joint_qd, None, self.state_0)
self.before_simulate()
if self.renderer is not None:
self.render(self.state_0)
if self.render_mode == RenderMode.OPENGL:
self.renderer.paused = True
profiler = {}
if self.use_graph_capture:
# create update graph
wp.capture_begin()
# simulate
self.update()
graph = wp.capture_end()
if self.plot_body_coords:
q_history = []
q_history.append(self.state_0.body_q.numpy().copy())
qd_history = []
qd_history.append(self.state_0.body_qd.numpy().copy())
delta_history = []
delta_history.append(self.state_0.body_deltas.numpy().copy())
num_con_history = []
num_con_history.append(self.model.rigid_contact_inv_weight.numpy().copy())
if self.plot_joint_coords:
joint_q_history = []
joint_q = wp.zeros_like(self.model.joint_q)
joint_qd = wp.zeros_like(self.model.joint_qd)
# simulate
with wp.ScopedTimer("simulate", detailed=False, print=False, active=True, dict=profiler):
running = True
while running:
for f in range(self.episode_frames):
if self.use_graph_capture:
wp.capture_launch(graph)
self.sim_time += self.frame_dt
else:
self.update()
self.sim_time += self.frame_dt
if not self.profile:
if self.plot_body_coords:
q_history.append(self.state_0.body_q.numpy().copy())
qd_history.append(self.state_0.body_qd.numpy().copy())
delta_history.append(self.state_0.body_deltas.numpy().copy())
num_con_history.append(self.model.rigid_contact_inv_weight.numpy().copy())
if self.plot_joint_coords:
wp.sim.eval_ik(self.model, self.state_0, joint_q, joint_qd)
joint_q_history.append(joint_q.numpy().copy())
self.render()
if self.render_mode == RenderMode.OPENGL and self.renderer.has_exit:
running = False
break
if not self.continuous_opengl_render or self.render_mode != RenderMode.OPENGL:
break
wp.synchronize()
self.after_simulate()
avg_time = np.array(profiler["simulate"]).mean() / self.episode_frames
avg_steps_second = 1000.0 * float(self.num_envs) / avg_time
print(f"envs: {self.num_envs} steps/second {avg_steps_second} avg_time {avg_time}")
if self.renderer is not None:
self.renderer.save()
if self.plot_body_coords:
import matplotlib.pyplot as plt
q_history = np.array(q_history)
qd_history = np.array(qd_history)
delta_history = np.array(delta_history)
num_con_history = np.array(num_con_history)
# find bodies with non-zero mass
body_indices = np.where(self.model.body_mass.numpy() > 0)[0]
body_indices = body_indices[:5] # limit number of bodies to plot
fig, ax = plt.subplots(len(body_indices), 7, figsize=(10, 10), squeeze=False)
fig.subplots_adjust(hspace=0.2, wspace=0.2)
for i, j in enumerate(body_indices):
ax[i, 0].set_title(f"Body {j} Position")
ax[i, 0].grid()
ax[i, 1].set_title(f"Body {j} Orientation")
ax[i, 1].grid()
ax[i, 2].set_title(f"Body {j} Linear Velocity")
ax[i, 2].grid()
ax[i, 3].set_title(f"Body {j} Angular Velocity")
ax[i, 3].grid()
ax[i, 4].set_title(f"Body {j} Linear Delta")
ax[i, 4].grid()
ax[i, 5].set_title(f"Body {j} Angular Delta")
ax[i, 5].grid()
ax[i, 6].set_title(f"Body {j} Num Contacts")
ax[i, 6].grid()
ax[i, 0].plot(q_history[:, j, :3])
ax[i, 1].plot(q_history[:, j, 3:])
ax[i, 2].plot(qd_history[:, j, 3:])
ax[i, 3].plot(qd_history[:, j, :3])
ax[i, 4].plot(delta_history[:, j, 3:])
ax[i, 5].plot(delta_history[:, j, :3])
ax[i, 6].plot(num_con_history[:, j])
ax[i, 0].set_xlim(0, self.sim_steps)
ax[i, 1].set_xlim(0, self.sim_steps)
ax[i, 2].set_xlim(0, self.sim_steps)
ax[i, 3].set_xlim(0, self.sim_steps)
ax[i, 4].set_xlim(0, self.sim_steps)
ax[i, 5].set_xlim(0, self.sim_steps)
ax[i, 6].set_xlim(0, self.sim_steps)
ax[i, 6].yaxis.get_major_locator().set_params(integer=True)
plt.show()
if self.plot_joint_coords:
import matplotlib.pyplot as plt
joint_q_history = np.array(joint_q_history)
dof_q = joint_q_history.shape[1]
ncols = int(np.ceil(np.sqrt(dof_q)))
nrows = int(np.ceil(dof_q / float(ncols)))
fig, axes = plt.subplots(
ncols=ncols,
nrows=nrows,
constrained_layout=True,
figsize=(ncols * 3.5, nrows * 3.5),
squeeze=False,
sharex=True,
)
joint_id = 0
joint_type_names = {
wp.sim.JOINT_BALL: "ball",
wp.sim.JOINT_REVOLUTE: "hinge",
wp.sim.JOINT_PRISMATIC: "slide",
wp.sim.JOINT_UNIVERSAL: "universal",
wp.sim.JOINT_COMPOUND: "compound",
wp.sim.JOINT_FREE: "free",
wp.sim.JOINT_FIXED: "fixed",
wp.sim.JOINT_DISTANCE: "distance",
wp.sim.JOINT_D6: "D6",
}
joint_lower = self.model.joint_limit_lower.numpy()
joint_upper = self.model.joint_limit_upper.numpy()
joint_type = self.model.joint_type.numpy()
while joint_id < len(joint_type) - 1 and joint_type[joint_id] == wp.sim.JOINT_FIXED:
# skip fixed joints
joint_id += 1
q_start = self.model.joint_q_start.numpy()
qd_start = self.model.joint_qd_start.numpy()
qd_i = qd_start[joint_id]
for dim in range(ncols * nrows):
ax = axes[dim // ncols, dim % ncols]
if dim >= dof_q:
ax.axis("off")
continue
ax.grid()
ax.plot(joint_q_history[:, dim])
if joint_type[joint_id] != wp.sim.JOINT_FREE:
lower = joint_lower[qd_i]
if abs(lower) < 2 * np.pi:
ax.axhline(lower, color="red")
upper = joint_upper[qd_i]
if abs(upper) < 2 * np.pi:
ax.axhline(upper, color="red")
joint_name = joint_type_names[joint_type[joint_id]]
ax.set_title(f"$\\mathbf{{q_{{{dim}}}}}$ ({self.model.joint_name[joint_id]} / {joint_name} {joint_id})")
if joint_id < self.model.joint_count - 1 and q_start[joint_id + 1] == dim + 1:
joint_id += 1
qd_i = qd_start[joint_id]
else:
qd_i += 1
plt.tight_layout()
plt.show()
return 1000.0 * float(self.num_envs) / avg_time
def run_env(Demo):
demo = Demo()
demo.parse_args()
if demo.profile:
import matplotlib.pyplot as plt
env_count = 2
env_times = []
env_size = []
for i in range(15):
demo.num_envs = env_count
demo.init()
steps_per_second = demo.run()
env_size.append(env_count)
env_times.append(steps_per_second)
env_count *= 2
# dump times
for i in range(len(env_times)):
print(f"envs: {env_size[i]} steps/second: {env_times[i]}")
# plot
plt.figure(1)
plt.plot(env_size, env_times)
plt.xscale("log")
plt.xlabel("Number of Envs")
plt.yscale("log")
plt.ylabel("Steps/Second")
plt.show()
else:
demo.init()
return demo.run()
| warp-main | examples/env/environment.py |
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
###########################################################################
# USD Environment
#
# Shows how to load a USD file containing USD Physics schema definitions.
#
###########################################################################
import warp as wp
import warp.sim
from environment import Environment, run_env
class UsdEnvironment(Environment):
sim_name = "env_usd"
opengl_render_settings = dict(scaling=10.0, draw_grid=True)
usd_render_settings = dict(scaling=100.0)
episode_duration = 2.0
sim_substeps_euler = 64
sim_substeps_xpbd = 8
xpbd_settings = dict(
iterations=10,
enable_restitution=True,
joint_linear_relaxation=0.8,
joint_angular_relaxation=0.45,
rigid_contact_relaxation=1.0,
rigid_contact_con_weighting=True,
)
# USD files define their own ground plane if necessary
activate_ground_plane = False
num_envs = 1
plot_body_coords = False
def create_articulation(self, builder):
settings = wp.sim.parse_usd(
"http://omniverse-content-staging.s3-us-west-2.amazonaws.com/Assets/Isaac/2022.2.1/Isaac/Robots/Franka/franka_instanceable.usd",
builder,
default_thickness=0.01,
# ignore collision meshes from Franka robot
ignore_paths=[".*collisions.*"],
default_ke=1e6,
)
self.frame_dt = 1.0 / settings["fps"]
if settings["duration"] > 0.0:
self.episode_duration = settings["duration"]
self.sim_substeps = 10
self.sim_dt = self.frame_dt / self.sim_substeps
self.episode_frames = int(self.episode_duration / self.frame_dt)
self.sim_steps = int(self.episode_duration / self.sim_dt)
self.sim_time = 0.0
self.render_time = 0.0
self.up_axis = settings["up_axis"]
def before_simulate(self):
# print some information about the loaded model
if self.model.shape_count > 0:
print("shape_transform", self.model.shape_transform.numpy())
print("geo_scale", self.model.shape_geo.scale.numpy())
if self.model.joint_count > 0:
print("joint parent", self.model.joint_parent.numpy())
print("joint child", self.model.joint_child.numpy())
if len(self.model.joint_q) > 0:
print("joint q", self.model.joint_q.numpy())
if len(self.model.joint_axis) > 0:
print("joint axis", self.model.joint_axis.numpy())
print("joint target", self.model.joint_target.numpy())
print("joint target ke", self.model.joint_target_ke.numpy())
print("joint target kd", self.model.joint_target_kd.numpy())
print("joint limit lower", self.model.joint_limit_lower.numpy())
print("joint limit upper", self.model.joint_limit_upper.numpy())
print("joint_X_p", self.model.joint_X_p.numpy())
print("joint_X_c", self.model.joint_X_c.numpy())
if self.model.body_count > 0:
print("COM", self.model.body_com.numpy())
print("Mass", self.model.body_mass.numpy())
print("Inertia", self.model.body_inertia.numpy())
print("body_q", self.state.body_q.numpy())
if __name__ == "__main__":
run_env(UsdEnvironment)
| warp-main | examples/env/env_usd.py |
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
###########################################################################
# Ant environment
#
# Shows how to set up a simulation of a rigid-body Ant articulation based on
# the OpenAI gym environment using the Environment class and MCJF
# importer. Note this example does not include a trained policy.
#
###########################################################################
import os
import math
import warp as wp
import warp.sim
from environment import Environment, run_env
class AntEnvironment(Environment):
sim_name = "env_ant"
env_offset = (2.5, 0.0, 2.5)
opengl_render_settings = dict(scaling=3.0)
usd_render_settings = dict(scaling=100.0)
sim_substeps_euler = 32
sim_substeps_xpbd = 3
joint_attach_ke: float = 100000.0
joint_attach_kd: float = 10.0
use_graph_capture = True
def create_articulation(self, builder):
wp.sim.parse_mjcf(
os.path.join(os.path.dirname(__file__), "../assets/nv_ant.xml"),
builder,
stiffness=0.0,
damping=1.0,
armature=0.1,
contact_ke=1.0e4,
contact_kd=1.0e2,
contact_kf=1.0e4,
contact_mu=1.0,
limit_ke=1.0e4,
limit_kd=1.0e1,
enable_self_collisions=False,
up_axis="y",
)
builder.joint_q[7:] = [0.0, 1.0, 0.0, -1.0, 0.0, -1.0, 0.0, 1.0]
builder.joint_q[:7] = [0.0, 0.7, 0.0, *wp.quat_from_axis_angle((1.0, 0.0, 0.0), -math.pi * 0.5)]
if __name__ == "__main__":
run_env(AntEnvironment)
| warp-main | examples/env/env_ant.py |
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
###########################################################################
# Humanoid environment
#
# Shows how to set up a simulation of a rigid-body Humanoid articulation based
# on the OpenAI gym environment using the Environment class and MCJF
# importer. Note this example does not include a trained policy.
#
###########################################################################
import os
import math
import warp as wp
import warp.sim
from environment import Environment, run_env
class HumanoidEnvironment(Environment):
sim_name = "env_humanoid"
env_offset = (2.0, 0.0, 2.0)
opengl_render_settings = dict(scaling=1.0)
usd_render_settings = dict(scaling=100.0)
sim_substeps_euler = 32
sim_substeps_xpbd = 5
xpbd_settings = dict(
iterations=2,
joint_linear_relaxation=0.7,
joint_angular_relaxation=0.5,
rigid_contact_relaxation=1.0,
rigid_contact_con_weighting=True,
)
def create_articulation(self, builder):
wp.sim.parse_mjcf(
os.path.join(os.path.dirname(__file__), "../assets/nv_humanoid.xml"),
builder,
stiffness=0.0,
damping=0.1,
armature=0.007,
armature_scale=10.0,
contact_ke=1.0e4,
contact_kd=1.0e2,
contact_kf=1.0e2,
contact_mu=0.5,
contact_restitution=0.0,
limit_ke=1.0e2,
limit_kd=1.0e1,
enable_self_collisions=True,
up_axis="y",
)
builder.joint_q[:7] = [0.0, 1.7, 0.0, *wp.quat_from_axis_angle((1.0, 0.0, 0.0), -math.pi * 0.5)]
if __name__ == "__main__":
run_env(HumanoidEnvironment)
| warp-main | examples/env/env_humanoid.py |
# Copyright (c) 2023 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import sys
import os
import subprocess
import warp.config
from warp.utils import ScopedTimer
def run_cmd(cmd, capture=False):
if warp.config.verbose:
print(cmd)
try:
return subprocess.check_output(cmd, shell=True)
except subprocess.CalledProcessError as e:
if e.stdout:
print(e.stdout.decode())
if e.stderr:
print(e.stderr.decode())
        raise e
# cut-down version of vcvars64.bat that allows using
# custom toolchain locations
def set_msvc_compiler(msvc_path, sdk_path):
if "INCLUDE" not in os.environ:
os.environ["INCLUDE"] = ""
if "LIB" not in os.environ:
os.environ["LIB"] = ""
msvc_path = os.path.abspath(msvc_path)
sdk_path = os.path.abspath(sdk_path)
os.environ["INCLUDE"] += os.pathsep + os.path.join(msvc_path, "include")
os.environ["INCLUDE"] += os.pathsep + os.path.join(sdk_path, "include/winrt")
os.environ["INCLUDE"] += os.pathsep + os.path.join(sdk_path, "include/um")
os.environ["INCLUDE"] += os.pathsep + os.path.join(sdk_path, "include/ucrt")
os.environ["INCLUDE"] += os.pathsep + os.path.join(sdk_path, "include/shared")
os.environ["LIB"] += os.pathsep + os.path.join(msvc_path, "lib/x64")
os.environ["LIB"] += os.pathsep + os.path.join(sdk_path, "lib/ucrt/x64")
os.environ["LIB"] += os.pathsep + os.path.join(sdk_path, "lib/um/x64")
os.environ["PATH"] += os.pathsep + os.path.join(msvc_path, "bin/HostX64/x64")
os.environ["PATH"] += os.pathsep + os.path.join(sdk_path, "bin/x64")
warp.config.host_compiler = os.path.join(msvc_path, "bin", "HostX64", "x64", "cl.exe")
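# Example (sketch, hypothetical paths): pointing Warp at a standalone MSVC
# toolchain and Windows SDK without running vcvars64.bat:
#   set_msvc_compiler(
#       msvc_path="C:/toolchains/MSVC/14.29.30133",
#       sdk_path="C:/winsdk/10.0.19041.0",
#   )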
def find_host_compiler():
if os.name == "nt":
try:
# try and find an installed host compiler (msvc)
# runs vcvars and copies back the build environment
vswhere_path = r"%ProgramFiles(x86)%/Microsoft Visual Studio/Installer/vswhere.exe"
vswhere_path = os.path.expandvars(vswhere_path)
if not os.path.exists(vswhere_path):
return ""
vs_path = run_cmd(f'"{vswhere_path}" -latest -property installationPath').decode().rstrip()
vsvars_path = os.path.join(vs_path, "VC\\Auxiliary\\Build\\vcvars64.bat")
output = run_cmd(f'"{vsvars_path}" && set').decode()
for line in output.splitlines():
pair = line.split("=", 1)
if len(pair) >= 2:
os.environ[pair[0]] = pair[1]
cl_path = run_cmd("where cl.exe").decode("utf-8").rstrip()
cl_version = os.environ["VCToolsVersion"].split(".")
# ensure at least VS2019 version, see list of MSVC versions here https://en.wikipedia.org/wiki/Microsoft_Visual_C%2B%2B
cl_required_major = 14
cl_required_minor = 29
            if (int(cl_version[0]) < cl_required_major) or (
                int(cl_version[0]) == cl_required_major and int(cl_version[1]) < cl_required_minor
            ):
print(
f"Warp: MSVC found but compiler version too old, found {cl_version[0]}.{cl_version[1]}, but must be {cl_required_major}.{cl_required_minor} or higher, kernel host compilation will be disabled."
)
return ""
return cl_path
        except Exception:
            # couldn't find host compiler
            return ""
else:
# try and find g++
try:
return run_cmd("which g++").decode()
        except Exception:
return ""
def get_cuda_toolkit_version(cuda_home):
try:
# the toolkit version can be obtained by running "nvcc --version"
nvcc_path = os.path.join(cuda_home, "bin", "nvcc")
nvcc_version_output = subprocess.check_output([nvcc_path, "--version"]).decode("utf-8")
# search for release substring (e.g., "release 11.5")
import re
m = re.search(r"(?<=release )\d+\.\d+", nvcc_version_output)
if m is not None:
return tuple(int(x) for x in m.group(0).split("."))
else:
raise Exception("Failed to parse NVCC output")
except Exception as e:
print(f"Failed to determine CUDA Toolkit version: {e}")
def quote(path):
return '"' + path + '"'
def build_dll_for_arch(dll_path, cpp_paths, cu_path, libs, mode, arch, verify_fp=False, fast_math=False, quick=False):
cuda_home = warp.config.cuda_path
cuda_cmd = None
if quick:
cutlass_includes = ""
cutlass_enabled = "WP_ENABLE_CUTLASS=0"
else:
cutlass_home = "warp/native/cutlass"
cutlass_includes = f'-I"{cutlass_home}/include" -I"{cutlass_home}/tools/util/include"'
cutlass_enabled = "WP_ENABLE_CUTLASS=1"
if quick or cu_path is None:
cuda_compat_enabled = "WP_ENABLE_CUDA_COMPATIBILITY=0"
else:
cuda_compat_enabled = "WP_ENABLE_CUDA_COMPATIBILITY=1"
import pathlib
warp_home_path = pathlib.Path(__file__).parent
warp_home = warp_home_path.resolve()
nanovdb_home = warp_home_path.parent / "_build/host-deps/nanovdb/include"
# output stale, rebuild
if warp.config.verbose:
print(f"Building {dll_path}")
native_dir = os.path.join(warp_home, "native")
if cu_path:
# check CUDA Toolkit version
min_ctk_version = (11, 5)
ctk_version = get_cuda_toolkit_version(cuda_home) or min_ctk_version
if ctk_version < min_ctk_version:
raise Exception(
f"CUDA Toolkit version {min_ctk_version[0]}.{min_ctk_version[1]}+ is required (found {ctk_version[0]}.{ctk_version[1]} in {cuda_home})"
)
gencode_opts = []
if quick:
# minimum supported architectures (PTX)
gencode_opts += ["-gencode=arch=compute_52,code=compute_52", "-gencode=arch=compute_75,code=compute_75"]
else:
# generate code for all supported architectures
gencode_opts += [
# SASS for supported desktop/datacenter architectures
"-gencode=arch=compute_52,code=sm_52", # Maxwell
"-gencode=arch=compute_60,code=sm_60", # Pascal
"-gencode=arch=compute_61,code=sm_61",
"-gencode=arch=compute_70,code=sm_70", # Volta
"-gencode=arch=compute_75,code=sm_75", # Turing
"-gencode=arch=compute_80,code=sm_80", # Ampere
"-gencode=arch=compute_86,code=sm_86",
# SASS for supported mobile architectures (e.g. Tegra/Jetson)
# "-gencode=arch=compute_53,code=sm_53",
# "-gencode=arch=compute_62,code=sm_62",
# "-gencode=arch=compute_72,code=sm_72",
# "-gencode=arch=compute_87,code=sm_87",
]
# support for Ada and Hopper is available with CUDA Toolkit 11.8+
if ctk_version >= (11, 8):
gencode_opts += [
"-gencode=arch=compute_89,code=sm_89", # Ada
"-gencode=arch=compute_90,code=sm_90", # Hopper
# PTX for future hardware
"-gencode=arch=compute_90,code=compute_90",
]
else:
gencode_opts += [
# PTX for future hardware
"-gencode=arch=compute_86,code=compute_86",
]
nvcc_opts = gencode_opts + [
"-t0", # multithreaded compilation
"--extended-lambda",
]
if fast_math:
nvcc_opts.append("--use_fast_math")
# is the library being built with CUDA enabled?
cuda_enabled = "WP_ENABLE_CUDA=1" if (cu_path is not None) else "WP_ENABLE_CUDA=0"
if os.name == "nt":
if warp.config.host_compiler:
host_linker = os.path.join(os.path.dirname(warp.config.host_compiler), "link.exe")
else:
raise RuntimeError("Warp build error: No host compiler was found")
cpp_includes = f' /I"{warp_home_path.parent}/external/llvm-project/out/install/{mode}-{arch}/include"'
cpp_includes += f' /I"{warp_home_path.parent}/_build/host-deps/llvm-project/release-{arch}/include"'
cuda_includes = f' /I"{cuda_home}/include"' if cu_path else ""
includes = cpp_includes + cuda_includes
# nvrtc_static.lib is built with /MT and _ITERATOR_DEBUG_LEVEL=0 so if we link it in we must match these options
if cu_path or mode != "debug":
runtime = "/MT"
iter_dbg = "_ITERATOR_DEBUG_LEVEL=0"
debug = "NDEBUG"
else:
runtime = "/MTd"
iter_dbg = "_ITERATOR_DEBUG_LEVEL=2"
debug = "_DEBUG"
if warp.config.mode == "debug":
cpp_flags = f'/nologo {runtime} /Zi /Od /D "{debug}" /D WP_ENABLE_DEBUG=1 /D "{cuda_enabled}" /D "{cutlass_enabled}" /D "{cuda_compat_enabled}" /D "{iter_dbg}" /I"{native_dir}" /I"{nanovdb_home}" {includes}'
linkopts = ["/DLL", "/DEBUG"]
elif warp.config.mode == "release":
cpp_flags = f'/nologo {runtime} /Ox /D "{debug}" /D WP_ENABLE_DEBUG=0 /D "{cuda_enabled}" /D "{cutlass_enabled}" /D "{cuda_compat_enabled}" /D "{iter_dbg}" /I"{native_dir}" /I"{nanovdb_home}" {includes}'
linkopts = ["/DLL"]
else:
raise RuntimeError(f"Unrecognized build configuration (debug, release), got: {mode}")
if verify_fp:
cpp_flags += ' /D "WP_VERIFY_FP"'
if fast_math:
cpp_flags += " /fp:fast"
with ScopedTimer("build", active=warp.config.verbose):
for cpp_path in cpp_paths:
cpp_out = cpp_path + ".obj"
linkopts.append(quote(cpp_out))
cpp_cmd = f'"{warp.config.host_compiler}" {cpp_flags} -c "{cpp_path}" /Fo"{cpp_out}"'
run_cmd(cpp_cmd)
if cu_path:
cu_out = cu_path + ".o"
if mode == "debug":
cuda_cmd = f'"{cuda_home}/bin/nvcc" --compiler-options=/MT,/Zi,/Od -g -G -O0 -DNDEBUG -D_ITERATOR_DEBUG_LEVEL=0 -I"{native_dir}" -I"{nanovdb_home}" -line-info {" ".join(nvcc_opts)} -DWP_ENABLE_CUDA=1 -D{cutlass_enabled} {cutlass_includes} -o "{cu_out}" -c "{cu_path}"'
elif mode == "release":
cuda_cmd = f'"{cuda_home}/bin/nvcc" -O3 {" ".join(nvcc_opts)} -I"{native_dir}" -I"{nanovdb_home}" -DNDEBUG -DWP_ENABLE_CUDA=1 -D{cutlass_enabled} {cutlass_includes} -o "{cu_out}" -c "{cu_path}"'
with ScopedTimer("build_cuda", active=warp.config.verbose):
run_cmd(cuda_cmd)
linkopts.append(quote(cu_out))
linkopts.append(
f'cudart_static.lib nvrtc_static.lib nvrtc-builtins_static.lib nvptxcompiler_static.lib ws2_32.lib user32.lib /LIBPATH:"{cuda_home}/lib/x64"'
)
with ScopedTimer("link", active=warp.config.verbose):
link_cmd = f'"{host_linker}" {" ".join(linkopts + libs)} /out:"{dll_path}"'
run_cmd(link_cmd)
else:
cpp_includes = f' -I"{warp_home_path.parent}/external/llvm-project/out/install/{mode}-{arch}/include"'
cpp_includes += f' -I"{warp_home_path.parent}/_build/host-deps/llvm-project/release-{arch}/include"'
cuda_includes = f' -I"{cuda_home}/include"' if cu_path else ""
includes = cpp_includes + cuda_includes
if sys.platform == "darwin":
target = f"--target={arch}-apple-macos11"
else:
target = ""
if mode == "debug":
cpp_flags = f'{target} -O0 -g -fno-rtti -D_DEBUG -DWP_ENABLE_DEBUG=1 -D{cuda_enabled} -D{cutlass_enabled} -D{cuda_compat_enabled} -fPIC -fvisibility=hidden --std=c++14 -D_GLIBCXX_USE_CXX11_ABI=0 -fkeep-inline-functions -I"{native_dir}" {includes}'
if mode == "release":
cpp_flags = f'{target} -O3 -DNDEBUG -DWP_ENABLE_DEBUG=0 -D{cuda_enabled} -D{cutlass_enabled} -D{cuda_compat_enabled} -fPIC -fvisibility=hidden --std=c++14 -D_GLIBCXX_USE_CXX11_ABI=0 -I"{native_dir}" {includes}'
if verify_fp:
cpp_flags += " -DWP_VERIFY_FP"
if fast_math:
cpp_flags += " -ffast-math"
ld_inputs = []
with ScopedTimer("build", active=warp.config.verbose):
for cpp_path in cpp_paths:
cpp_out = cpp_path + ".o"
ld_inputs.append(quote(cpp_out))
build_cmd = f'g++ {cpp_flags} -c "{cpp_path}" -o "{cpp_out}"'
run_cmd(build_cmd)
if cu_path:
cu_out = cu_path + ".o"
if mode == "debug":
cuda_cmd = f'"{cuda_home}/bin/nvcc" -g -G -O0 --compiler-options -fPIC,-fvisibility=hidden -D_DEBUG -D_ITERATOR_DEBUG_LEVEL=0 -line-info {" ".join(nvcc_opts)} -DWP_ENABLE_CUDA=1 -I"{native_dir}" -D{cutlass_enabled} {cutlass_includes} -o "{cu_out}" -c "{cu_path}"'
elif mode == "release":
cuda_cmd = f'"{cuda_home}/bin/nvcc" -O3 --compiler-options -fPIC,-fvisibility=hidden {" ".join(nvcc_opts)} -DNDEBUG -DWP_ENABLE_CUDA=1 -I"{native_dir}" -D{cutlass_enabled} {cutlass_includes} -o "{cu_out}" -c "{cu_path}"'
with ScopedTimer("build_cuda", active=warp.config.verbose):
run_cmd(cuda_cmd)
ld_inputs.append(quote(cu_out))
ld_inputs.append(
f'-L"{cuda_home}/lib64" -lcudart_static -lnvrtc_static -lnvrtc-builtins_static -lnvptxcompiler_static -lpthread -ldl -lrt'
)
if sys.platform == "darwin":
opt_no_undefined = "-Wl,-undefined,error"
opt_exclude_libs = ""
else:
opt_no_undefined = "-Wl,--no-undefined"
opt_exclude_libs = "-Wl,--exclude-libs,ALL"
with ScopedTimer("link", active=warp.config.verbose):
origin = "@loader_path" if (sys.platform == "darwin") else "$ORIGIN"
link_cmd = f"g++ {target} -shared -Wl,-rpath,'{origin}' {opt_no_undefined} {opt_exclude_libs} -o '{dll_path}' {' '.join(ld_inputs + libs)}"
run_cmd(link_cmd)
# Strip symbols to reduce the binary size
if sys.platform == "darwin":
run_cmd(f"strip -x {dll_path}") # Strip all local symbols
else: # Linux
# Strip all symbols except for those needed to support debugging JIT-compiled code
run_cmd(
f"strip --strip-all --keep-symbol=__jit_debug_register_code --keep-symbol=__jit_debug_descriptor {dll_path}"
)
def build_dll(dll_path, cpp_paths, cu_path, libs=[], mode="release", verify_fp=False, fast_math=False, quick=False):
if sys.platform == "darwin":
# create a universal binary by combining x86-64 and AArch64 builds
build_dll_for_arch(dll_path + "-x86_64", cpp_paths, cu_path, libs, mode, "x86_64", verify_fp, fast_math, quick)
build_dll_for_arch(dll_path + "-arm64", cpp_paths, cu_path, libs, mode, "arm64", verify_fp, fast_math, quick)
run_cmd(f"lipo -create -output {dll_path} {dll_path}-x86_64 {dll_path}-arm64")
os.remove(f"{dll_path}-x86_64")
os.remove(f"{dll_path}-arm64")
else:
build_dll_for_arch(dll_path, cpp_paths, cu_path, libs, mode, "x86_64", verify_fp, fast_math, quick)
| warp-main | warp/build_dll.py |
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import os
import warp.config
from warp.thirdparty import appdirs
# builds cuda source to PTX or CUBIN using NVRTC (output type determined by output_path extension)
def build_cuda(cu_path, arch, output_path, config="release", verify_fp=False, fast_math=False):
with open(cu_path, "rb") as src_file:
src = src_file.read()
cu_path = cu_path.encode("utf-8")
inc_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "native").encode("utf-8")
output_path = output_path.encode("utf-8")
if warp.config.llvm_cuda:
warp.context.runtime.llvm.compile_cuda(src, cu_path, inc_path, output_path, False)
else:
err = warp.context.runtime.core.cuda_compile_program(
src, arch, inc_path, config == "debug", warp.config.verbose, verify_fp, fast_math, output_path
)
if err:
raise Exception("CUDA build failed")
# load PTX or CUBIN as a CUDA runtime module (input type determined by input_path extension)
def load_cuda(input_path, device):
if not device.is_cuda:
raise ("Not a CUDA device")
return warp.context.runtime.core.cuda_load_module(device.context, input_path.encode("utf-8"))
def build_cpu(obj_path, cpp_path, mode="release", verify_fp=False, fast_math=False):
with open(cpp_path, "rb") as cpp:
src = cpp.read()
cpp_path = cpp_path.encode("utf-8")
inc_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "native").encode("utf-8")
obj_path = obj_path.encode("utf-8")
warp.context.runtime.llvm.compile_cpp(src, cpp_path, inc_path, obj_path, mode == "debug")
kernel_bin_dir = None
kernel_gen_dir = None
def init_kernel_cache(path=None):
"""Initialize kernel cache directory.
This function is used during Warp initialization, but it can also be called directly to change the cache location.
If the path is not explicitly specified, a default location will be chosen based on OS-specific conventions.
To change the default cache location, set warp.config.kernel_cache_dir before calling warp.init().
"""
if path is not None:
cache_root_dir = os.path.realpath(path)
else:
cache_root_dir = appdirs.user_cache_dir(
appname="warp", appauthor="NVIDIA Corporation", version=warp.config.version
)
cache_bin_dir = os.path.join(cache_root_dir, "bin")
cache_gen_dir = os.path.join(cache_root_dir, "gen")
if not os.path.isdir(cache_root_dir):
# print("Creating cache directory '%s'" % cache_root_dir)
os.makedirs(cache_root_dir, exist_ok=True)
if not os.path.isdir(cache_gen_dir):
# print("Creating codegen directory '%s'" % cache_gen_dir)
os.makedirs(cache_gen_dir, exist_ok=True)
if not os.path.isdir(cache_bin_dir):
# print("Creating binary directory '%s'" % cache_bin_dir)
os.makedirs(cache_bin_dir, exist_ok=True)
warp.config.kernel_cache_dir = cache_root_dir
global kernel_bin_dir, kernel_gen_dir
kernel_bin_dir = cache_bin_dir
kernel_gen_dir = cache_gen_dir
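# Example (sketch, hypothetical path): redirecting the kernel cache before
# initializing Warp, as described in the docstring above:
#   import warp as wp
#   wp.config.kernel_cache_dir = "/tmp/warp_cache"
#   wp.init()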
def clear_kernel_cache():
"""Clear the kernel cache."""
import glob
paths = []
if kernel_bin_dir is not None and os.path.isdir(kernel_bin_dir):
pattern = os.path.join(kernel_bin_dir, "wp_*")
paths += glob.glob(pattern)
if kernel_gen_dir is not None and os.path.isdir(kernel_gen_dir):
pattern = os.path.join(kernel_gen_dir, "wp_*")
paths += glob.glob(pattern)
for p in paths:
if os.path.isfile(p):
os.remove(p)
| warp-main | warp/build.py |
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
version = "1.0.0-beta.2"
cuda_path = (
None # path to local CUDA toolchain, if None at init time warp will attempt to find the SDK using CUDA_PATH env var
)
verify_fp = False # verify inputs and outputs are finite after each launch
verify_cuda = False # if true will check CUDA errors after each kernel launch / memory operation
print_launches = False # if true will print out launch information
mode = "release"
verbose = False # print extra informative messages
quiet = False # suppress all output except errors and warnings
host_compiler = None # user can specify host compiler here, otherwise will attempt to find one automatically
cache_kernels = True
kernel_cache_dir = None # path to kernel cache directory, if None a default path will be used
cuda_output = (
None # preferred CUDA output format for kernels ("ptx" or "cubin"), determined automatically if unspecified
)
ptx_target_arch = 70 # target architecture for PTX generation, defaults to the lowest architecture that supports all of Warp's features
enable_backward = True # whether to compiler the backward passes of the kernels
llvm_cuda = False # use Clang/LLVM instead of NVRTC to compile CUDA
| warp-main | warp/config.py |
import ctypes
import math
from typing import Any
import warp
from warp.types import *
class fabricbucket_t(ctypes.Structure):
_fields_ = [
("index_start", ctypes.c_size_t),
("index_end", ctypes.c_size_t),
("ptr", ctypes.c_void_p),
("lengths", ctypes.c_void_p),
]
def __init__(self, index_start=0, index_end=0, ptr=None, lengths=None):
self.index_start = index_start
self.index_end = index_end
self.ptr = ctypes.c_void_p(ptr)
self.lengths = ctypes.c_void_p(lengths)
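# A Fabric array is one logical array stitched together from several buckets:
# bucket i covers the contiguous global index range [index_start, index_end)
# and stores its elements in memory starting at ptr, with optional per-element
# lengths for array-valued attributes.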
class fabricarray_t(ctypes.Structure):
_fields_ = [
("buckets", ctypes.c_void_p), # array of fabricbucket_t on the correct device
("nbuckets", ctypes.c_size_t),
("size", ctypes.c_size_t),
]
def __init__(self, buckets=None, nbuckets=0, size=0):
self.buckets = ctypes.c_void_p(buckets)
self.nbuckets = nbuckets
self.size = size
class indexedfabricarray_t(ctypes.Structure):
_fields_ = [
("fa", fabricarray_t),
("indices", ctypes.c_void_p),
("size", ctypes.c_size_t),
]
def __init__(self, fa=None, indices=None):
if fa is None:
self.fa = fabricarray_t()
else:
self.fa = fa.__ctype__()
if indices is None:
self.indices = ctypes.c_void_p(None)
self.size = 0
else:
self.indices = ctypes.c_void_p(indices.ptr)
self.size = indices.size
def fabric_to_warp_dtype(type_info, attrib_name):
if not type_info[0]:
raise RuntimeError(f"Attribute '{attrib_name}' cannot be used in Warp")
base_type_dict = {
"b": warp.bool, # boolean
"i1": warp.int8,
"i2": warp.int16,
"i4": warp.int32,
"i8": warp.int64,
"u1": warp.uint8,
"u2": warp.uint16,
"u4": warp.uint32,
"u8": warp.uint64,
"f2": warp.float16,
"f4": warp.float32,
"f8": warp.float64,
}
base_dtype = base_type_dict.get(type_info[1])
if base_dtype is None:
raise RuntimeError(f"Attribute '{attrib_name}' base data type '{type_info[1]}' is not supported in Warp")
elem_count = type_info[2]
role = type_info[4]
if role in ("text", "path"):
raise RuntimeError(f"Attribute '{attrib_name}' role '{role}' is not supported in Warp")
if elem_count > 1:
# vector or matrix type
if role == "quat" and elem_count == 4:
return quaternion(base_dtype)
elif role in ("matrix", "transform", "frame"):
# only square matrices are currently supported
mat_size = int(math.sqrt(elem_count))
assert mat_size * mat_size == elem_count
return matrix((mat_size, mat_size), base_dtype)
else:
return vector(elem_count, base_dtype)
else:
# scalar type
return base_dtype
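# A minimal sketch (assumed example values, following the 5-tuple layout read
# above: (valid, base_type, element_count, array_depth, role)): an attribute
# storing three float32 components with a generic role maps to a Warp vec3:
#
#   dtype = fabric_to_warp_dtype((True, "f4", 3, 0, ""), "points")
#   # dtype is now vector(length=3, dtype=warp.float32), i.e. wp.vec3f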
class fabricarray(noncontiguous_array_base[T]):
# member attributes available during code-gen (e.g.: d = arr.shape[0])
# (initialized when needed)
_vars = None
def __init__(self, data=None, attrib=None, dtype=Any, ndim=None):
super().__init__(ARRAY_TYPE_FABRIC)
if data is not None:
from .context import runtime
# ensure the attribute name was also specified
if not isinstance(attrib, str):
raise ValueError(f"Invalid attribute name: {attrib}")
# get the fabric interface dictionary
if isinstance(data, dict):
iface = data
elif hasattr(data, "__fabric_arrays_interface__"):
iface = data.__fabric_arrays_interface__
else:
raise ValueError(
"Invalid data argument for fabricarray: expected dict or object with __fabric_arrays_interface__"
)
version = iface.get("version")
if version != 1:
raise ValueError(f"Unsupported Fabric interface version: {version}")
device = iface.get("device")
if not isinstance(device, str):
raise ValueError(f"Invalid Fabric interface device: {device}")
self.device = runtime.get_device(device)
attribs = iface.get("attribs")
if not isinstance(attribs, dict):
raise ValueError("Failed to get Fabric interface attributes")
# look up attribute info by name
attrib_info = attribs.get(attrib)
if not isinstance(attrib_info, dict):
raise ValueError(f"Failed to get attribute '{attrib}'")
type_info = attrib_info["type"]
assert len(type_info) == 5
self.dtype = fabric_to_warp_dtype(type_info, attrib)
self.access = attrib_info["access"]
pointers = attrib_info["pointers"]
counts = attrib_info["counts"]
if not (hasattr(pointers, "__len__") and hasattr(counts, "__len__") and len(pointers) == len(counts)):
raise RuntimeError("Attribute pointers and counts must be lists of the same size")
# check whether it's an array
array_depth = type_info[3]
if array_depth == 0:
self.ndim = 1
array_lengths = None
elif array_depth == 1:
self.ndim = 2
array_lengths = attrib_info["array_lengths"]
if not hasattr(array_lengths, "__len__") or len(array_lengths) != len(pointers):
raise RuntimeError(
"Attribute `array_lengths` must be a list of the same size as `pointers` and `counts`"
)
else:
raise ValueError(f"Invalid attribute array depth: {array_depth}")
num_buckets = len(pointers)
size = 0
buckets = (fabricbucket_t * num_buckets)()
for i in range(num_buckets):
buckets[i].index_start = size
buckets[i].index_end = size + counts[i]
buckets[i].ptr = pointers[i]
if array_lengths:
buckets[i].lengths = array_lengths[i]
size += counts[i]
if self.device.is_cuda:
# copy bucket info to device
with warp.ScopedStream(self.device.null_stream):
buckets_size = ctypes.sizeof(buckets)
buckets_ptr = self.device.allocator.alloc(buckets_size)
runtime.core.memcpy_h2d(self.device.context, buckets_ptr, ctypes.addressof(buckets), buckets_size)
else:
buckets_ptr = ctypes.addressof(buckets)
self.buckets = buckets
self.size = size
self.shape = (size,)
self.ctype = fabricarray_t(buckets_ptr, num_buckets, size)
else:
# empty array or type annotation
self.dtype = dtype
self.ndim = ndim or 1
self.device = None
self.access = None
self.buckets = None
self.size = 0
self.shape = (0,)
self.ctype = fabricarray_t()
def __del__(self):
# release the GPU copy of bucket info
if self.buckets is not None and self.device.is_cuda:
buckets_size = ctypes.sizeof(self.buckets)
with self.device.context_guard:
self.device.allocator.free(self.ctype.buckets, buckets_size)
def __ctype__(self):
return self.ctype
def __len__(self):
return self.size
def __str__(self):
if self.device is None:
# type annotation
return f"fabricarray{self.dtype}"
else:
return str(self.numpy())
def __getitem__(self, key):
if isinstance(key, array):
return indexedfabricarray(fa=self, indices=key)
else:
raise ValueError(f"Fabric arrays only support indexing using index arrays, got key of type {type(key)}")
@property
def vars(self):
# member attributes available during code-gen (e.g.: d = arr.shape[0])
# Note: we use a shared dict for all fabricarray instances
if fabricarray._vars is None:
fabricarray._vars = {"size": warp.codegen.Var("size", uint64)}
return fabricarray._vars
def fill_(self, value):
# TODO?
# filling Fabric arrays of arrays is not supported, because they are jagged arrays of arbitrary lengths
if self.ndim > 1:
raise RuntimeError("Filling Fabric arrays of arrays is not supported")
super().fill_(value)
# special case for fabric array of arrays
# equivalent to calling fabricarray(..., ndim=2)
def fabricarrayarray(**kwargs):
kwargs["ndim"] = 2
return fabricarray(**kwargs)
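# A minimal usage sketch (hypothetical `stage` object): any provider exposing
# __fabric_arrays_interface__ (version 1) can be wrapped without copying data:
#
#   import warp as wp
#   fa = wp.fabricarray(data=stage, attrib="points")    # 1d attribute view
#   sub = fa[wp.array([0, 2, 4], dtype=int)]            # indexedfabricarray view
#   fa.fill_(0.0)                                       # supported for ndim == 1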
class indexedfabricarray(noncontiguous_array_base[T]):
# member attributes available during code-gen (e.g.: d = arr.shape[0])
# (initialized when needed)
_vars = None
def __init__(self, fa=None, indices=None, dtype=None, ndim=None):
super().__init__(ARRAY_TYPE_FABRIC_INDEXED)
if fa is not None:
check_index_array(indices, fa.device)
self.fa = fa
self.indices = indices
self.dtype = fa.dtype
self.ndim = fa.ndim
self.device = fa.device
self.size = indices.size
self.shape = (indices.size,)
self.ctype = indexedfabricarray_t(fa, indices)
else:
# allow empty indexedarrays in type annotations
self.fa = None
self.indices = None
self.dtype = dtype
self.ndim = ndim or 1
self.device = None
self.size = 0
self.shape = (0,)
self.ctype = indexedfabricarray_t()
def __ctype__(self):
return self.ctype
def __len__(self):
return self.size
def __str__(self):
if self.device is None:
# type annotation
return f"indexedfabricarray{self.dtype}"
else:
return str(self.numpy())
@property
def vars(self):
# member attributes available during code-gen (e.g.: d = arr.shape[0])
# Note: we use a shared dict for all indexedfabricarray instances
if indexedfabricarray._vars is None:
indexedfabricarray._vars = {"size": warp.codegen.Var("size", uint64)}
return indexedfabricarray._vars
def fill_(self, value):
# TODO?
# filling Fabric arrays of arrays is not supported, because they are jagged arrays of arbitrary lengths
if self.ndim > 1:
raise RuntimeError("Filling indexed Fabric arrays of arrays is not supported")
super().fill_(value)
# special case for indexed fabric array of arrays
# equivalent to calling fabricarray(..., ndim=2)
def indexedfabricarrayarray(**kwargs):
kwargs["ndim"] = 2
return indexedfabricarray(**kwargs)
| warp-main | warp/fabric.py |
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import builtins
from typing import Any, Callable, Dict, List, Tuple
from warp.types import *
from .context import add_builtin
def sametype_value_func(default):
def fn(args, kwds, _):
if args is None:
return default
if not all(types_equal(args[0].type, a.type) for a in args[1:]):
raise RuntimeError(f"Input types must be the same, found: {[type_repr(a.type) for a in args]}")
return args[0].type
return fn
# ---------------------------------
# Scalar Math
add_builtin(
"min",
input_types={"x": Scalar, "y": Scalar},
value_func=sametype_value_func(Scalar),
doc="Return the minimum of two scalars.",
group="Scalar Math",
)
add_builtin(
"max",
input_types={"x": Scalar, "y": Scalar},
value_func=sametype_value_func(Scalar),
doc="Return the maximum of two scalars.",
group="Scalar Math",
)
add_builtin(
"clamp",
input_types={"x": Scalar, "a": Scalar, "b": Scalar},
value_func=sametype_value_func(Scalar),
doc="Clamp the value of x to the range [a, b].",
group="Scalar Math",
)
add_builtin(
"abs",
input_types={"x": Scalar},
value_func=sametype_value_func(Scalar),
doc="Return the absolute value of x.",
group="Scalar Math",
)
add_builtin(
"sign",
input_types={"x": Scalar},
value_func=sametype_value_func(Scalar),
doc="Return -1 if x < 0, return 1 otherwise.",
group="Scalar Math",
)
add_builtin(
"step",
input_types={"x": Scalar},
value_func=sametype_value_func(Scalar),
doc="Return 1.0 if x < 0.0, return 0.0 otherwise.",
group="Scalar Math",
)
add_builtin(
"nonzero",
input_types={"x": Scalar},
value_func=sametype_value_func(Scalar),
doc="Return 1.0 if x is not equal to zero, return 0.0 otherwise.",
group="Scalar Math",
)
add_builtin(
"sin",
input_types={"x": Float},
value_func=sametype_value_func(Float),
doc="Return the sine of x in radians.",
group="Scalar Math",
)
add_builtin(
"cos",
input_types={"x": Float},
value_func=sametype_value_func(Float),
doc="Return the cosine of x in radians.",
group="Scalar Math",
)
add_builtin(
"acos",
input_types={"x": Float},
value_func=sametype_value_func(Float),
doc="Return arccos of x in radians. Inputs are automatically clamped to [-1.0, 1.0].",
group="Scalar Math",
)
add_builtin(
"asin",
input_types={"x": Float},
value_func=sametype_value_func(Float),
doc="Return arcsin of x in radians. Inputs are automatically clamped to [-1.0, 1.0].",
group="Scalar Math",
)
add_builtin(
"sqrt",
input_types={"x": Float},
value_func=sametype_value_func(Float),
doc="Return the sqrt of x, where x is positive.",
group="Scalar Math",
)
add_builtin(
"tan",
input_types={"x": Float},
value_func=sametype_value_func(Float),
doc="Return tangent of x in radians.",
group="Scalar Math",
)
add_builtin(
"atan",
input_types={"x": Float},
value_func=sametype_value_func(Float),
doc="Return arctan of x.",
group="Scalar Math",
)
add_builtin(
"atan2",
input_types={"y": Float, "x": Float},
value_func=sametype_value_func(Float),
doc="Return the arctangent of y/x (atan2), using the signs of y and x to determine the quadrant.",
group="Scalar Math",
)
add_builtin(
"sinh",
input_types={"x": Float},
value_func=sametype_value_func(Float),
doc="Return the sinh of x.",
group="Scalar Math",
)
add_builtin(
"cosh",
input_types={"x": Float},
value_func=sametype_value_func(Float),
doc="Return the cosh of x.",
group="Scalar Math",
)
add_builtin(
"tanh",
input_types={"x": Float},
value_func=sametype_value_func(Float),
doc="Return the tanh of x.",
group="Scalar Math",
)
add_builtin(
"degrees",
input_types={"x": Float},
value_func=sametype_value_func(Float),
doc="Convert radians into degrees.",
group="Scalar Math",
)
add_builtin(
"radians",
input_types={"x": Float},
value_func=sametype_value_func(Float),
doc="Convert degrees into radians.",
group="Scalar Math",
)
add_builtin(
"log",
input_types={"x": Float},
value_func=sametype_value_func(Float),
doc="Return the natural log (base-e) of x, where x is positive.",
group="Scalar Math",
)
add_builtin(
"log2",
input_types={"x": Float},
value_func=sametype_value_func(Float),
doc="Return the binary log (base-2) of x, where x is positive.",
group="Scalar Math",
)
add_builtin(
"log10",
input_types={"x": Float},
value_func=sametype_value_func(Float),
doc="Return the common log (base-10) of x, where x is positive.",
group="Scalar Math",
)
add_builtin(
"exp",
input_types={"x": Float},
value_func=sametype_value_func(Float),
doc="Return base-e exponential, e^x.",
group="Scalar Math",
)
add_builtin(
"pow",
input_types={"x": Float, "y": Float},
value_func=sametype_value_func(Float),
doc="Return the result of x raised to power of y.",
group="Scalar Math",
)
add_builtin(
"round",
input_types={"x": Float},
value_func=sametype_value_func(Float),
group="Scalar Math",
doc="""Calculate the nearest integer value, rounding halfway cases away from zero.
This is the most intuitive form of rounding in the colloquial sense, but can be slower than other options like ``warp.rint()``.
Differs from ``numpy.round()``, which behaves the same way as ``numpy.rint()``.""",
)
add_builtin(
"rint",
input_types={"x": Float},
value_func=sametype_value_func(Float),
group="Scalar Math",
doc="""Calculate the nearest integer value, rounding halfway cases to nearest even integer.
It is generally faster than ``warp.round()``.
Equivalent to ``numpy.rint()``.""",
)
add_builtin(
"trunc",
input_types={"x": Float},
value_func=sametype_value_func(Float),
group="Scalar Math",
doc="""Calculate the nearest integer that is closer to zero than x.
In other words, it discards the fractional part of x.
It is similar to casting ``float(int(x))``, but preserves the negative sign when x is in the range (-1.0, -0.0].
Equivalent to ``numpy.trunc()`` and ``numpy.fix()``.""",
)
add_builtin(
"floor",
input_types={"x": Float},
value_func=sametype_value_func(Float),
group="Scalar Math",
doc="""Calculate the largest integer that is less than or equal to x.""",
)
add_builtin(
"ceil",
input_types={"x": Float},
value_func=sametype_value_func(Float),
group="Scalar Math",
doc="""Calculate the smallest integer that is greater than or equal to x.""",
)
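# A minimal sketch (illustrative, not part of this module) contrasting the
# rounding builtins registered above inside a Warp kernel:
#
#   import warp as wp
#
#   @wp.kernel
#   def rounding(x: wp.array(dtype=float), out: wp.array(dtype=float, ndim=2)):
#       tid = wp.tid()
#       v = x[tid]
#       out[tid, 0] = wp.round(v)  # halfway away from zero: round(0.5) == 1.0
#       out[tid, 1] = wp.rint(v)   # halfway to even: rint(0.5) == 0.0
#       out[tid, 2] = wp.trunc(v)  # toward zero: trunc(-1.7) == -1.0
#       out[tid, 3] = wp.floor(v)  # largest integer <= v
#       out[tid, 4] = wp.ceil(v)   # smallest integer >= v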
def infer_scalar_type(args):
if args is None:
return Scalar
def iterate_scalar_types(args):
for a in args:
if hasattr(a.type, "_wp_scalar_type_"):
yield a.type._wp_scalar_type_
elif a.type in scalar_types:
yield a.type
scalarTypes = set(iterate_scalar_types(args))
if len(scalarTypes) > 1:
raise RuntimeError(
f"Couldn't figure out return type as arguments have multiple precisions: {list(scalarTypes)}"
)
return list(scalarTypes)[0]
def sametype_scalar_value_func(args, kwds, _):
if args is None:
return Scalar
if not all(types_equal(args[0].type, a.type) for a in args[1:]):
raise RuntimeError(f"Input types must be exactly the same, {[a.type for a in args]}")
return infer_scalar_type(args)
# ---------------------------------
# Vector Math
add_builtin(
"dot",
input_types={"x": vector(length=Any, dtype=Scalar), "y": vector(length=Any, dtype=Scalar)},
value_func=sametype_scalar_value_func,
group="Vector Math",
doc="Compute the dot product between two vectors.",
)
add_builtin(
"ddot",
input_types={"x": matrix(shape=(Any, Any), dtype=Scalar), "y": matrix(shape=(Any, Any), dtype=Scalar)},
value_func=sametype_scalar_value_func,
group="Vector Math",
doc="Compute the double dot product between two matrices.",
)
add_builtin(
"min",
input_types={"x": vector(length=Any, dtype=Scalar), "y": vector(length=Any, dtype=Scalar)},
value_func=sametype_value_func(vector(length=Any, dtype=Scalar)),
doc="Return the element wise minimum of two vectors.",
group="Vector Math",
)
add_builtin(
"max",
input_types={"x": vector(length=Any, dtype=Scalar), "y": vector(length=Any, dtype=Scalar)},
value_func=sametype_value_func(vector(length=Any, dtype=Scalar)),
doc="Return the element wise maximum of two vectors.",
group="Vector Math",
)
add_builtin(
"min",
input_types={"v": vector(length=Any, dtype=Scalar)},
value_func=sametype_scalar_value_func,
doc="Return the minimum element of a vector.",
group="Vector Math",
)
add_builtin(
"max",
input_types={"v": vector(length=Any, dtype=Scalar)},
value_func=sametype_scalar_value_func,
doc="Return the maximum element of a vector.",
group="Vector Math",
)
add_builtin(
"argmin",
input_types={"v": vector(length=Any, dtype=Scalar)},
value_func=lambda args, kwds, _: warp.uint32,
doc="Return the index of the minimum element of a vector.",
group="Vector Math",
missing_grad=True,
)
add_builtin(
"argmax",
input_types={"v": vector(length=Any, dtype=Scalar)},
value_func=lambda args, kwds, _: warp.uint32,
doc="Return the index of the maximum element of a vector.",
group="Vector Math",
missing_grad=True,
)
def value_func_outer(args, kwds, _):
if args is None:
return matrix(shape=(Any, Any), dtype=Scalar)
scalarType = infer_scalar_type(args)
vectorLengths = [i.type._length_ for i in args]
return matrix(shape=(vectorLengths), dtype=scalarType)
add_builtin(
"outer",
input_types={"x": vector(length=Any, dtype=Scalar), "y": vector(length=Any, dtype=Scalar)},
value_func=value_func_outer,
group="Vector Math",
doc="Compute the outer product x*y^T for two vectors.",
)
add_builtin(
"cross",
input_types={"x": vector(length=3, dtype=Scalar), "y": vector(length=3, dtype=Scalar)},
value_func=sametype_value_func(vector(length=3, dtype=Scalar)),
group="Vector Math",
doc="Compute the cross product of two 3d vectors.",
)
add_builtin(
"skew",
input_types={"x": vector(length=3, dtype=Scalar)},
value_func=lambda args, kwds, _: matrix(shape=(3, 3), dtype=args[0].type._wp_scalar_type_),
group="Vector Math",
doc="Compute the skew symmetric matrix for a 3d vector.",
)
add_builtin(
"length",
input_types={"x": vector(length=Any, dtype=Float)},
value_func=sametype_scalar_value_func,
group="Vector Math",
doc="Compute the length of a vector.",
)
add_builtin(
"length",
input_types={"x": quaternion(dtype=Float)},
value_func=sametype_scalar_value_func,
group="Vector Math",
doc="Compute the length of a quaternion.",
)
add_builtin(
"length_sq",
input_types={"x": vector(length=Any, dtype=Scalar)},
value_func=sametype_scalar_value_func,
group="Vector Math",
doc="Compute the squared length of a vector.",
)
add_builtin(
"length_sq",
input_types={"x": quaternion(dtype=Scalar)},
value_func=sametype_scalar_value_func,
group="Vector Math",
doc="Compute the squared length of a quaternion.",
)
add_builtin(
"normalize",
input_types={"x": vector(length=Any, dtype=Float)},
value_func=sametype_value_func(vector(length=Any, dtype=Scalar)),
group="Vector Math",
doc="Compute the normalized value of x; if length(x) is 0 then the zero vector is returned.",
)
add_builtin(
"normalize",
input_types={"x": quaternion(dtype=Float)},
value_func=sametype_value_func(quaternion(dtype=Scalar)),
group="Vector Math",
doc="Compute the normalized value of x; if length(x) is 0 then the zero quaternion is returned.",
)
add_builtin(
"transpose",
input_types={"m": matrix(shape=(Any, Any), dtype=Scalar)},
value_func=lambda args, kwds, _: matrix(
shape=(args[0].type._shape_[1], args[0].type._shape_[0]), dtype=args[0].type._wp_scalar_type_
),
group="Vector Math",
doc="Return the transpose of the matrix m",
)
def value_func_mat_inv(args, kwds, _):
if args is None:
return matrix(shape=(Any, Any), dtype=Float)
return args[0].type
add_builtin(
"inverse",
input_types={"m": matrix(shape=(2, 2), dtype=Float)},
value_func=value_func_mat_inv,
group="Vector Math",
doc="Return the inverse of a 2x2 matrix m",
)
add_builtin(
"inverse",
input_types={"m": matrix(shape=(3, 3), dtype=Float)},
value_func=value_func_mat_inv,
group="Vector Math",
doc="Return the inverse of a 3x3 matrix m",
)
add_builtin(
"inverse",
input_types={"m": matrix(shape=(4, 4), dtype=Float)},
value_func=value_func_mat_inv,
group="Vector Math",
doc="Return the inverse of a 4x4 matrix m",
)
def value_func_mat_det(args, kwds, _):
if args is None:
return Scalar
return args[0].type._wp_scalar_type_
add_builtin(
"determinant",
input_types={"m": matrix(shape=(2, 2), dtype=Float)},
value_func=value_func_mat_det,
group="Vector Math",
doc="Return the determinant of a 2x2 matrix m",
)
add_builtin(
"determinant",
input_types={"m": matrix(shape=(3, 3), dtype=Float)},
value_func=value_func_mat_det,
group="Vector Math",
doc="Return the determinant of a 3x3 matrix m",
)
add_builtin(
"determinant",
input_types={"m": matrix(shape=(4, 4), dtype=Float)},
value_func=value_func_mat_det,
group="Vector Math",
doc="Return the determinant of a 4x4 matrix m",
)
def value_func_mat_trace(args, kwds, _):
if args is None:
return Scalar
if args[0].type._shape_[0] != args[0].type._shape_[1]:
raise RuntimeError(f"Matrix shape is {args[0].type._shape_}. Cannot find the trace of non-square matrices")
return args[0].type._wp_scalar_type_
add_builtin(
"trace",
input_types={"m": matrix(shape=(Any, Any), dtype=Scalar)},
value_func=value_func_mat_trace,
group="Vector Math",
doc="Return the trace of the matrix m",
)
def value_func_diag(args, kwds, _):
if args is None:
return matrix(shape=(Any, Any), dtype=Scalar)
else:
return matrix(shape=(args[0].type._length_, args[0].type._length_), dtype=args[0].type._wp_scalar_type_)
add_builtin(
"diag",
input_types={"d": vector(length=Any, dtype=Scalar)},
value_func=value_func_diag,
group="Vector Math",
doc="Returns a matrix with the components of the vector d on the diagonal",
)
def value_func_get_diag(args, kwds, _):
if args is None:
return vector(length=(Any), dtype=Scalar)
else:
if args[0].type._shape_[0] != args[0].type._shape_[1]:
raise RuntimeError(
f"Matrix shape is {args[0].type._shape_}; get_diag is only available for square matrices."
)
return vector(length=args[0].type._shape_[0], dtype=args[0].type._wp_scalar_type_)
add_builtin(
"get_diag",
input_types={"m": matrix(shape=(Any, Any), dtype=Scalar)},
value_func=value_func_get_diag,
group="Vector Math",
doc="Returns a vector containing the diagonal elements of the square matrix.",
)
add_builtin(
"cw_mul",
input_types={"x": vector(length=Any, dtype=Scalar), "y": vector(length=Any, dtype=Scalar)},
value_func=sametype_value_func(vector(length=Any, dtype=Scalar)),
group="Vector Math",
doc="Component-wise multiplication of two vectors.",
)
add_builtin(
"cw_div",
input_types={"x": vector(length=Any, dtype=Scalar), "y": vector(length=Any, dtype=Scalar)},
value_func=sametype_value_func(vector(length=Any, dtype=Scalar)),
group="Vector Math",
doc="Component-wise division of two vectors.",
)
add_builtin(
"cw_mul",
input_types={"x": matrix(shape=(Any, Any), dtype=Scalar), "y": matrix(shape=(Any, Any), dtype=Scalar)},
value_func=sametype_value_func(matrix(shape=(Any, Any), dtype=Scalar)),
group="Vector Math",
doc="Component-wise multiplication of two matrices.",
)
add_builtin(
"cw_div",
input_types={"x": matrix(shape=(Any, Any), dtype=Scalar), "y": matrix(shape=(Any, Any), dtype=Scalar)},
value_func=sametype_value_func(matrix(shape=(Any, Any), dtype=Scalar)),
group="Vector Math",
doc="Component-wise division of two matrices.",
)
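# A minimal sketch (illustrative, not part of this module) exercising the
# vector builtins registered above:
#
#   import warp as wp
#
#   @wp.kernel
#   def vec_ops(a: wp.array(dtype=wp.vec3), b: wp.array(dtype=wp.vec3),
#               out: wp.array(dtype=float)):
#       tid = wp.tid()
#       x = a[tid]
#       y = b[tid]
#       n = wp.normalize(x)                # unit-length copy of x
#       c = wp.cross(x, y)                 # 3d cross product
#       out[tid] = wp.dot(n, y) + wp.length(c) + wp.length_sq(y)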
# scalar type constructors between all storage / compute types
scalar_types_all = [*scalar_types, int, float]
for t in scalar_types_all:
for u in scalar_types_all:
add_builtin(
t.__name__, input_types={"u": u}, value_type=t, doc="", hidden=True, group="Scalar Math", export=False
)
for u in [bool, builtins.bool]:
add_builtin(bool.__name__, input_types={"u": u}, value_type=bool, doc="", hidden=True, export=False, namespace="")
def vector_constructor_func(args, kwds, templates):
if args is None:
return vector(length=Any, dtype=Scalar)
if templates is None or len(templates) == 0:
# handle construction of anonymous (undeclared) vector types
if "length" in kwds:
if len(args) == 0:
if "dtype" not in kwds:
raise RuntimeError(
"vec() must have dtype as a keyword argument if it has no positional arguments, e.g.: wp.vector(length=5, dtype=wp.float32)"
)
# zero initialization e.g.: wp.vector(length=5, dtype=wp.float32)
veclen = kwds["length"]
vectype = kwds["dtype"]
elif len(args) == 1:
# value initialization e.g.: wp.vec(1.0, length=5)
veclen = kwds["length"]
vectype = args[0].type
if getattr(vectype, "_wp_generic_type_str_", None) == "vec_t":
# constructor from another vector
if vectype._length_ != veclen:
raise RuntimeError(
f"Incompatible vector lengths for casting copy constructor, {veclen} vs {vectype._length_}"
)
vectype = vectype._wp_scalar_type_
else:
raise RuntimeError(
"vec() must have one scalar argument or the dtype keyword argument if the length keyword argument is specified, e.g.: wp.vec(1.0, length=5)"
)
else:
if len(args) == 0:
raise RuntimeError(
"vec() must have at least one numeric argument if its length and dtype are not specified"
)
if "dtype" in kwds:
raise RuntimeError(
"vec() should not have dtype specified if numeric arguments are given, the dtype will be inferred from the argument types"
)
# component wise construction of an anonymous vector, e.g. wp.vec(wp.float16(1.0), wp.float16(2.0), ....)
# we infer the length and data type from the number and type of the arg values
veclen = len(args)
vectype = args[0].type
if len(args) == 1 and getattr(vectype, "_wp_generic_type_str_", None) == "vec_t":
# constructor from another vector
veclen = vectype._length_
vectype = vectype._wp_scalar_type_
elif not all(vectype == a.type for a in args):
raise RuntimeError(
f"All numeric arguments to vec() constructor should have the same type, expected {veclen} args of type {vectype}, received { ','.join(map(lambda x : str(x.type), args)) }"
)
# update the templates list, so we can generate vec<len, type>() correctly in codegen
templates.append(veclen)
templates.append(vectype)
else:
# construction of a predeclared type, e.g.: vec5d
veclen, vectype = templates
if len(args) == 1 and getattr(args[0].type, "_wp_generic_type_str_", None) == "vec_t":
# constructor from another vector
if args[0].type._length_ != veclen:
raise RuntimeError(
f"Incompatible vector lengths for casting copy constructor, {veclen} vs {args[0].type._length_}"
)
elif not all(vectype == a.type for a in args):
raise RuntimeError(
f"All numeric arguments to vec() constructor should have the same type, expected {veclen} args of type {vectype}, received { ','.join(map(lambda x : str(x.type), args)) }"
)
retvalue = vector(length=veclen, dtype=vectype)
return retvalue
add_builtin(
"vector",
input_types={"*args": Scalar, "length": int, "dtype": Scalar},
variadic=True,
initializer_list_func=lambda args, _: len(args) > 4,
value_func=vector_constructor_func,
native_func="vec_t",
doc="Construct a vector with given length and dtype.",
group="Vector Math",
export=False,
)
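# A minimal sketch (illustrative) of the construction forms accepted by the
# value function above, used inside a kernel:
#
#   import warp as wp
#
#   @wp.kernel
#   def make_vectors(out: wp.array(dtype=float)):
#       tid = wp.tid()
#       v = wp.vector(length=5, dtype=wp.float32)  # zero-initialized vec5
#       u = wp.vector(1.0, length=5)               # value-initialized vec5
#       w = wp.vector(1.0, 2.0, 3.0)               # length/dtype inferred (vec3f)
#       out[tid] = v[0] + u[4] + w[2]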
def matrix_constructor_func(args, kwds, templates):
if args is None:
return matrix(shape=(Any, Any), dtype=Scalar)
if len(templates) == 0:
# anonymous construction
if "shape" not in kwds:
raise RuntimeError("shape keyword must be specified when calling matrix() function")
if len(args) == 0:
if "dtype" not in kwds:
raise RuntimeError("matrix() must have dtype as a keyword argument if it has no positional arguments")
# zero initialization, e.g.: m = matrix(shape=(3,2), dtype=wp.float16)
shape = kwds["shape"]
dtype = kwds["dtype"]
else:
# value initialization, e.g.: m = matrix(1.0, shape=(3,2))
shape = kwds["shape"]
dtype = args[0].type
if len(args) == 1 and getattr(dtype, "_wp_generic_type_str_", None) == "mat_t":
# constructor from another matrix
# note: `dtype` currently holds the source matrix type here
if dtype._shape_ != shape:
raise RuntimeError(
f"Incompatible matrix sizes for casting copy constructor, {shape} vs {dtype._shape_}"
)
dtype = dtype._wp_scalar_type_
elif len(args) > 1 and len(args) != shape[0] * shape[1]:
raise RuntimeError(
"Wrong number of arguments for matrix() function, must initialize with either a scalar value, or m*n values"
)
templates.append(shape[0])
templates.append(shape[1])
templates.append(dtype)
else:
# predeclared type, e.g.: mat32d
shape = (templates[0], templates[1])
dtype = templates[2]
if len(args) > 0:
types = [a.type for a in args]
if len(args) == 1 and getattr(types[0], "_wp_generic_type_str_", None) == "mat_t":
# constructor from another matrix with same dimension but possibly different type
if types[0]._shape_ != shape:
raise RuntimeError(
f"Incompatible matrix sizes for casting copy constructor, {shape} vs {types[0]._shape_}"
)
else:
# check scalar arg type matches declared type
if infer_scalar_type(args) != dtype:
raise RuntimeError("Wrong scalar type for mat {} constructor".format(",".join(map(str, templates))))
# check vector arg type matches declared type
if all(hasattr(a, "_wp_generic_type_str_") and a._wp_generic_type_str_ == "vec_t" for a in types):
cols = len(types)
if shape[1] != cols:
raise RuntimeError(
"Wrong number of vectors when attempting to construct a matrix with column vectors"
)
if not all(a._length_ == shape[0] for a in types):
raise RuntimeError(
"Wrong vector row count when attempting to construct a matrix with column vectors"
)
else:
# check that we either got 1 arg (scalar construction), or enough values for whole matrix
size = shape[0] * shape[1]
if len(args) > 1 and len(args) != size:
raise RuntimeError(
"Wrong number of scalars when attempting to construct a matrix from a list of components"
)
return matrix(shape=shape, dtype=dtype)
# only use initializer list if matrix size < 5x5, or for scalar construction
def matrix_initlist_func(args, templates):
m, n, dtype = templates
if (
len(args) == 0  # zero construction
or len(args) == 1  # scalar construction
or (m == n and n < 5)  # value construction for small matrices
):
return False
else:
return True
add_builtin(
"matrix",
input_types={"*args": Scalar, "shape": Tuple[int, int], "dtype": Scalar},
variadic=True,
initializer_list_func=matrix_initlist_func,
value_func=matrix_constructor_func,
native_func="mat_t",
doc="Construct a matrix; if positional args are not given then the matrix will be zero-initialized.",
group="Vector Math",
export=False,
)
# identity:
def matrix_identity_value_func(args, kwds, templates):
if args is None:
return matrix(shape=(Any, Any), dtype=Scalar)
if len(args):
raise RuntimeError("identity() function does not accept positional arguments")
if "n" not in kwds:
raise RuntimeError("'n' keyword argument must be specified when calling identity() function")
if "dtype" not in kwds:
raise RuntimeError("'dtype' keyword argument must be specified when calling identity() function")
n, dtype = [kwds["n"], kwds["dtype"]]
if n is None:
raise RuntimeError("'n' must be a constant when calling identity() function")
templates.append(n)
templates.append(dtype)
return matrix(shape=(n, n), dtype=dtype)
add_builtin(
"identity",
input_types={"n": int, "dtype": Scalar},
value_func=matrix_identity_value_func,
variadic=True,
doc="Create an identity matrix with shape=(n,n) with the type given by ``dtype``.",
group="Vector Math",
export=False,
)
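# A minimal sketch (illustrative) of the matrix construction paths above:
#
#   import warp as wp
#
#   @wp.kernel
#   def make_matrices(out: wp.array(dtype=float)):
#       tid = wp.tid()
#       m = wp.matrix(shape=(3, 2), dtype=wp.float32)  # zero-initialized 3x2
#       i = wp.identity(n=3, dtype=wp.float32)         # 3x3 identity
#       out[tid] = wp.trace(i) + m[0, 0]               # 3.0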
def matrix_transform_value_func(args, kwds, templates):
if templates is None:
return matrix(shape=(Any, Any), dtype=Float)
if len(templates) == 0:
raise RuntimeError("Cannot use a generic type name in a kernel")
m, n, dtype = templates
if (m, n) != (4, 4):
raise RuntimeError("Can only construct 4x4 matrices with position, rotation and scale")
if infer_scalar_type(args) != dtype:
raise RuntimeError("Wrong scalar type for mat<{}> constructor".format(",".join(map(str, templates))))
return matrix(shape=(4, 4), dtype=dtype)
add_builtin(
"matrix",
input_types={
"pos": vector(length=3, dtype=Float),
"rot": quaternion(dtype=Float),
"scale": vector(length=3, dtype=Float),
},
value_func=matrix_transform_value_func,
native_func="mat_t",
doc="""Construct a 4x4 transformation matrix that applies the transformations as Translation(pos)*Rotation(rot)*Scale(scale) when applied to column vectors, i.e.: y = (TRS)*x""",
group="Vector Math",
export=False,
)
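# A minimal sketch (illustrative; uses the predeclared wp.mat44 type, since a
# generic type name cannot be used inside a kernel):
#
#   import warp as wp
#
#   @wp.kernel
#   def trs(points: wp.array(dtype=wp.vec3), out: wp.array(dtype=wp.vec3)):
#       tid = wp.tid()
#       m = wp.mat44(wp.vec3(1.0, 2.0, 3.0),   # translation
#                    wp.quat_identity(),       # rotation
#                    wp.vec3(2.0, 2.0, 2.0))   # scale
#       out[tid] = wp.transform_point(m, points[tid])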
# not making these functions available outside kernels (export=False) as they
# return data via references, which we don't currently support:
add_builtin(
"svd3",
input_types={
"A": matrix(shape=(3, 3), dtype=Float),
"U": matrix(shape=(3, 3), dtype=Float),
"sigma": vector(length=3, dtype=Float),
"V": matrix(shape=(3, 3), dtype=Scalar),
},
value_type=None,
group="Vector Math",
export=False,
doc="""Compute the SVD of a 3x3 matrix. The singular values are returned in sigma,
while the left and right basis vectors are returned in U and V.""",
)
add_builtin(
"qr3",
input_types={
"A": matrix(shape=(3, 3), dtype=Float),
"Q": matrix(shape=(3, 3), dtype=Float),
"R": matrix(shape=(3, 3), dtype=Float),
},
value_type=None,
group="Vector Math",
export=False,
doc="""Compute the QR decomposition of a 3x3 matrix. The orthogonal matrix is returned in Q, while the upper triangular matrix is returned in R.""",
)
add_builtin(
"eig3",
input_types={
"A": matrix(shape=(3, 3), dtype=Float),
"Q": matrix(shape=(3, 3), dtype=Float),
"d": vector(length=3, dtype=Float),
},
value_type=None,
group="Vector Math",
export=False,
doc="""Compute the eigendecomposition of a 3x3 matrix. The eigenvectors are returned as the columns of Q, while the corresponding eigenvalues are returned in d.""",
)
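# A minimal sketch (illustrative): because these functions return data through
# reference arguments, outputs are declared as locals and passed in:
#
#   import warp as wp
#
#   @wp.kernel
#   def decompose(A: wp.array(dtype=wp.mat33), out: wp.array(dtype=wp.vec3)):
#       tid = wp.tid()
#       U = wp.mat33()
#       sigma = wp.vec3()
#       V = wp.mat33()
#       wp.svd3(A[tid], U, sigma, V)
#       out[tid] = sigma  # singular values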
# ---------------------------------
# Quaternion Math
def quaternion_value_func(args, kwds, templates):
if args is None:
return quaternion(dtype=Scalar)
# if constructing anonymous quat type then infer output type from arguments
if len(templates) == 0:
dtype = infer_scalar_type(args)
templates.append(dtype)
else:
# if constructing predeclared type then check args match expectation
if len(args) > 0 and infer_scalar_type(args) != templates[0]:
raise RuntimeError("Wrong scalar type for quat {} constructor".format(",".join(map(str, templates))))
return quaternion(dtype=templates[0])
add_builtin(
"quaternion",
input_types={},
value_func=quaternion_value_func,
native_func="quat_t",
group="Quaternion Math",
doc="""Construct a zero-initialized quaternion; quaternions are laid out as
[ix, iy, iz, r], where ix, iy, iz are the imaginary part, and r the real part.""",
export=False,
)
add_builtin(
"quaternion",
input_types={"x": Float, "y": Float, "z": Float, "w": Float},
value_func=quaternion_value_func,
native_func="quat_t",
group="Quaternion Math",
doc="Create a quaternion using the supplied components (type inferred from component type)",
export=False,
)
add_builtin(
"quaternion",
input_types={"i": vector(length=3, dtype=Float), "r": Float},
value_func=quaternion_value_func,
native_func="quat_t",
group="Quaternion Math",
doc="Create a quaternion using the supplied vector/scalar (type inferred from scalar type)",
export=False,
)
def quat_identity_value_func(args, kwds, templates):
# if args is None then we are in 'export' mode
if args is None:
return quatf
if "dtype" not in kwds:
# defaulting to float32 to preserve current behavior:
dtype = float32
else:
dtype = kwds["dtype"]
templates.append(dtype)
return quaternion(dtype=dtype)
add_builtin(
"quat_identity",
input_types={},
value_func=quat_identity_value_func,
group="Quaternion Math",
doc="Construct an identity quaternion with zero imaginary part and real part of 1.0",
export=True,
)
add_builtin(
"quat_from_axis_angle",
input_types={"axis": vector(length=3, dtype=Float), "angle": Float},
value_func=lambda args, kwds, _: quaternion(dtype=infer_scalar_type(args)),
group="Quaternion Math",
doc="Construct a quaternion representing a rotation of angle radians around the given axis.",
)
add_builtin(
"quat_to_axis_angle",
input_types={"q": quaternion(dtype=Float), "axis": vector(length=3, dtype=Float), "angle": Float},
value_type=None,
group="Quaternion Math",
doc="Extract the rotation axis and angle in radians that a quaternion represents.",
)
add_builtin(
"quat_from_matrix",
input_types={"m": matrix(shape=(3, 3), dtype=Float)},
value_func=lambda args, kwds, _: quaternion(dtype=infer_scalar_type(args)),
group="Quaternion Math",
doc="Construct a quaternion from a 3x3 matrix.",
)
add_builtin(
"quat_rpy",
input_types={"roll": Float, "pitch": Float, "yaw": Float},
value_func=lambda args, kwds, _: quaternion(dtype=infer_scalar_type(args)),
group="Quaternion Math",
doc="Construct a quaternion representing combined roll (z), pitch (x), and yaw (y) rotations in radians.",
)
add_builtin(
"quat_inverse",
input_types={"q": quaternion(dtype=Float)},
value_func=lambda args, kwds, _: quaternion(dtype=infer_scalar_type(args)),
group="Quaternion Math",
doc="Compute the quaternion conjugate (the inverse for unit quaternions).",
)
add_builtin(
"quat_rotate",
input_types={"q": quaternion(dtype=Float), "p": vector(length=3, dtype=Float)},
value_func=lambda args, kwds, _: vector(length=3, dtype=infer_scalar_type(args)),
group="Quaternion Math",
doc="Rotate a vector by a quaternion.",
)
add_builtin(
"quat_rotate_inv",
input_types={"q": quaternion(dtype=Float), "p": vector(length=3, dtype=Float)},
value_func=lambda args, kwds, _: vector(length=3, dtype=infer_scalar_type(args)),
group="Quaternion Math",
doc="Rotate a vector by the inverse of a quaternion.",
)
add_builtin(
"quat_slerp",
input_types={"q0": quaternion(dtype=Float), "q1": quaternion(dtype=Float), "t": Float},
value_func=lambda args, kwds, _: quaternion(dtype=infer_scalar_type(args)),
group="Quaternion Math",
doc="Spherically interpolate between two quaternions.",
)
add_builtin(
"quat_to_matrix",
input_types={"q": quaternion(dtype=Float)},
value_func=lambda args, kwds, _: matrix(shape=(3, 3), dtype=infer_scalar_type(args)),
group="Quaternion Math",
doc="Convert a quaternion to a 3x3 rotation matrix.",
)
add_builtin(
"dot",
input_types={"x": quaternion(dtype=Float), "y": quaternion(dtype=Float)},
value_func=sametype_scalar_value_func,
group="Quaternion Math",
doc="Compute the dot product between two quaternions.",
)
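# A minimal sketch (illustrative) of the quaternion builtins above:
#
#   import warp as wp
#
#   @wp.kernel
#   def rotate(points: wp.array(dtype=wp.vec3), out: wp.array(dtype=wp.vec3)):
#       tid = wp.tid()
#       q = wp.quat_from_axis_angle(wp.vec3(0.0, 1.0, 0.0), 1.5708)  # ~90 deg about y
#       out[tid] = wp.quat_rotate(q, points[tid])
#       # wp.quat_rotate_inv(q, p) applies the opposite rotation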
# ---------------------------------
# Transformations
def transform_constructor_value_func(args, kwds, templates):
if templates is None:
return transformation(dtype=Scalar)
if len(templates) == 0:
# if constructing anonymous transform type then infer output type from arguments
dtype = infer_scalar_type(args)
templates.append(dtype)
else:
# if constructing predeclared type then check args match expectation
if infer_scalar_type(args) != templates[0]:
raise RuntimeError(
f"Wrong scalar type for transform constructor expected {templates[0]}, got {','.join(map(lambda x : str(x.type), args))}"
)
return transformation(dtype=templates[0])
add_builtin(
"transformation",
input_types={"p": vector(length=3, dtype=Float), "q": quaternion(dtype=Float)},
value_func=transform_constructor_value_func,
native_func="transform_t",
group="Transformations",
doc="Construct a rigid body transformation with translation part p and rotation q.",
export=False,
)
def transform_identity_value_func(args, kwds, templates):
if args is None:
return transformf
if "dtype" not in kwds:
# defaulting to float32 to preserve current behavior:
dtype = float32
else:
dtype = kwds["dtype"]
templates.append(dtype)
return transformation(dtype=dtype)
add_builtin(
"transform_identity",
input_types={},
value_func=transform_identity_value_func,
group="Transformations",
doc="Construct an identity transform with zero translation and identity rotation.",
export=True,
)
add_builtin(
"transform_get_translation",
input_types={"t": transformation(dtype=Float)},
value_func=lambda args, kwds, _: vector(length=3, dtype=infer_scalar_type(args)),
group="Transformations",
doc="Return the translational part of a transform.",
)
add_builtin(
"transform_get_rotation",
input_types={"t": transformation(dtype=Float)},
value_func=lambda args, kwds, _: quaternion(dtype=infer_scalar_type(args)),
group="Transformations",
doc="Return the rotational part of a transform.",
)
add_builtin(
"transform_multiply",
input_types={"a": transformation(dtype=Float), "b": transformation(dtype=Float)},
value_func=lambda args, kwds, _: transformation(dtype=infer_scalar_type(args)),
group="Transformations",
doc="Multiply two rigid body transformations together.",
)
add_builtin(
"transform_point",
input_types={"t": transformation(dtype=Scalar), "p": vector(length=3, dtype=Scalar)},
value_func=lambda args, kwds, _: vector(length=3, dtype=infer_scalar_type(args)),
group="Transformations",
doc="Apply the transform to a point p treating the homogeneous coordinate as w=1 (translation and rotation).",
)
add_builtin(
"transform_point",
input_types={"m": matrix(shape=(4, 4), dtype=Scalar), "p": vector(length=3, dtype=Scalar)},
value_func=lambda args, kwds, _: vector(length=3, dtype=infer_scalar_type(args)),
group="Vector Math",
doc="""Apply the transform to a point ``p`` treating the homogeneous coordinate as w=1. The transformation is applied treating ``p`` as a column vector, e.g.: ``y = M*p``.
Note this is in contrast to some libraries, notably USD, which apply transforms to row vectors, ``y^T = p^T*M^T``. If the transform is coming from a library that uses row vectors,
then users should transpose the transformation matrix before calling this method.""",
)
add_builtin(
"transform_vector",
input_types={"t": transformation(dtype=Scalar), "v": vector(length=3, dtype=Scalar)},
value_func=lambda args, kwds, _: vector(length=3, dtype=infer_scalar_type(args)),
group="Transformations",
doc="Apply the transform to a vector v treating the homogeneous coordinate as w=0 (rotation only).",
)
add_builtin(
"transform_vector",
input_types={"m": matrix(shape=(4, 4), dtype=Scalar), "v": vector(length=3, dtype=Scalar)},
value_func=lambda args, kwds, _: vector(length=3, dtype=infer_scalar_type(args)),
group="Vector Math",
doc="""Apply the transform to a vector ``v`` treating the homogeneous coordinate as w=0. The transformation is applied treating ``v`` as a column vector, e.g.: ``y = M*v``.
Note this is in contrast to some libraries, notably USD, which apply transforms to row vectors, ``y^T = v^T*M^T``. If the transform is coming from a library that uses row vectors,
then users should transpose the transformation matrix before calling this method.""",
)
add_builtin(
"transform_inverse",
input_types={"t": transformation(dtype=Float)},
value_func=sametype_value_func(transformation(dtype=Float)),
group="Transformations",
doc="Compute the inverse of the transform.",
)
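# A minimal sketch (illustrative) composing and applying rigid transforms:
#
#   import warp as wp
#
#   @wp.kernel
#   def apply(points: wp.array(dtype=wp.vec3), out: wp.array(dtype=wp.vec3)):
#       tid = wp.tid()
#       t = wp.transform(wp.vec3(1.0, 0.0, 0.0), wp.quat_identity())
#       t2 = wp.transform_multiply(t, wp.transform_inverse(t))  # identity
#       out[tid] = wp.transform_point(t2, points[tid])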
# ---------------------------------
# Spatial Math
def spatial_vector_constructor_value_func(args, kwds, templates):
if templates is None:
return spatial_vector(dtype=Float)
if len(templates) == 0:
raise RuntimeError("Cannot use a generic type name in a kernel")
vectype = templates[1]
if len(args) and infer_scalar_type(args) != vectype:
raise RuntimeError("Wrong scalar type for spatial_vector<{}> constructor".format(",".join(map(str, templates))))
return vector(length=6, dtype=vectype)
add_builtin(
"vector",
input_types={"w": vector(length=3, dtype=Float), "v": vector(length=3, dtype=Float)},
value_func=spatial_vector_constructor_value_func,
native_func="vec_t",
group="Spatial Math",
doc="Construct a 6d screw vector from two 3d vectors.",
export=False,
)
add_builtin(
"spatial_adjoint",
input_types={"r": matrix(shape=(3, 3), dtype=Float), "s": matrix(shape=(3, 3), dtype=Float)},
value_func=lambda args, kwds, _: matrix(shape=(6, 6), dtype=infer_scalar_type(args)),
group="Spatial Math",
doc="Construct a 6x6 spatial inertial matrix from two 3x3 diagonal blocks.",
export=False,
)
add_builtin(
"spatial_dot",
input_types={"a": vector(length=6, dtype=Float), "b": vector(length=6, dtype=Float)},
value_func=sametype_scalar_value_func,
group="Spatial Math",
doc="Compute the dot product of two 6d screw vectors.",
)
add_builtin(
"spatial_cross",
input_types={"a": vector(length=6, dtype=Float), "b": vector(length=6, dtype=Float)},
value_func=sametype_value_func(vector(length=6, dtype=Float)),
group="Spatial Math",
doc="Compute the cross-product of two 6d screw vectors.",
)
add_builtin(
"spatial_cross_dual",
input_types={"a": vector(length=6, dtype=Float), "b": vector(length=6, dtype=Float)},
value_func=sametype_value_func(vector(length=6, dtype=Float)),
group="Spatial Math",
doc="Compute the dual cross-product of two 6d screw vectors.",
)
add_builtin(
"spatial_top",
input_types={"a": vector(length=6, dtype=Float)},
value_func=lambda args, kwds, _: vector(length=3, dtype=args[0].type._wp_scalar_type_),
group="Spatial Math",
doc="Return the top (first) part of a 6d screw vector.",
)
add_builtin(
"spatial_bottom",
input_types={"a": vector(length=6, dtype=Float)},
value_func=lambda args, kwds, _: vector(length=3, dtype=args[0].type._wp_scalar_type_),
group="Spatial Math",
doc="Return the bottom (second) part of a 6d screw vector.",
)
add_builtin(
"spatial_jacobian",
input_types={
"S": array(dtype=vector(length=6, dtype=Float)),
"joint_parents": array(dtype=int),
"joint_qd_start": array(dtype=int),
"joint_start": int,
"joint_count": int,
"J_start": int,
"J_out": array(dtype=Float),
},
value_type=None,
doc="",
group="Spatial Math",
)
add_builtin(
"spatial_mass",
input_types={
"I_s": array(dtype=matrix(shape=(6, 6), dtype=Float)),
"joint_start": int,
"joint_count": int,
"M_start": int,
"M": array(dtype=Float),
},
value_type=None,
doc="",
group="Spatial Math",
)
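# A minimal sketch (illustrative) of the screw-vector helpers above:
#
#   import warp as wp
#
#   @wp.kernel
#   def screw(out: wp.array(dtype=float)):
#       tid = wp.tid()
#       a = wp.spatial_vector(wp.vec3(0.0, 0.0, 1.0),  # angular (top) part
#                             wp.vec3(1.0, 0.0, 0.0))  # linear (bottom) part
#       w = wp.spatial_top(a)
#       v = wp.spatial_bottom(a)
#       out[tid] = wp.spatial_dot(a, a) + w[2] + v[0]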
# ---------------------------------
# Linear Algebra
add_builtin(
"dense_gemm",
input_types={
"m": int,
"n": int,
"p": int,
"t1": int,
"t2": int,
"A": array(dtype=float),
"B": array(dtype=float),
"C": array(dtype=float),
},
value_type=None,
doc="",
group="Utility",
hidden=True,
)
add_builtin(
"dense_gemm_batched",
input_types={
"m": array(dtype=int),
"n": array(dtype=int),
"p": array(dtype=int),
"t1": int,
"t2": int,
"A_start": array(dtype=int),
"B_start": array(dtype=int),
"C_start": array(dtype=int),
"A": array(dtype=float),
"B": array(dtype=float),
"C": array(dtype=float),
},
value_type=None,
doc="",
group="Utility",
hidden=True,
)
add_builtin(
"dense_chol",
input_types={"n": int, "A": array(dtype=float), "regularization": float, "L": array(dtype=float)},
value_type=None,
doc="WIP",
group="Utility",
hidden=True,
)
add_builtin(
"dense_chol_batched",
input_types={
"A_start": array(dtype=int),
"A_dim": array(dtype=int),
"A": array(dtype=float),
"regularization": float,
"L": array(dtype=float),
},
value_type=None,
doc="WIP",
group="Utility",
hidden=True,
)
add_builtin(
"dense_subs",
input_types={"n": int, "L": array(dtype=float), "b": array(dtype=float), "x": array(dtype=float)},
value_type=None,
doc="WIP",
group="Utility",
hidden=True,
)
add_builtin(
"dense_solve",
input_types={
"n": int,
"A": array(dtype=float),
"L": array(dtype=float),
"b": array(dtype=float),
"x": array(dtype=float),
},
value_type=None,
doc="WIP",
group="Utility",
hidden=True,
)
add_builtin(
"dense_solve_batched",
input_types={
"b_start": array(dtype=int),
"A_start": array(dtype=int),
"A_dim": array(dtype=int),
"A": array(dtype=float),
"L": array(dtype=float),
"b": array(dtype=float),
"x": array(dtype=float),
},
value_type=None,
doc="WIP",
group="Utility",
hidden=True,
)
add_builtin(
"mlp",
input_types={
"weights": array(dtype=float, ndim=2),
"bias": array(dtype=float, ndim=1),
"activation": Callable,
"index": int,
"x": array(dtype=float, ndim=2),
"out": array(dtype=float, ndim=2),
},
value_type=None,
skip_replay=True,
doc="""Evaluate a multi-layer perceptron (MLP) layer in the form: ``out = act(weights*x + bias)``.
:param weights: A layer's network weights with dimensions ``(m, n)``.
:param bias: An array with dimensions ``(m)``, added to each output row before the activation.
:param activation: A ``wp.func`` function that takes a single scalar float as input and returns a scalar float as output
:param index: The batch item to process, typically each thread will process 1 item in the batch, in this case index should be ``wp.tid()``
:param x: The feature matrix with dimensions ``(n, b)``
:param out: The network output with dimensions ``(m, b)``
:note: Feature and output matrices are transposed compared to some other frameworks such as PyTorch. All matrices are assumed to be stored in flattened row-major memory layout (NumPy default).""",
group="Utility",
)
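# A minimal usage sketch (illustrative; `relu` is a user-supplied activation):
#
#   import warp as wp
#
#   @wp.func
#   def relu(x: float):
#       return wp.max(x, 0.0)
#
#   @wp.kernel
#   def mlp_layer(weights: wp.array(dtype=float, ndim=2),
#                 bias: wp.array(dtype=float),
#                 x: wp.array(dtype=float, ndim=2),
#                 out: wp.array(dtype=float, ndim=2)):
#       i = wp.tid()  # one thread per batch item
#       wp.mlp(weights, bias, relu, i, x, out)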
# ---------------------------------
# Geometry
add_builtin(
"bvh_query_aabb",
input_types={"id": uint64, "lower": vec3, "upper": vec3},
value_type=bvh_query_t,
group="Geometry",
doc="""Construct an axis-aligned bounding box query against a bvh object. This query can be used to iterate over all bounds
inside a bvh. Returns an object that is used to track state during bvh traversal.
:param id: The bvh identifier
:param lower: The lower bound of the bounding box in bvh space
:param upper: The upper bound of the bounding box in bvh space""",
)
add_builtin(
"bvh_query_ray",
input_types={"id": uint64, "start": vec3, "dir": vec3},
value_type=bvh_query_t,
group="Geometry",
doc="""Construct a ray query against a bvh object. This query can be used to iterate over all bounds
that intersect the ray. Returns an object that is used to track state during bvh traversal.
:param id: The bvh identifier
:param start: The start of the ray in bvh space
:param dir: The direction of the ray in bvh space""",
)
add_builtin(
"bvh_query_next",
input_types={"query": bvh_query_t, "index": int},
value_type=builtins.bool,
group="Geometry",
doc="""Move to the next bound returned by the query. The index of the current bound is stored in ``index``, returns ``False``
if there are no more overlapping bounds.""",
)
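# A minimal sketch (illustrative) of the query/next iteration pattern:
#
#   import warp as wp
#
#   @wp.kernel
#   def count_overlaps(bvh: wp.uint64, lower: wp.vec3, upper: wp.vec3,
#                      count: wp.array(dtype=int)):
#       query = wp.bvh_query_aabb(bvh, lower, upper)
#       index = int(0)
#       n = int(0)
#       while wp.bvh_query_next(query, index):
#           n += 1  # `index` holds the id of the current overlapping bound
#       count[0] = n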
add_builtin(
"mesh_query_point",
input_types={
"id": uint64,
"point": vec3,
"max_dist": float,
"inside": float,
"face": int,
"bary_u": float,
"bary_v": float,
},
value_type=builtins.bool,
group="Geometry",
doc="""Computes the closest point on the mesh with identifier `id` to the given point in space. Returns ``True`` if a point < ``max_dist`` is found.
Identifies the sign of the distance using additional ray-casts to determine if the point is inside or outside. This method is relatively robust, but
does increase computational cost. See below for additional sign determination methods.
:param id: The mesh identifier
:param point: The point in space to query
:param max_dist: Mesh faces above this distance will not be considered by the query
:param inside: Returns a value < 0 if query point is inside the mesh, >=0 otherwise. Note that mesh must be watertight for this to be robust
:param face: Returns the index of the closest face
:param bary_u: Returns the barycentric u coordinate of the closest point
:param bary_v: Returns the barycentric v coordinate of the closest point""",
)
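# A minimal sketch (illustrative): the query outputs are locals passed by
# reference, then used to evaluate the closest point on the surface:
#
#   import warp as wp
#
#   @wp.kernel
#   def project(mesh: wp.uint64, points: wp.array(dtype=wp.vec3),
#               out: wp.array(dtype=wp.vec3)):
#       tid = wp.tid()
#       sign = float(0.0)
#       face = int(0)
#       bary_u = float(0.0)
#       bary_v = float(0.0)
#       if wp.mesh_query_point(mesh, points[tid], 1.0e6, sign, face, bary_u, bary_v):
#           out[tid] = wp.mesh_eval_position(mesh, face, bary_u, bary_v)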
add_builtin(
"mesh_query_point_no_sign",
input_types={
"id": uint64,
"point": vec3,
"max_dist": float,
"face": int,
"bary_u": float,
"bary_v": float,
},
value_type=builtins.bool,
group="Geometry",
doc="""Computes the closest point on the mesh with identifier `id` to the given point in space. Returns ``True`` if a point < ``max_dist`` is found.
This method does not compute the sign of the point (inside/outside) which makes it faster than other point query methods.
:param id: The mesh identifier
:param point: The point in space to query
:param max_dist: Mesh faces above this distance will not be considered by the query
:param face: Returns the index of the closest face
:param bary_u: Returns the barycentric u coordinate of the closest point
:param bary_v: Returns the barycentric v coordinate of the closest point""",
)
add_builtin(
"mesh_query_point_sign_normal",
input_types={
"id": uint64,
"point": vec3,
"max_dist": float,
"inside": float,
"face": int,
"bary_u": float,
"bary_v": float,
"epsilon": float,
},
defaults={"epsilon": 1.0e-3},
value_type=builtins.bool,
group="Geometry",
doc="""Computes the closest point on the mesh with identifier `id` to the given point in space. Returns ``True`` if a point < ``max_dist`` is found.
Identifies the sign of the distance (inside/outside) using the angle-weighted pseudo normal. This approach to sign determination is robust for well conditioned meshes
that are watertight and non-self intersecting, it is also comparatively fast to compute.
:param id: The mesh identifier
:param point: The point in space to query
:param max_dist: Mesh faces above this distance will not be considered by the query
:param inside: Returns a value < 0 if query point is inside the mesh, >=0 otherwise. Note that mesh must be watertight for this to be robust
:param face: Returns the index of the closest face
:param bary_u: Returns the barycentric u coordinate of the closest point
:param bary_v: Returns the barycentric v coordinate of the closest point
:param epsilon: Epsilon treating distance values as equal, when locating the minimum distance vertex/face/edge, as a fraction of the average edge length, also for treating closest point as being on edge/vertex default 1e-3""",
)
add_builtin(
"mesh_query_point_sign_winding_number",
input_types={
"id": uint64,
"point": vec3,
"max_dist": float,
"inside": float,
"face": int,
"bary_u": float,
"bary_v": float,
"accuracy": float,
"threshold": float,
},
defaults={"accuracy": 2.0, "threshold": 0.5},
value_type=builtins.bool,
group="Geometry",
doc="""Computes the closest point on the mesh with identifier `id` to the given point in space. Returns ``True`` if a point < ``max_dist`` is found.
Identifies the sign using the winding number of the mesh relative to the query point. This method of sign determination is robust for poorly conditioned meshes
and provides a smooth approximation to sign even when the mesh is not watertight. This method is the most robust and accurate of the sign determination methods
but also the most expensive.
Note that the Mesh object must be constructed with ``support_winding_number=True`` for this method to return correct results.
:param id: The mesh identifier
:param point: The point in space to query
:param max_dist: Mesh faces above this distance will not be considered by the query
:param inside: Returns a value < 0 if query point is inside the mesh, >=0 otherwise. Note that mesh must be watertight for this to be robust
:param face: Returns the index of the closest face
:param bary_u: Returns the barycentric u coordinate of the closest point
:param bary_v: Returns the barycentric v coordinate of the closest point
:param accuracy: Accuracy for computing the winding number with fast winding number method utilizing second order dipole approximation, default 2.0
:param threshold: The threshold of the winding number to be considered inside, default 0.5""",
)
add_builtin(
"mesh_query_ray",
input_types={
"id": uint64,
"start": vec3,
"dir": vec3,
"max_t": float,
"t": float,
"bary_u": float,
"bary_v": float,
"sign": float,
"normal": vec3,
"face": int,
},
value_type=builtins.bool,
group="Geometry",
doc="""Computes the closest ray hit on the mesh with identifier `id`, returns ``True`` if a hit closer than ``max_t`` is found.
:param id: The mesh identifier
:param start: The start point of the ray
:param dir: The ray direction (should be normalized)
:param max_t: The maximum distance along the ray to check for intersections
:param t: Returns the distance of the closest hit along the ray
:param bary_u: Returns the barycentric u coordinate of the closest hit
:param bary_v: Returns the barycentric v coordinate of the closest hit
:param sign: Returns a value > 0 if the ray hit the front of the face, returns < 0 otherwise
:param normal: Returns the face normal
:param face: Returns the index of the hit face""",
)
add_builtin(
"mesh_query_aabb",
input_types={"id": uint64, "lower": vec3, "upper": vec3},
value_type=mesh_query_aabb_t,
group="Geometry",
doc="""Construct an axis-aligned bounding box query against a mesh object. This query can be used to iterate over all triangles
inside a volume. Returns an object that is used to track state during mesh traversal.
:param id: The mesh identifier
:param lower: The lower bound of the bounding box in mesh space
:param upper: The upper bound of the bounding box in mesh space""",
)
add_builtin(
"mesh_query_aabb_next",
input_types={"query": mesh_query_aabb_t, "index": int},
value_type=builtins.bool,
group="Geometry",
doc="""Move to the next triangle overlapping the query bounding box. The index of the current face is stored in ``index``, returns ``False``
if there are no more overlapping triangles.""",
)
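# A minimal sketch (illustrative) iterating triangles that overlap a box:
#
#   import warp as wp
#
#   @wp.kernel
#   def tris_in_box(mesh: wp.uint64, lower: wp.vec3, upper: wp.vec3,
#                   count: wp.array(dtype=int)):
#       query = wp.mesh_query_aabb(mesh, lower, upper)
#       face = int(0)
#       n = int(0)
#       while wp.mesh_query_aabb_next(query, face):
#           n += 1  # `face` is the index of the current overlapping triangle
#       count[0] = n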
add_builtin(
"mesh_eval_position",
input_types={"id": uint64, "face": int, "bary_u": float, "bary_v": float},
value_type=vec3,
group="Geometry",
doc="""Evaluates the position on the mesh given a face index, and barycentric coordinates.""",
)
add_builtin(
"mesh_eval_velocity",
input_types={"id": uint64, "face": int, "bary_u": float, "bary_v": float},
value_type=vec3,
group="Geometry",
doc="""Evaluates the velocity on the mesh given a face index, and barycentric coordinates.""",
)
add_builtin(
"hash_grid_query",
input_types={"id": uint64, "point": vec3, "max_dist": float},
value_type=hash_grid_query_t,
group="Geometry",
doc="""Construct a point query against a hash grid. This query can be used to iterate over all neighboring points within a
fixed radius from the query point. Returns an object that is used to track state during neighbor traversal.""",
)
add_builtin(
"hash_grid_query_next",
input_types={"query": hash_grid_query_t, "index": int},
value_type=builtins.bool,
group="Geometry",
doc="""Move to the next point in the hash grid query. The index of the current neighbor is stored in ``index``, returns ``False``
if there are no more neighbors.""",
)
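# A minimal sketch (illustrative) of fixed-radius neighbor iteration:
#
#   import warp as wp
#
#   @wp.kernel
#   def count_neighbors(grid: wp.uint64, points: wp.array(dtype=wp.vec3),
#                       radius: float, counts: wp.array(dtype=int)):
#       tid = wp.tid()
#       i = wp.hash_grid_point_id(grid, tid)  # spatially coherent ordering
#       p = points[i]
#       query = wp.hash_grid_query(grid, p, radius)
#       index = int(0)
#       n = int(0)
#       while wp.hash_grid_query_next(query, index):
#           if wp.length(points[index] - p) <= radius:  # grid returns candidates
#               n += 1
#       counts[i] = n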
add_builtin(
"hash_grid_point_id",
input_types={"id": uint64, "index": int},
value_type=int,
group="Geometry",
doc="""Return the index of a point in the grid, this can be used to re-order threads such that grid
traversal occurs in a spatially coherent order.""",
)
add_builtin(
"intersect_tri_tri",
input_types={"v0": vec3, "v1": vec3, "v2": vec3, "u0": vec3, "u1": vec3, "u2": vec3},
value_type=int,
group="Geometry",
doc="Tests for intersection between two triangles (v0, v1, v2) and (u0, u1, u2) using Moller's method. Returns > 0 if triangles intersect.",
)
add_builtin(
"mesh_get",
input_types={"id": uint64},
value_type=Mesh,
missing_grad=True,
group="Geometry",
doc="""Retrieves the mesh given its index.""",
)
add_builtin(
"mesh_eval_face_normal",
input_types={"id": uint64, "face": int},
value_type=vec3,
group="Geometry",
doc="""Evaluates the face normal the mesh given a face index.""",
)
add_builtin(
"mesh_get_point",
input_types={"id": uint64, "index": int},
value_type=vec3,
group="Geometry",
doc="""Returns the point of the mesh given a index.""",
)
add_builtin(
"mesh_get_velocity",
input_types={"id": uint64, "index": int},
value_type=vec3,
group="Geometry",
doc="""Returns the velocity of the mesh given a index.""",
)
add_builtin(
"mesh_get_index",
input_types={"id": uint64, "index": int},
value_type=int,
group="Geometry",
doc="""Returns the point-index of the mesh given a face-vertex index.""",
)
add_builtin(
"closest_point_edge_edge",
input_types={"p1": vec3, "q1": vec3, "p2": vec3, "q2": vec3, "epsilon": float},
value_type=vec3,
group="Geometry",
doc="""Finds the closest points between two edges. Returns barycentric weights to the points on each edge, as well as the closest distance between the edges.
:param p1: First point of first edge
:param q1: Second point of first edge
:param p2: First point of second edge
:param q2: Second point of second edge
:param epsilon: Zero tolerance for determining if points in an edge are degenerate.
:param out: vec3 output containing (s,t,d), where `s` in [0,1] is the barycentric weight for the first edge, `t` is the barycentric weight for the second edge, and `d` is the distance between the two edges at these two closest points.""",
)
# ---------------------------------
# Ranges
add_builtin("range", input_types={"end": int}, value_type=range_t, group="Utility", export=False, hidden=True)
add_builtin(
"range", input_types={"start": int, "end": int}, value_type=range_t, group="Utility", export=False, hidden=True
)
add_builtin(
"range",
input_types={"start": int, "end": int, "step": int},
value_type=range_t,
group="Utility",
export=False,
hidden=True,
)
# ---------------------------------
# Iterators
add_builtin("iter_next", input_types={"range": range_t}, value_type=int, group="Utility", hidden=True)
add_builtin("iter_next", input_types={"query": hash_grid_query_t}, value_type=int, group="Utility", hidden=True)
add_builtin("iter_next", input_types={"query": mesh_query_aabb_t}, value_type=int, group="Utility", hidden=True)
# ---------------------------------
# Volumes
add_builtin(
"volume_sample_f",
input_types={"id": uint64, "uvw": vec3, "sampling_mode": int},
value_type=float,
group="Volumes",
doc="""Sample the volume given by ``id`` at the volume local-space point ``uvw``. Interpolation should be ``wp.Volume.CLOSEST``, or ``wp.Volume.LINEAR.``""",
)
add_builtin(
"volume_sample_grad_f",
input_types={"id": uint64, "uvw": vec3, "sampling_mode": int, "grad": vec3},
value_type=float,
group="Volumes",
doc="""Sample the volume and its gradient given by ``id`` at the volume local-space point ``uvw``. Interpolation should be ``wp.Volume.CLOSEST``, or ``wp.Volume.LINEAR.``""",
)
add_builtin(
"volume_lookup_f",
input_types={"id": uint64, "i": int, "j": int, "k": int},
value_type=float,
group="Volumes",
doc="""Returns the value of voxel with coordinates ``i``, ``j``, ``k``, if the voxel at this index does not exist this function returns the background value""",
)
add_builtin(
"volume_store_f",
input_types={"id": uint64, "i": int, "j": int, "k": int, "value": float},
group="Volumes",
doc="""Store the value at voxel with coordinates ``i``, ``j``, ``k``.""",
)
add_builtin(
"volume_sample_v",
input_types={"id": uint64, "uvw": vec3, "sampling_mode": int},
value_type=vec3,
group="Volumes",
doc="""Sample the vector volume given by ``id`` at the volume local-space point ``uvw``. Interpolation should be ``wp.Volume.CLOSEST``, or ``wp.Volume.LINEAR.``""",
)
add_builtin(
"volume_lookup_v",
input_types={"id": uint64, "i": int, "j": int, "k": int},
value_type=vec3,
group="Volumes",
doc="""Returns the vector value of voxel with coordinates ``i``, ``j``, ``k``, if the voxel at this index does not exist this function returns the background value""",
)
add_builtin(
"volume_store_v",
input_types={"id": uint64, "i": int, "j": int, "k": int, "value": vec3},
group="Volumes",
doc="""Store the value at voxel with coordinates ``i``, ``j``, ``k``.""",
)
add_builtin(
"volume_sample_i",
input_types={"id": uint64, "uvw": vec3},
value_type=int,
group="Volumes",
doc="""Sample the int32 volume given by ``id`` at the volume local-space point ``uvw``. """,
)
add_builtin(
"volume_lookup_i",
input_types={"id": uint64, "i": int, "j": int, "k": int},
value_type=int,
group="Volumes",
doc="""Returns the int32 value of voxel with coordinates ``i``, ``j``, ``k``, if the voxel at this index does not exist this function returns the background value""",
)
add_builtin(
"volume_store_i",
input_types={"id": uint64, "i": int, "j": int, "k": int, "value": int},
group="Volumes",
doc="""Store the value at voxel with coordinates ``i``, ``j``, ``k``.""",
)
add_builtin(
"volume_index_to_world",
input_types={"id": uint64, "uvw": vec3},
value_type=vec3,
group="Volumes",
doc="""Transform a point defined in volume index space to world space given the volume's intrinsic affine transformation.""",
)
add_builtin(
"volume_world_to_index",
input_types={"id": uint64, "xyz": vec3},
value_type=vec3,
group="Volumes",
doc="""Transform a point defined in volume world space to the volume's index space, given the volume's intrinsic affine transformation.""",
)
add_builtin(
"volume_index_to_world_dir",
input_types={"id": uint64, "uvw": vec3},
value_type=vec3,
group="Volumes",
doc="""Transform a direction defined in volume index space to world space given the volume's intrinsic affine transformation.""",
)
add_builtin(
"volume_world_to_index_dir",
input_types={"id": uint64, "xyz": vec3},
value_type=vec3,
group="Volumes",
doc="""Transform a direction defined in volume world space to the volume's index space, given the volume's intrinsic affine transformation.""",
)
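# Sampling sketch (illustrative): transform a world-space point into index space
# before sampling, assuming 'volume' is a valid volume identifier:
#
#   uvw = wp.volume_world_to_index(volume, p_world)
#   value = wp.volume_sample_f(volume, uvw, wp.Volume.LINEAR)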
# ---------------------------------
# Random
add_builtin(
"rand_init",
input_types={"seed": int},
value_type=uint32,
group="Random",
doc="Initialize a new random number generator given a user-defined seed. Returns a 32-bit integer representing the RNG state.",
)
add_builtin(
"rand_init",
input_types={"seed": int, "offset": int},
value_type=uint32,
group="Random",
doc="""Initialize a new random number generator given a user-defined seed and an offset.
This alternative constructor can be useful in parallel programs, where a kernel as a whole should share a seed,
but each thread should generate uncorrelated values. In this case usage should be ``r = rand_init(seed, tid)``""",
)
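# Per-thread RNG sketch (illustrative; each thread derives an uncorrelated stream
# from a shared seed, as described in the docstring above):
#
#   @wp.kernel
#   def scatter(seed: int, out: wp.array(dtype=wp.vec3)):
#       tid = wp.tid()
#       state = wp.rand_init(seed, tid)
#       out[tid] = wp.vec3(wp.randf(state), wp.randf(state), wp.randf(state))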
add_builtin(
"randi",
input_types={"state": uint32},
value_type=int,
group="Random",
doc="Return a random integer between [0, 2^32)",
)
add_builtin(
"randi",
input_types={"state": uint32, "min": int, "max": int},
value_type=int,
group="Random",
doc="Return a random integer between [min, max)",
)
add_builtin(
"randf",
input_types={"state": uint32},
value_type=float,
group="Random",
doc="Return a random float between [0.0, 1.0)",
)
add_builtin(
"randf",
input_types={"state": uint32, "min": float, "max": float},
value_type=float,
group="Random",
doc="Return a random float between [min, max)",
)
add_builtin(
"randn", input_types={"state": uint32}, value_type=float, group="Random", doc="Sample a normal distribution"
)
add_builtin(
"sample_cdf",
input_types={"state": uint32, "cdf": array(dtype=float)},
value_type=int,
group="Random",
doc="Inverse transform sample a cumulative distribution function",
)
add_builtin(
"sample_triangle",
input_types={"state": uint32},
value_type=vec2,
group="Random",
doc="Uniformly sample a triangle. Returns sample barycentric coordinates",
)
add_builtin(
"sample_unit_ring",
input_types={"state": uint32},
value_type=vec2,
group="Random",
doc="Uniformly sample a ring in the xy plane",
)
add_builtin(
"sample_unit_disk",
input_types={"state": uint32},
value_type=vec2,
group="Random",
doc="Uniformly sample a disk in the xy plane",
)
add_builtin(
"sample_unit_sphere_surface",
input_types={"state": uint32},
value_type=vec3,
group="Random",
doc="Uniformly sample a unit sphere surface",
)
add_builtin(
"sample_unit_sphere",
input_types={"state": uint32},
value_type=vec3,
group="Random",
doc="Uniformly sample a unit sphere",
)
add_builtin(
"sample_unit_hemisphere_surface",
input_types={"state": uint32},
value_type=vec3,
group="Random",
doc="Uniformly sample a unit hemisphere surface",
)
add_builtin(
"sample_unit_hemisphere",
input_types={"state": uint32},
value_type=vec3,
group="Random",
doc="Uniformly sample a unit hemisphere",
)
add_builtin(
"sample_unit_square",
input_types={"state": uint32},
value_type=vec2,
group="Random",
doc="Uniformly sample a unit square",
)
add_builtin(
"sample_unit_cube",
input_types={"state": uint32},
value_type=vec3,
group="Random",
doc="Uniformly sample a unit cube",
)
add_builtin(
"poisson",
input_types={"state": uint32, "lam": float},
value_type=uint32,
group="Random",
doc="""Generate a random sample from a Poisson distribution.
:param state: RNG state
:param lam: The expected value of the distribution""",
)
add_builtin(
"noise",
input_types={"state": uint32, "x": float},
value_type=float,
group="Random",
doc="Non-periodic Perlin-style noise in 1d.",
)
add_builtin(
"noise",
input_types={"state": uint32, "xy": vec2},
value_type=float,
group="Random",
doc="Non-periodic Perlin-style noise in 2d.",
)
add_builtin(
"noise",
input_types={"state": uint32, "xyz": vec3},
value_type=float,
group="Random",
doc="Non-periodic Perlin-style noise in 3d.",
)
add_builtin(
"noise",
input_types={"state": uint32, "xyzt": vec4},
value_type=float,
group="Random",
doc="Non-periodic Perlin-style noise in 4d.",
)
add_builtin(
"pnoise",
input_types={"state": uint32, "x": float, "px": int},
value_type=float,
group="Random",
doc="Periodic Perlin-style noise in 1d.",
)
add_builtin(
"pnoise",
input_types={"state": uint32, "xy": vec2, "px": int, "py": int},
value_type=float,
group="Random",
doc="Periodic Perlin-style noise in 2d.",
)
add_builtin(
"pnoise",
input_types={"state": uint32, "xyz": vec3, "px": int, "py": int, "pz": int},
value_type=float,
group="Random",
doc="Periodic Perlin-style noise in 3d.",
)
add_builtin(
"pnoise",
input_types={"state": uint32, "xyzt": vec4, "px": int, "py": int, "pz": int, "pt": int},
value_type=float,
group="Random",
doc="Periodic Perlin-style noise in 4d.",
)
add_builtin(
"curlnoise",
input_types={"state": uint32, "xy": vec2},
value_type=vec2,
group="Random",
doc="Divergence-free vector field based on the gradient of a Perlin noise function.",
missing_grad=True,
)
add_builtin(
"curlnoise",
input_types={"state": uint32, "xyz": vec3},
value_type=vec3,
group="Random",
doc="Divergence-free vector field based on the curl of three Perlin noise functions.",
missing_grad=True,
)
add_builtin(
"curlnoise",
input_types={"state": uint32, "xyzt": vec4},
value_type=vec3,
group="Random",
doc="Divergence-free vector field based on the curl of three Perlin noise functions.",
missing_grad=True,
)
# note printf calls directly to global CRT printf (no wp:: namespace prefix)
add_builtin(
"printf",
input_types={},
namespace="",
variadic=True,
group="Utility",
doc="Allows printing formatted strings, using C-style format specifiers.",
)
add_builtin("print", input_types={"value": Any}, doc="Print variable to stdout", export=False, group="Utility")
add_builtin(
"breakpoint",
input_types={},
doc="Debugger breakpoint",
export=False,
group="Utility",
namespace="",
native_func="__debugbreak",
)
# helpers
add_builtin(
"tid",
input_types={},
value_type=int,
group="Utility",
doc="""Return the current thread index. Note that this is the *global* index of the thread in the range [0, dim)
where dim is the parameter passed to kernel launch.""",
)
add_builtin(
"tid",
input_types={},
value_type=[int, int],
group="Utility",
doc="""Return the current thread indices for a 2d kernel launch. Use ``i,j = wp.tid()`` syntax to retrieve the coordinates inside the kernel thread grid.""",
)
add_builtin(
"tid",
input_types={},
value_type=[int, int, int],
group="Utility",
doc="""Return the current thread indices for a 3d kernel launch. Use ``i,j,k = wp.tid()`` syntax to retrieve the coordinates inside the kernel thread grid.""",
)
add_builtin(
"tid",
input_types={},
value_type=[int, int, int, int],
group="Utility",
doc="""Return the current thread indices for a 4d kernel launch. Use ``i,j,k,l = wp.tid()`` syntax to retrieve the coordinates inside the kernel thread grid.""",
)
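# Launch-dimension sketch (illustrative; 'height' and 'width' are assumed launch
# extents):
#
#   @wp.kernel
#   def fill(img: wp.array2d(dtype=float)):
#       i, j = wp.tid()
#       img[i, j] = 1.0
#
#   wp.launch(fill, dim=(height, width), inputs=[img])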
add_builtin("copy", variadic=True, hidden=True, export=False, group="Utility")
add_builtin(
"select",
input_types={"cond": bool, "arg1": Any, "arg2": Any},
value_func=lambda args, kwds, _: args[1].type,
doc="Select between two arguments, if cond is false then return ``arg1``, otherwise return ``arg2``",
group="Utility",
)
add_builtin(
"select",
input_types={"cond": builtins.bool, "arg1": Any, "arg2": Any},
value_func=lambda args, kwds, _: args[1].type,
doc="Select between two arguments, if cond is false then return ``arg1``, otherwise return ``arg2``",
group="Utility",
)
for t in int_types:
add_builtin(
"select",
input_types={"cond": t, "arg1": Any, "arg2": Any},
value_func=lambda args, kwds, _: args[1].type,
doc="Select between two arguments, if cond is false then return ``arg1``, otherwise return ``arg2``",
group="Utility",
)
add_builtin(
"select",
input_types={"arr": array(dtype=Any), "arg1": Any, "arg2": Any},
value_func=lambda args, kwds, _: args[1].type,
doc="Select between two arguments, if array is null then return ``arg1``, otherwise return ``arg2``",
group="Utility",
)
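# Select sketch (illustrative): note the argument order, 'arg1' is returned when
# the condition is false:
#
#   y = wp.select(x > 0.0, 0.0, x)   # ReLU: 0.0 when x <= 0.0, otherwise x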
# does argument checking and type propagation for load()
def load_value_func(args, kwds, _):
if not is_array(args[0].type):
raise RuntimeError("load() argument 0 must be an array")
num_indices = len(args[1:])
num_dims = args[0].type.ndim
if num_indices < num_dims:
raise RuntimeError(
"Num indices < num dimensions for array load, this is a codegen error, should have generated a view instead"
)
if num_indices > num_dims:
raise RuntimeError(
f"Num indices > num dimensions for array load, received {num_indices}, but array only has {num_dims}"
)
# check index types
for a in args[1:]:
if not type_is_int(a.type):
raise RuntimeError(f"load() index arguments must be of integer type, got index of type {a.type}")
return args[0].type.dtype
# does argument checking and type propagation for view()
def view_value_func(args, kwds, _):
if not is_array(args[0].type):
raise RuntimeError("view() argument 0 must be an array")
# check array dim big enough to support view
num_indices = len(args[1:])
num_dims = args[0].type.ndim
if num_indices >= num_dims:
raise RuntimeError(
f"Trying to create an array view with {num_indices} indices, but the array only has {num_dims} dimension(s). Ensure that the argument type on the function or kernel specifies the expected number of dimensions, e.g.: def func(param: wp.array3d(dtype=float):"
)
# check index types
for a in args[1:]:
if not type_is_int(a.type):
raise RuntimeError(f"view() index arguments must be of integer type, got index of type {a.type}")
# create an array view with leading dimensions removed
dtype = args[0].type.dtype
ndim = num_dims - num_indices
if isinstance(args[0].type, (fabricarray, indexedfabricarray)):
# fabric array of arrays: return array attribute as a regular array
return array(dtype=dtype, ndim=ndim)
else:
return type(args[0].type)(dtype=dtype, ndim=ndim)
# does argument checking and type propagation for store()
def store_value_func(args, kwds, _):
# check target type
if not is_array(args[0].type):
raise RuntimeError("store() argument 0 must be an array")
num_indices = len(args[1:-1])
num_dims = args[0].type.ndim
# if this happens we should have generated a view instead of a load during code gen
if num_indices < num_dims:
raise RuntimeError("Num indices < num dimensions for array store")
if num_indices > num_dims:
raise RuntimeError(
f"Num indices > num dimensions for array store, received {num_indices}, but array only has {num_dims}"
)
# check index types
for a in args[1:-1]:
if not type_is_int(a.type):
raise RuntimeError(f"store() index arguments must be of integer type, got index of type {a.type}")
# check value type
if not types_equal(args[-1].type, args[0].type.dtype):
raise RuntimeError(
f"store() value argument type ({args[2].type}) must be of the same type as the array ({args[0].type.dtype})"
)
return None
add_builtin("load", variadic=True, hidden=True, value_func=load_value_func, group="Utility")
add_builtin("view", variadic=True, hidden=True, value_func=view_value_func, group="Utility")
add_builtin("store", variadic=True, hidden=True, value_func=store_value_func, skip_replay=True, group="Utility")
def atomic_op_value_func(args, kwds, _):
# check target type
if not is_array(args[0].type):
raise RuntimeError("atomic() operation argument 0 must be an array")
num_indices = len(args[1:-1])
num_dims = args[0].type.ndim
# if this happens we should have generated a view instead of a load during code gen
if num_indices < num_dims:
raise RuntimeError("Num indices < num dimensions for atomic array operation")
if num_indices > num_dims:
raise RuntimeError(
f"Num indices > num dimensions for atomic array operation, received {num_indices}, but array only has {num_dims}"
)
# check index types
for a in args[1:-1]:
if not type_is_int(a.type):
raise RuntimeError(
f"atomic() operation index arguments must be of integer type, got index of type {a.type}"
)
if not types_equal(args[-1].type, args[0].type.dtype):
raise RuntimeError(
f"atomic() value argument ({args[-1].type}) must be of the same type as the array ({args[0].type.dtype})"
)
return args[0].type.dtype
for array_type in array_types:
# don't list indexed array operations explicitly in docs
hidden = array_type == indexedarray
add_builtin(
"atomic_add",
hidden=hidden,
input_types={"a": array_type(dtype=Any), "i": int, "value": Any},
value_func=atomic_op_value_func,
doc="Atomically add ``value`` onto the array at location given by index.",
group="Utility",
skip_replay=True,
)
add_builtin(
"atomic_add",
hidden=hidden,
input_types={"a": array_type(dtype=Any), "i": int, "j": int, "value": Any},
value_func=atomic_op_value_func,
doc="Atomically add ``value`` onto the array at location given by indices.",
group="Utility",
skip_replay=True,
)
add_builtin(
"atomic_add",
hidden=hidden,
input_types={"a": array_type(dtype=Any), "i": int, "j": int, "k": int, "value": Any},
value_func=atomic_op_value_func,
doc="Atomically add ``value`` onto the array at location given by indices.",
group="Utility",
skip_replay=True,
)
add_builtin(
"atomic_add",
hidden=hidden,
input_types={"a": array_type(dtype=Any), "i": int, "j": int, "k": int, "l": int, "value": Any},
value_func=atomic_op_value_func,
doc="Atomically add ``value`` onto the array at location given by indices.",
group="Utility",
skip_replay=True,
)
add_builtin(
"atomic_sub",
hidden=hidden,
input_types={"a": array_type(dtype=Any), "i": int, "value": Any},
value_func=atomic_op_value_func,
doc="Atomically subtract ``value`` onto the array at location given by index.",
group="Utility",
skip_replay=True,
)
add_builtin(
"atomic_sub",
hidden=hidden,
input_types={"a": array_type(dtype=Any), "i": int, "j": int, "value": Any},
value_func=atomic_op_value_func,
doc="Atomically subtract ``value`` onto the array at location given by indices.",
group="Utility",
skip_replay=True,
)
add_builtin(
"atomic_sub",
hidden=hidden,
input_types={"a": array_type(dtype=Any), "i": int, "j": int, "k": int, "value": Any},
value_func=atomic_op_value_func,
doc="Atomically subtract ``value`` onto the array at location given by indices.",
group="Utility",
skip_replay=True,
)
add_builtin(
"atomic_sub",
hidden=hidden,
input_types={"a": array_type(dtype=Any), "i": int, "j": int, "k": int, "l": int, "value": Any},
value_func=atomic_op_value_func,
doc="Atomically subtract ``value`` onto the array at location given by indices.",
group="Utility",
skip_replay=True,
)
add_builtin(
"atomic_min",
hidden=hidden,
input_types={"a": array_type(dtype=Any), "i": int, "value": Any},
value_func=atomic_op_value_func,
doc="Compute the minimum of ``value`` and ``array[index]`` and atomically update the array. Note that for vectors and matrices the operation is only atomic on a per-component basis.",
group="Utility",
skip_replay=True,
)
add_builtin(
"atomic_min",
hidden=hidden,
input_types={"a": array_type(dtype=Any), "i": int, "j": int, "value": Any},
value_func=atomic_op_value_func,
doc="Compute the minimum of ``value`` and ``array[index]`` and atomically update the array. Note that for vectors and matrices the operation is only atomic on a per-component basis.",
group="Utility",
skip_replay=True,
)
add_builtin(
"atomic_min",
hidden=hidden,
input_types={"a": array_type(dtype=Any), "i": int, "j": int, "k": int, "value": Any},
value_func=atomic_op_value_func,
doc="Compute the minimum of ``value`` and ``array[index]`` and atomically update the array. Note that for vectors and matrices the operation is only atomic on a per-component basis.",
group="Utility",
skip_replay=True,
)
add_builtin(
"atomic_min",
hidden=hidden,
input_types={"a": array_type(dtype=Any), "i": int, "j": int, "k": int, "l": int, "value": Any},
value_func=atomic_op_value_func,
doc="Compute the minimum of ``value`` and ``array[index]`` and atomically update the array. Note that for vectors and matrices the operation is only atomic on a per-component basis.",
group="Utility",
skip_replay=True,
)
add_builtin(
"atomic_max",
hidden=hidden,
input_types={"a": array_type(dtype=Any), "i": int, "value": Any},
value_func=atomic_op_value_func,
doc="Compute the maximum of ``value`` and ``array[index]`` and atomically update the array. Note that for vectors and matrices the operation is only atomic on a per-component basis.",
group="Utility",
skip_replay=True,
)
add_builtin(
"atomic_max",
hidden=hidden,
input_types={"a": array_type(dtype=Any), "i": int, "j": int, "value": Any},
value_func=atomic_op_value_func,
doc="Compute the maximum of ``value`` and ``array[index]`` and atomically update the array. Note that for vectors and matrices the operation is only atomic on a per-component basis.",
group="Utility",
skip_replay=True,
)
add_builtin(
"atomic_max",
hidden=hidden,
input_types={"a": array_type(dtype=Any), "i": int, "j": int, "k": int, "value": Any},
value_func=atomic_op_value_func,
doc="Compute the maximum of ``value`` and ``array[index]`` and atomically update the array. Note that for vectors and matrices the operation is only atomic on a per-component basis.",
group="Utility",
skip_replay=True,
)
add_builtin(
"atomic_max",
hidden=hidden,
input_types={"a": array_type(dtype=Any), "i": int, "j": int, "k": int, "l": int, "value": Any},
value_func=atomic_op_value_func,
doc="Compute the maximum of ``value`` and ``array[index]`` and atomically update the array. Note that for vectors and matrices the operation is only atomic on a per-component basis.",
group="Utility",
skip_replay=True,
)
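# Atomic sketch (illustrative): build a histogram with atomic_add, assuming bin
# indices are within range:
#
#   @wp.kernel
#   def histogram(values: wp.array(dtype=int), bins: wp.array(dtype=int)):
#       tid = wp.tid()
#       wp.atomic_add(bins, values[tid], 1)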
# used to index into builtin types, i.e.: y = vec3[1]
def index_value_func(args, kwds, _):
return args[0].type._wp_scalar_type_
add_builtin(
"index",
input_types={"a": vector(length=Any, dtype=Scalar), "i": int},
value_func=index_value_func,
hidden=True,
group="Utility",
)
add_builtin(
"index",
input_types={"a": quaternion(dtype=Scalar), "i": int},
value_func=index_value_func,
hidden=True,
group="Utility",
)
add_builtin(
"index",
input_types={"a": matrix(shape=(Any, Any), dtype=Scalar), "i": int},
value_func=lambda args, kwds, _: vector(length=args[0].type._shape_[1], dtype=args[0].type._wp_scalar_type_),
hidden=True,
group="Utility",
)
add_builtin(
"index",
input_types={"a": matrix(shape=(Any, Any), dtype=Scalar), "i": int, "j": int},
value_func=index_value_func,
hidden=True,
group="Utility",
)
add_builtin(
"index",
input_types={"a": transformation(dtype=Scalar), "i": int},
value_func=index_value_func,
hidden=True,
group="Utility",
)
add_builtin("index", input_types={"s": shape_t, "i": int}, value_type=int, hidden=True, group="Utility")
def vector_indexset_element_value_func(args, kwds, _):
vec = args[0]
index = args[1]
value = args[2]
if value.type is not vec.type._wp_scalar_type_:
raise RuntimeError(
f"Trying to assign type '{type_repr(value.type)}' to element of a vector with type '{type_repr(vec.type)}'"
)
return None
# implements vector[index] = value
add_builtin(
"indexset",
input_types={"a": vector(length=Any, dtype=Scalar), "i": int, "value": Scalar},
value_func=vector_indexset_element_value_func,
hidden=True,
group="Utility",
skip_replay=True,
)
def matrix_indexset_element_value_func(args, kwds, _):
mat = args[0]
row = args[1]
col = args[2]
value = args[3]
if value.type is not mat.type._wp_scalar_type_:
raise RuntimeError(
f"Trying to assign type '{type_repr(value.type)}' to element of a matrix with type '{type_repr(mat.type)}'"
)
return None
def matrix_indexset_row_value_func(args, kwds, _):
mat = args[0]
row = args[1]
value = args[2]
if value.type._shape_[0] != mat.type._shape_[1]:
raise RuntimeError(
f"Trying to assign vector with length {value.type._length} to matrix with shape {mat.type._shape}, vector length must match the number of matrix columns."
)
if value.type._wp_scalar_type_ is not mat.type._wp_scalar_type_:
raise RuntimeError(
f"Trying to assign vector of type '{type_repr(value.type)}' to row of matrix of type '{type_repr(mat.type)}'"
)
return None
# implements matrix[i] = row
add_builtin(
"indexset",
input_types={"a": matrix(shape=(Any, Any), dtype=Scalar), "i": int, "value": vector(length=Any, dtype=Scalar)},
value_func=matrix_indexset_row_value_func,
hidden=True,
group="Utility",
skip_replay=True,
)
# implements matrix[i,j] = scalar
add_builtin(
"indexset",
input_types={"a": matrix(shape=(Any, Any), dtype=Scalar), "i": int, "j": int, "value": Scalar},
value_func=matrix_indexset_element_value_func,
hidden=True,
group="Utility",
skip_replay=True,
)
for t in scalar_types + vector_types:
if "vec" in t.__name__ or "mat" in t.__name__:
continue
add_builtin(
"expect_eq",
input_types={"arg1": t, "arg2": t},
value_type=None,
doc="Prints an error to stdout if arg1 and arg2 are not equal",
group="Utility",
hidden=True,
)
def expect_eq_val_func(args, kwds, _):
if not types_equal(args[0].type, args[1].type):
raise RuntimeError("Can't test equality for objects with different types")
return None
add_builtin(
"expect_eq",
input_types={"arg1": vector(length=Any, dtype=Scalar), "arg2": vector(length=Any, dtype=Scalar)},
value_func=expect_eq_val_func,
doc="Prints an error to stdout if arg1 and arg2 are not equal",
group="Utility",
hidden=True,
)
add_builtin(
"expect_neq",
input_types={"arg1": vector(length=Any, dtype=Scalar), "arg2": vector(length=Any, dtype=Scalar)},
value_func=expect_eq_val_func,
doc="Prints an error to stdout if arg1 and arg2 are equal",
group="Utility",
hidden=True,
)
add_builtin(
"expect_eq",
input_types={"arg1": matrix(shape=(Any, Any), dtype=Scalar), "arg2": matrix(shape=(Any, Any), dtype=Scalar)},
value_func=expect_eq_val_func,
doc="Prints an error to stdout if arg1 and arg2 are not equal",
group="Utility",
hidden=True,
)
add_builtin(
"expect_neq",
input_types={"arg1": matrix(shape=(Any, Any), dtype=Scalar), "arg2": matrix(shape=(Any, Any), dtype=Scalar)},
value_func=expect_eq_val_func,
doc="Prints an error to stdout if arg1 and arg2 are equal",
group="Utility",
hidden=True,
)
add_builtin(
"lerp",
input_types={"a": Float, "b": Float, "t": Float},
value_func=sametype_value_func(Float),
doc="Linearly interpolate two values a and b using factor t, computed as ``a*(1-t) + b*t``",
group="Utility",
)
add_builtin(
"smoothstep",
input_types={"edge0": Float, "edge1": Float, "x": Float},
value_func=sametype_value_func(Float),
doc="Smoothly interpolate between two values edge0 and edge1 using a factor x, and return a result between 0 and 1 using a cubic Hermite interpolation after clamping",
group="Utility",
)
def lerp_value_func(default):
def fn(args, kwds, _):
if args is None:
return default
scalar_type = args[-1].type
if not types_equal(args[0].type, args[1].type):
raise RuntimeError("Can't lerp between objects with different types")
if args[0].type._wp_scalar_type_ != scalar_type:
raise RuntimeError("'t' parameter must have the same scalar type as objects you're lerping between")
return args[0].type
return fn
add_builtin(
"lerp",
input_types={"a": vector(length=Any, dtype=Float), "b": vector(length=Any, dtype=Float), "t": Float},
value_func=lerp_value_func(vector(length=Any, dtype=Float)),
doc="Linearly interpolate two values a and b using factor t, computed as ``a*(1-t) + b*t``",
group="Utility",
)
add_builtin(
"lerp",
input_types={"a": matrix(shape=(Any, Any), dtype=Float), "b": matrix(shape=(Any, Any), dtype=Float), "t": Float},
value_func=lerp_value_func(matrix(shape=(Any, Any), dtype=Float)),
doc="Linearly interpolate two values a and b using factor t, computed as ``a*(1-t) + b*t``",
group="Utility",
)
add_builtin(
"lerp",
input_types={"a": quaternion(dtype=Float), "b": quaternion(dtype=Float), "t": Float},
value_func=lerp_value_func(quaternion(dtype=Float)),
doc="Linearly interpolate two values a and b using factor t, computed as ``a*(1-t) + b*t``",
group="Utility",
)
add_builtin(
"lerp",
input_types={"a": transformation(dtype=Float), "b": transformation(dtype=Float), "t": Float},
value_func=lerp_value_func(transformation(dtype=Float)),
doc="Linearly interpolate two values a and b using factor t, computed as ``a*(1-t) + b*t``",
group="Utility",
)
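# Interpolation sketch (illustrative): the same ``lerp`` spelling applies to
# scalars, vectors, matrices, quaternions, and transformations:
#
#   p = wp.lerp(a, b, 0.25)   # a*(1-0.25) + b*0.25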
# fuzzy compare for float values
add_builtin(
"expect_near",
input_types={"arg1": Float, "arg2": Float, "tolerance": Float},
defaults={"tolerance": 1.0e-6},
value_type=None,
doc="Prints an error to stdout if arg1 and arg2 are not closer than tolerance in magnitude",
group="Utility",
)
add_builtin(
"expect_near",
input_types={"arg1": vec3, "arg2": vec3, "tolerance": float},
value_type=None,
doc="Prints an error to stdout if any element of arg1 and arg2 are not closer than tolerance in magnitude",
group="Utility",
)
# ---------------------------------
# Algorithms
add_builtin(
"lower_bound",
input_types={"arr": array(dtype=Scalar), "value": Scalar},
value_type=int,
doc="Search a sorted array for the closest element greater than or equal to value.",
)
add_builtin(
"lower_bound",
input_types={"arr": array(dtype=Scalar), "arr_begin": int, "arr_end": int, "value": Scalar},
value_type=int,
doc="Search a sorted array range [arr_begin, arr_end) for the closest element greater than or equal to value.",
)
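# Search sketch (illustrative; 'arr' is a sorted wp.array of Scalar):
#
#   idx = wp.lower_bound(arr, 0.5)          # first element >= 0.5 over the whole array
#   idx = wp.lower_bound(arr, 2, 8, 0.5)    # the same search restricted to [2, 8)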
# ---------------------------------
# Operators
add_builtin(
"add", input_types={"x": Scalar, "y": Scalar}, value_func=sametype_value_func(Scalar), doc="", group="Operators"
)
add_builtin(
"add",
input_types={"x": vector(length=Any, dtype=Scalar), "y": vector(length=Any, dtype=Scalar)},
value_func=sametype_value_func(vector(length=Any, dtype=Scalar)),
doc="",
group="Operators",
)
add_builtin(
"add",
input_types={"x": quaternion(dtype=Scalar), "y": quaternion(dtype=Scalar)},
value_func=sametype_value_func(quaternion(dtype=Scalar)),
doc="",
group="Operators",
)
add_builtin(
"add",
input_types={"x": matrix(shape=(Any, Any), dtype=Scalar), "y": matrix(shape=(Any, Any), dtype=Scalar)},
value_func=sametype_value_func(matrix(shape=(Any, Any), dtype=Scalar)),
doc="",
group="Operators",
)
add_builtin(
"add",
input_types={"x": transformation(dtype=Scalar), "y": transformation(dtype=Scalar)},
value_func=sametype_value_func(transformation(dtype=Scalar)),
doc="",
group="Operators",
)
add_builtin(
"sub", input_types={"x": Scalar, "y": Scalar}, value_func=sametype_value_func(Scalar), doc="", group="Operators"
)
add_builtin(
"sub",
input_types={"x": vector(length=Any, dtype=Scalar), "y": vector(length=Any, dtype=Scalar)},
value_func=sametype_value_func(vector(length=Any, dtype=Scalar)),
doc="",
group="Operators",
)
add_builtin(
"sub",
input_types={"x": matrix(shape=(Any, Any), dtype=Scalar), "y": matrix(shape=(Any, Any), dtype=Scalar)},
value_func=sametype_value_func(matrix(shape=(Any, Any), dtype=Scalar)),
doc="",
group="Operators",
)
add_builtin(
"sub",
input_types={"x": quaternion(dtype=Scalar), "y": quaternion(dtype=Scalar)},
value_func=sametype_value_func(quaternion(dtype=Scalar)),
doc="",
group="Operators",
)
add_builtin(
"sub",
input_types={"x": transformation(dtype=Scalar), "y": transformation(dtype=Scalar)},
value_func=sametype_value_func(transformation(dtype=Scalar)),
doc="",
group="Operators",
)
# bitwise operators
add_builtin("bit_and", input_types={"x": Int, "y": Int}, value_func=sametype_value_func(Int), doc="", group="Operators")
add_builtin("bit_or", input_types={"x": Int, "y": Int}, value_func=sametype_value_func(Int), doc="", group="Operators")
add_builtin("bit_xor", input_types={"x": Int, "y": Int}, value_func=sametype_value_func(Int), doc="", group="Operators")
add_builtin("lshift", input_types={"x": Int, "y": Int}, value_func=sametype_value_func(Int), doc="", group="Operators")
add_builtin("rshift", input_types={"x": Int, "y": Int}, value_func=sametype_value_func(Int), doc="", group="Operators")
add_builtin("invert", input_types={"x": Int}, value_func=sametype_value_func(Int), doc="", group="Operators")
def scalar_mul_value_func(default):
def fn(args, kwds, _):
if args is None:
return default
scalar = [a.type for a in args if a.type in scalar_types][0]
compound = [a.type for a in args if a.type not in scalar_types][0]
if scalar != compound._wp_scalar_type_:
raise RuntimeError("Object and coefficient must have the same scalar type when multiplying by scalar")
return compound
return fn
def mul_matvec_value_func(args, kwds, _):
if args is None:
return vector(length=Any, dtype=Scalar)
if args[0].type._wp_scalar_type_ != args[1].type._wp_scalar_type_:
raise RuntimeError(
f"Can't multiply matrix and vector with different types {args[0].type._wp_scalar_type_}, {args[1].type._wp_scalar_type_}"
)
if args[0].type._shape_[1] != args[1].type._length_:
raise RuntimeError(
f"Can't multiply matrix of shape {args[0].type._shape_} and vector with length {args[1].type._length_}"
)
return vector(length=args[0].type._shape_[0], dtype=args[0].type._wp_scalar_type_)
def mul_matmat_value_func(args, kwds, _):
if args is None:
return matrix(shape=(Any, Any), dtype=Scalar)
if args[0].type._wp_scalar_type_ != args[1].type._wp_scalar_type_:
raise RuntimeError(
f"Can't multiply matrices with different types {args[0].type._wp_scalar_type_}, {args[1].type._wp_scalar_type_}"
)
if args[0].type._shape_[1] != args[1].type._shape_[0]:
raise RuntimeError(f"Can't multiply matrix of shapes {args[0].type._shape_} and {args[1].type._shape_}")
return matrix(shape=(args[0].type._shape_[0], args[1].type._shape_[1]), dtype=args[0].type._wp_scalar_type_)
add_builtin(
"mul", input_types={"x": Scalar, "y": Scalar}, value_func=sametype_value_func(Scalar), doc="", group="Operators"
)
add_builtin(
"mul",
input_types={"x": vector(length=Any, dtype=Scalar), "y": Scalar},
value_func=scalar_mul_value_func(vector(length=Any, dtype=Scalar)),
doc="",
group="Operators",
)
add_builtin(
"mul",
input_types={"x": Scalar, "y": vector(length=Any, dtype=Scalar)},
value_func=scalar_mul_value_func(vector(length=Any, dtype=Scalar)),
doc="",
group="Operators",
)
add_builtin(
"mul",
input_types={"x": quaternion(dtype=Scalar), "y": Scalar},
value_func=scalar_mul_value_func(quaternion(dtype=Scalar)),
doc="",
group="Operators",
)
add_builtin(
"mul",
input_types={"x": Scalar, "y": quaternion(dtype=Scalar)},
value_func=scalar_mul_value_func(quaternion(dtype=Scalar)),
doc="",
group="Operators",
)
add_builtin(
"mul",
input_types={"x": quaternion(dtype=Scalar), "y": quaternion(dtype=Scalar)},
value_func=sametype_value_func(quaternion(dtype=Scalar)),
doc="",
group="Operators",
)
add_builtin(
"mul",
input_types={"x": Scalar, "y": matrix(shape=(Any, Any), dtype=Scalar)},
value_func=scalar_mul_value_func(matrix(shape=(Any, Any), dtype=Scalar)),
doc="",
group="Operators",
)
add_builtin(
"mul",
input_types={"x": matrix(shape=(Any, Any), dtype=Scalar), "y": Scalar},
value_func=scalar_mul_value_func(matrix(shape=(Any, Any), dtype=Scalar)),
doc="",
group="Operators",
)
add_builtin(
"mul",
input_types={"x": matrix(shape=(Any, Any), dtype=Scalar), "y": vector(length=Any, dtype=Scalar)},
value_func=mul_matvec_value_func,
doc="",
group="Operators",
)
add_builtin(
"mul",
input_types={"x": matrix(shape=(Any, Any), dtype=Scalar), "y": matrix(shape=(Any, Any), dtype=Scalar)},
value_func=mul_matmat_value_func,
doc="",
group="Operators",
)
add_builtin(
"mul",
input_types={"x": transformation(dtype=Scalar), "y": transformation(dtype=Scalar)},
value_func=sametype_value_func(transformation(dtype=Scalar)),
doc="",
group="Operators",
)
add_builtin(
"mul",
input_types={"x": Scalar, "y": transformation(dtype=Scalar)},
value_func=scalar_mul_value_func(transformation(dtype=Scalar)),
doc="",
group="Operators",
)
add_builtin(
"mul",
input_types={"x": transformation(dtype=Scalar), "y": Scalar},
value_func=scalar_mul_value_func(transformation(dtype=Scalar)),
doc="",
group="Operators",
)
add_builtin(
"mod", input_types={"x": Scalar, "y": Scalar}, value_func=sametype_value_func(Scalar), doc="", group="Operators"
)
add_builtin(
"div", input_types={"x": Scalar, "y": Scalar}, value_func=sametype_value_func(Scalar), doc="", group="Operators"
)
add_builtin(
"div",
input_types={"x": vector(length=Any, dtype=Scalar), "y": Scalar},
value_func=scalar_mul_value_func(vector(length=Any, dtype=Scalar)),
doc="",
group="Operators",
)
add_builtin(
"div",
input_types={"x": matrix(shape=(Any, Any), dtype=Scalar), "y": Scalar},
value_func=scalar_mul_value_func(matrix(shape=(Any, Any), dtype=Scalar)),
doc="",
group="Operators",
)
add_builtin(
"div",
input_types={"x": quaternion(dtype=Scalar), "y": Scalar},
value_func=scalar_mul_value_func(quaternion(dtype=Scalar)),
doc="",
group="Operators",
)
add_builtin(
"floordiv",
input_types={"x": Scalar, "y": Scalar},
value_func=sametype_value_func(Scalar),
doc="",
group="Operators",
)
add_builtin("pos", input_types={"x": Scalar}, value_func=sametype_value_func(Scalar), doc="", group="Operators")
add_builtin(
"pos",
input_types={"x": vector(length=Any, dtype=Scalar)},
value_func=sametype_value_func(vector(length=Any, dtype=Scalar)),
doc="",
group="Operators",
)
add_builtin(
"pos",
input_types={"x": quaternion(dtype=Scalar)},
value_func=sametype_value_func(quaternion(dtype=Scalar)),
doc="",
group="Operators",
)
add_builtin(
"pos",
input_types={"x": matrix(shape=(Any, Any), dtype=Scalar)},
value_func=sametype_value_func(matrix(shape=(Any, Any), dtype=Scalar)),
doc="",
group="Operators",
)
add_builtin("neg", input_types={"x": Scalar}, value_func=sametype_value_func(Scalar), doc="", group="Operators")
add_builtin(
"neg",
input_types={"x": vector(length=Any, dtype=Scalar)},
value_func=sametype_value_func(vector(length=Any, dtype=Scalar)),
doc="",
group="Operators",
)
add_builtin(
"neg",
input_types={"x": quaternion(dtype=Scalar)},
value_func=sametype_value_func(quaternion(dtype=Scalar)),
doc="",
group="Operators",
)
add_builtin(
"neg",
input_types={"x": matrix(shape=(Any, Any), dtype=Scalar)},
value_func=sametype_value_func(matrix(shape=(Any, Any), dtype=Scalar)),
doc="",
group="Operators",
)
add_builtin("unot", input_types={"b": builtins.bool}, value_type=builtins.bool, doc="", group="Operators")
add_builtin("unot", input_types={"b": bool}, value_type=builtins.bool, doc="", group="Operators")
for t in int_types:
add_builtin("unot", input_types={"b": t}, value_type=builtins.bool, doc="", group="Operators")
add_builtin("unot", input_types={"a": array(dtype=Any)}, value_type=builtins.bool, doc="", group="Operators")
| warp-main | warp/builtins.py |
# Copyright (c) 2023 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import warp
import ctypes
from warp.thirdparty.dlpack import (
DLManagedTensor,
DLDevice,
DLDeviceType,
DLDataType,
DLDataTypeCode,
DLTensor,
_c_str_dltensor,
)
ctypes.pythonapi.PyMem_RawMalloc.restype = ctypes.c_void_p
ctypes.pythonapi.PyMem_RawFree.argtypes = [ctypes.c_void_p]
ctypes.pythonapi.PyCapsule_New.restype = ctypes.py_object
ctypes.pythonapi.PyCapsule_New.argtypes = [ctypes.c_void_p, ctypes.c_char_p, ctypes.c_void_p]
ctypes.pythonapi.PyCapsule_IsValid.restype = ctypes.c_int
ctypes.pythonapi.PyCapsule_IsValid.argtypes = [ctypes.py_object, ctypes.c_char_p]
ctypes.pythonapi.PyCapsule_GetPointer.restype = ctypes.c_void_p
ctypes.pythonapi.PyCapsule_GetPointer.argtypes = [ctypes.py_object, ctypes.c_char_p]
class _Holder:
def __init__(self, wp_array) -> None:
self.wp_array = wp_array
def _as_manager_ctx(self) -> ctypes.c_void_p:
py_obj = ctypes.py_object(self)
py_obj_ptr = ctypes.pointer(py_obj)
ctypes.pythonapi.Py_IncRef(py_obj)
ctypes.pythonapi.Py_IncRef(ctypes.py_object(py_obj_ptr))
return ctypes.cast(py_obj_ptr, ctypes.c_void_p)
@ctypes.CFUNCTYPE(None, ctypes.c_void_p)
def _warp_array_deleter(handle: ctypes.c_void_p) -> None:
"""A function to deallocate the memory of a Warp array."""
dl_managed_tensor = DLManagedTensor.from_address(handle)
py_obj_ptr = ctypes.cast(dl_managed_tensor.manager_ctx, ctypes.POINTER(ctypes.py_object))
py_obj = py_obj_ptr.contents
ctypes.pythonapi.Py_DecRef(py_obj)
ctypes.pythonapi.Py_DecRef(ctypes.py_object(py_obj_ptr))
ctypes.pythonapi.PyMem_RawFree(handle)
@ctypes.CFUNCTYPE(None, ctypes.c_void_p)
def _warp_pycapsule_deleter(handle: ctypes.c_void_p) -> None:
"""A function to deallocate a pycapsule that wraps a Warp array."""
pycapsule: ctypes.py_object = ctypes.cast(handle, ctypes.py_object)
if ctypes.pythonapi.PyCapsule_IsValid(pycapsule, _c_str_dltensor):
dl_managed_tensor = ctypes.pythonapi.PyCapsule_GetPointer(pycapsule, _c_str_dltensor)
_warp_array_deleter(dl_managed_tensor)
ctypes.pythonapi.PyCapsule_SetDestructor(pycapsule, None)
def device_to_dlpack(wp_device) -> DLDevice:
d = warp.get_device(wp_device)
if d.is_cpu:
device_type = DLDeviceType.kDLCPU
device_id = 0
elif d.is_cuda:
device_type = DLDeviceType.kDLCUDA
device_id = d.ordinal
else:
raise RuntimeError("Unhandled device type converting to dlpack")
dl_device = DLDevice()
dl_device.device_type = device_type
dl_device.device_id = device_id
return dl_device
def dtype_to_dlpack(wp_dtype) -> DLDataType:
if wp_dtype == warp.int8:
return (DLDataTypeCode.kDLInt, 8, 1)
elif wp_dtype == warp.uint8:
return (DLDataTypeCode.kDLUInt, 8, 1)
elif wp_dtype == warp.int16:
return (DLDataTypeCode.kDLInt, 16, 1)
elif wp_dtype == warp.uint16:
return (DLDataTypeCode.kDLUInt, 16, 1)
elif wp_dtype == warp.int32:
return (DLDataTypeCode.kDLInt, 32, 1)
elif wp_dtype == warp.uint32:
return (DLDataTypeCode.kDLUInt, 32, 1)
elif wp_dtype == warp.int64:
return (DLDataTypeCode.kDLInt, 64, 1)
elif wp_dtype == warp.uint64:
return (DLDataTypeCode.kDLUInt, 64, 1)
elif wp_dtype == warp.float16:
return (DLDataTypeCode.kDLFloat, 16, 1)
elif wp_dtype == warp.float32:
return (DLDataTypeCode.kDLFloat, 32, 1)
elif wp_dtype == warp.float64:
return (DLDataTypeCode.kDLFloat, 64, 1)
else:
raise RuntimeError(f"No conversion from Warp type {wp_dtype} to DLPack type")
def dtype_from_dlpack(dl_dtype):
# unpack to tuple for easier comparison
dl_dtype = (dl_dtype.type_code.value, dl_dtype.bits)
if dl_dtype == (DLDataTypeCode.kDLUInt, 1):
raise RuntimeError("Warp does not support bit boolean types")
elif dl_dtype == (DLDataTypeCode.kDLInt, 8):
return warp.types.int8
elif dl_dtype == (DLDataTypeCode.kDLInt, 16):
return warp.types.int16
elif dl_dtype == (DLDataTypeCode.kDLInt, 32):
return warp.types.int32
elif dl_dtype == (DLDataTypeCode.kDLInt, 64):
return warp.types.int64
elif dl_dtype == (DLDataTypeCode.kDLUInt, 8):
return warp.types.uint8
elif dl_dtype == (DLDataTypeCode.kDLUInt, 16):
return warp.types.uint16
elif dl_dtype == (DLDataTypeCode.kDLUInt, 32):
return warp.types.uint32
elif dl_dtype == (DLDataTypeCode.kDLUInt, 64):
return warp.types.uint64
elif dl_dtype == (DLDataTypeCode.kDLFloat, 16):
return warp.types.float16
elif dl_dtype == (DLDataTypeCode.kDLFloat, 32):
return warp.types.float32
elif dl_dtype == (DLDataTypeCode.kDLFloat, 64):
return warp.types.float64
elif dl_dtype == (DLDataTypeCode.kDLComplex, 64):
raise RuntimeError("Warp does not support complex types")
elif dl_dtype == (DLDataTypeCode.kDLComplex, 128):
raise RuntimeError("Warp does not support complex types")
else:
raise RuntimeError(f"Unknown dlpack datatype {dl_dtype}")
def device_from_dlpack(dl_device):
if dl_device.device_type.value == DLDeviceType.kDLCPU or dl_device.device_type.value == DLDeviceType.kDLCUDAHost:
return "cpu"
elif (
dl_device.device_type.value == DLDeviceType.kDLCUDA
or dl_device.device_type.value == DLDeviceType.kDLCUDAManaged
):
return f"cuda:{dl_device.device_id}"
else:
raise RuntimeError(f"Unknown device type from dlpack: {dl_device.device_type.value}")
def shape_to_dlpack(shape):
a = (ctypes.c_int64 * len(shape))(*shape)
return a
def strides_to_dlpack(strides, dtype):
# convert from byte count to element count
s = []
for i in range(len(strides)):
s.append(int(strides[i]) // warp.types.type_size_in_bytes(dtype))
a = (ctypes.c_int64 * len(strides))(*s)
return a
def to_dlpack(wp_array: warp.array):
"""Convert a Warp array to another type of dlpack compatible array.
Parameters
----------
np_array : np.ndarray
The source numpy array that will be converted.
Returns
-------
pycapsule : PyCapsule
A pycapsule containing a DLManagedTensor that can be converted
to other array formats without copying the underlying memory.
"""
# DLPack does not support structured arrays
if isinstance(wp_array.dtype, warp.codegen.Struct):
raise RuntimeError("Cannot convert structured Warp arrays to DLPack.")
holder = _Holder(wp_array)
# allocate DLManagedTensor
size = ctypes.c_size_t(ctypes.sizeof(DLManagedTensor))
dl_managed_tensor = DLManagedTensor.from_address(ctypes.pythonapi.PyMem_RawMalloc(size))
# handle vector types
if hasattr(wp_array.dtype, "_wp_scalar_type_"):
# vector type, flatten the dimensions into one tuple
target_dtype = wp_array.dtype._wp_scalar_type_
target_ndim = wp_array.ndim + len(wp_array.dtype._shape_)
target_shape = (*wp_array.shape, *wp_array.dtype._shape_)
dtype_strides = warp.types.strides_from_shape(wp_array.dtype._shape_, wp_array.dtype._wp_scalar_type_)
target_strides = (*wp_array.strides, *dtype_strides)
else:
# scalar type
target_dtype = wp_array.dtype
target_ndim = wp_array.ndim
target_shape = wp_array.shape
target_strides = wp_array.strides
# store the shape and stride arrays with the holder to prevent them from getting deallocated
holder._shape = shape_to_dlpack(target_shape)
holder._strides = strides_to_dlpack(target_strides, target_dtype)
if wp_array.pinned:
dl_device = DLDeviceType.kDLCUDAHost
else:
dl_device = device_to_dlpack(wp_array.device)
# set Tensor attributes
dl_managed_tensor.dl_tensor.data = wp_array.ptr
dl_managed_tensor.dl_tensor.device = dl_device
dl_managed_tensor.dl_tensor.ndim = target_ndim
dl_managed_tensor.dl_tensor.dtype = dtype_to_dlpack(target_dtype)
dl_managed_tensor.dl_tensor.shape = holder._shape
dl_managed_tensor.dl_tensor.strides = holder._strides
dl_managed_tensor.dl_tensor.byte_offset = 0
dl_managed_tensor.manager_ctx = holder._as_manager_ctx()
dl_managed_tensor.deleter = _warp_array_deleter
pycapsule = ctypes.pythonapi.PyCapsule_New(
ctypes.byref(dl_managed_tensor),
_c_str_dltensor,
_warp_pycapsule_deleter,
)
return pycapsule
def dtype_is_compatible(dl_dtype, wp_dtype):
if dl_dtype.bits % 8 != 0:
raise RuntimeError("Data types with less than 8 bits are not supported")
if dl_dtype.type_code.value == DLDataTypeCode.kDLFloat:
if dl_dtype.bits == 16:
return wp_dtype == warp.float16
elif dl_dtype.bits == 32:
return wp_dtype == warp.float32
elif dl_dtype.bits == 64:
return wp_dtype == warp.float64
elif dl_dtype.type_code.value == DLDataTypeCode.kDLInt or dl_dtype.type_code.value == DLDataTypeCode.kDLUInt:
if dl_dtype.bits == 8:
return wp_dtype == warp.int8 or wp_dtype == warp.uint8
elif dl_dtype.bits == 16:
return wp_dtype == warp.int16 or wp_dtype == warp.uint16
elif dl_dtype.bits == 32:
return wp_dtype == warp.int32 or wp_dtype == warp.uint32
elif dl_dtype.bits == 64:
return wp_dtype == warp.int64 or wp_dtype == warp.uint64
elif dl_dtype.type_code.value == DLDataTypeCode.kDLBfloat:
raise RuntimeError("Bfloat data type is not supported")
elif dl_dtype.type_code.value == DLDataTypeCode.kDLComplex:
raise RuntimeError("Complex data types are not supported")
else:
raise RuntimeError(f"Unsupported dlpack dtype {(str(dl_dtype.type_code), dl_dtype.bits)}")
def from_dlpack(pycapsule, dtype=None) -> warp.array:
"""Convert a dlpack tensor into a numpy array without copying.
Parameters
----------
pycapsule : PyCapsule
A pycapsule wrapping a dlpack tensor that will be converted.
Returns
-------
np_array : np.ndarray
A new numpy array that uses the same underlying memory as the input
pycapsule.
"""
assert ctypes.pythonapi.PyCapsule_IsValid(pycapsule, _c_str_dltensor)
dl_managed_tensor = ctypes.pythonapi.PyCapsule_GetPointer(pycapsule, _c_str_dltensor)
dl_managed_tensor_ptr = ctypes.cast(dl_managed_tensor, ctypes.POINTER(DLManagedTensor))
dl_managed_tensor = dl_managed_tensor_ptr.contents
dlt = dl_managed_tensor.dl_tensor
assert isinstance(dlt, DLTensor)
device = device_from_dlpack(dlt.device)
pinned = dlt.device.device_type.value == DLDeviceType.kDLCUDAHost
shape = tuple(dlt.shape[dim] for dim in range(dlt.ndim))
itemsize = dlt.dtype.bits // 8
if dlt.strides:
strides = tuple(dlt.strides[dim] * itemsize for dim in range(dlt.ndim))
else:
strides = None
# handle multi-lane dtypes as another dimension
if dlt.dtype.lanes > 1:
shape = (*shape, dlt.dtype.lanes)
if strides is not None:
strides = (*strides, itemsize)
if dtype is None:
# automatically detect dtype
dtype = dtype_from_dlpack(dlt.dtype)
elif hasattr(dtype, "_wp_scalar_type_"):
# handle vector/matrix types
if not dtype_is_compatible(dlt.dtype, dtype._wp_scalar_type_):
raise RuntimeError(f"Incompatible data types: {dlt.dtype} and {dtype}")
dtype_shape = dtype._shape_
dtype_dims = len(dtype._shape_)
if dtype_dims > len(shape) or dtype_shape != shape[-dtype_dims:]:
raise RuntimeError(
f"Could not convert DLPack tensor with shape {shape} to Warp array with dtype={dtype}, ensure that source inner shape is {dtype_shape}"
)
if strides is not None:
# ensure the inner strides are contiguous
stride = itemsize
for i in range(dtype_dims):
if strides[-i - 1] != stride:
raise RuntimeError(
f"Could not convert DLPack tensor with shape {shape} to Warp array with dtype={dtype}, because the source inner strides are not contiguous"
)
stride *= dtype_shape[-i - 1]
strides = tuple(strides[:-dtype_dims]) or (itemsize,)
shape = tuple(shape[:-dtype_dims]) or (1,)
elif not dtype_is_compatible(dlt.dtype, dtype):
# incompatible dtype requested
raise RuntimeError(f"Incompatible data types: {dlt.dtype} and {dtype}")
a = warp.types.array(
ptr=dlt.data, dtype=dtype, shape=shape, strides=strides, copy=False, owner=False, device=device, pinned=pinned
)
# keep a reference to the capsule so it doesn't get deleted
a._pycapsule = pycapsule
return a
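# Round-trip sketch (illustrative; not executed at import time):
#
#   import warp as wp
#   from warp.dlpack import from_dlpack, to_dlpack
#
#   wp.init()
#   a = wp.zeros(16, dtype=wp.float32)
#   capsule = to_dlpack(a)      # zero-copy export to a DLPack capsule
#   b = from_dlpack(capsule)    # zero-copy import; 'b' aliases the memory of 'a'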
| warp-main | warp/dlpack.py |
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import warp as wp
class Tape:
"""
Record kernel launches within a Tape scope to enable automatic differentiation.
Gradients can be computed after the operations have been recorded on the tape via
``tape.backward()``.
Example
-------
.. code-block:: python
tape = wp.Tape()
# forward pass
with tape:
wp.launch(kernel=compute1, inputs=[a, b], device="cuda")
wp.launch(kernel=compute2, inputs=[c, d], device="cuda")
wp.launch(kernel=loss, inputs=[d, l], device="cuda")
# reverse pass
tape.backward(l)
Gradients can be accessed via the ``tape.gradients`` dictionary, e.g.:
.. code-block:: python
print(tape.gradients[a])
"""
def __init__(self):
self.gradients = {}
self.const_gradients = set()
self.launches = []
self.loss = None
def __enter__(self):
if wp.context.runtime.tape is not None:
raise RuntimeError("Warp: Error, entering a tape while one is already active")
wp.context.runtime.tape = self
return self
def __exit__(self, exc_type, exc_value, traceback):
if wp.context.runtime.tape is None:
raise RuntimeError("Warp: Error, ended tape capture, but tape not present")
wp.context.runtime.tape = None
# adj_outputs is a mapping from output tensor -> adjoint of the output
# after running backward the gradients of tensors may be retrieved by:
#
# adj_tensor = tape.gradients[tensor]
#
def backward(self, loss: wp.array = None, grads: dict = None):
"""
Evaluate the backward pass of the recorded operations on the tape.
A single-element array ``loss`` or a dictionary of arrays ``grads``
can be provided to assign the incoming gradients for the reverse-mode
automatic differentiation pass.
Args:
loss (wp.array): A single-element array that holds the loss function value whose gradient is to be computed
grads (dict): A dictionary of arrays that map from Warp arrays to their incoming gradients
"""
# if scalar loss is specified then initialize
# a 'seed' array for it, with gradient of one
if loss:
if loss.size > 1 or wp.types.type_length(loss.dtype) > 1:
raise RuntimeError("Can only return gradients for scalar loss functions.")
if not loss.requires_grad:
raise RuntimeError(
"Scalar loss arrays should have requires_grad=True set before calling Tape.backward()"
)
# set the seed grad to 1.0
loss.grad.fill_(1.0)
# simply apply dict grads to objects
# this is just for backward compat. with
# existing code before we added wp.array.grad attribute
if grads:
for a, g in grads.items():
a.grad = g
self.const_gradients.add(a)
# run launches backwards
for launch in reversed(self.launches):
if callable(launch):
launch()
else:
kernel = launch[0]
dim = launch[1]
inputs = launch[2]
outputs = launch[3]
device = launch[4]
adj_inputs = []
adj_outputs = []
# lookup adjoint inputs
for a in inputs:
adj_inputs.append(self.get_adjoint(a))
# lookup adjoint outputs, todo: only allocate outputs if necessary
for a in outputs:
adj_outputs.append(self.get_adjoint(a))
wp.launch(
kernel=kernel,
dim=dim,
inputs=inputs,
outputs=outputs,
adj_inputs=adj_inputs,
adj_outputs=adj_outputs,
device=device,
adjoint=True,
)
# record a kernel launch on the tape
def record_launch(self, kernel, dim, inputs, outputs, device):
self.launches.append([kernel, dim, inputs, outputs, device])
def record_func(self, backward, arrays):
"""
Records a custom function to be executed only in the backward pass.
Args:
backward (Callable): A callable Python object (can be any function) that will be executed in the backward pass.
arrays (list): A list of arrays that are used by the function for gradient tracking.
"""
self.launches.append(backward)
for a in arrays:
if isinstance(a, wp.array) and a.grad:
self.gradients[a] = a.grad
else:
raise RuntimeError(
f"Array {a} is not of type wp.array or is missing a gradient array. Set array parameter requires_grad=True during instantiation."
)
# returns the adjoint of a kernel parameter
def get_adjoint(self, a):
if not wp.types.is_array(a) and not isinstance(a, wp.codegen.StructInstance):
# if input is a simple type (e.g.: float, vec3, etc) then
# no gradient needed (we only return gradients through arrays and structs)
return a
elif wp.types.is_array(a) and a.grad:
# keep track of all gradients used by the tape (for zeroing)
# ignore the scalar loss since we don't want to clear its grad
self.gradients[a] = a.grad
return a.grad
elif isinstance(a, wp.codegen.StructInstance):
adj = a._cls()
for name, _ in a._cls.ctype._fields_:
if name.startswith("_"):
continue
if isinstance(a._cls.vars[name].type, wp.array):
arr = getattr(a, name)
if arr.grad:
grad = self.gradients[arr] = arr.grad
else:
grad = None
setattr(adj, name, grad)
else:
setattr(adj, name, getattr(a, name))
self.gradients[a] = adj
return adj
return None
def reset(self):
"""
Clear all operations recorded on the tape and zero out all gradients.
"""
self.launches = []
self.zero()
def zero(self):
"""
Zero out all gradients recorded on the tape.
"""
for a, g in self.gradients.items():
if a not in self.const_gradients:
if isinstance(a, wp.codegen.StructInstance):
for name in g._cls.vars:
if isinstance(g._cls.vars[name].type, wp.array) and g._cls.vars[name].requires_grad:
getattr(g, name).zero_()
else:
g.zero_()
| warp-main | warp/tape.py |
# Copyright (c) 2023 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
from warp.types import constant
__all__ = [
"E",
"e",
"LOG2E",
"log2e",
"LOG10E",
"log10e",
"LN2",
"ln2",
"LN10",
"ln10",
"PHI",
"phi",
"PI",
"pi",
"TAU",
"tau",
]
E = e = constant(2.71828182845904523536) # e
LOG2E = log2e = constant(1.44269504088896340736) # log2(e)
LOG10E = log10e = constant(0.43429448190325182765) # log10(e)
LN2 = ln2 = constant(0.69314718055994530942) # ln(2)
LN10 = ln10 = constant(2.30258509299404568402) # ln(10)
PHI = phi = constant(1.61803398874989484820) # golden ratio
PI = pi = constant(3.14159265358979323846) # pi
TAU = tau = constant(6.28318530717958647692) # 2 * pi
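# Example usage (a sketch; these constants are re-exported at package level by
# ``from warp.constants import *`` in warp/__init__.py, so they can be
# referenced inside kernels as e.g. ``wp.pi``):
#
#     import warp as wp
#
#     @wp.kernel
#     def circle_areas(radii: wp.array(dtype=float), areas: wp.array(dtype=float)):
#         tid = wp.tid()
#         areas[tid] = wp.pi * radii[tid] * radii[tid]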
| warp-main | warp/constants.py |
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
# for autocomplete on builtins
# from warp.stubs import *
from warp.types import array, array1d, array2d, array3d, array4d, constant
from warp.types import indexedarray, indexedarray1d, indexedarray2d, indexedarray3d, indexedarray4d
from warp.fabric import fabricarray, fabricarrayarray, indexedfabricarray, indexedfabricarrayarray
from warp.types import bool, int8, uint8, int16, uint16, int32, uint32, int64, uint64, float16, float32, float64
from warp.types import vec2, vec2b, vec2ub, vec2s, vec2us, vec2i, vec2ui, vec2l, vec2ul, vec2h, vec2f, vec2d
from warp.types import vec3, vec3b, vec3ub, vec3s, vec3us, vec3i, vec3ui, vec3l, vec3ul, vec3h, vec3f, vec3d
from warp.types import vec4, vec4b, vec4ub, vec4s, vec4us, vec4i, vec4ui, vec4l, vec4ul, vec4h, vec4f, vec4d
from warp.types import mat22, mat22h, mat22f, mat22d
from warp.types import mat33, mat33h, mat33f, mat33d
from warp.types import mat44, mat44h, mat44f, mat44d
from warp.types import quat, quath, quatf, quatd
from warp.types import transform, transformh, transformf, transformd
from warp.types import spatial_vector, spatial_vectorh, spatial_vectorf, spatial_vectord
from warp.types import spatial_matrix, spatial_matrixh, spatial_matrixf, spatial_matrixd
# geometry types
from warp.types import Bvh, Mesh, HashGrid, Volume, MarchingCubes
from warp.types import bvh_query_t, mesh_query_aabb_t, hash_grid_query_t
# device-wide gemms
from warp.types import matmul, adj_matmul, batched_matmul, adj_batched_matmul, from_ptr
# deprecated
from warp.types import vector as vec
from warp.types import matrix as mat
from warp.context import init, func, func_grad, func_replay, kernel, struct, overload
from warp.context import is_cpu_available, is_cuda_available, is_device_available
from warp.context import get_devices, get_preferred_device
from warp.context import get_cuda_devices, get_cuda_device_count, get_cuda_device, map_cuda_device, unmap_cuda_device
from warp.context import get_device, set_device, synchronize_device
from warp.context import (
zeros,
zeros_like,
full,
full_like,
clone,
empty,
empty_like,
copy,
from_numpy,
launch,
synchronize,
force_load,
load_module,
)
from warp.context import set_module_options, get_module_options, get_module
from warp.context import capture_begin, capture_end, capture_launch
from warp.context import print_builtins, export_builtins, export_stubs
from warp.context import Kernel, Function, Launch
from warp.context import Stream, get_stream, set_stream, synchronize_stream
from warp.context import Event, record_event, wait_event, wait_stream
from warp.context import RegisteredGLBuffer
from warp.tape import Tape
from warp.utils import ScopedTimer, ScopedDevice, ScopedStream
from warp.utils import transform_expand, quat_between_vectors
from warp.torch import from_torch, to_torch
from warp.torch import device_from_torch, device_to_torch
from warp.torch import stream_from_torch, stream_to_torch
from warp.jax import from_jax, to_jax
from warp.jax import device_from_jax, device_to_jax
from warp.dlpack import from_dlpack, to_dlpack
from warp.constants import *
from . import builtins
import warp.config
__version__ = warp.config.version
| warp-main | warp/__init__.py |
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import builtins
import ctypes
import hashlib
import struct
import zlib
from typing import Any, Callable, Generic, List, Tuple, TypeVar, Union
import numpy as np
import warp
# type hints
Length = TypeVar("Length", bound=int)
Rows = TypeVar("Rows")
Cols = TypeVar("Cols")
DType = TypeVar("DType")
Int = TypeVar("Int")
Float = TypeVar("Float")
Scalar = TypeVar("Scalar")
Vector = Generic[Length, Scalar]
Matrix = Generic[Rows, Cols, Scalar]
Quaternion = Generic[Float]
Transformation = Generic[Float]
Array = Generic[DType]
T = TypeVar("T")
# shared hash for all constants
_constant_hash = hashlib.sha256()
def constant(x):
"""Function to declare compile-time constants accessible from Warp kernels
Args:
x: Compile-time constant value, can be any of the built-in math types.
"""
global _constant_hash
# hash the constant value
if isinstance(x, int):
_constant_hash.update(struct.pack("<q", x))
elif isinstance(x, float):
_constant_hash.update(struct.pack("<d", x))
elif isinstance(x, builtins.bool):
_constant_hash.update(struct.pack("?", x))
elif isinstance(x, float16):
# float16 is a special case
p = ctypes.pointer(ctypes.c_float(x.value))
_constant_hash.update(p.contents)
elif isinstance(x, tuple(scalar_types)):
p = ctypes.pointer(x._type_(x.value))
_constant_hash.update(p.contents)
elif isinstance(x, ctypes.Array):
_constant_hash.update(bytes(x))
else:
raise RuntimeError(f"Invalid constant type: {type(x)}")
return x
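# Example usage (a sketch): values declared with ``constant`` are baked into
# kernels at module compile time, e.g.
#
#     import warp as wp
#
#     SUBSTEPS = wp.constant(16)
#
#     @wp.kernel
#     def damp(x: wp.array(dtype=float)):
#         tid = wp.tid()
#         for _ in range(SUBSTEPS):
#             x[tid] = x[tid] * 0.5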
def float_to_half_bits(value):
return warp.context.runtime.core.float_to_half_bits(value)
def half_bits_to_float(value):
return warp.context.runtime.core.half_bits_to_float(value)
# ----------------------
# built-in types
def vector(length, dtype):
# canonicalize dtype
if dtype == int:
dtype = int32
elif dtype == float:
dtype = float32
class vec_t(ctypes.Array):
# ctypes.Array data for length, shape and c type:
_length_ = 0 if length is Any else length
_shape_ = (_length_,)
_type_ = ctypes.c_float if dtype in [Scalar, Float] else dtype._type_
# warp scalar type:
_wp_scalar_type_ = dtype
_wp_type_params_ = [length, dtype]
_wp_generic_type_str_ = "vec_t"
_wp_constructor_ = "vector"
# special handling for float16 type: in this case, data is stored
# as uint16 but it's actually half precision floating point
# data. This means we need to convert each of the arguments
# to uint16s containing half float bits before storing them in
# the array:
scalar_import = float_to_half_bits if _wp_scalar_type_ == float16 else lambda x: x
scalar_export = half_bits_to_float if _wp_scalar_type_ == float16 else lambda x: x
def __init__(self, *args):
num_args = len(args)
if num_args == 0:
super().__init__()
elif num_args == 1:
if hasattr(args[0], "__len__"):
# try to copy from expanded sequence, e.g. (1, 2, 3)
self.__init__(*args[0])
else:
# set all elements to the same value
value = vec_t.scalar_import(args[0])
for i in range(self._length_):
super().__setitem__(i, value)
elif num_args == self._length_:
# set all scalar elements
for i in range(self._length_):
super().__setitem__(i, vec_t.scalar_import(args[i]))
else:
raise ValueError(
f"Invalid number of arguments in vector constructor, expected {self._length_} elements, got {num_args}"
)
def __getitem__(self, key):
if isinstance(key, int):
return vec_t.scalar_export(super().__getitem__(key))
elif isinstance(key, slice):
if self._wp_scalar_type_ == float16:
return [vec_t.scalar_export(x) for x in super().__getitem__(key)]
else:
return super().__getitem__(key)
else:
raise KeyError(f"Invalid key {key}, expected int or slice")
def __setitem__(self, key, value):
if isinstance(key, int):
super().__setitem__(key, vec_t.scalar_import(value))
return value
elif isinstance(key, slice):
if self._wp_scalar_type_ == float16:
super().__setitem__(key, [vec_t.scalar_import(x) for x in value])
return value
else:
return super().__setitem__(key, value)
else:
raise KeyError(f"Invalid key {key}, expected int or slice")
def __add__(self, y):
return warp.add(self, y)
def __radd__(self, y):
return warp.add(self, y)
def __sub__(self, y):
return warp.sub(self, y)
def __rsub__(self, x):
return warp.sub(x, self)
def __mul__(self, y):
return warp.mul(self, y)
def __rmul__(self, x):
return warp.mul(x, self)
        def __truediv__(self, y):
            return warp.div(self, y)
        def __rtruediv__(self, x):
            return warp.div(x, self)
        def __pos__(self):
            return warp.pos(self)
        def __neg__(self):
            return warp.neg(self)
def __str__(self):
return f"[{', '.join(map(str, self))}]"
def __eq__(self, other):
for i in range(self._length_):
if self[i] != other[i]:
return False
return True
@classmethod
def from_ptr(cls, ptr):
if ptr:
# create a new vector instance and initialize the contents from the binary data
# this skips float16 conversions, assuming that float16 data is already encoded as uint16
value = cls()
ctypes.memmove(ctypes.byref(value), ptr, ctypes.sizeof(cls._type_) * cls._length_)
return value
else:
raise RuntimeError("NULL pointer exception")
return vec_t
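# Example usage (a sketch): ``vector`` constructs new fixed-length vector
# types; the concrete aliases defined below (vec2f, vec3d, ...) are built the
# same way.
#
#     vec5f = vector(length=5, dtype=float32)
#     v = vec5f(1.0, 2.0, 3.0, 4.0, 5.0)
#     v[0] = 0.5          # element assignment
#     w = v + v           # arithmetic via warp.add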
def matrix(shape, dtype):
assert len(shape) == 2
# canonicalize dtype
if dtype == int:
dtype = int32
elif dtype == float:
dtype = float32
class mat_t(ctypes.Array):
_length_ = 0 if shape[0] == Any or shape[1] == Any else shape[0] * shape[1]
_shape_ = (0, 0) if _length_ == 0 else shape
_type_ = ctypes.c_float if dtype in [Scalar, Float] else dtype._type_
# warp scalar type:
# used in type checking and when writing out c++ code for constructors:
_wp_scalar_type_ = dtype
_wp_type_params_ = [shape[0], shape[1], dtype]
_wp_generic_type_str_ = "mat_t"
_wp_constructor_ = "matrix"
_wp_row_type_ = vector(0 if shape[1] == Any else shape[1], dtype)
# special handling for float16 type: in this case, data is stored
# as uint16 but it's actually half precision floating point
# data. This means we need to convert each of the arguments
# to uint16s containing half float bits before storing them in
# the array:
scalar_import = float_to_half_bits if _wp_scalar_type_ == float16 else lambda x: x
scalar_export = half_bits_to_float if _wp_scalar_type_ == float16 else lambda x: x
def __init__(self, *args):
num_args = len(args)
if num_args == 0:
super().__init__()
elif num_args == 1:
if hasattr(args[0], "__len__"):
# try to copy from expanded sequence, e.g. [[1, 0], [0, 1]]
self.__init__(*args[0])
else:
# set all elements to the same value
value = mat_t.scalar_import(args[0])
for i in range(self._length_):
super().__setitem__(i, value)
elif num_args == self._length_:
# set all scalar elements
for i in range(self._length_):
super().__setitem__(i, mat_t.scalar_import(args[i]))
elif num_args == self._shape_[0]:
# row vectors
for i, row in enumerate(args):
if not hasattr(row, "__len__") or len(row) != self._shape_[1]:
raise TypeError(
f"Invalid argument in matrix constructor, expected row of length {self._shape_[1]}, got {row}"
)
offset = i * self._shape_[1]
                    for j in range(self._shape_[1]):
                        super().__setitem__(offset + j, mat_t.scalar_import(row[j]))
else:
raise ValueError(
f"Invalid number of arguments in matrix constructor, expected {self._length_} elements, got {num_args}"
)
def __add__(self, y):
return warp.add(self, y)
def __radd__(self, y):
return warp.add(self, y)
def __sub__(self, y):
return warp.sub(self, y)
def __rsub__(self, x):
return warp.sub(x, self)
def __mul__(self, y):
return warp.mul(self, y)
def __rmul__(self, x):
return warp.mul(x, self)
def __matmul__(self, y):
return warp.mul(self, y)
def __rmatmul__(self, x):
return warp.mul(x, self)
        def __truediv__(self, y):
            return warp.div(self, y)
        def __rtruediv__(self, x):
            return warp.div(x, self)
        def __pos__(self):
            return warp.pos(self)
        def __neg__(self):
            return warp.neg(self)
def __str__(self):
row_str = []
for r in range(self._shape_[0]):
row_val = self.get_row(r)
row_str.append(f"[{', '.join(map(str, row_val))}]")
return "[" + ",\n ".join(row_str) + "]"
def __eq__(self, other):
for i in range(self._shape_[0]):
for j in range(self._shape_[1]):
if self[i][j] != other[i][j]:
return False
return True
def get_row(self, r):
if r < 0 or r >= self._shape_[0]:
raise IndexError("Invalid row index")
row_start = r * self._shape_[1]
row_end = row_start + self._shape_[1]
row_data = super().__getitem__(slice(row_start, row_end))
if self._wp_scalar_type_ == float16:
return self._wp_row_type_(*[mat_t.scalar_export(x) for x in row_data])
else:
return self._wp_row_type_(row_data)
def set_row(self, r, v):
if r < 0 or r >= self._shape_[0]:
raise IndexError("Invalid row index")
row_start = r * self._shape_[1]
row_end = row_start + self._shape_[1]
if self._wp_scalar_type_ == float16:
v = [mat_t.scalar_import(x) for x in v]
super().__setitem__(slice(row_start, row_end), v)
def __getitem__(self, key):
if isinstance(key, Tuple):
# element indexing m[i,j]
if len(key) != 2:
raise KeyError(f"Invalid key, expected one or two indices, got {len(key)}")
return mat_t.scalar_export(super().__getitem__(key[0] * self._shape_[1] + key[1]))
elif isinstance(key, int):
# row vector indexing m[r]
return self.get_row(key)
else:
raise KeyError(f"Invalid key {key}, expected int or pair of ints")
def __setitem__(self, key, value):
if isinstance(key, Tuple):
# element indexing m[i,j] = x
if len(key) != 2:
raise KeyError(f"Invalid key, expected one or two indices, got {len(key)}")
super().__setitem__(key[0] * self._shape_[1] + key[1], mat_t.scalar_import(value))
return value
elif isinstance(key, int):
# row vector indexing m[r] = v
self.set_row(key, value)
return value
else:
raise KeyError(f"Invalid key {key}, expected int or pair of ints")
@classmethod
def from_ptr(cls, ptr):
if ptr:
# create a new matrix instance and initialize the contents from the binary data
# this skips float16 conversions, assuming that float16 data is already encoded as uint16
value = cls()
ctypes.memmove(ctypes.byref(value), ptr, ctypes.sizeof(cls._type_) * cls._length_)
return value
else:
raise RuntimeError("NULL pointer exception")
return mat_t
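# Example usage (a sketch): ``matrix`` constructs new fixed-shape matrix
# types; elements are addressed as m[i, j] and rows as m[i].
#
#     mat23f = matrix(shape=(2, 3), dtype=float32)
#     m = mat23f(1.0, 0.0, 0.0,
#                0.0, 1.0, 0.0)
#     row0 = m[0]          # length-3 row vector
#     m[1, 2] = 2.0        # element write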
class void:
def __init__(self):
pass
class bool:
_length_ = 1
_type_ = ctypes.c_bool
def __init__(self, x=False):
self.value = x
class float16:
_length_ = 1
_type_ = ctypes.c_uint16
def __init__(self, x=0.0):
self.value = x
class float32:
_length_ = 1
_type_ = ctypes.c_float
def __init__(self, x=0.0):
self.value = x
class float64:
_length_ = 1
_type_ = ctypes.c_double
def __init__(self, x=0.0):
self.value = x
class int8:
_length_ = 1
_type_ = ctypes.c_int8
def __init__(self, x=0):
self.value = x
class uint8:
_length_ = 1
_type_ = ctypes.c_uint8
def __init__(self, x=0):
self.value = x
class int16:
_length_ = 1
_type_ = ctypes.c_int16
def __init__(self, x=0):
self.value = x
class uint16:
_length_ = 1
_type_ = ctypes.c_uint16
def __init__(self, x=0):
self.value = x
class int32:
_length_ = 1
_type_ = ctypes.c_int32
def __init__(self, x=0):
self.value = x
class uint32:
_length_ = 1
_type_ = ctypes.c_uint32
def __init__(self, x=0):
self.value = x
class int64:
_length_ = 1
_type_ = ctypes.c_int64
def __init__(self, x=0):
self.value = x
class uint64:
_length_ = 1
_type_ = ctypes.c_uint64
def __init__(self, x=0):
self.value = x
def quaternion(dtype=Any):
class quat_t(vector(length=4, dtype=dtype)):
pass
# def __init__(self, *args):
# super().__init__(args)
ret = quat_t
ret._wp_type_params_ = [dtype]
ret._wp_generic_type_str_ = "quat_t"
ret._wp_constructor_ = "quaternion"
return ret
class quath(quaternion(dtype=float16)):
pass
class quatf(quaternion(dtype=float32)):
pass
class quatd(quaternion(dtype=float64)):
pass
def transformation(dtype=Any):
class transform_t(vector(length=7, dtype=dtype)):
_wp_type_params_ = [dtype]
_wp_generic_type_str_ = "transform_t"
_wp_constructor_ = "transformation"
def __init__(self, p=(0.0, 0.0, 0.0), q=(0.0, 0.0, 0.0, 1.0)):
super().__init__()
self[0:3] = vector(length=3, dtype=dtype)(*p)
self[3:7] = quaternion(dtype=dtype)(*q)
@property
def p(self):
return self[0:3]
@property
def q(self):
return self[3:7]
return transform_t
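# Example usage (a sketch): a transform packs a position followed by a
# quaternion into a length-7 vector, exposed via the ``p`` and ``q``
# properties.
#
#     t = transformf((1.0, 2.0, 3.0), (0.0, 0.0, 0.0, 1.0))
#     pos = t.p    # (1.0, 2.0, 3.0)
#     rot = t.q    # (0.0, 0.0, 0.0, 1.0)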
class transformh(transformation(dtype=float16)):
pass
class transformf(transformation(dtype=float32)):
pass
class transformd(transformation(dtype=float64)):
pass
class vec2h(vector(length=2, dtype=float16)):
pass
class vec3h(vector(length=3, dtype=float16)):
pass
class vec4h(vector(length=4, dtype=float16)):
pass
class vec2f(vector(length=2, dtype=float32)):
pass
class vec3f(vector(length=3, dtype=float32)):
pass
class vec4f(vector(length=4, dtype=float32)):
pass
class vec2d(vector(length=2, dtype=float64)):
pass
class vec3d(vector(length=3, dtype=float64)):
pass
class vec4d(vector(length=4, dtype=float64)):
pass
class vec2b(vector(length=2, dtype=int8)):
pass
class vec3b(vector(length=3, dtype=int8)):
pass
class vec4b(vector(length=4, dtype=int8)):
pass
class vec2ub(vector(length=2, dtype=uint8)):
pass
class vec3ub(vector(length=3, dtype=uint8)):
pass
class vec4ub(vector(length=4, dtype=uint8)):
pass
class vec2s(vector(length=2, dtype=int16)):
pass
class vec3s(vector(length=3, dtype=int16)):
pass
class vec4s(vector(length=4, dtype=int16)):
pass
class vec2us(vector(length=2, dtype=uint16)):
pass
class vec3us(vector(length=3, dtype=uint16)):
pass
class vec4us(vector(length=4, dtype=uint16)):
pass
class vec2i(vector(length=2, dtype=int32)):
pass
class vec3i(vector(length=3, dtype=int32)):
pass
class vec4i(vector(length=4, dtype=int32)):
pass
class vec2ui(vector(length=2, dtype=uint32)):
pass
class vec3ui(vector(length=3, dtype=uint32)):
pass
class vec4ui(vector(length=4, dtype=uint32)):
pass
class vec2l(vector(length=2, dtype=int64)):
pass
class vec3l(vector(length=3, dtype=int64)):
pass
class vec4l(vector(length=4, dtype=int64)):
pass
class vec2ul(vector(length=2, dtype=uint64)):
pass
class vec3ul(vector(length=3, dtype=uint64)):
pass
class vec4ul(vector(length=4, dtype=uint64)):
pass
class mat22h(matrix(shape=(2, 2), dtype=float16)):
pass
class mat33h(matrix(shape=(3, 3), dtype=float16)):
pass
class mat44h(matrix(shape=(4, 4), dtype=float16)):
pass
class mat22f(matrix(shape=(2, 2), dtype=float32)):
pass
class mat33f(matrix(shape=(3, 3), dtype=float32)):
pass
class mat44f(matrix(shape=(4, 4), dtype=float32)):
pass
class mat22d(matrix(shape=(2, 2), dtype=float64)):
pass
class mat33d(matrix(shape=(3, 3), dtype=float64)):
pass
class mat44d(matrix(shape=(4, 4), dtype=float64)):
pass
class spatial_vectorh(vector(length=6, dtype=float16)):
pass
class spatial_vectorf(vector(length=6, dtype=float32)):
pass
class spatial_vectord(vector(length=6, dtype=float64)):
pass
class spatial_matrixh(matrix(shape=(6, 6), dtype=float16)):
pass
class spatial_matrixf(matrix(shape=(6, 6), dtype=float32)):
pass
class spatial_matrixd(matrix(shape=(6, 6), dtype=float64)):
pass
# built-in type aliases that default to 32bit precision
vec2 = vec2f
vec3 = vec3f
vec4 = vec4f
mat22 = mat22f
mat33 = mat33f
mat44 = mat44f
quat = quatf
transform = transformf
spatial_vector = spatial_vectorf
spatial_matrix = spatial_matrixf
int_types = [int8, uint8, int16, uint16, int32, uint32, int64, uint64]
float_types = [float16, float32, float64]
scalar_types = int_types + float_types
vector_types = [
vec2b,
vec2ub,
vec2s,
vec2us,
vec2i,
vec2ui,
vec2l,
vec2ul,
vec2h,
vec2f,
vec2d,
vec3b,
vec3ub,
vec3s,
vec3us,
vec3i,
vec3ui,
vec3l,
vec3ul,
vec3h,
vec3f,
vec3d,
vec4b,
vec4ub,
vec4s,
vec4us,
vec4i,
vec4ui,
vec4l,
vec4ul,
vec4h,
vec4f,
vec4d,
mat22h,
mat22f,
mat22d,
mat33h,
mat33f,
mat33d,
mat44h,
mat44f,
mat44d,
quath,
quatf,
quatd,
transformh,
transformf,
transformd,
spatial_vectorh,
spatial_vectorf,
spatial_vectord,
spatial_matrixh,
spatial_matrixf,
spatial_matrixd,
]
np_dtype_to_warp_type = {
np.dtype(np.bool_): bool,
np.dtype(np.int8): int8,
np.dtype(np.uint8): uint8,
np.dtype(np.int16): int16,
np.dtype(np.uint16): uint16,
np.dtype(np.int32): int32,
np.dtype(np.int64): int64,
np.dtype(np.uint32): uint32,
np.dtype(np.uint64): uint64,
np.dtype(np.byte): int8,
np.dtype(np.ubyte): uint8,
np.dtype(np.float16): float16,
np.dtype(np.float32): float32,
np.dtype(np.float64): float64,
}
warp_type_to_np_dtype = {
bool: np.bool_,
int8: np.int8,
int16: np.int16,
int32: np.int32,
int64: np.int64,
uint8: np.uint8,
uint16: np.uint16,
uint32: np.uint32,
uint64: np.uint64,
float16: np.float16,
float32: np.float32,
float64: np.float64,
}
# represent a Python range iterator
class range_t:
def __init__(self):
pass
# definition just for kernel type (cannot be a parameter), see bvh.h
class bvh_query_t:
def __init__(self):
pass
# definition just for kernel type (cannot be a parameter), see mesh.h
class mesh_query_aabb_t:
def __init__(self):
pass
# definition just for kernel type (cannot be a parameter), see hash_grid.h
class hash_grid_query_t:
def __init__(self):
pass
# maximum number of dimensions, must match array.h
ARRAY_MAX_DIMS = 4
LAUNCH_MAX_DIMS = 4
# must match array.h
ARRAY_TYPE_REGULAR = 0
ARRAY_TYPE_INDEXED = 1
ARRAY_TYPE_FABRIC = 2
ARRAY_TYPE_FABRIC_INDEXED = 3
# represents bounds for kernel launch (number of threads across multiple dimensions)
class launch_bounds_t(ctypes.Structure):
_fields_ = [("shape", ctypes.c_int32 * LAUNCH_MAX_DIMS), ("ndim", ctypes.c_int32), ("size", ctypes.c_size_t)]
def __init__(self, shape):
if isinstance(shape, int):
# 1d launch
self.ndim = 1
self.size = shape
self.shape[0] = shape
else:
# nd launch
self.ndim = len(shape)
self.size = 1
for i in range(self.ndim):
self.shape[i] = shape[i]
self.size = self.size * shape[i]
# initialize the remaining dims to 1
for i in range(self.ndim, LAUNCH_MAX_DIMS):
self.shape[i] = 1
class shape_t(ctypes.Structure):
_fields_ = [("dims", ctypes.c_int32 * ARRAY_MAX_DIMS)]
def __init__(self):
pass
class array_t(ctypes.Structure):
_fields_ = [
("data", ctypes.c_uint64),
("grad", ctypes.c_uint64),
("shape", ctypes.c_int32 * ARRAY_MAX_DIMS),
("strides", ctypes.c_int32 * ARRAY_MAX_DIMS),
("ndim", ctypes.c_int32),
]
def __init__(self, data=0, grad=0, ndim=0, shape=(0,), strides=(0,)):
self.data = data
self.grad = grad
self.ndim = ndim
for i in range(ndim):
self.shape[i] = shape[i]
self.strides[i] = strides[i]
# structured type description used when array_t is packed in a struct and shared via numpy structured array.
@classmethod
def numpy_dtype(cls):
return cls._numpy_dtype_
# structured value used when array_t is packed in a struct and shared via a numpy structured array
def numpy_value(self):
return (self.data, self.grad, list(self.shape), list(self.strides), self.ndim)
# NOTE: must match array_t._fields_
array_t._numpy_dtype_ = {
"names": ["data", "grad", "shape", "strides", "ndim"],
"formats": ["u8", "u8", f"{ARRAY_MAX_DIMS}i4", f"{ARRAY_MAX_DIMS}i4", "i4"],
"offsets": [
array_t.data.offset,
array_t.grad.offset,
array_t.shape.offset,
array_t.strides.offset,
array_t.ndim.offset,
],
"itemsize": ctypes.sizeof(array_t),
}
class indexedarray_t(ctypes.Structure):
_fields_ = [
("data", array_t),
("indices", ctypes.c_void_p * ARRAY_MAX_DIMS),
("shape", ctypes.c_int32 * ARRAY_MAX_DIMS),
]
def __init__(self, data, indices, shape):
if data is None:
self.data = array().__ctype__()
for i in range(ARRAY_MAX_DIMS):
self.indices[i] = ctypes.c_void_p(None)
self.shape[i] = 0
else:
self.data = data.__ctype__()
for i in range(data.ndim):
if indices[i] is not None:
self.indices[i] = ctypes.c_void_p(indices[i].ptr)
else:
self.indices[i] = ctypes.c_void_p(None)
self.shape[i] = shape[i]
def type_ctype(dtype):
if dtype == float:
return ctypes.c_float
elif dtype == int:
return ctypes.c_int32
else:
# scalar type
return dtype._type_
def type_length(dtype):
if dtype == float or dtype == int or isinstance(dtype, warp.codegen.Struct):
return 1
else:
return dtype._length_
def type_scalar_type(dtype):
return getattr(dtype, "_wp_scalar_type_", dtype)
def type_size_in_bytes(dtype):
if dtype.__module__ == "ctypes":
return ctypes.sizeof(dtype)
elif type_is_struct(dtype):
return ctypes.sizeof(dtype.ctype)
elif dtype == float or dtype == int:
return 4
elif hasattr(dtype, "_type_"):
return getattr(dtype, "_length_", 1) * ctypes.sizeof(dtype._type_)
else:
return 0
def type_to_warp(dtype):
if dtype == float:
return float32
elif dtype == int:
return int32
else:
return dtype
def type_typestr(dtype):
from warp.codegen import Struct
if dtype == bool:
return "?"
elif dtype == float16:
return "<f2"
elif dtype == float32:
return "<f4"
elif dtype == float64:
return "<f8"
elif dtype == int8:
return "b"
elif dtype == uint8:
return "B"
elif dtype == int16:
return "<i2"
elif dtype == uint16:
return "<u2"
elif dtype == int32:
return "<i4"
elif dtype == uint32:
return "<u4"
elif dtype == int64:
return "<i8"
elif dtype == uint64:
return "<u8"
elif isinstance(dtype, Struct):
return f"|V{ctypes.sizeof(dtype.ctype)}"
elif issubclass(dtype, ctypes.Array):
return type_typestr(dtype._wp_scalar_type_)
else:
        raise TypeError(f"Unknown type: {dtype}")
# converts any known type to a human readable string, good for error messages, reporting etc
def type_repr(t):
    if is_array(t):
        return f"array(ndim={t.ndim}, dtype={t.dtype})"
    elif type_is_vector(t):
        return f"vector(length={t._shape_[0]}, dtype={t._wp_scalar_type_})"
    elif type_is_matrix(t):
        return f"matrix(shape=({t._shape_[0]}, {t._shape_[1]}), dtype={t._wp_scalar_type_})"
else:
return str(t)
def type_is_int(t):
if t == int:
t = int32
return t in int_types
def type_is_float(t):
if t == float:
t = float32
return t in float_types
def type_is_struct(dtype):
from warp.codegen import Struct
if isinstance(dtype, Struct):
return True
else:
return False
# returns True if the passed *type* is a vector
def type_is_vector(t):
if hasattr(t, "_wp_generic_type_str_") and t._wp_generic_type_str_ == "vec_t":
return True
else:
return False
# returns True if the passed *type* is a matrix
def type_is_matrix(t):
if hasattr(t, "_wp_generic_type_str_") and t._wp_generic_type_str_ == "mat_t":
return True
else:
return False
# returns true for all value types (int, float, bool, scalars, vectors, matrices)
def type_is_value(x):
if (x == int) or (x == float) or (x == builtins.bool) or (x in scalar_types) or issubclass(x, ctypes.Array):
return True
else:
return False
# equivalent of the above but for values
def is_int(x):
return type_is_int(type(x))
def is_float(x):
return type_is_float(type(x))
def is_value(x):
return type_is_value(type(x))
# returns true if the passed *instance* is one of the array types
def is_array(a):
return isinstance(a, array_types)
def types_equal(a, b, match_generic=False):
# convert to canonical types
if a == float:
a = float32
elif a == int:
a = int32
if b == float:
b = float32
elif b == int:
b = int32
compatible_bool_types = [builtins.bool, bool]
def are_equal(p1, p2):
if match_generic:
if p1 == Any or p2 == Any:
return True
if p1 == Scalar and p2 in scalar_types:
return True
if p2 == Scalar and p1 in scalar_types:
return True
if p1 == Scalar and p2 == Scalar:
return True
if p1 == Float and p2 in float_types:
return True
if p2 == Float and p1 in float_types:
return True
if p1 == Float and p2 == Float:
return True
if p1 in compatible_bool_types and p2 in compatible_bool_types:
return True
else:
return p1 == p2
if (
hasattr(a, "_wp_generic_type_str_")
and hasattr(b, "_wp_generic_type_str_")
and a._wp_generic_type_str_ == b._wp_generic_type_str_
):
return all([are_equal(p1, p2) for p1, p2 in zip(a._wp_type_params_, b._wp_type_params_)])
if is_array(a) and type(a) == type(b):
return True
else:
return are_equal(a, b)
def strides_from_shape(shape: Tuple, dtype):
ndims = len(shape)
strides = [None] * ndims
i = ndims - 1
strides[i] = type_size_in_bytes(dtype)
while i > 0:
strides[i - 1] = strides[i] * shape[i]
i -= 1
return tuple(strides)
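# Worked example: for shape=(2, 3) and dtype=float32 (4 bytes per element),
# the innermost stride is 4 bytes and the outer stride is 3 * 4 = 12 bytes,
# so strides_from_shape((2, 3), float32) returns (12, 4).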
class array(Array):
# member attributes available during code-gen (e.g.: d = array.shape[0])
# (initialized when needed)
_vars = None
def __init__(
self,
data=None,
dtype: DType = Any,
shape=None,
strides=None,
length=None,
ptr=None,
capacity=None,
device=None,
pinned=False,
copy=True,
owner=True, # TODO: replace with deleter=None
ndim=None,
grad=None,
requires_grad=False,
):
"""Constructs a new Warp array object
When the ``data`` argument is a valid list, tuple, or ndarray the array will be constructed from this object's data.
For objects that are not stored sequentially in memory (e.g.: a list), then the data will first
be flattened before being transferred to the memory space given by device.
The second construction path occurs when the ``ptr`` argument is a non-zero uint64 value representing the
start address in memory where existing array data resides, e.g.: from an external or C-library. The memory
allocation should reside on the same device given by the device argument, and the user should set the length
and dtype parameter appropriately.
If neither ``data`` nor ``ptr`` are specified, the ``shape`` or ``length`` arguments are checked next.
This construction path can be used to create new uninitialized arrays, but users are encouraged to call
``wp.empty()``, ``wp.zeros()``, or ``wp.full()`` instead to create new arrays.
If none of the above arguments are specified, a simple type annotation is constructed. This is used when annotating
kernel arguments or struct members (e.g.,``arr: wp.array(dtype=float)``). In this case, only ``dtype`` and ``ndim``
are taken into account and no memory is allocated for the array.
Args:
            data (Union[list, tuple, ndarray]): An object to construct the array from, can be a Tuple, List, or generally any type convertible to an np.array
            dtype (Union): One of the built-in types, e.g.: :class:`warp.mat33`; if dtype is Any and data is an ndarray, the dtype will be inferred from the array data type
shape (tuple): Dimensions of the array
strides (tuple): Number of bytes in each dimension between successive elements of the array
length (int): Number of elements of the data type (deprecated, users should use `shape` argument)
ptr (uint64): Address of an external memory address to alias (data should be None)
capacity (int): Maximum size in bytes of the ptr allocation (data should be None)
device (Devicelike): Device the array lives on
copy (bool): Whether the incoming data will be copied or aliased, this is only possible when the incoming `data` already lives on the device specified and types match
owner (bool): Should the array object try to deallocate memory when it is deleted
requires_grad (bool): Whether or not gradients will be tracked for this array, see :class:`warp.Tape` for details
grad (array): The gradient array to use
pinned (bool): Whether to allocate pinned host memory, which allows asynchronous host-device transfers (only applicable with device="cpu")
"""
self.owner = False
self.ctype = None
self._requires_grad = False
self._grad = None
# __array_interface__ or __cuda_array_interface__, evaluated lazily and cached
self._array_interface = None
# canonicalize dtype
if dtype == int:
dtype = int32
elif dtype == float:
dtype = float32
# convert shape to tuple (or leave shape=None if neither shape nor length were specified)
if shape is not None:
if isinstance(shape, int):
shape = (shape,)
else:
shape = tuple(shape)
if len(shape) > ARRAY_MAX_DIMS:
raise RuntimeError(
f"Failed to create array with shape {shape}, the maximum number of dimensions is {ARRAY_MAX_DIMS}"
)
elif length is not None:
# backward compatibility
shape = (length,)
# determine the construction path from the given arguments
if data is not None:
# data or ptr, not both
if ptr is not None:
raise RuntimeError("Can only construct arrays with either `data` or `ptr` arguments, not both")
self._init_from_data(data, dtype, shape, device, copy, pinned)
elif ptr is not None:
self._init_from_ptr(ptr, dtype, shape, strides, capacity, device, owner, pinned)
elif shape is not None:
self._init_new(dtype, shape, strides, device, pinned)
else:
self._init_annotation(dtype, ndim or 1)
# initialize gradient, if needed
if self.device is not None:
if grad is not None:
# this will also check whether the gradient array is compatible
self.grad = grad
else:
# allocate gradient if needed
self._requires_grad = requires_grad
if requires_grad:
with warp.ScopedStream(self.device.null_stream):
self._alloc_grad()
def _init_from_data(self, data, dtype, shape, device, copy, pinned):
if not hasattr(data, "__len__"):
raise RuntimeError(f"Data must be a sequence or array, got scalar {data}")
if hasattr(dtype, "_wp_scalar_type_"):
dtype_shape = dtype._shape_
dtype_ndim = len(dtype_shape)
scalar_dtype = dtype._wp_scalar_type_
else:
dtype_shape = ()
dtype_ndim = 0
scalar_dtype = dtype
# convert input data to ndarray (handles lists, tuples, etc.) and determine dtype
if dtype == Any:
# infer dtype from data
try:
arr = np.array(data, copy=False, ndmin=1)
except Exception as e:
raise RuntimeError(f"Failed to convert input data to an array: {e}")
dtype = np_dtype_to_warp_type.get(arr.dtype)
if dtype is None:
raise RuntimeError(f"Unsupported input data dtype: {arr.dtype}")
elif isinstance(dtype, warp.codegen.Struct):
if isinstance(data, np.ndarray):
# construct from numpy structured array
if data.dtype != dtype.numpy_dtype():
raise RuntimeError(
f"Invalid source data type for array of structs, expected {dtype.numpy_dtype()}, got {data.dtype}"
)
arr = data
elif isinstance(data, (list, tuple)):
# construct from a sequence of structs
try:
# convert each struct instance to its corresponding ctype
ctype_list = [v.__ctype__() for v in data]
# convert the list of ctypes to a contiguous ctypes array
ctype_arr = (dtype.ctype * len(ctype_list))(*ctype_list)
# convert to numpy
arr = np.frombuffer(ctype_arr, dtype=dtype.ctype)
except Exception as e:
raise RuntimeError(
f"Error while trying to construct Warp array from a sequence of Warp structs: {e}"
)
else:
raise RuntimeError(
"Invalid data argument for array of structs, expected a sequence of structs or a NumPy structured array"
)
else:
# convert input data to the given dtype
npdtype = warp_type_to_np_dtype.get(scalar_dtype)
if npdtype is None:
raise RuntimeError(
f"Failed to convert input data to an array with Warp type {warp.context.type_str(dtype)}"
)
try:
arr = np.array(data, dtype=npdtype, copy=False, ndmin=1)
except Exception as e:
raise RuntimeError(f"Failed to convert input data to an array with type {npdtype}: {e}")
# determine whether the input needs reshaping
target_npshape = None
if shape is not None:
target_npshape = (*shape, *dtype_shape)
elif dtype_ndim > 0:
# prune inner dimensions of length 1
while arr.ndim > 1 and arr.shape[-1] == 1:
arr = np.squeeze(arr, axis=-1)
# if the inner dims don't match exactly, check if the innermost dim is a multiple of type length
if arr.ndim < dtype_ndim or arr.shape[-dtype_ndim:] != dtype_shape:
if arr.shape[-1] == dtype._length_:
target_npshape = (*arr.shape[:-1], *dtype_shape)
elif arr.shape[-1] % dtype._length_ == 0:
target_npshape = (*arr.shape[:-1], arr.shape[-1] // dtype._length_, *dtype_shape)
else:
if dtype_ndim == 1:
raise RuntimeError(
f"The inner dimensions of the input data are not compatible with the requested vector type {warp.context.type_str(dtype)}: expected an inner dimension that is a multiple of {dtype._length_}"
)
else:
raise RuntimeError(
f"The inner dimensions of the input data are not compatible with the requested matrix type {warp.context.type_str(dtype)}: expected inner dimensions {dtype._shape_} or a multiple of {dtype._length_}"
)
if target_npshape is not None:
try:
arr = arr.reshape(target_npshape)
except Exception as e:
raise RuntimeError(
f"Failed to reshape the input data to the given shape {shape} and type {warp.context.type_str(dtype)}: {e}"
)
# determine final shape and strides
if dtype_ndim > 0:
# make sure the inner dims are contiguous for vector/matrix types
scalar_size = type_size_in_bytes(dtype._wp_scalar_type_)
inner_contiguous = arr.strides[-1] == scalar_size
if inner_contiguous and dtype_ndim > 1:
inner_contiguous = arr.strides[-2] == scalar_size * dtype_shape[-1]
if not inner_contiguous:
arr = np.ascontiguousarray(arr)
shape = arr.shape[:-dtype_ndim] or (1,)
strides = arr.strides[:-dtype_ndim] or (type_size_in_bytes(dtype),)
else:
shape = arr.shape or (1,)
strides = arr.strides or (type_size_in_bytes(dtype),)
device = warp.get_device(device)
if device.is_cpu and not copy and not pinned:
# reference numpy memory directly
self._init_from_ptr(arr.ctypes.data, dtype, shape, strides, None, device, False, False)
# keep a ref to the source array to keep allocation alive
self._ref = arr
else:
# copy data into a new array
self._init_new(dtype, shape, None, device, pinned)
src = array(
ptr=arr.ctypes.data,
dtype=dtype,
shape=shape,
strides=strides,
device="cpu",
copy=False,
owner=False,
)
warp.copy(self, src)
def _init_from_ptr(self, ptr, dtype, shape, strides, capacity, device, owner, pinned):
if dtype == Any:
raise RuntimeError("A concrete data type is required to create the array")
device = warp.get_device(device)
size = 1
for d in shape:
size *= d
contiguous_strides = strides_from_shape(shape, dtype)
if strides is None:
strides = contiguous_strides
is_contiguous = True
if capacity is None:
capacity = size * type_size_in_bytes(dtype)
else:
is_contiguous = strides == contiguous_strides
if capacity is None:
capacity = shape[0] * strides[0]
self.dtype = dtype
self.ndim = len(shape)
self.size = size
self.capacity = capacity
self.shape = shape
self.strides = strides
self.ptr = ptr
self.device = device
self.owner = owner
self.pinned = pinned if device.is_cpu else False
self.is_contiguous = is_contiguous
def _init_new(self, dtype, shape, strides, device, pinned):
if dtype == Any:
raise RuntimeError("A concrete data type is required to create the array")
device = warp.get_device(device)
size = 1
for d in shape:
size *= d
contiguous_strides = strides_from_shape(shape, dtype)
if strides is None:
strides = contiguous_strides
is_contiguous = True
capacity = size * type_size_in_bytes(dtype)
else:
is_contiguous = strides == contiguous_strides
capacity = shape[0] * strides[0]
if capacity > 0:
ptr = device.allocator.alloc(capacity, pinned=pinned)
if ptr is None:
raise RuntimeError(f"Array allocation failed on device: {device} for {capacity} bytes")
else:
ptr = None
self.dtype = dtype
self.ndim = len(shape)
self.size = size
self.capacity = capacity
self.shape = shape
self.strides = strides
self.ptr = ptr
self.device = device
self.owner = True
self.pinned = pinned if device.is_cpu else False
self.is_contiguous = is_contiguous
def _init_annotation(self, dtype, ndim):
self.dtype = dtype
self.ndim = ndim
self.size = 0
self.capacity = 0
self.shape = (0,) * ndim
self.strides = (0,) * ndim
self.ptr = None
self.device = None
self.owner = False
self.pinned = False
self.is_contiguous = False
@property
def __array_interface__(self):
# raising an AttributeError here makes hasattr() return False
if self.device is None or not self.device.is_cpu:
raise AttributeError(f"__array_interface__ not supported because device is {self.device}")
if self._array_interface is None:
# get flat shape (including type shape)
if isinstance(self.dtype, warp.codegen.Struct):
# struct
arr_shape = self.shape
arr_strides = self.strides
descr = self.dtype.numpy_dtype()
elif issubclass(self.dtype, ctypes.Array):
# vector type, flatten the dimensions into one tuple
arr_shape = (*self.shape, *self.dtype._shape_)
dtype_strides = strides_from_shape(self.dtype._shape_, self.dtype._type_)
arr_strides = (*self.strides, *dtype_strides)
descr = None
else:
# scalar type
arr_shape = self.shape
arr_strides = self.strides
descr = None
self._array_interface = {
"data": (self.ptr if self.ptr is not None else 0, False),
"shape": tuple(arr_shape),
"strides": tuple(arr_strides),
"typestr": type_typestr(self.dtype),
"descr": descr, # optional description of structured array layout
"version": 3,
}
return self._array_interface
@property
def __cuda_array_interface__(self):
# raising an AttributeError here makes hasattr() return False
if self.device is None or not self.device.is_cuda:
raise AttributeError(f"__cuda_array_interface__ is not supported because device is {self.device}")
if self._array_interface is None:
# get flat shape (including type shape)
if issubclass(self.dtype, ctypes.Array):
# vector type, flatten the dimensions into one tuple
arr_shape = (*self.shape, *self.dtype._shape_)
dtype_strides = strides_from_shape(self.dtype._shape_, self.dtype._type_)
arr_strides = (*self.strides, *dtype_strides)
else:
# scalar or struct type
arr_shape = self.shape
arr_strides = self.strides
self._array_interface = {
"data": (self.ptr if self.ptr is not None else 0, False),
"shape": tuple(arr_shape),
"strides": tuple(arr_strides),
"typestr": type_typestr(self.dtype),
"version": 2,
}
return self._array_interface
def __del__(self):
if self.owner:
# use CUDA context guard to avoid side effects during garbage collection
with self.device.context_guard:
self.device.allocator.free(self.ptr, self.capacity, self.pinned)
def __len__(self):
return self.shape[0]
def __str__(self):
if self.device is None:
# for 'empty' arrays we just return the type information, these are used in kernel function signatures
return f"array{self.dtype}"
else:
return str(self.numpy())
def __getitem__(self, key):
if isinstance(key, int):
if self.ndim == 1:
raise RuntimeError("Item indexing is not supported on wp.array objects")
key = [key]
elif isinstance(key, (slice, array)):
key = [key]
elif isinstance(key, Tuple):
contains_slice = False
contains_indices = False
for k in key:
if isinstance(k, slice):
contains_slice = True
if isinstance(k, array):
contains_indices = True
if not contains_slice and not contains_indices and len(key) == self.ndim:
raise RuntimeError("Item indexing is not supported on wp.array objects")
else:
raise RuntimeError(f"Invalid index: {key}")
        # pad the key with full slices for any remaining dimensions
        key = tuple(key) + (slice(None, None, None),) * (self.ndim - len(key))
new_shape = []
new_strides = []
ptr_offset = 0
new_dim = self.ndim
# maps dimension index to an array of indices, if given
index_arrays = {}
for idx, k in enumerate(key):
if isinstance(k, slice):
start, stop, step = k.start, k.stop, k.step
if start is None:
start = 0
if stop is None:
stop = self.shape[idx]
if step is None:
step = 1
if start < 0:
start = self.shape[idx] + start
if stop < 0:
stop = self.shape[idx] + stop
if start < 0 or start >= self.shape[idx]:
raise RuntimeError(f"Invalid indexing in slice: {start}:{stop}:{step}")
if stop < 1 or stop > self.shape[idx]:
raise RuntimeError(f"Invalid indexing in slice: {start}:{stop}:{step}")
if stop <= start:
raise RuntimeError(f"Invalid indexing in slice: {start}:{stop}:{step}")
new_shape.append(-((stop - start) // -step)) # ceil division
new_strides.append(self.strides[idx] * step)
ptr_offset += self.strides[idx] * start
elif isinstance(k, array):
# note: index array properties will be checked during indexedarray construction
index_arrays[idx] = k
# shape and strides are unchanged for this dimension
new_shape.append(self.shape[idx])
new_strides.append(self.strides[idx])
else: # is int
start = k
if start < 0:
start = self.shape[idx] + start
if start < 0 or start >= self.shape[idx]:
raise RuntimeError(f"Invalid indexing in slice: {k}")
new_dim -= 1
ptr_offset += self.strides[idx] * start
# handle grad
if self.grad is not None:
new_grad = array(
ptr=self.grad.ptr + ptr_offset if self.grad.ptr is not None else None,
dtype=self.grad.dtype,
shape=tuple(new_shape),
strides=tuple(new_strides),
device=self.grad.device,
pinned=self.grad.pinned,
owner=False,
)
# store back-ref to stop data being destroyed
new_grad._ref = self.grad
else:
new_grad = None
a = array(
ptr=self.ptr + ptr_offset if self.ptr is not None else None,
dtype=self.dtype,
shape=tuple(new_shape),
strides=tuple(new_strides),
device=self.device,
pinned=self.pinned,
owner=False,
grad=new_grad,
)
# store back-ref to stop data being destroyed
a._ref = self
if index_arrays:
indices = [None] * self.ndim
for dim, index_array in index_arrays.items():
indices[dim] = index_array
return indexedarray(a, indices)
else:
return a
# construct a C-representation of the array for passing to kernels
def __ctype__(self):
if self.ctype is None:
data = 0 if self.ptr is None else ctypes.c_uint64(self.ptr)
grad = 0 if self.grad is None or self.grad.ptr is None else ctypes.c_uint64(self.grad.ptr)
self.ctype = array_t(data=data, grad=grad, ndim=self.ndim, shape=self.shape, strides=self.strides)
return self.ctype
def __matmul__(self, other):
"""
Enables A @ B syntax for matrix multiplication
"""
if self.ndim != 2 or other.ndim != 2:
raise RuntimeError(
"A has dim = {}, B has dim = {}. If multiplying with @, A and B must have dim = 2.".format(
self.ndim, other.ndim
)
)
m = self.shape[0]
n = other.shape[1]
c = warp.zeros(shape=(m, n), dtype=self.dtype, device=self.device, requires_grad=True)
d = warp.zeros(shape=(m, n), dtype=self.dtype, device=self.device, requires_grad=True)
matmul(self, other, c, d, device=self.device)
return d
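    # Example usage (a sketch; requires NumPy for the input data; both
    # operands must be 2-D arrays with matching inner dimensions and the same
    # dtype and device):
    #
    #     import numpy as np
    #     a = wp.array(np.random.rand(4, 8), dtype=wp.float32)
    #     b = wp.array(np.random.rand(8, 2), dtype=wp.float32)
    #     c = a @ b    # shape (4, 2), evaluated via warp.types.matmul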
@property
def grad(self):
return self._grad
@grad.setter
def grad(self, grad):
if grad is None:
self._grad = None
self._requires_grad = False
else:
# make sure the given gradient array is compatible
if (
grad.dtype != self.dtype
or grad.shape != self.shape
or grad.strides != self.strides
or grad.device != self.device
):
raise ValueError("The given gradient array is incompatible")
self._grad = grad
self._requires_grad = True
# trigger re-creation of C-representation
self.ctype = None
@property
def requires_grad(self):
return self._requires_grad
@requires_grad.setter
def requires_grad(self, value: builtins.bool):
if value and self._grad is None:
self._alloc_grad()
elif not value:
self._grad = None
self._requires_grad = value
# trigger re-creation of C-representation
self.ctype = None
def _alloc_grad(self):
self._grad = array(
dtype=self.dtype, shape=self.shape, strides=self.strides, device=self.device, pinned=self.pinned
)
self._grad.zero_()
# trigger re-creation of C-representation
self.ctype = None
@property
def vars(self):
# member attributes available during code-gen (e.g.: d = array.shape[0])
# Note: we use a shared dict for all array instances
if array._vars is None:
array._vars = {"shape": warp.codegen.Var("shape", shape_t)}
return array._vars
def zero_(self):
if self.is_contiguous:
# simple memset is usually faster than generic fill
self.device.memset(self.ptr, 0, self.size * type_size_in_bytes(self.dtype))
else:
self.fill_(0)
def fill_(self, value):
if self.size == 0:
return
# try to convert the given value to the array dtype
try:
if isinstance(self.dtype, warp.codegen.Struct):
if isinstance(value, self.dtype.cls):
cvalue = value.__ctype__()
elif value == 0:
# allow zero-initializing structs using default constructor
cvalue = self.dtype().__ctype__()
else:
raise ValueError(
f"Invalid initializer value for struct {self.dtype.cls.__name__}, expected struct instance or 0"
)
elif issubclass(self.dtype, ctypes.Array):
# vector/matrix
cvalue = self.dtype(value)
else:
# scalar
if type(value) in warp.types.scalar_types:
value = value.value
if self.dtype == float16:
cvalue = self.dtype._type_(float_to_half_bits(value))
else:
cvalue = self.dtype._type_(value)
except Exception as e:
raise ValueError(f"Failed to convert the value to the array data type: {e}")
cvalue_ptr = ctypes.pointer(cvalue)
cvalue_size = ctypes.sizeof(cvalue)
# prefer using memtile for contiguous arrays, because it should be faster than generic fill
if self.is_contiguous:
self.device.memtile(self.ptr, cvalue_ptr, cvalue_size, self.size)
else:
carr = self.__ctype__()
carr_ptr = ctypes.pointer(carr)
if self.device.is_cuda:
warp.context.runtime.core.array_fill_device(
self.device.context, carr_ptr, ARRAY_TYPE_REGULAR, cvalue_ptr, cvalue_size
)
else:
warp.context.runtime.core.array_fill_host(carr_ptr, ARRAY_TYPE_REGULAR, cvalue_ptr, cvalue_size)
# equivalent to wrapping src data in an array and copying to self
def assign(self, src):
if is_array(src):
warp.copy(self, src)
else:
warp.copy(self, array(data=src, dtype=self.dtype, copy=False, device="cpu"))
# convert array to ndarray (alias memory through array interface)
def numpy(self):
if self.ptr:
# use the CUDA default stream for synchronous behaviour with other streams
with warp.ScopedStream(self.device.null_stream):
a = self.to("cpu")
# convert through __array_interface__
# Note: this handles arrays of structs using `descr`, so the result will be a structured NumPy array
return np.array(a, copy=False)
else:
# return an empty numpy array with the correct dtype and shape
if isinstance(self.dtype, warp.codegen.Struct):
npdtype = self.dtype.numpy_dtype()
npshape = self.shape
elif issubclass(self.dtype, ctypes.Array):
npdtype = warp_type_to_np_dtype[self.dtype._wp_scalar_type_]
npshape = (*self.shape, *self.dtype._shape_)
else:
npdtype = warp_type_to_np_dtype[self.dtype]
npshape = self.shape
return np.empty(npshape, dtype=npdtype)
# return a ctypes cast of the array address
# note #1: only CPU arrays support this method
# note #2: the array must be contiguous
# note #3: accesses to this object are *not* bounds checked
# note #4: for float16 types, a pointer to the internal uint16 representation is returned
def cptr(self):
if not self.ptr:
return None
if self.device != "cpu" or not self.is_contiguous:
raise RuntimeError(
"Accessing array memory through a ctypes ptr is only supported for contiguous CPU arrays."
)
if isinstance(self.dtype, warp.codegen.Struct):
p = ctypes.cast(self.ptr, ctypes.POINTER(self.dtype.ctype))
else:
p = ctypes.cast(self.ptr, ctypes.POINTER(self.dtype._type_))
# store backref to the underlying array to avoid it being deallocated
p._ref = self
return p
# returns a flattened list of items in the array as a Python list
def list(self):
a = self.numpy()
if isinstance(self.dtype, warp.codegen.Struct):
# struct
a = a.flatten()
data = a.ctypes.data
stride = a.strides[0]
return [self.dtype.from_ptr(data + i * stride) for i in range(self.size)]
elif issubclass(self.dtype, ctypes.Array):
# vector/matrix - flatten, but preserve inner vector/matrix dimensions
a = a.reshape((self.size, *self.dtype._shape_))
data = a.ctypes.data
stride = a.strides[0]
return [self.dtype.from_ptr(data + i * stride) for i in range(self.size)]
else:
# scalar
return list(a.flatten())
# convert data from one device to another, nop if already on device
def to(self, device):
device = warp.get_device(device)
if self.device == device:
return self
else:
return warp.clone(self, device=device)
def flatten(self):
if self.ndim == 1:
return self
if not self.is_contiguous:
raise RuntimeError("Flattening non-contiguous arrays is unsupported.")
a = array(
ptr=self.ptr,
dtype=self.dtype,
shape=(self.size,),
device=self.device,
pinned=self.pinned,
copy=False,
owner=False,
grad=None if self.grad is None else self.grad.flatten(),
)
# store back-ref to stop data being destroyed
a._ref = self
return a
def reshape(self, shape):
if not self.is_contiguous:
raise RuntimeError("Reshaping non-contiguous arrays is unsupported.")
# convert shape to tuple
if shape is None:
raise RuntimeError("shape parameter is required.")
if isinstance(shape, int):
shape = (shape,)
elif not isinstance(shape, tuple):
shape = tuple(shape)
if len(shape) > ARRAY_MAX_DIMS:
raise RuntimeError(
f"Arrays may only have {ARRAY_MAX_DIMS} dimensions maximum, trying to create array with {len(shape)} dims."
)
# check for -1 dimension and reformat
if -1 in shape:
idx = self.size
denom = 1
minus_one_count = 0
for i, d in enumerate(shape):
if d == -1:
idx = i
minus_one_count += 1
else:
denom *= d
if minus_one_count > 1:
raise RuntimeError("Cannot infer shape if more than one index is -1.")
new_shape = list(shape)
            new_shape[idx] = self.size // denom
shape = tuple(new_shape)
size = 1
for d in shape:
size *= d
if size != self.size:
raise RuntimeError("Reshaped array must have the same total size as the original.")
a = array(
ptr=self.ptr,
dtype=self.dtype,
shape=shape,
strides=None,
device=self.device,
pinned=self.pinned,
copy=False,
owner=False,
grad=None if self.grad is None else self.grad.reshape(shape),
)
# store back-ref to stop data being destroyed
a._ref = self
return a
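    # Example usage (a sketch): a single dimension may be -1 and is inferred
    # from the total size; the result aliases the same memory.
    #
    #     a = wp.zeros(12, dtype=float)
    #     b = a.reshape((3, -1))    # shape (3, 4)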
def view(self, dtype):
if type_size_in_bytes(dtype) != type_size_in_bytes(self.dtype):
raise RuntimeError("Cannot cast dtypes of unequal byte size")
# return an alias of the array memory with different type information
a = array(
ptr=self.ptr,
dtype=dtype,
shape=self.shape,
strides=self.strides,
device=self.device,
pinned=self.pinned,
copy=False,
owner=False,
grad=None if self.grad is None else self.grad.view(dtype),
)
a._ref = self
return a
def contiguous(self):
if self.is_contiguous:
return self
a = warp.empty_like(self)
warp.copy(a, self)
return a
# note: transpose operation will return an array with a non-contiguous access pattern
def transpose(self, axes=None):
# noop if 1d array
if self.ndim == 1:
return self
if axes is None:
# reverse the order of the axes
axes = range(self.ndim)[::-1]
elif len(axes) != len(self.shape):
raise RuntimeError("Length of parameter axes must be equal in length to array shape")
shape = []
strides = []
for a in axes:
if not isinstance(a, int):
raise RuntimeError(f"axis index {a} is not of type int")
if a >= len(self.shape):
raise RuntimeError(f"axis index {a} must be smaller than the number of axes in array")
shape.append(self.shape[a])
strides.append(self.strides[a])
a = array(
ptr=self.ptr,
dtype=self.dtype,
shape=tuple(shape),
strides=tuple(strides),
device=self.device,
pinned=self.pinned,
copy=False,
owner=False,
grad=None if self.grad is None else self.grad.transpose(axes=axes),
)
a._ref = self
return a
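    # Example usage (a sketch): the transposed view shares memory with the
    # source and is non-contiguous; call contiguous() for a packed copy.
    #
    #     a = wp.zeros((2, 3), dtype=float)
    #     at = a.transpose()        # shape (3, 2)
    #     ac = at.contiguous()      # densely packed copy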
# aliases for arrays with small dimensions
# equivalent to calling array(..., ndim=1)
def array1d(*args, **kwargs):
kwargs["ndim"] = 1
return array(*args, **kwargs)
# equivalent to calling array(..., ndim=2)
def array2d(*args, **kwargs):
kwargs["ndim"] = 2
return array(*args, **kwargs)
# equivalent to calling array(..., ndim=3)
def array3d(*args, **kwargs):
kwargs["ndim"] = 3
return array(*args, **kwargs)
# equivalent to calling array(..., ndim=4)
def array4d(*args, **kwargs):
kwargs["ndim"] = 4
return array(*args, **kwargs)
# TODO: Rewrite so that we take only shape, not length and optional shape
def from_ptr(ptr, length, dtype=None, shape=None, device=None):
return array(
dtype=dtype,
length=length,
capacity=length * type_size_in_bytes(dtype),
ptr=0 if ptr == 0 else ctypes.cast(ptr, ctypes.POINTER(ctypes.c_size_t)).contents.value,
shape=shape,
device=device,
owner=False,
requires_grad=False,
)
# A base class for non-contiguous arrays, providing the implementation of common methods like
# contiguous(), to(), numpy(), list(), assign(), zero_(), and fill_().
class noncontiguous_array_base(Generic[T]):
def __init__(self, array_type_id):
self.type_id = array_type_id
self.is_contiguous = False
# return a contiguous copy
def contiguous(self):
a = warp.empty_like(self)
warp.copy(a, self)
return a
# copy data from one device to another, nop if already on device
def to(self, device):
device = warp.get_device(device)
if self.device == device:
return self
else:
return warp.clone(self, device=device)
# return a contiguous numpy copy
def numpy(self):
# use the CUDA default stream for synchronous behaviour with other streams
with warp.ScopedStream(self.device.null_stream):
return self.contiguous().numpy()
# returns a flattened list of items in the array as a Python list
def list(self):
# use the CUDA default stream for synchronous behaviour with other streams
with warp.ScopedStream(self.device.null_stream):
return self.contiguous().list()
# equivalent to wrapping src data in an array and copying to self
def assign(self, src):
if is_array(src):
warp.copy(self, src)
else:
warp.copy(self, array(data=src, dtype=self.dtype, copy=False, device="cpu"))
def zero_(self):
self.fill_(0)
def fill_(self, value):
if self.size == 0:
return
# try to convert the given value to the array dtype
try:
if isinstance(self.dtype, warp.codegen.Struct):
if isinstance(value, self.dtype.cls):
cvalue = value.__ctype__()
elif value == 0:
# allow zero-initializing structs using default constructor
cvalue = self.dtype().__ctype__()
else:
raise ValueError(
f"Invalid initializer value for struct {self.dtype.cls.__name__}, expected struct instance or 0"
)
elif issubclass(self.dtype, ctypes.Array):
# vector/matrix
cvalue = self.dtype(value)
else:
# scalar
if type(value) in warp.types.scalar_types:
value = value.value
if self.dtype == float16:
cvalue = self.dtype._type_(float_to_half_bits(value))
else:
cvalue = self.dtype._type_(value)
except Exception as e:
raise ValueError(f"Failed to convert the value to the array data type: {e}")
cvalue_ptr = ctypes.pointer(cvalue)
cvalue_size = ctypes.sizeof(cvalue)
ctype = self.__ctype__()
ctype_ptr = ctypes.pointer(ctype)
if self.device.is_cuda:
warp.context.runtime.core.array_fill_device(
self.device.context, ctype_ptr, self.type_id, cvalue_ptr, cvalue_size
)
else:
warp.context.runtime.core.array_fill_host(ctype_ptr, self.type_id, cvalue_ptr, cvalue_size)
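    # Illustrative sketch (editor's example, assuming `import warp as wp`): these
    # methods let non-contiguous views behave much like regular arrays, e.g. for
    # an indexed view as defined further below:
    #
    #   data = wp.zeros(10, dtype=float)
    #   idx = wp.array([0, 2, 4], dtype=wp.int32)
    #   view = wp.indexedarray(data, [idx])
    #   view.fill_(1.0)               # writes through to data[0], data[2], data[4]
    #   dense = view.contiguous()     # gathers the selected elements into a copy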
# helper to check index array properties
def check_index_array(indices, expected_device):
if not isinstance(indices, array):
raise ValueError(f"Indices must be a Warp array, got {type(indices)}")
if indices.ndim != 1:
raise ValueError(f"Index array must be one-dimensional, got {indices.ndim}")
if indices.dtype != int32:
raise ValueError(f"Index array must use int32, got dtype {indices.dtype}")
if indices.device != expected_device:
raise ValueError(f"Index array device ({indices.device} does not match data array device ({expected_device}))")
class indexedarray(noncontiguous_array_base[T]):
# member attributes available during code-gen (e.g.: d = arr.shape[0])
# (initialized when needed)
_vars = None
def __init__(self, data: array = None, indices: Union[array, List[array]] = None, dtype=None, ndim=None):
super().__init__(ARRAY_TYPE_INDEXED)
# canonicalize types
if dtype is not None:
if dtype == int:
dtype = int32
elif dtype == float:
dtype = float32
self.data = data
self.indices = [None] * ARRAY_MAX_DIMS
if data is not None:
if not isinstance(data, array):
raise ValueError("Indexed array data must be a Warp array")
if dtype is not None and dtype != data.dtype:
raise ValueError(f"Requested dtype ({dtype}) does not match dtype of data array ({data.dtype})")
if ndim is not None and ndim != data.ndim:
raise ValueError(
f"Requested dimensionality ({ndim}) does not match dimensionality of data array ({data.ndim})"
)
self.dtype = data.dtype
self.ndim = data.ndim
self.device = data.device
self.pinned = data.pinned
# determine shape from original data shape and index counts
shape = list(data.shape)
if indices is not None:
if isinstance(indices, (list, tuple)):
if len(indices) > self.ndim:
raise ValueError(
f"Number of indices provided ({len(indices)}) exceeds number of dimensions ({self.ndim})"
)
for i in range(len(indices)):
if indices[i] is not None:
check_index_array(indices[i], data.device)
self.indices[i] = indices[i]
shape[i] = len(indices[i])
elif isinstance(indices, array):
# only a single index array was provided
check_index_array(indices, data.device)
self.indices[0] = indices
shape[0] = len(indices)
else:
raise ValueError("Indices must be a single Warp array or a list of Warp arrays")
self.shape = tuple(shape)
else:
# allow empty indexedarrays in type annotations
self.dtype = dtype
self.ndim = ndim or 1
self.device = None
self.pinned = False
self.shape = (0,) * self.ndim
# update size (num elements)
self.size = 1
for d in self.shape:
self.size *= d
def __len__(self):
return self.shape[0]
def __str__(self):
if self.device is None:
# type annotation
return f"indexedarray{self.dtype}"
else:
return str(self.numpy())
# construct a C-representation of the array for passing to kernels
def __ctype__(self):
return indexedarray_t(self.data, self.indices, self.shape)
@property
def vars(self):
# member attributes available during code-gen (e.g.: d = arr.shape[0])
# Note: we use a shared dict for all indexedarray instances
if indexedarray._vars is None:
indexedarray._vars = {"shape": warp.codegen.Var("shape", shape_t)}
return indexedarray._vars
# aliases for indexedarrays with small dimensions
def indexedarray1d(*args, **kwargs):
kwargs["ndim"] = 1
return indexedarray(*args, **kwargs)
# equivalent to calling indexedarray(..., ndim=2)
def indexedarray2d(*args, **kwargs):
kwargs["ndim"] = 2
return indexedarray(*args, **kwargs)
# equivalent to calling indexedarray(..., ndim=3)
def indexedarray3d(*args, **kwargs):
kwargs["ndim"] = 3
return indexedarray(*args, **kwargs)
# equivalent to calling indexedarray(..., ndim=4)
def indexedarray4d(*args, **kwargs):
kwargs["ndim"] = 4
return indexedarray(*args, **kwargs)
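# Illustrative sketch (editor's example, assuming `import warp as wp` and
# `import numpy as np`): index arrays may be given per dimension, with None
# leaving a dimension un-indexed:
#
#   data = wp.array(np.arange(12, dtype=np.float32).reshape(3, 4))
#   rows = wp.array([0, 2], dtype=wp.int32)
#   sub = wp.indexedarray2d(data, [rows, None])   # rows 0 and 2, shape (2, 4)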
from warp.fabric import fabricarray, indexedfabricarray # noqa: E402
array_types = (array, indexedarray, fabricarray, indexedfabricarray)
def array_type_id(a):
if isinstance(a, array):
return ARRAY_TYPE_REGULAR
elif isinstance(a, indexedarray):
return ARRAY_TYPE_INDEXED
elif isinstance(a, fabricarray):
return ARRAY_TYPE_FABRIC
elif isinstance(a, indexedfabricarray):
return ARRAY_TYPE_FABRIC_INDEXED
else:
raise ValueError("Invalid array type")
class Bvh:
def __init__(self, lowers, uppers):
"""Class representing a bounding volume hierarchy.
Attributes:
id: Unique identifier for this bvh object, can be passed to kernels.
device: Device this object lives on, all buffers must live on the same device.
Args:
lowers (:class:`warp.array`): Array of lower bounds :class:`warp.vec3`
uppers (:class:`warp.array`): Array of upper bounds :class:`warp.vec3`
"""
if len(lowers) != len(uppers):
raise RuntimeError("Bvh the same number of lower and upper bounds must be provided")
if lowers.device != uppers.device:
raise RuntimeError("Bvh lower and upper bounds must live on the same device")
if lowers.dtype != vec3 or not lowers.is_contiguous:
raise RuntimeError("Bvh lowers should be a contiguous array of type wp.vec3")
if uppers.dtype != vec3 or not uppers.is_contiguous:
raise RuntimeError("Bvh uppers should be a contiguous array of type wp.vec3")
self.device = lowers.device
self.lowers = lowers
self.uppers = uppers
def get_data(array):
if array:
return ctypes.c_void_p(array.ptr)
else:
return ctypes.c_void_p(0)
from warp.context import runtime
if self.device.is_cpu:
self.id = runtime.core.bvh_create_host(get_data(lowers), get_data(uppers), int(len(lowers)))
else:
self.id = runtime.core.bvh_create_device(
self.device.context, get_data(lowers), get_data(uppers), int(len(lowers))
)
def __del__(self):
try:
from warp.context import runtime
if self.device.is_cpu:
runtime.core.bvh_destroy_host(self.id)
else:
# use CUDA context guard to avoid side effects during garbage collection
with self.device.context_guard:
runtime.core.bvh_destroy_device(self.id)
except Exception:
pass
def refit(self):
"""Refit the BVH. This should be called after users modify the `lowers` and `uppers` arrays."""
from warp.context import runtime
if self.device.is_cpu:
runtime.core.bvh_refit_host(self.id)
else:
runtime.core.bvh_refit_device(self.id)
runtime.verify_cuda_device(self.device)
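    # Illustrative sketch (editor's example, assuming `import warp as wp`):
    # building a BVH over axis-aligned bounds and refitting after modifying them:
    #
    #   lowers = wp.array([wp.vec3(0.0, 0.0, 0.0)], dtype=wp.vec3)
    #   uppers = wp.array([wp.vec3(1.0, 1.0, 1.0)], dtype=wp.vec3)
    #   bvh = wp.Bvh(lowers, uppers)
    #   # ... update lowers/uppers in place (e.g. from a kernel) ...
    #   bvh.refit()                   # bvh.id can then be passed to query kernels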
class Mesh:
from warp.codegen import Var
vars = {
"points": Var("points", array(dtype=vec3)),
"velocities": Var("velocities", array(dtype=vec3)),
"indices": Var("indices", array(dtype=int32)),
}
def __init__(self, points=None, indices=None, velocities=None, support_winding_number=False):
"""Class representing a triangle mesh.
Attributes:
id: Unique identifier for this mesh object, can be passed to kernels.
device: Device this object lives on, all buffers must live on the same device.
Args:
points (:class:`warp.array`): Array of vertex positions of type :class:`warp.vec3`
            indices (:class:`warp.array`): Array of triangle indices of type :class:`warp.int32`, should be a flattened 1d array of length 3*num_tris
            velocities (:class:`warp.array`): Array of vertex velocities of type :class:`warp.vec3` (optional)
            support_winding_number (bool): If true the mesh will build additional data structures to support `wp.mesh_query_point_sign_winding_number()` queries
"""
if points.device != indices.device:
raise RuntimeError("Mesh points and indices must live on the same device")
if points.dtype != vec3 or not points.is_contiguous:
raise RuntimeError("Mesh points should be a contiguous array of type wp.vec3")
if velocities and (velocities.dtype != vec3 or not velocities.is_contiguous):
raise RuntimeError("Mesh velocities should be a contiguous array of type wp.vec3")
if indices.dtype != int32 or not indices.is_contiguous:
raise RuntimeError("Mesh indices should be a contiguous array of type wp.int32")
if indices.ndim > 1:
raise RuntimeError("Mesh indices should be a flattened 1d array of indices")
self.device = points.device
self.points = points
self.velocities = velocities
self.indices = indices
from warp.context import runtime
if self.device.is_cpu:
self.id = runtime.core.mesh_create_host(
points.__ctype__(),
velocities.__ctype__() if velocities else array().__ctype__(),
indices.__ctype__(),
int(len(points)),
int(indices.size / 3),
int(support_winding_number),
)
else:
self.id = runtime.core.mesh_create_device(
self.device.context,
points.__ctype__(),
velocities.__ctype__() if velocities else array().__ctype__(),
indices.__ctype__(),
int(len(points)),
int(indices.size / 3),
int(support_winding_number),
)
def __del__(self):
try:
from warp.context import runtime
if self.device.is_cpu:
runtime.core.mesh_destroy_host(self.id)
else:
# use CUDA context guard to avoid side effects during garbage collection
with self.device.context_guard:
runtime.core.mesh_destroy_device(self.id)
except Exception:
pass
def refit(self):
"""Refit the BVH to points. This should be called after users modify the `points` data."""
from warp.context import runtime
if self.device.is_cpu:
runtime.core.mesh_refit_host(self.id)
else:
runtime.core.mesh_refit_device(self.id)
runtime.verify_cuda_device(self.device)
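    # Illustrative sketch (editor's example; `verts_np` and `tris_np` are
    # hypothetical NumPy arrays of shape (num_points, 3) and (num_tris, 3)):
    #
    #   points = wp.array(verts_np, dtype=wp.vec3)
    #   indices = wp.array(tris_np.flatten(), dtype=wp.int32)   # length 3*num_tris
    #   mesh = wp.Mesh(points=points, indices=indices)
    #   # ... deform mesh.points in a kernel ...
    #   mesh.refit()                  # update the BVH before issuing new queries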
class Volume:
CLOSEST = constant(0)
LINEAR = constant(1)
def __init__(self, data: array):
"""Class representing a sparse grid.
Attributes:
CLOSEST (int): Enum value to specify nearest-neighbor interpolation during sampling
LINEAR (int): Enum value to specify trilinear interpolation during sampling
Args:
data (:class:`warp.array`): Array of bytes representing the volume in NanoVDB format
"""
self.id = 0
from warp.context import runtime
self.context = runtime
if data is None:
return
if data.device is None:
raise RuntimeError("Invalid device")
self.device = data.device
if self.device.is_cpu:
self.id = self.context.core.volume_create_host(ctypes.cast(data.ptr, ctypes.c_void_p), data.size)
else:
self.id = self.context.core.volume_create_device(
self.device.context, ctypes.cast(data.ptr, ctypes.c_void_p), data.size
)
if self.id == 0:
raise RuntimeError("Failed to create volume from input array")
def __del__(self):
if self.id == 0:
return
try:
from warp.context import runtime
if self.device.is_cpu:
runtime.core.volume_destroy_host(self.id)
else:
# use CUDA context guard to avoid side effects during garbage collection
with self.device.context_guard:
runtime.core.volume_destroy_device(self.id)
except Exception:
pass
def array(self):
buf = ctypes.c_void_p(0)
size = ctypes.c_uint64(0)
if self.device.is_cpu:
self.context.core.volume_get_buffer_info_host(self.id, ctypes.byref(buf), ctypes.byref(size))
else:
self.context.core.volume_get_buffer_info_device(self.id, ctypes.byref(buf), ctypes.byref(size))
return array(ptr=buf.value, dtype=uint8, shape=size.value, device=self.device, owner=False)
def get_tiles(self):
if self.id == 0:
raise RuntimeError("Invalid Volume")
buf = ctypes.c_void_p(0)
size = ctypes.c_uint64(0)
if self.device.is_cpu:
self.context.core.volume_get_tiles_host(self.id, ctypes.byref(buf), ctypes.byref(size))
else:
self.context.core.volume_get_tiles_device(self.id, ctypes.byref(buf), ctypes.byref(size))
num_tiles = size.value // (3 * 4)
return array(ptr=buf.value, dtype=int32, shape=(num_tiles, 3), device=self.device, owner=True)
def get_voxel_size(self):
if self.id == 0:
raise RuntimeError("Invalid Volume")
dx, dy, dz = ctypes.c_float(0), ctypes.c_float(0), ctypes.c_float(0)
self.context.core.volume_get_voxel_size(self.id, ctypes.byref(dx), ctypes.byref(dy), ctypes.byref(dz))
return (dx.value, dy.value, dz.value)
@classmethod
def load_from_nvdb(cls, file_or_buffer, device=None):
"""Creates a Volume object from a NanoVDB file or in-memory buffer.
Returns:
A ``warp.Volume`` object.
"""
try:
data = file_or_buffer.read()
except AttributeError:
data = file_or_buffer
magic, version, grid_count, codec = struct.unpack("<QIHH", data[0:16])
if magic != 0x304244566F6E614E:
raise RuntimeError("NanoVDB signature not found")
if version >> 21 != 32: # checking major version
raise RuntimeError("Unsupported NanoVDB version")
if grid_count != 1:
raise RuntimeError("Only NVDBs with exactly one grid are supported")
grid_data_offset = 192 + struct.unpack("<I", data[152:156])[0]
if codec == 0: # no compression
grid_data = data[grid_data_offset:]
elif codec == 1: # zip compression
grid_data = zlib.decompress(data[grid_data_offset + 8 :])
else:
raise RuntimeError(f"Unsupported codec code: {codec}")
magic = struct.unpack("<Q", grid_data[0:8])[0]
if magic != 0x304244566F6E614E:
raise RuntimeError("NanoVDB signature not found on grid!")
data_array = array(np.frombuffer(grid_data, dtype=np.byte), device=device)
return cls(data_array)
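    # Illustrative sketch (editor's example; "grid.nvdb" is a hypothetical path):
    # load_from_nvdb accepts either a file-like object or a raw bytes buffer:
    #
    #   with open("grid.nvdb", "rb") as f:
    #       volume = wp.Volume.load_from_nvdb(f, device="cuda:0")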
@classmethod
    def load_from_numpy(cls, ndarray: np.ndarray, min_world=(0.0, 0.0, 0.0), voxel_size=1.0, bg_value=0.0, device=None):
"""Creates a Volume object from a dense 3D NumPy array.
Args:
min_world: The 3D coordinate of the lower corner of the volume
voxel_size: The size of each voxel in spatial coordinates
bg_value: Background value
device: The device to create the volume on, e.g.: "cpu", or "cuda:0"
Returns:
A ``warp.Volume`` object.
"""
import math
target_shape = (
math.ceil(ndarray.shape[0] / 8) * 8,
math.ceil(ndarray.shape[1] / 8) * 8,
math.ceil(ndarray.shape[2] / 8) * 8,
)
if hasattr(bg_value, "__len__"):
# vec3, assuming the numpy array is 4D
            padded_array = np.empty((target_shape[0], target_shape[1], target_shape[2], 3), dtype=np.single)
padded_array[:, :, :, :] = np.array(bg_value)
padded_array[0 : ndarray.shape[0], 0 : ndarray.shape[1], 0 : ndarray.shape[2], :] = ndarray
else:
padded_amount = (
math.ceil(ndarray.shape[0] / 8) * 8 - ndarray.shape[0],
math.ceil(ndarray.shape[1] / 8) * 8 - ndarray.shape[1],
math.ceil(ndarray.shape[2] / 8) * 8 - ndarray.shape[2],
)
padded_array = np.pad(
ndarray,
((0, padded_amount[0]), (0, padded_amount[1]), (0, padded_amount[2])),
mode="constant",
constant_values=bg_value,
)
shape = padded_array.shape
volume = warp.Volume.allocate(
min_world,
[
min_world[0] + (shape[0] - 1) * voxel_size,
min_world[1] + (shape[1] - 1) * voxel_size,
min_world[2] + (shape[2] - 1) * voxel_size,
],
voxel_size,
bg_value=bg_value,
points_in_world_space=True,
translation=min_world,
device=device,
)
# Populate volume
if hasattr(bg_value, "__len__"):
warp.launch(
warp.utils.copy_dense_volume_to_nano_vdb_v,
dim=(shape[0], shape[1], shape[2]),
inputs=[volume.id, warp.array(padded_array, dtype=warp.vec3, device=device)],
device=device,
)
elif type(bg_value) == int:
warp.launch(
warp.utils.copy_dense_volume_to_nano_vdb_i,
dim=shape,
inputs=[volume.id, warp.array(padded_array, dtype=warp.int32, device=device)],
device=device,
)
else:
warp.launch(
warp.utils.copy_dense_volume_to_nano_vdb_f,
dim=shape,
inputs=[volume.id, warp.array(padded_array, dtype=warp.float32, device=device)],
device=device,
)
return volume
@classmethod
def allocate(
cls,
min: List[int],
max: List[int],
voxel_size: float,
bg_value=0.0,
translation=(0.0, 0.0, 0.0),
points_in_world_space=False,
device=None,
):
"""Allocate a new Volume based on the bounding box defined by min and max.
Allocate a volume that is large enough to contain voxels [min[0], min[1], min[2]] - [max[0], max[1], max[2]], inclusive.
If points_in_world_space is true, then min and max are first converted to index space with the given voxel size and
translation, and the volume is allocated with those.
The smallest unit of allocation is a dense tile of 8x8x8 voxels, the requested bounding box is rounded up to tiles, and
the resulting tiles will be available in the new volume.
Args:
min (array-like): Lower 3D-coordinates of the bounding box in index space or world space, inclusive
max (array-like): Upper 3D-coordinates of the bounding box in index space or world space, inclusive
voxel_size (float): Voxel size of the new volume
bg_value (float or array-like): Value of unallocated voxels of the volume, also defines the volume's type, a :class:`warp.vec3` volume is created if this is `array-like`, otherwise a float volume is created
translation (array-like): translation between the index and world spaces
device (Devicelike): Device the array lives on
"""
if points_in_world_space:
min = np.around((np.array(min, dtype=np.float32) - translation) / voxel_size)
max = np.around((np.array(max, dtype=np.float32) - translation) / voxel_size)
tile_min = np.array(min, dtype=np.int32) // 8
tile_max = np.array(max, dtype=np.int32) // 8
tiles = np.array(
[
[i, j, k]
for i in range(tile_min[0], tile_max[0] + 1)
for j in range(tile_min[1], tile_max[1] + 1)
for k in range(tile_min[2], tile_max[2] + 1)
],
dtype=np.int32,
)
tile_points = array(tiles * 8, device=device)
return cls.allocate_by_tiles(tile_points, voxel_size, bg_value, translation, device)
@classmethod
def allocate_by_tiles(
cls, tile_points: array, voxel_size: float, bg_value=0.0, translation=(0.0, 0.0, 0.0), device=None
):
"""Allocate a new Volume with active tiles for each point tile_points.
The smallest unit of allocation is a dense tile of 8x8x8 voxels.
This is the primary method for allocating sparse volumes. It uses an array of points indicating the tiles that must be allocated.
Example use cases:
            * `tile_points` can mark tiles directly in index space, as is the case when this method is called by `allocate`.
* `tile_points` can be a list of points used in a simulation that needs to transfer data to a volume.
Args:
tile_points (:class:`warp.array`): Array of positions that define the tiles to be allocated.
The array can be a 2d, N-by-3 array of :class:`warp.int32` values, indicating index space positions,
or can be a 1D array of :class:`warp.vec3` values, indicating world space positions.
Repeated points per tile are allowed and will be efficiently deduplicated.
voxel_size (float): Voxel size of the new volume
bg_value (float or array-like): Value of unallocated voxels of the volume, also defines the volume's type, a :class:`warp.vec3` volume is created if this is `array-like`, otherwise a float volume is created
translation (array-like): translation between the index and world spaces
device (Devicelike): Device the array lives on
"""
from warp.context import runtime
device = runtime.get_device(device)
if voxel_size <= 0.0:
raise RuntimeError(f"Voxel size must be positive! Got {voxel_size}")
if not device.is_cuda:
raise RuntimeError("Only CUDA devices are supported for allocate_by_tiles")
        if not (
            isinstance(tile_points, array)
            and (
                (tile_points.dtype == int32 and tile_points.ndim == 2)
                or (tile_points.dtype == vec3 and tile_points.ndim == 1)
            )
        ):
            raise RuntimeError("Expected a warp array of vec3s or of n-by-3 int32s as tile_points!")
if not tile_points.device.is_cuda:
tile_points = array(tile_points, dtype=tile_points.dtype, device=device)
volume = cls(data=None)
volume.device = device
in_world_space = tile_points.dtype == vec3
if hasattr(bg_value, "__len__"):
volume.id = volume.context.core.volume_v_from_tiles_device(
volume.device.context,
ctypes.c_void_p(tile_points.ptr),
tile_points.shape[0],
voxel_size,
bg_value[0],
bg_value[1],
bg_value[2],
translation[0],
translation[1],
translation[2],
in_world_space,
)
elif type(bg_value) == int:
volume.id = volume.context.core.volume_i_from_tiles_device(
volume.device.context,
ctypes.c_void_p(tile_points.ptr),
tile_points.shape[0],
voxel_size,
bg_value,
translation[0],
translation[1],
translation[2],
in_world_space,
)
else:
volume.id = volume.context.core.volume_f_from_tiles_device(
volume.device.context,
ctypes.c_void_p(tile_points.ptr),
tile_points.shape[0],
voxel_size,
float(bg_value),
translation[0],
translation[1],
translation[2],
in_world_space,
)
if volume.id == 0:
raise RuntimeError("Failed to create volume")
return volume
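    # Illustrative sketch (editor's example; `positions` is a hypothetical array
    # of particle positions): each world-space point activates the 8x8x8 tile
    # containing it (CUDA devices only):
    #
    #   pts = wp.array(positions, dtype=wp.vec3, device="cuda:0")
    #   volume = wp.Volume.allocate_by_tiles(pts, voxel_size=0.1, bg_value=0.0, device="cuda:0")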
def matmul(
a: array2d,
b: array2d,
c: array2d,
d: array2d,
alpha: float = 1.0,
beta: float = 0.0,
allow_tf32x3_arith: builtins.bool = False,
device=None,
):
"""Computes a generic matrix-matrix multiplication (GEMM) of the form: `d = alpha * (a @ b) + beta * c`.
Args:
a (array2d): two-dimensional array containing matrix A
b (array2d): two-dimensional array containing matrix B
c (array2d): two-dimensional array containing matrix C
d (array2d): two-dimensional array to which output D is written
alpha (float): parameter alpha of GEMM
beta (float): parameter beta of GEMM
allow_tf32x3_arith (bool): whether to use CUTLASS's 3xTF32 GEMMs, which enable accuracy similar to FP32
while using Tensor Cores
device: device we want to use to multiply matrices. Defaults to active runtime device. If "cpu", resorts to using numpy multiplication.
"""
from warp.context import runtime
if device is None:
device = runtime.get_device(device)
if a.device != device or b.device != device or c.device != device or d.device != device:
raise RuntimeError("Matrices A, B, C, and D must all be on the same device as the runtime device.")
if a.dtype != b.dtype or a.dtype != c.dtype or a.dtype != d.dtype:
raise RuntimeError(
"wp.matmul currently only supports operation between {A, B, C, D} matrices of the same type."
)
m = a.shape[0]
n = b.shape[1]
k = a.shape[1]
if b.shape != (k, n) or c.shape != (m, n) or d.shape != (m, n):
raise RuntimeError(
"Invalid shapes for matrices: A = {} B = {} C = {} D = {}".format(a.shape, b.shape, c.shape, d.shape)
)
if runtime.tape:
runtime.tape.record_func(
backward=lambda: adj_matmul(
a, b, c, a.grad, b.grad, c.grad, d.grad, alpha, beta, allow_tf32x3_arith, device
),
arrays=[a, b, c, d],
)
# cpu fallback if no cuda devices found
if device == "cpu":
d.assign(alpha * (a.numpy() @ b.numpy()) + beta * c.numpy())
return
cc = device.arch
ret = runtime.core.cutlass_gemm(
cc,
m,
n,
k,
type_typestr(a.dtype).encode(),
ctypes.c_void_p(a.ptr),
ctypes.c_void_p(b.ptr),
ctypes.c_void_p(c.ptr),
ctypes.c_void_p(d.ptr),
alpha,
beta,
True,
True,
allow_tf32x3_arith,
1,
)
if not ret:
raise RuntimeError("Matmul failed.")
def adj_matmul(
a: array2d,
b: array2d,
c: array2d,
adj_a: array2d,
adj_b: array2d,
adj_c: array2d,
adj_d: array2d,
alpha: float = 1.0,
beta: float = 0.0,
allow_tf32x3_arith: builtins.bool = False,
device=None,
):
"""Computes the adjoint of a generic matrix-matrix multiplication (GEMM) of the form: `d = alpha * (a @ b) + beta * c`.
note: the adjoint of parameter alpha is not included but can be computed as `adj_alpha = np.sum(np.concatenate(np.multiply(a @ b, adj_d)))`.
note: the adjoint of parameter beta is not included but can be computed as `adj_beta = np.sum(np.concatenate(np.multiply(c, adj_d)))`.
Args:
a (array2d): two-dimensional array containing matrix A
b (array2d): two-dimensional array containing matrix B
c (array2d): two-dimensional array containing matrix C
adj_a (array2d): two-dimensional array to which the adjoint of matrix A is written
adj_b (array2d): two-dimensional array to which the adjoint of matrix B is written
adj_c (array2d): two-dimensional array to which the adjoint of matrix C is written
adj_d (array2d): two-dimensional array containing the adjoint of matrix D
alpha (float): parameter alpha of GEMM
beta (float): parameter beta of GEMM
allow_tf32x3_arith (bool): whether to use CUTLASS's 3xTF32 GEMMs, which enable accuracy similar to FP32
while using Tensor Cores
device: device we want to use to multiply matrices. Defaults to active runtime device. If "cpu", resorts to using numpy multiplication.
"""
from warp.context import runtime
if device is None:
device = runtime.get_device(device)
if (
a.device != device
or b.device != device
or c.device != device
or adj_a.device != device
or adj_b.device != device
or adj_c.device != device
or adj_d.device != device
):
raise RuntimeError(
"Matrices A, B, C, D, and their adjoints must all be on the same device as the runtime device."
)
if (
a.dtype != b.dtype
or a.dtype != c.dtype
or a.dtype != adj_a.dtype
or a.dtype != adj_b.dtype
or a.dtype != adj_c.dtype
or a.dtype != adj_d.dtype
):
raise RuntimeError(
"wp.adj_matmul currently only supports operation between {A, B, C, adj_D, adj_A, adj_B, adj_C} matrices of the same type."
)
m = a.shape[0]
n = b.shape[1]
k = a.shape[1]
if (
a.shape != (m, k)
or b.shape != (k, n)
or c.shape != (m, n)
or adj_d.shape != (m, n)
or adj_a.shape != (m, k)
or adj_b.shape != (k, n)
or adj_c.shape != (m, n)
):
raise RuntimeError(
"Invalid shapes for matrices: A = {} B = {} C = {} adj_D = {} adj_A = {} adj_B = {} adj_C = {}".format(
a.shape, b.shape, c.shape, adj_d.shape, adj_a.shape, adj_b.shape, adj_c.shape
)
)
# cpu fallback if no cuda devices found
if device == "cpu":
adj_a.assign(alpha * np.matmul(adj_d.numpy(), b.numpy().transpose()))
adj_b.assign(alpha * (a.numpy().transpose() @ adj_d.numpy()))
adj_c.assign(beta * adj_d.numpy())
return
cc = device.arch
# adj_a
ret = runtime.core.cutlass_gemm(
cc,
m,
k,
n,
type_typestr(a.dtype).encode(),
ctypes.c_void_p(adj_d.ptr),
ctypes.c_void_p(b.ptr),
ctypes.c_void_p(a.ptr),
ctypes.c_void_p(adj_a.ptr),
alpha,
0.0,
True,
False,
allow_tf32x3_arith,
1,
)
if not ret:
raise RuntimeError("adj_matmul failed.")
# adj_b
ret = runtime.core.cutlass_gemm(
cc,
k,
n,
m,
type_typestr(a.dtype).encode(),
ctypes.c_void_p(a.ptr),
ctypes.c_void_p(adj_d.ptr),
ctypes.c_void_p(b.ptr),
ctypes.c_void_p(adj_b.ptr),
alpha,
0.0,
False,
True,
allow_tf32x3_arith,
1,
)
if not ret:
raise RuntimeError("adj_matmul failed.")
# adj_c
ret = runtime.core.cutlass_gemm(
cc,
m,
n,
k,
type_typestr(a.dtype).encode(),
ctypes.c_void_p(a.ptr),
ctypes.c_void_p(b.ptr),
ctypes.c_void_p(adj_d.ptr),
ctypes.c_void_p(adj_c.ptr),
0.0,
beta,
True,
True,
allow_tf32x3_arith,
1,
)
if not ret:
raise RuntimeError("adj_matmul failed.")
def batched_matmul(
a: array3d,
b: array3d,
c: array3d,
d: array3d,
alpha: float = 1.0,
beta: float = 0.0,
allow_tf32x3_arith: builtins.bool = False,
device=None,
):
"""Computes a batched generic matrix-matrix multiplication (GEMM) of the form: `d = alpha * (a @ b) + beta * c`.
Args:
a (array3d): three-dimensional array containing A matrices. Overall array dimension is {batch_count, M, K}
b (array3d): three-dimensional array containing B matrices. Overall array dimension is {batch_count, K, N}
c (array3d): three-dimensional array containing C matrices. Overall array dimension is {batch_count, M, N}
d (array3d): three-dimensional array to which output D is written. Overall array dimension is {batch_count, M, N}
alpha (float): parameter alpha of GEMM
beta (float): parameter beta of GEMM
allow_tf32x3_arith (bool): whether to use CUTLASS's 3xTF32 GEMMs, which enable accuracy similar to FP32
while using Tensor Cores
device: device we want to use to multiply matrices. Defaults to active runtime device. If "cpu", resorts to using numpy multiplication.
"""
from warp.context import runtime
if device is None:
device = runtime.get_device(device)
if a.device != device or b.device != device or c.device != device or d.device != device:
raise RuntimeError("Matrices A, B, C, and D must all be on the same device as the runtime device.")
if a.dtype != b.dtype or a.dtype != c.dtype or a.dtype != d.dtype:
raise RuntimeError(
"wp.batched_matmul currently only supports operation between {A, B, C, D} matrices of the same type."
)
m = a.shape[1]
n = b.shape[2]
k = a.shape[2]
batch_count = a.shape[0]
if b.shape != (batch_count, k, n) or c.shape != (batch_count, m, n) or d.shape != (batch_count, m, n):
raise RuntimeError(
"Invalid shapes for matrices: A = {} B = {} C = {} D = {}".format(a.shape, b.shape, c.shape, d.shape)
)
if runtime.tape:
runtime.tape.record_func(
            backward=lambda: adj_batched_matmul(
a, b, c, a.grad, b.grad, c.grad, d.grad, alpha, beta, allow_tf32x3_arith, device
),
arrays=[a, b, c, d],
)
# cpu fallback if no cuda devices found
if device == "cpu":
d.assign(alpha * np.matmul(a.numpy(), b.numpy()) + beta * c.numpy())
return
cc = device.arch
ret = runtime.core.cutlass_gemm(
cc,
m,
n,
k,
type_typestr(a.dtype).encode(),
ctypes.c_void_p(a.ptr),
ctypes.c_void_p(b.ptr),
ctypes.c_void_p(c.ptr),
ctypes.c_void_p(d.ptr),
alpha,
beta,
True,
True,
allow_tf32x3_arith,
batch_count,
)
if not ret:
raise RuntimeError("Batched matmul failed.")
def adj_batched_matmul(
a: array3d,
b: array3d,
c: array3d,
adj_a: array3d,
adj_b: array3d,
adj_c: array3d,
adj_d: array3d,
alpha: float = 1.0,
beta: float = 0.0,
allow_tf32x3_arith: builtins.bool = False,
device=None,
):
"""Computes a batched generic matrix-matrix multiplication (GEMM) of the form: `d = alpha * (a @ b) + beta * c`.
Args:
a (array3d): three-dimensional array containing A matrices. Overall array dimension is {batch_count, M, K}
b (array3d): three-dimensional array containing B matrices. Overall array dimension is {batch_count, K, N}
c (array3d): three-dimensional array containing C matrices. Overall array dimension is {batch_count, M, N}
adj_a (array3d): three-dimensional array to which the adjoints of A matrices are written. Overall array dimension is {batch_count, M, K}
adj_b (array3d): three-dimensional array to which the adjoints of B matrices are written. Overall array dimension is {batch_count, K, N}
adj_c (array3d): three-dimensional array to which the adjoints of C matrices are written. Overall array dimension is {batch_count, M, N}
adj_d (array3d): three-dimensional array containing adjoints of D matrices. Overall array dimension is {batch_count, M, N}
alpha (float): parameter alpha of GEMM
beta (float): parameter beta of GEMM
allow_tf32x3_arith (bool): whether to use CUTLASS's 3xTF32 GEMMs, which enable accuracy similar to FP32
while using Tensor Cores
device: device we want to use to multiply matrices. Defaults to active runtime device. If "cpu", resorts to using numpy multiplication.
"""
from warp.context import runtime
if device is None:
device = runtime.get_device(device)
if (
a.device != device
or b.device != device
or c.device != device
or adj_a.device != device
or adj_b.device != device
or adj_c.device != device
or adj_d.device != device
):
raise RuntimeError(
"Matrices A, B, C, D, and their adjoints must all be on the same device as the runtime device."
)
if (
a.dtype != b.dtype
or a.dtype != c.dtype
or a.dtype != adj_a.dtype
or a.dtype != adj_b.dtype
or a.dtype != adj_c.dtype
or a.dtype != adj_d.dtype
):
raise RuntimeError(
"wp.adj_batched_matmul currently only supports operation between {A, B, C, adj_D, adj_A, adj_B, adj_C} matrices of the same type."
)
m = a.shape[1]
n = b.shape[2]
k = a.shape[2]
batch_count = a.shape[0]
if (
b.shape != (batch_count, k, n)
or c.shape != (batch_count, m, n)
or adj_d.shape != (batch_count, m, n)
or adj_a.shape != (batch_count, m, k)
or adj_b.shape != (batch_count, k, n)
or adj_c.shape != (batch_count, m, n)
):
raise RuntimeError(
"Invalid shapes for matrices: A = {} B = {} C = {} adj_D = {} adj_A = {} adj_B = {} adj_C = {}".format(
a.shape, b.shape, c.shape, adj_d.shape, adj_a.shape, adj_b.shape, adj_c.shape
)
)
# cpu fallback if no cuda devices found
if device == "cpu":
adj_a.assign(alpha * np.matmul(adj_d.numpy(), b.numpy().transpose((0, 2, 1))))
adj_b.assign(alpha * np.matmul(a.numpy().transpose((0, 2, 1)), adj_d.numpy()))
adj_c.assign(beta * adj_d.numpy())
return
cc = device.arch
# adj_a
ret = runtime.core.cutlass_gemm(
cc,
m,
k,
n,
type_typestr(a.dtype).encode(),
ctypes.c_void_p(adj_d.ptr),
ctypes.c_void_p(b.ptr),
ctypes.c_void_p(a.ptr),
ctypes.c_void_p(adj_a.ptr),
alpha,
0.0,
True,
False,
allow_tf32x3_arith,
batch_count,
)
if not ret:
raise RuntimeError("adj_matmul failed.")
# adj_b
ret = runtime.core.cutlass_gemm(
cc,
k,
n,
m,
type_typestr(a.dtype).encode(),
ctypes.c_void_p(a.ptr),
ctypes.c_void_p(adj_d.ptr),
ctypes.c_void_p(b.ptr),
ctypes.c_void_p(adj_b.ptr),
alpha,
0.0,
False,
True,
allow_tf32x3_arith,
batch_count,
)
if not ret:
raise RuntimeError("adj_matmul failed.")
# adj_c
ret = runtime.core.cutlass_gemm(
cc,
m,
n,
k,
type_typestr(a.dtype).encode(),
ctypes.c_void_p(a.ptr),
ctypes.c_void_p(b.ptr),
ctypes.c_void_p(adj_d.ptr),
ctypes.c_void_p(adj_c.ptr),
0.0,
beta,
True,
True,
allow_tf32x3_arith,
batch_count,
)
if not ret:
raise RuntimeError("adj_matmul failed.")
class HashGrid:
def __init__(self, dim_x, dim_y, dim_z, device=None):
"""Class representing a hash grid object for accelerated point queries.
Attributes:
id: Unique identifier for this mesh object, can be passed to kernels.
device: Device this object lives on, all buffers must live on the same device.
Args:
dim_x (int): Number of cells in x-axis
dim_y (int): Number of cells in y-axis
dim_z (int): Number of cells in z-axis
"""
from warp.context import runtime
self.device = runtime.get_device(device)
if self.device.is_cpu:
self.id = runtime.core.hash_grid_create_host(dim_x, dim_y, dim_z)
else:
self.id = runtime.core.hash_grid_create_device(self.device.context, dim_x, dim_y, dim_z)
# indicates whether the grid data has been reserved for use by a kernel
self.reserved = False
def build(self, points, radius):
"""Updates the hash grid data structure.
        This method rebuilds the underlying data structure and should be called any time the set
        of points changes.
Args:
points (:class:`warp.array`): Array of points of type :class:`warp.vec3`
            radius (float): The cell size to use for bucketing points, cells are cubes with edges of this width.
                For best performance the radius used to construct the grid should closely match
                the radius used when performing queries.
"""
from warp.context import runtime
if self.device.is_cpu:
runtime.core.hash_grid_update_host(self.id, radius, ctypes.cast(points.ptr, ctypes.c_void_p), len(points))
else:
runtime.core.hash_grid_update_device(self.id, radius, ctypes.cast(points.ptr, ctypes.c_void_p), len(points))
self.reserved = True
def reserve(self, num_points):
from warp.context import runtime
if self.device.is_cpu:
runtime.core.hash_grid_reserve_host(self.id, num_points)
else:
runtime.core.hash_grid_reserve_device(self.id, num_points)
self.reserved = True
def __del__(self):
try:
from warp.context import runtime
if self.device.is_cpu:
runtime.core.hash_grid_destroy_host(self.id)
else:
# use CUDA context guard to avoid side effects during garbage collection
with self.device.context_guard:
runtime.core.hash_grid_destroy_device(self.id)
except Exception:
pass
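    # Illustrative sketch (editor's example; `particle_q` is a hypothetical
    # wp.array of wp.vec3 positions): the grid id can then be passed to kernels
    # performing neighbor queries, e.g. via wp.hash_grid_query():
    #
    #   grid = wp.HashGrid(dim_x=128, dim_y=128, dim_z=128, device="cuda:0")
    #   grid.build(points=particle_q, radius=0.1)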
class MarchingCubes:
def __init__(self, nx: int, ny: int, nz: int, max_verts: int, max_tris: int, device=None):
from warp.context import runtime
self.device = runtime.get_device(device)
if not self.device.is_cuda:
raise RuntimeError("Only CUDA devices are supported for marching cubes")
self.nx = nx
self.ny = ny
self.nz = nz
self.max_verts = max_verts
self.max_tris = max_tris
# bindings to warp.so
self.alloc = runtime.core.marching_cubes_create_device
self.alloc.argtypes = [ctypes.c_void_p]
self.alloc.restype = ctypes.c_uint64
self.free = runtime.core.marching_cubes_destroy_device
from warp.context import zeros
self.verts = zeros(max_verts, dtype=vec3, device=self.device)
self.indices = zeros(max_tris * 3, dtype=int, device=self.device)
# alloc surfacer
self.id = ctypes.c_uint64(self.alloc(self.device.context))
def __del__(self):
# use CUDA context guard to avoid side effects during garbage collection
with self.device.context_guard:
# destroy surfacer
self.free(self.id)
def resize(self, nx: int, ny: int, nz: int, max_verts: int, max_tris: int):
# actual allocations will be resized on next call to surface()
self.nx = nx
self.ny = ny
self.nz = nz
self.max_verts = max_verts
self.max_tris = max_tris
def surface(self, field: array(dtype=float), threshold: float):
from warp.context import runtime
# WP_API int marching_cubes_surface_host(const float* field, int nx, int ny, int nz, float threshold, wp::vec3* verts, int* triangles, int max_verts, int max_tris, int* out_num_verts, int* out_num_tris);
num_verts = ctypes.c_int(0)
num_tris = ctypes.c_int(0)
runtime.core.marching_cubes_surface_device.restype = ctypes.c_int
error = runtime.core.marching_cubes_surface_device(
self.id,
ctypes.cast(field.ptr, ctypes.c_void_p),
self.nx,
self.ny,
self.nz,
ctypes.c_float(threshold),
ctypes.cast(self.verts.ptr, ctypes.c_void_p),
ctypes.cast(self.indices.ptr, ctypes.c_void_p),
self.max_verts,
self.max_tris,
ctypes.c_void_p(ctypes.addressof(num_verts)),
ctypes.c_void_p(ctypes.addressof(num_tris)),
)
        if error:
            raise RuntimeError(
                f"Buffers may not be large enough, marching cubes requires at least {num_verts.value} vertices and {num_tris.value} triangles."
            )
# resize the geometry arrays
self.verts.shape = (num_verts.value,)
self.indices.shape = (num_tris.value * 3,)
self.verts.size = num_verts.value
self.indices.size = num_tris.value * 3
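    # Illustrative sketch (editor's example; `sdf` is a hypothetical
    # wp.array(dtype=float) holding 64*64*64 field samples on a CUDA device):
    #
    #   mc = wp.MarchingCubes(nx=64, ny=64, nz=64, max_verts=10**5, max_tris=10**5)
    #   mc.surface(field=sdf, threshold=0.0)
    #   # mc.verts and mc.indices are now resized to the extracted geometry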
def type_is_generic(t):
if t in (Any, Scalar, Float, Int):
return True
elif is_array(t):
return type_is_generic(t.dtype)
elif hasattr(t, "_wp_scalar_type_"):
# vector/matrix type, check if dtype is generic
if type_is_generic(t._wp_scalar_type_):
return True
# check if any dimension is generic
        for d in t._shape_:
            if d == 0:
                return True
        return False
    else:
        return False
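# For example (derived from the rules above):
#
#   type_is_generic(float32)                # False: concrete scalar
#   type_is_generic(Scalar)                 # True: generic scalar
#   type_is_generic(array(dtype=Any))       # True: array with generic dtype
#   type_is_generic(vec3f)                  # False: concrete dtype and shape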
def type_is_generic_scalar(t):
return t in (Scalar, Float, Int)
def type_matches_template(arg_type, template_type):
"""Check if an argument type matches a template.
This function is used to test whether the arguments passed to a generic @wp.kernel or @wp.func
match the template type annotations. The template_type can be generic, but the arg_type must be concrete.
"""
# canonicalize types
arg_type = type_to_warp(arg_type)
template_type = type_to_warp(template_type)
# arg type must be concrete
if type_is_generic(arg_type):
return False
# if template type is not generic, the argument type must match exactly
if not type_is_generic(template_type):
return types_equal(arg_type, template_type)
# template type is generic, check that the argument type matches
if template_type == Any:
return True
elif is_array(template_type):
# ensure the argument type is a non-generic array with matching dtype and dimensionality
if type(arg_type) != type(template_type):
return False
if not type_matches_template(arg_type.dtype, template_type.dtype):
return False
if arg_type.ndim != template_type.ndim:
return False
elif template_type == Float:
return arg_type in float_types
elif template_type == Int:
return arg_type in int_types
elif template_type == Scalar:
return arg_type in scalar_types
elif hasattr(template_type, "_wp_scalar_type_"):
# vector/matrix type
if not hasattr(arg_type, "_wp_scalar_type_"):
return False
if not type_matches_template(arg_type._wp_scalar_type_, template_type._wp_scalar_type_):
return False
ndim = len(template_type._shape_)
if len(arg_type._shape_) != ndim:
return False
# for any non-generic dimensions, make sure they match
for i in range(ndim):
if template_type._shape_[i] != 0 and arg_type._shape_[i] != template_type._shape_[i]:
return False
return True
def infer_argument_types(args, template_types, arg_names=None):
"""Resolve argument types with the given list of template types."""
if len(args) != len(template_types):
raise RuntimeError("Number of arguments must match number of template types.")
arg_types = []
for i in range(len(args)):
arg = args[i]
arg_type = type(arg)
arg_name = arg_names[i] if arg_names else str(i)
if arg_type in warp.types.array_types:
arg_types.append(arg_type(dtype=arg.dtype, ndim=arg.ndim))
elif arg_type in warp.types.scalar_types:
arg_types.append(arg_type)
elif arg_type in [int, float]:
# canonicalize type
arg_types.append(warp.types.type_to_warp(arg_type))
elif hasattr(arg_type, "_wp_scalar_type_"):
# vector/matrix type
arg_types.append(arg_type)
elif issubclass(arg_type, warp.codegen.StructInstance):
# a struct
arg_types.append(arg._cls)
# elif arg_type in [warp.types.launch_bounds_t, warp.types.shape_t, warp.types.range_t]:
# arg_types.append(arg_type)
# elif arg_type in [warp.hash_grid_query_t, warp.mesh_query_aabb_t, warp.bvh_query_t]:
# arg_types.append(arg_type)
elif arg is None:
# allow passing None for arrays
t = template_types[i]
if warp.types.is_array(t):
arg_types.append(type(t)(dtype=t.dtype, ndim=t.ndim))
else:
raise TypeError(f"Unable to infer the type of argument '{arg_name}', got None")
else:
# TODO: attempt to figure out if it's a vector/matrix type given as a numpy array, list, etc.
raise TypeError(f"Unable to infer the type of argument '{arg_name}', got {arg_type}")
return arg_types
simple_type_codes = {
int: "i4",
float: "f4",
builtins.bool: "b",
bool: "b",
str: "str", # accepted by print()
int8: "i1",
int16: "i2",
int32: "i4",
int64: "i8",
uint8: "u1",
uint16: "u2",
uint32: "u4",
uint64: "u8",
float16: "f2",
float32: "f4",
float64: "f8",
shape_t: "sh",
range_t: "rg",
launch_bounds_t: "lb",
hash_grid_query_t: "hgq",
mesh_query_aabb_t: "mqa",
bvh_query_t: "bvhq",
}
def get_type_code(arg_type):
if arg_type == Any:
# special case for generics
# note: since Python 3.11 Any is a type, so we check for it first
return "?"
elif isinstance(arg_type, type):
if hasattr(arg_type, "_wp_scalar_type_"):
# vector/matrix type
dtype_code = get_type_code(arg_type._wp_scalar_type_)
# check for "special" vector/matrix subtypes
if hasattr(arg_type, "_wp_generic_type_str_"):
type_str = arg_type._wp_generic_type_str_
if type_str == "quat_t":
return f"q{dtype_code}"
elif type_str == "transform_t":
return f"t{dtype_code}"
# elif type_str == "spatial_vector_t":
# return f"sv{dtype_code}"
# elif type_str == "spatial_matrix_t":
# return f"sm{dtype_code}"
# generic vector/matrix
ndim = len(arg_type._shape_)
if ndim == 1:
dim_code = "?" if arg_type._shape_[0] == 0 else str(arg_type._shape_[0])
return f"v{dim_code}{dtype_code}"
elif ndim == 2:
dim_code0 = "?" if arg_type._shape_[0] == 0 else str(arg_type._shape_[0])
dim_code1 = "?" if arg_type._shape_[1] == 0 else str(arg_type._shape_[1])
return f"m{dim_code0}{dim_code1}{dtype_code}"
else:
raise TypeError("Invalid vector/matrix dimensionality")
else:
# simple type
type_code = simple_type_codes.get(arg_type)
if type_code is not None:
return type_code
else:
raise TypeError(f"Unrecognized type '{arg_type}'")
elif isinstance(arg_type, array):
return f"a{arg_type.ndim}{get_type_code(arg_type.dtype)}"
elif isinstance(arg_type, indexedarray):
return f"ia{arg_type.ndim}{get_type_code(arg_type.dtype)}"
elif isinstance(arg_type, fabricarray):
return f"fa{arg_type.ndim}{get_type_code(arg_type.dtype)}"
elif isinstance(arg_type, indexedfabricarray):
return f"ifa{arg_type.ndim}{get_type_code(arg_type.dtype)}"
elif isinstance(arg_type, warp.codegen.Struct):
return warp.codegen.make_full_qualified_name(arg_type.cls)
elif arg_type == Scalar:
# generic scalar type
return "s?"
elif arg_type == Float:
# generic float
return "f?"
elif arg_type == Int:
# generic int
return "i?"
elif isinstance(arg_type, Callable):
# TODO: elaborate on Callable type?
return "c"
else:
raise TypeError(f"Unrecognized type '{arg_type}'")
def get_signature(arg_types, func_name=None, arg_names=None):
type_codes = []
for i, arg_type in enumerate(arg_types):
try:
type_codes.append(get_type_code(arg_type))
except Exception as e:
if arg_names is not None:
arg_str = f"'{arg_names[i]}'"
else:
arg_str = str(i + 1)
if func_name is not None:
func_str = f" of function {func_name}"
else:
func_str = ""
raise RuntimeError(f"Failed to determine type code for argument {arg_str}{func_str}: {e}")
return "_".join(type_codes)
def is_generic_signature(sig):
return "?" in sig
| warp-main | warp/types.py |
# Copyright (c) 2023 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import warp
def device_to_jax(wp_device):
import jax
d = warp.get_device(wp_device)
if d.is_cuda:
cuda_devices = jax.devices("cuda")
if d.ordinal >= len(cuda_devices):
raise RuntimeError(f"Jax device corresponding to '{wp_device}' is not available")
return cuda_devices[d.ordinal]
else:
cpu_devices = jax.devices("cpu")
if not cpu_devices:
raise RuntimeError(f"Jax device corresponding to '{wp_device}' is not available")
return cpu_devices[0]
def device_from_jax(jax_device):
if jax_device.platform == "cpu":
return warp.get_device("cpu")
elif jax_device.platform == "gpu":
return warp.get_cuda_device(jax_device.id)
else:
raise RuntimeError(f"Unknown or unsupported Jax device platform '{jax_device.platform}'")
def to_jax(wp_array):
import jax.dlpack
return jax.dlpack.from_dlpack(warp.to_dlpack(wp_array))
def from_jax(jax_array, dtype=None):
import jax.dlpack
return warp.from_dlpack(jax.dlpack.to_dlpack(jax_array), dtype=dtype)
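# Illustrative sketch (editor's example): round-tripping an array between Warp
# and JAX via DLPack; in the common case the buffer is shared rather than copied:
#
#   import warp as wp
#   a = wp.zeros(1024, dtype=wp.float32, device="cuda:0")
#   j = to_jax(a)                        # jax array aliasing a's memory
#   b = from_jax(j, dtype=wp.float32)    # back to a warp array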
| warp-main | warp/jax.py |
# Autogenerated file, do not edit, this file provides stubs for builtins autocomplete in VSCode, PyCharm, etc
from typing import Any
from typing import Tuple
from typing import Callable
from typing import TypeVar
from typing import Generic
from typing import overload as over
Length = TypeVar("Length", bound=int)
Rows = TypeVar("Rows", bound=int)
Cols = TypeVar("Cols", bound=int)
DType = TypeVar("DType")
Int = TypeVar("Int")
Float = TypeVar("Float")
Scalar = TypeVar("Scalar")
Vector = Generic[Length, Scalar]
Matrix = Generic[Rows, Cols, Scalar]
Quaternion = Generic[Float]
Transformation = Generic[Float]
Array = Generic[DType]
from warp.types import array, array1d, array2d, array3d, array4d, constant
from warp.types import indexedarray, indexedarray1d, indexedarray2d, indexedarray3d, indexedarray4d
from warp.fabric import fabricarray, fabricarrayarray, indexedfabricarray, indexedfabricarrayarray
from warp.types import bool, int8, uint8, int16, uint16, int32, uint32, int64, uint64, float16, float32, float64
from warp.types import vec2, vec2b, vec2ub, vec2s, vec2us, vec2i, vec2ui, vec2l, vec2ul, vec2h, vec2f, vec2d
from warp.types import vec3, vec3b, vec3ub, vec3s, vec3us, vec3i, vec3ui, vec3l, vec3ul, vec3h, vec3f, vec3d
from warp.types import vec4, vec4b, vec4ub, vec4s, vec4us, vec4i, vec4ui, vec4l, vec4ul, vec4h, vec4f, vec4d
from warp.types import mat22, mat22h, mat22f, mat22d
from warp.types import mat33, mat33h, mat33f, mat33d
from warp.types import mat44, mat44h, mat44f, mat44d
from warp.types import quat, quath, quatf, quatd
from warp.types import transform, transformh, transformf, transformd
from warp.types import spatial_vector, spatial_vectorh, spatial_vectorf, spatial_vectord
from warp.types import spatial_matrix, spatial_matrixh, spatial_matrixf, spatial_matrixd
from warp.types import Bvh, Mesh, HashGrid, Volume, MarchingCubes
from warp.types import bvh_query_t, mesh_query_aabb_t, hash_grid_query_t
from warp.types import matmul, adj_matmul, batched_matmul, adj_batched_matmul, from_ptr
from warp.types import vector as vec
from warp.types import matrix as mat
from warp.context import init, func, func_grad, func_replay, kernel, struct, overload
from warp.context import is_cpu_available, is_cuda_available, is_device_available
from warp.context import get_devices, get_preferred_device
from warp.context import get_cuda_devices, get_cuda_device_count, get_cuda_device, map_cuda_device, unmap_cuda_device
from warp.context import get_device, set_device, synchronize_device
from warp.context import (
zeros,
zeros_like,
full,
full_like,
clone,
empty,
empty_like,
copy,
from_numpy,
launch,
synchronize,
force_load,
load_module,
)
from warp.context import set_module_options, get_module_options, get_module
from warp.context import capture_begin, capture_end, capture_launch
from warp.context import print_builtins, export_builtins, export_stubs
from warp.context import Kernel, Function, Launch
from warp.context import Stream, get_stream, set_stream, synchronize_stream
from warp.context import Event, record_event, wait_event, wait_stream
from warp.context import RegisteredGLBuffer
from warp.tape import Tape
from warp.utils import ScopedTimer, ScopedDevice, ScopedStream
from warp.utils import transform_expand, quat_between_vectors
from warp.torch import from_torch, to_torch
from warp.torch import device_from_torch, device_to_torch
from warp.torch import stream_from_torch, stream_to_torch
from warp.jax import from_jax, to_jax
from warp.jax import device_from_jax, device_to_jax
from warp.dlpack import from_dlpack, to_dlpack
from warp.constants import *
from . import builtins
import warp.config
__version__ = warp.config.version
@over
def min(x: Scalar, y: Scalar) -> Scalar:
"""
Return the minimum of two scalars.
"""
...
@over
def min(x: Vector[Any, Scalar], y: Vector[Any, Scalar]) -> Vector[Any, Scalar]:
"""
Return the element wise minimum of two vectors.
"""
...
@over
def min(v: Vector[Any, Scalar]) -> Scalar:
"""
Return the minimum element of a vector.
"""
...
@over
def max(x: Scalar, y: Scalar) -> Scalar:
"""
Return the maximum of two scalars.
"""
...
@over
def max(x: Vector[Any, Scalar], y: Vector[Any, Scalar]) -> Vector[Any, Scalar]:
"""
Return the element wise maximum of two vectors.
"""
...
@over
def max(v: Vector[Any, Scalar]) -> Scalar:
"""
Return the maximum element of a vector.
"""
...
@over
def clamp(x: Scalar, a: Scalar, b: Scalar) -> Scalar:
"""
Clamp the value of x to the range [a, b].
"""
...
@over
def abs(x: Scalar) -> Scalar:
"""
Return the absolute value of x.
"""
...
@over
def sign(x: Scalar) -> Scalar:
"""
Return -1 if x < 0, return 1 otherwise.
"""
...
@over
def step(x: Scalar) -> Scalar:
"""
Return 1.0 if x < 0.0, return 0.0 otherwise.
"""
...
@over
def nonzero(x: Scalar) -> Scalar:
"""
Return 1.0 if x is not equal to zero, return 0.0 otherwise.
"""
...
@over
def sin(x: Float) -> Float:
"""
Return the sine of x in radians.
"""
...
@over
def cos(x: Float) -> Float:
"""
Return the cosine of x in radians.
"""
...
@over
def acos(x: Float) -> Float:
"""
Return arccos of x in radians. Inputs are automatically clamped to [-1.0, 1.0].
"""
...
@over
def asin(x: Float) -> Float:
"""
Return arcsin of x in radians. Inputs are automatically clamped to [-1.0, 1.0].
"""
...
@over
def sqrt(x: Float) -> Float:
"""
Return the sqrt of x, where x is positive.
"""
...
@over
def tan(x: Float) -> Float:
"""
Return tangent of x in radians.
"""
...
@over
def atan(x: Float) -> Float:
"""
Return arctan of x.
"""
...
@over
def atan2(y: Float, x: Float) -> Float:
"""
    Return the two-argument arctangent atan2(y, x), in radians.
"""
...
@over
def sinh(x: Float) -> Float:
"""
Return the sinh of x.
"""
...
@over
def cosh(x: Float) -> Float:
"""
Return the cosh of x.
"""
...
@over
def tanh(x: Float) -> Float:
"""
Return the tanh of x.
"""
...
@over
def degrees(x: Float) -> Float:
"""
Convert radians into degrees.
"""
...
@over
def radians(x: Float) -> Float:
"""
Convert degrees into radians.
"""
...
@over
def log(x: Float) -> Float:
"""
Return the natural log (base-e) of x, where x is positive.
"""
...
@over
def log2(x: Float) -> Float:
"""
    Return the base-2 logarithm of x, where x is positive.
"""
...
@over
def log10(x: Float) -> Float:
"""
    Return the base-10 logarithm of x, where x is positive.
"""
...
@over
def exp(x: Float) -> Float:
"""
Return base-e exponential, e^x.
"""
...
@over
def pow(x: Float, y: Float) -> Float:
"""
Return the result of x raised to power of y.
"""
...
@over
def round(x: Float) -> Float:
"""
Calculate the nearest integer value, rounding halfway cases away from zero.
This is the most intuitive form of rounding in the colloquial sense, but can be slower than other options like ``warp.rint()``.
Differs from ``numpy.round()``, which behaves the same way as ``numpy.rint()``.
"""
...
@over
def rint(x: Float) -> Float:
"""
Calculate the nearest integer value, rounding halfway cases to nearest even integer.
It is generally faster than ``warp.round()``.
Equivalent to ``numpy.rint()``.
"""
...
@over
def trunc(x: Float) -> Float:
"""
Calculate the nearest integer that is closer to zero than x.
In other words, it discards the fractional part of x.
It is similar to casting ``float(int(x))``, but preserves the negative sign when x is in the range [-0.0, -1.0).
Equivalent to ``numpy.trunc()`` and ``numpy.fix()``.
"""
...
@over
def floor(x: Float) -> Float:
"""
Calculate the largest integer that is less than or equal to x.
"""
...
@over
def ceil(x: Float) -> Float:
"""
Calculate the smallest integer that is greater than or equal to x.
"""
...
@over
def dot(x: Vector[Any, Scalar], y: Vector[Any, Scalar]) -> Scalar:
"""
Compute the dot product between two vectors.
"""
...
@over
def dot(x: Quaternion[Float], y: Quaternion[Float]) -> Scalar:
"""
Compute the dot product between two quaternions.
"""
...
@over
def ddot(x: Matrix[Any, Any, Scalar], y: Matrix[Any, Any, Scalar]) -> Scalar:
"""
Compute the double dot product between two matrices.
"""
...
@over
def argmin(v: Vector[Any, Scalar]) -> uint32:
"""
Return the index of the minimum element of a vector.
"""
...
@over
def argmax(v: Vector[Any, Scalar]) -> uint32:
"""
Return the index of the maximum element of a vector.
"""
...
@over
def outer(x: Vector[Any, Scalar], y: Vector[Any, Scalar]) -> Matrix[Any, Any, Scalar]:
"""
    Compute the outer product x*y^T of two vectors.
"""
...
@over
def cross(x: Vector[3, Scalar], y: Vector[3, Scalar]) -> Vector[3, Scalar]:
"""
Compute the cross product of two 3d vectors.
"""
...
@over
def skew(x: Vector[3, Scalar]):
"""
Compute the skew symmetric matrix for a 3d vector.
"""
...
@over
def length(x: Vector[Any, Float]) -> Scalar:
"""
Compute the length of a vector.
"""
...
@over
def length(x: Quaternion[Float]) -> Scalar:
"""
Compute the length of a quaternion.
"""
...
@over
def length_sq(x: Vector[Any, Scalar]) -> Scalar:
"""
    Compute the squared length of a vector.
"""
...
@over
def length_sq(x: Quaternion[Scalar]) -> Scalar:
"""
Compute the squared length of a quaternion.
"""
...
@over
def normalize(x: Vector[Any, Float]) -> Vector[Any, Scalar]:
"""
Compute the normalized value of x, if length(x) is 0 then the zero vector is returned.
"""
...
@over
def normalize(x: Quaternion[Float]) -> Quaternion[Scalar]:
"""
Compute the normalized value of x, if length(x) is 0 then the zero quat is returned.
"""
...
@over
def transpose(m: Matrix[Any, Any, Scalar]):
"""
Return the transpose of the matrix m
"""
...
@over
def inverse(m: Matrix[2, 2, Float]) -> Matrix[Any, Any, Float]:
"""
Return the inverse of a 2x2 matrix m
"""
...
@over
def inverse(m: Matrix[3, 3, Float]) -> Matrix[Any, Any, Float]:
"""
Return the inverse of a 3x3 matrix m
"""
...
@over
def inverse(m: Matrix[4, 4, Float]) -> Matrix[Any, Any, Float]:
"""
Return the inverse of a 4x4 matrix m
"""
...
@over
def determinant(m: Matrix[2, 2, Float]) -> Scalar:
"""
Return the determinant of a 2x2 matrix m
"""
...
@over
def determinant(m: Matrix[3, 3, Float]) -> Scalar:
"""
Return the determinant of a 3x3 matrix m
"""
...
@over
def determinant(m: Matrix[4, 4, Float]) -> Scalar:
"""
Return the determinant of a 4x4 matrix m
"""
...
@over
def trace(m: Matrix[Any, Any, Scalar]) -> Scalar:
"""
Return the trace of the matrix m
"""
...
@over
def diag(d: Vector[Any, Scalar]) -> Matrix[Any, Any, Scalar]:
"""
Returns a matrix with the components of the vector d on the diagonal
"""
...
@over
def get_diag(m: Matrix[Any, Any, Scalar]) -> Vector[Any, Scalar]:
"""
Returns a vector containing the diagonal elements of the square matrix.
"""
...
@over
def cw_mul(x: Vector[Any, Scalar], y: Vector[Any, Scalar]) -> Vector[Any, Scalar]:
"""
    Component-wise multiplication of two vectors.
"""
...
@over
def cw_mul(x: Matrix[Any, Any, Scalar], y: Matrix[Any, Any, Scalar]) -> Matrix[Any, Any, Scalar]:
"""
    Component-wise multiplication of two matrices.
"""
...
@over
def cw_div(x: Vector[Any, Scalar], y: Vector[Any, Scalar]) -> Vector[Any, Scalar]:
"""
    Component-wise division of two vectors.
"""
...
@over
def cw_div(x: Matrix[Any, Any, Scalar], y: Matrix[Any, Any, Scalar]) -> Matrix[Any, Any, Scalar]:
"""
    Component-wise division of two matrices.
"""
...
@over
def quat_identity() -> quatf:
"""
Construct an identity quaternion with zero imaginary part and real part of 1.0
"""
...
@over
def quat_from_axis_angle(axis: Vector[3, Float], angle: Float) -> Quaternion[Scalar]:
"""
Construct a quaternion representing a rotation of angle radians around the given axis.
"""
...
@over
def quat_to_axis_angle(q: Quaternion[Float], axis: Vector[3, Float], angle: Float):
"""
    Extract the rotation axis and angle (in radians) that a quaternion represents.
"""
...
@over
def quat_from_matrix(m: Matrix[3, 3, Float]) -> Quaternion[Scalar]:
"""
Construct a quaternion from a 3x3 matrix.
"""
...
@over
def quat_rpy(roll: Float, pitch: Float, yaw: Float) -> Quaternion[Scalar]:
"""
    Construct a quaternion representing a combined roll (z), pitch (x), and yaw (y) rotation, with angles given in radians.
"""
...
@over
def quat_inverse(q: Quaternion[Float]) -> Quaternion[Scalar]:
"""
Compute quaternion conjugate.
"""
...
@over
def quat_rotate(q: Quaternion[Float], p: Vector[3, Float]) -> Vector[3, Scalar]:
"""
Rotate a vector by a quaternion.
"""
...
@over
def quat_rotate_inv(q: Quaternion[Float], p: Vector[3, Float]) -> Vector[3, Scalar]:
"""
Rotate a vector by the inverse of a quaternion.
"""
...
@over
def quat_slerp(q0: Quaternion[Float], q1: Quaternion[Float], t: Float) -> Quaternion[Scalar]:
"""
Spherically interpolate between two quaternions (SLERP).
"""
...
@over
def quat_to_matrix(q: Quaternion[Float]) -> Matrix[3, 3, Scalar]:
"""
Convert a quaternion to a 3x3 rotation matrix.
"""
...
@over
def transform_identity() -> transformf:
"""
Construct an identity transform with zero translation and identity rotation.
"""
...
@over
def transform_get_translation(t: Transformation[Float]) -> Vector[3, Scalar]:
"""
Return the translational part of a transform.
"""
...
@over
def transform_get_rotation(t: Transformation[Float]) -> Quaternion[Scalar]:
"""
Return the rotational part of a transform.
"""
...
@over
def transform_multiply(a: Transformation[Float], b: Transformation[Float]) -> Transformation[Scalar]:
"""
Multiply two rigid body transformations together.
"""
...
@over
def transform_point(t: Transformation[Scalar], p: Vector[3, Scalar]) -> Vector[3, Scalar]:
"""
Apply the transform to a point p treating the homogeneous coordinate as w=1 (translation and rotation).
"""
...
@over
def transform_point(m: Matrix[4, 4, Scalar], p: Vector[3, Scalar]) -> Vector[3, Scalar]:
"""
Apply the transform to a point ``p`` treating the homogeneous coordinate as w=1. The transformation is applied treating ``p`` as a column vector, e.g.: ``y = M*p``.
Note this is in contrast to some libraries, notably USD, which apply transforms to row vectors, ``y^T = p^T*M^T``. If the transform comes from a library that uses row vectors,
then users should transpose the transformation matrix before calling this method.
"""
...
@over
def transform_vector(t: Transformation[Scalar], v: Vector[3, Scalar]) -> Vector[3, Scalar]:
"""
Apply the transform to a vector v treating the homogeneous coordinate as w=0 (rotation only).
"""
...
@over
def transform_vector(m: Matrix[4, 4, Scalar], v: Vector[3, Scalar]) -> Vector[3, Scalar]:
"""
Apply the transform to a vector ``v`` treating the homogeneous coordinate as w=0. The transformation is applied treating ``v`` as a column vector, e.g.: ``y = M*v``.
Note this is in contrast to some libraries, notably USD, which apply transforms to row vectors, ``y^T = v^T*M^T``. If the transform comes from a library that uses row vectors,
then users should transpose the transformation matrix before calling this method.
"""
...
@over
def transform_inverse(t: Transformation[Float]) -> Transformation[Float]:
"""
Compute the inverse of the transform.
"""
...
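# Example (hedged sketch, assumes ``import warp as wp``; kernel and array names
# are illustrative): applying a rigid transform to an array of points.
@wp.kernel
def _example_transform_points(
    xform: wp.transform, points: wp.array(dtype=wp.vec3), out: wp.array(dtype=wp.vec3)
):
    tid = wp.tid()
    # w=1 semantics: both rotation and translation are applied
    out[tid] = wp.transform_point(xform, points[tid])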
@over
def spatial_dot(a: Vector[6, Float], b: Vector[6, Float]) -> Scalar:
"""
Compute the dot product of two 6d screw vectors.
"""
...
@over
def spatial_cross(a: Vector[6, Float], b: Vector[6, Float]) -> Vector[6, Float]:
"""
Compute the cross-product of two 6d screw vectors.
"""
...
@over
def spatial_cross_dual(a: Vector[6, Float], b: Vector[6, Float]) -> Vector[6, Float]:
"""
Compute the dual cross-product of two 6d screw vectors.
"""
...
@over
def spatial_top(a: Vector[6, Float]):
"""
Return the top (first) part of a 6d screw vector.
"""
...
@over
def spatial_bottom(a: Vector[6, Float]):
"""
Return the bottom (second) part of a 6d screw vector.
"""
...
@over
def spatial_jacobian(
S: Array[Vector[6, Float]],
joint_parents: Array[int32],
joint_qd_start: Array[int32],
joint_start: int32,
joint_count: int32,
J_start: int32,
J_out: Array[Float],
):
""" """
...
@over
def spatial_mass(
I_s: Array[Matrix[6, 6, Float]], joint_start: int32, joint_count: int32, M_start: int32, M: Array[Float]
):
""" """
...
@over
def mlp(
weights: Array[float32],
bias: Array[float32],
activation: Callable,
index: int32,
x: Array[float32],
out: Array[float32],
):
"""
Evaluate a multi-layer perceptron (MLP) layer in the form: ``out = act(weights*x + bias)``.
:param weights: A layer's network weights with dimensions ``(m, n)``.
:param bias: An array with dimensions ``(m)``, one bias term per output feature.
:param activation: A ``wp.func`` function that takes a single scalar float as input and returns a scalar float as output
:param index: The batch item to process; typically each thread processes one item in the batch, in which case ``index`` should be ``wp.tid()``
:param x: The feature matrix with dimensions ``(n, b)``
:param out: The network output with dimensions ``(m, b)``
:note: Feature and output matrices are transposed compared to some other frameworks such as PyTorch. All matrices are assumed to be stored in flattened row-major memory layout (NumPy default).
"""
...
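# Example (hedged sketch, assumes ``import warp as wp``; function names and
# array shapes are illustrative): one thread evaluates one batch item of an MLP
# layer, using a user-defined ``@wp.func`` as the activation.
@wp.func
def _example_relu(x: float):
    return wp.max(x, 0.0)

@wp.kernel
def _example_mlp_layer(
    weights: wp.array(dtype=float),
    bias: wp.array(dtype=float),
    x: wp.array(dtype=float),
    out: wp.array(dtype=float),
):
    i = wp.tid()  # each thread handles batch item i
    wp.mlp(weights, bias, _example_relu, i, x, out)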
@over
def bvh_query_aabb(id: uint64, lower: vec3f, upper: vec3f) -> bvh_query_t:
"""
Construct an axis-aligned bounding box query against a bvh object. This query can be used to iterate over all bounds
that overlap the query box. Returns an object that is used to track state during bvh traversal.
:param id: The bvh identifier
:param lower: The lower bound of the bounding box in bvh space
:param upper: The upper bound of the bounding box in bvh space
"""
...
@over
def bvh_query_ray(id: uint64, start: vec3f, dir: vec3f) -> bvh_query_t:
"""
Construct a ray query against a bvh object. This query can be used to iterate over all bounds
that intersect the ray. Returns an object that is used to track state during bvh traversal.
:param id: The bvh identifier
:param start: The start of the ray in bvh space
:param dir: The direction of the ray in bvh space
"""
...
@over
def bvh_query_next(query: bvh_query_t, index: int32) -> bool:
"""
Move to the next bound returned by the query. The index of the current bound is stored in ``index``; returns ``False``
if there are no more overlapping bounds.
"""
...
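# Example (hedged sketch; names are illustrative): counting how many BVH bounds
# overlap a fixed query box. ``bvh_id`` is the ``id`` of a previously built ``wp.Bvh``.
@wp.kernel
def _example_bvh_overlaps(bvh_id: wp.uint64, count: wp.array(dtype=int)):
    query = wp.bvh_query_aabb(bvh_id, wp.vec3(-1.0, -1.0, -1.0), wp.vec3(1.0, 1.0, 1.0))
    bounds_nr = int(0)
    while wp.bvh_query_next(query, bounds_nr):
        # bounds_nr holds the index of the current overlapping bound
        wp.atomic_add(count, 0, 1)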
@over
def mesh_query_point(
id: uint64, point: vec3f, max_dist: float32, inside: float32, face: int32, bary_u: float32, bary_v: float32
) -> bool:
"""
Computes the closest point on the mesh with identifier `id` to the given point in space. Returns ``True`` if a point closer than ``max_dist`` is found.
Identifies the sign of the distance using additional ray-casts to determine if the point is inside or outside. This method is relatively robust, but
does increase computational cost. See below for additional sign determination methods.
:param id: The mesh identifier
:param point: The point in space to query
:param max_dist: Mesh faces above this distance will not be considered by the query
:param inside: Returns a value < 0 if the query point is inside the mesh, >= 0 otherwise. Note that the mesh must be watertight for this to be robust
:param face: Returns the index of the closest face
:param bary_u: Returns the barycentric u coordinate of the closest point
:param bary_v: Returns the barycentric v coordinate of the closest point
"""
...
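# Example (hedged sketch; names are illustrative): projecting query points onto
# the closest point of a mesh. ``mesh`` is the ``id`` of a previously built ``wp.Mesh``.
@wp.kernel
def _example_project_points(
    mesh: wp.uint64, points: wp.array(dtype=wp.vec3), out: wp.array(dtype=wp.vec3)
):
    tid = wp.tid()
    inside = float(0.0)
    face = int(0)
    bary_u = float(0.0)
    bary_v = float(0.0)
    if wp.mesh_query_point(mesh, points[tid], 1.0e6, inside, face, bary_u, bary_v):
        out[tid] = wp.mesh_eval_position(mesh, face, bary_u, bary_v)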
@over
def mesh_query_point_no_sign(
id: uint64, point: vec3f, max_dist: float32, face: int32, bary_u: float32, bary_v: float32
) -> bool:
"""
Computes the closest point on the mesh with identifier `id` to the given point in space. Returns ``True`` if a point closer than ``max_dist`` is found.
This method does not compute the sign of the point (inside/outside), which makes it faster than the other point query methods.
:param id: The mesh identifier
:param point: The point in space to query
:param max_dist: Mesh faces above this distance will not be considered by the query
:param face: Returns the index of the closest face
:param bary_u: Returns the barycentric u coordinate of the closest point
:param bary_v: Returns the barycentric v coordinate of the closest point
"""
...
@over
def mesh_query_point_sign_normal(
id: uint64,
point: vec3f,
max_dist: float32,
inside: float32,
face: int32,
bary_u: float32,
bary_v: float32,
epsilon: float32,
) -> bool:
"""
Computes the closest point on the mesh with identifier `id` to the given point in space. Returns ``True`` if a point closer than ``max_dist`` is found.
Identifies the sign of the distance (inside/outside) using the angle-weighted pseudo-normal. This approach to sign determination is robust for well-conditioned meshes
that are watertight and non-self-intersecting; it is also comparatively fast to compute.
:param id: The mesh identifier
:param point: The point in space to query
:param max_dist: Mesh faces above this distance will not be considered by the query
:param inside: Returns a value < 0 if the query point is inside the mesh, >= 0 otherwise. Note that the mesh must be watertight for this to be robust
:param face: Returns the index of the closest face
:param bary_u: Returns the barycentric u coordinate of the closest point
:param bary_v: Returns the barycentric v coordinate of the closest point
:param epsilon: Epsilon for treating distance values as equal when locating the minimum-distance vertex/face/edge, expressed as a fraction of the average edge length; also used when treating the closest point as lying on an edge/vertex (default 1e-3)
"""
...
@over
def mesh_query_point_sign_winding_number(
id: uint64,
point: vec3f,
max_dist: float32,
inside: float32,
face: int32,
bary_u: float32,
bary_v: float32,
accuracy: float32,
threshold: float32,
) -> bool:
"""
Computes the closest point on the mesh with identifier `id` to the given point in space. Returns ``True`` if a point closer than ``max_dist`` is found.
Identifies the sign using the winding number of the mesh relative to the query point. This method of sign determination is robust for poorly conditioned meshes
and provides a smooth approximation to sign even when the mesh is not watertight. This method is the most robust and accurate of the sign determination methods,
but also the most expensive.
Note that the Mesh object must be constructed with ``support_winding_number=True`` for this method to return correct results.
:param id: The mesh identifier
:param point: The point in space to query
:param max_dist: Mesh faces above this distance will not be considered by the query
:param inside: Returns a value < 0 if the query point is inside the mesh, >= 0 otherwise. Note that the mesh must be watertight for this to be robust
:param face: Returns the index of the closest face
:param bary_u: Returns the barycentric u coordinate of the closest point
:param bary_v: Returns the barycentric v coordinate of the closest point
:param accuracy: Accuracy for computing the winding number with the fast winding number method, utilizing a second-order dipole approximation (default 2.0)
:param threshold: The winding number threshold above which a point is considered inside (default 0.5)
"""
...
@over
def mesh_query_ray(
id: uint64,
start: vec3f,
dir: vec3f,
max_t: float32,
t: float32,
bary_u: float32,
bary_v: float32,
sign: float32,
normal: vec3f,
face: int32,
) -> bool:
"""
Computes the closest ray hit on the mesh with identifier `id`. Returns ``True`` if a hit closer than ``max_t`` is found.
:param id: The mesh identifier
:param start: The start point of the ray
:param dir: The ray direction (should be normalized)
:param max_t: The maximum distance along the ray to check for intersections
:param t: Returns the distance of the closest hit along the ray
:param bary_u: Returns the barycentric u coordinate of the closest hit
:param bary_v: Returns the barycentric v coordinate of the closest hit
:param sign: Returns a value > 0 if the ray hit the front of the face, < 0 otherwise
:param normal: Returns the face normal
:param face: Returns the index of the hit face
"""
...
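# Example (hedged sketch; names are illustrative): a minimal ray-cast depth
# kernel; entries with no hit keep their initialized value.
@wp.kernel
def _example_raycast(
    mesh: wp.uint64, origin: wp.vec3, dirs: wp.array(dtype=wp.vec3), depth: wp.array(dtype=float)
):
    tid = wp.tid()
    t = float(0.0)
    bary_u = float(0.0)
    bary_v = float(0.0)
    sign = float(0.0)
    normal = wp.vec3()
    face = int(0)
    if wp.mesh_query_ray(mesh, origin, dirs[tid], 1.0e6, t, bary_u, bary_v, sign, normal, face):
        depth[tid] = t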
@over
def mesh_query_aabb(id: uint64, lower: vec3f, upper: vec3f) -> mesh_query_aabb_t:
"""
Construct an axis-aligned bounding box query against a mesh object. This query can be used to iterate over all triangles
that overlap the query box. Returns an object that is used to track state during mesh traversal.
:param id: The mesh identifier
:param lower: The lower bound of the bounding box in mesh space
:param upper: The upper bound of the bounding box in mesh space
"""
...
@over
def mesh_query_aabb_next(query: mesh_query_aabb_t, index: int32) -> bool:
"""
Move to the next triangle overlapping the query bounding box. The index of the current face is stored in ``index``, returns ``False``
if there are no more overlapping triangles.
"""
...
@over
def mesh_eval_position(id: uint64, face: int32, bary_u: float32, bary_v: float32) -> vec3f:
"""
Evaluates the position on the mesh given a face index and barycentric coordinates.
"""
...
@over
def mesh_eval_velocity(id: uint64, face: int32, bary_u: float32, bary_v: float32) -> vec3f:
"""
Evaluates the velocity on the mesh given a face index and barycentric coordinates.
"""
...
@over
def hash_grid_query(id: uint64, point: vec3f, max_dist: float32) -> hash_grid_query_t:
"""
Construct a point query against a hash grid. This query can be used to iterate over all neighboring points within a
fixed radius of the query point. Returns an object that is used to track state during neighbor traversal.
"""
...
@over
def hash_grid_query_next(query: hash_grid_query_t, index: int32) -> bool:
"""
Move to the next point in the hash grid query. The index of the current neighbor is stored in ``index``, returns ``False``
if there are no more neighbors.
"""
...
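# Example (hedged sketch; names are illustrative): counting neighbors within
# ``radius`` of each point. The grid only prunes candidates conservatively, so
# the kernel still checks the actual distance (``hash_grid_point_id`` is
# documented just below).
@wp.kernel
def _example_count_neighbors(
    grid: wp.uint64, points: wp.array(dtype=wp.vec3), radius: float, counts: wp.array(dtype=int)
):
    tid = wp.tid()
    i = wp.hash_grid_point_id(grid, tid)  # optional spatially coherent ordering
    p = points[i]
    neighbors = wp.hash_grid_query(grid, p, radius)
    index = int(0)
    count = int(0)
    while wp.hash_grid_query_next(neighbors, index):
        if wp.length(points[index] - p) <= radius:
            count += 1
    counts[i] = count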
@over
def hash_grid_point_id(id: uint64, index: int32) -> int:
"""
Return the index of a point in the grid. This can be used to re-order threads such that grid
traversal occurs in a spatially coherent order.
"""
...
@over
def intersect_tri_tri(v0: vec3f, v1: vec3f, v2: vec3f, u0: vec3f, u1: vec3f, u2: vec3f) -> int:
"""
Tests for intersection between two triangles (v0, v1, v2) and (u0, u1, u2) using Moller's method. Returns > 0 if triangles intersect.
"""
...
@over
def mesh_get(id: uint64) -> Mesh:
"""
Retrieves the mesh given its identifier.
"""
...
@over
def mesh_eval_face_normal(id: uint64, face: int32) -> vec3f:
"""
Evaluates the face normal of the mesh given a face index.
"""
...
@over
def mesh_get_point(id: uint64, index: int32) -> vec3f:
"""
Returns the point of the mesh given an index.
"""
...
@over
def mesh_get_velocity(id: uint64, index: int32) -> vec3f:
"""
Returns the velocity of the mesh given an index.
"""
...
@over
def mesh_get_index(id: uint64, index: int32) -> int:
"""
Returns the point-index of the mesh given a face-vertex index.
"""
...
@over
def closest_point_edge_edge(p1: vec3f, q1: vec3f, p2: vec3f, q2: vec3f, epsilon: float32) -> vec3f:
"""
Finds the closest points between two edges. Returns barycentric weights to the points on each edge, as well as the closest distance between the edges.
:param p1: First point of first edge
:param q1: Second point of first edge
:param p2: First point of second edge
:param q2: Second point of second edge
:param epsilon: Zero tolerance for determining if points in an edge are degenerate.
:return: A ``vec3`` containing ``(s, t, d)``, where `s` in [0,1] is the barycentric weight for the first edge, `t` is the barycentric weight for the second edge, and `d` is the distance between the two edges at these two closest points.
"""
...
@over
def volume_sample_f(id: uint64, uvw: vec3f, sampling_mode: int32) -> float:
"""
Sample the volume given by ``id`` at the volume local-space point ``uvw``. Interpolation should be ``wp.Volume.CLOSEST`` or ``wp.Volume.LINEAR``.
"""
...
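# Example (hedged sketch; names are illustrative): sampling a float volume at
# world-space positions (``volume_world_to_index`` is documented further below).
@wp.kernel
def _example_sample_volume(
    volume: wp.uint64, points: wp.array(dtype=wp.vec3), out: wp.array(dtype=float)
):
    tid = wp.tid()
    uvw = wp.volume_world_to_index(volume, points[tid])
    out[tid] = wp.volume_sample_f(volume, uvw, wp.Volume.LINEAR)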
@over
def volume_sample_grad_f(id: uint64, uvw: vec3f, sampling_mode: int32, grad: vec3f) -> float:
"""
Sample the volume and its gradient given by ``id`` at the volume local-space point ``uvw``. Interpolation should be ``wp.Volume.CLOSEST`` or ``wp.Volume.LINEAR``.
"""
...
@over
def volume_lookup_f(id: uint64, i: int32, j: int32, k: int32) -> float:
"""
Returns the value of voxel with coordinates ``i``, ``j``, ``k``; if the voxel at this index does not exist, this function returns the background value
"""
...
@over
def volume_store_f(id: uint64, i: int32, j: int32, k: int32, value: float32):
"""
Store the value at voxel with coordinates ``i``, ``j``, ``k``.
"""
...
@over
def volume_sample_v(id: uint64, uvw: vec3f, sampling_mode: int32) -> vec3f:
"""
Sample the vector volume given by ``id`` at the volume local-space point ``uvw``. Interpolation should be ``wp.Volume.CLOSEST`` or ``wp.Volume.LINEAR``.
"""
...
@over
def volume_lookup_v(id: uint64, i: int32, j: int32, k: int32) -> vec3f:
"""
Returns the vector value of voxel with coordinates ``i``, ``j``, ``k``; if the voxel at this index does not exist, this function returns the background value
"""
...
@over
def volume_store_v(id: uint64, i: int32, j: int32, k: int32, value: vec3f):
"""
Store the value at voxel with coordinates ``i``, ``j``, ``k``.
"""
...
@over
def volume_sample_i(id: uint64, uvw: vec3f) -> int:
"""
Sample the int32 volume given by ``id`` at the volume local-space point ``uvw``.
"""
...
@over
def volume_lookup_i(id: uint64, i: int32, j: int32, k: int32) -> int:
"""
Returns the int32 value of voxel with coordinates ``i``, ``j``, ``k``; if the voxel at this index does not exist, this function returns the background value
"""
...
@over
def volume_store_i(id: uint64, i: int32, j: int32, k: int32, value: int32):
"""
Store the value at voxel with coordinates ``i``, ``j``, ``k``.
"""
...
@over
def volume_index_to_world(id: uint64, uvw: vec3f) -> vec3f:
"""
Transform a point defined in volume index space to world space given the volume's intrinsic affine transformation.
"""
...
@over
def volume_world_to_index(id: uint64, xyz: vec3f) -> vec3f:
"""
Transform a point defined in volume world space to the volume's index space, given the volume's intrinsic affine transformation.
"""
...
@over
def volume_index_to_world_dir(id: uint64, uvw: vec3f) -> vec3f:
"""
Transform a direction defined in volume index space to world space given the volume's intrinsic affine transformation.
"""
...
@over
def volume_world_to_index_dir(id: uint64, xyz: vec3f) -> vec3f:
"""
Transform a direction defined in volume world space to the volume's index space, given the volume's intrinsic affine transformation.
"""
...
@over
def rand_init(seed: int32) -> uint32:
"""
Initialize a new random number generator given a user-defined seed. Returns a 32-bit integer representing the RNG state.
"""
...
@over
def rand_init(seed: int32, offset: int32) -> uint32:
"""
Initialize a new random number generator given a user-defined seed and an offset.
This alternative constructor can be useful in parallel programs, where a kernel as a whole should share a seed,
but each thread should generate uncorrelated values. In this case usage should be ``r = rand_init(seed, tid)``
"""
...
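# Example (hedged sketch; names are illustrative): a shared seed with a
# per-thread offset gives each thread an uncorrelated random stream.
@wp.kernel
def _example_scatter(seed: int, samples: wp.array(dtype=wp.vec3)):
    tid = wp.tid()
    state = wp.rand_init(seed, tid)  # same seed, per-thread offset
    samples[tid] = wp.sample_unit_sphere_surface(state)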
@over
def randi(state: uint32) -> int:
"""
Return a random integer in the range [0, 2^32)
"""
...
@over
def randi(state: uint32, min: int32, max: int32) -> int:
"""
Return a random integer in the range [min, max)
"""
...
@over
def randf(state: uint32) -> float:
"""
Return a random float in the range [0.0, 1.0)
"""
...
@over
def randf(state: uint32, min: float32, max: float32) -> float:
"""
Return a random float in the range [min, max)
"""
...
@over
def randn(state: uint32) -> float:
"""
Sample a normal distribution
"""
...
@over
def sample_cdf(state: uint32, cdf: Array[float32]) -> int:
"""
Inverse-transform sample from a cumulative distribution function
"""
...
@over
def sample_triangle(state: uint32) -> vec2f:
"""
Uniformly sample a triangle. Returns sample barycentric coordinates
"""
...
@over
def sample_unit_ring(state: uint32) -> vec2f:
"""
Uniformly sample a ring in the xy plane
"""
...
@over
def sample_unit_disk(state: uint32) -> vec2f:
"""
Uniformly sample a disk in the xy plane
"""
...
@over
def sample_unit_sphere_surface(state: uint32) -> vec3f:
"""
Uniformly sample a unit sphere surface
"""
...
@over
def sample_unit_sphere(state: uint32) -> vec3f:
"""
Uniformly sample a unit sphere
"""
...
@over
def sample_unit_hemisphere_surface(state: uint32) -> vec3f:
"""
Uniformly sample a unit hemisphere surface
"""
...
@over
def sample_unit_hemisphere(state: uint32) -> vec3f:
"""
Uniformly sample a unit hemisphere
"""
...
@over
def sample_unit_square(state: uint32) -> vec2f:
"""
Uniformly sample a unit square
"""
...
@over
def sample_unit_cube(state: uint32) -> vec3f:
"""
Uniformly sample a unit cube
"""
...
@over
def poisson(state: uint32, lam: float32) -> uint32:
"""
Generate a random sample from a Poisson distribution.
:param state: RNG state
:param lam: The expected value of the distribution
"""
...
@over
def noise(state: uint32, x: float32) -> float:
"""
Non-periodic Perlin-style noise in 1d.
"""
...
@over
def noise(state: uint32, xy: vec2f) -> float:
"""
Non-periodic Perlin-style noise in 2d.
"""
...
@over
def noise(state: uint32, xyz: vec3f) -> float:
"""
Non-periodic Perlin-style noise in 3d.
"""
...
@over
def noise(state: uint32, xyzt: vec4f) -> float:
"""
Non-periodic Perlin-style noise in 4d.
"""
...
@over
def pnoise(state: uint32, x: float32, px: int32) -> float:
"""
Periodic Perlin-style noise in 1d.
"""
...
@over
def pnoise(state: uint32, xy: vec2f, px: int32, py: int32) -> float:
"""
Periodic Perlin-style noise in 2d.
"""
...
@over
def pnoise(state: uint32, xyz: vec3f, px: int32, py: int32, pz: int32) -> float:
"""
Periodic Perlin-style noise in 3d.
"""
...
@over
def pnoise(state: uint32, xyzt: vec4f, px: int32, py: int32, pz: int32, pt: int32) -> float:
"""
Periodic Perlin-style noise in 4d.
"""
...
@over
def curlnoise(state: uint32, xy: vec2f) -> vec2f:
"""
Divergence-free vector field based on the gradient of a Perlin noise function.
"""
...
@over
def curlnoise(state: uint32, xyz: vec3f) -> vec3f:
"""
Divergence-free vector field based on the curl of three Perlin noise functions.
"""
...
@over
def curlnoise(state: uint32, xyzt: vec4f) -> vec3f:
"""
Divergence-free vector field based on the curl of three Perlin noise functions.
"""
...
@over
def printf():
"""
Allows printing formatted strings, using C-style format specifiers.
"""
...
@over
def tid() -> int:
"""
Return the current thread index. Note that this is the *global* index of the thread in the range [0, dim)
where dim is the parameter passed to kernel launch.
"""
...
@over
def tid() -> Tuple[int, int]:
"""
Return the current thread indices for a 2d kernel launch. Use ``i,j = wp.tid()`` syntax to retrieve the coordinates inside the kernel thread grid.
"""
...
@over
def tid() -> Tuple[int, int, int]:
"""
Return the current thread indices for a 3d kernel launch. Use ``i,j,k = wp.tid()`` syntax to retrieve the coordinates inside the kernel thread grid.
"""
...
@over
def tid() -> Tuple[int, int, int, int]:
"""
Return the current thread indices for a 4d kernel launch. Use ``i,j,k,l = wp.tid()`` syntax to retrieve the coordinates inside the kernel thread grid.
"""
...
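# Example (hedged sketch; names are illustrative): in a 2d launch ``wp.tid()``
# unpacks to one index per launch dimension.
@wp.kernel
def _example_checkerboard(img: wp.array2d(dtype=float)):
    i, j = wp.tid()
    img[i, j] = float((i + j) % 2)

# launched with, e.g.: wp.launch(_example_checkerboard, dim=img.shape, inputs=[img])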
@over
def select(cond: bool, arg1: Any, arg2: Any):
"""
Select between two arguments, if cond is false then return ``arg1``, otherwise return ``arg2``
"""
...
@over
def select(cond: int8, arg1: Any, arg2: Any):
"""
Select between two arguments, if cond is false then return ``arg1``, otherwise return ``arg2``
"""
...
@over
def select(cond: uint8, arg1: Any, arg2: Any):
"""
Select between two arguments, if cond is false then return ``arg1``, otherwise return ``arg2``
"""
...
@over
def select(cond: int16, arg1: Any, arg2: Any):
"""
Select between two arguments, if cond is false then return ``arg1``, otherwise return ``arg2``
"""
...
@over
def select(cond: uint16, arg1: Any, arg2: Any):
"""
Select between two arguments, if cond is false then return ``arg1``, otherwise return ``arg2``
"""
...
@over
def select(cond: int32, arg1: Any, arg2: Any):
"""
Select between two arguments, if cond is false then return ``arg1``, otherwise return ``arg2``
"""
...
@over
def select(cond: uint32, arg1: Any, arg2: Any):
"""
Select between two arguments, if cond is false then return ``arg1``, otherwise return ``arg2``
"""
...
@over
def select(cond: int64, arg1: Any, arg2: Any):
"""
Select between two arguments, if cond is false then return ``arg1``, otherwise return ``arg2``
"""
...
@over
def select(cond: uint64, arg1: Any, arg2: Any):
"""
Select between two arguments, if cond is false then return ``arg1``, otherwise return ``arg2``
"""
...
@over
def select(arr: Array[Any], arg1: Any, arg2: Any):
"""
Select between two arguments, if array is null then return ``arg1``, otherwise return ``arg2``
"""
...
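# Example (hedged sketch; names are illustrative): note the argument order, as
# ``arg1`` is returned when the condition is *false*.
@wp.kernel
def _example_relu_select(x: wp.array(dtype=float), out: wp.array(dtype=float)):
    tid = wp.tid()
    out[tid] = wp.select(x[tid] > 0.0, 0.0, x[tid])  # false -> 0.0, true -> x[tid]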
@over
def atomic_add(a: Array[Any], i: int32, value: Any):
"""
Atomically add ``value`` onto the array at location given by index.
"""
...
@over
def atomic_add(a: Array[Any], i: int32, j: int32, value: Any):
"""
Atomically add ``value`` onto the array at location given by indices.
"""
...
@over
def atomic_add(a: Array[Any], i: int32, j: int32, k: int32, value: Any):
"""
Atomically add ``value`` onto the array at location given by indices.
"""
...
@over
def atomic_add(a: Array[Any], i: int32, j: int32, k: int32, l: int32, value: Any):
"""
Atomically add ``value`` onto the array at location given by indices.
"""
...
@over
def atomic_add(a: FabricArray[Any], i: int32, value: Any):
"""
Atomically add ``value`` onto the array at location given by index.
"""
...
@over
def atomic_add(a: FabricArray[Any], i: int32, j: int32, value: Any):
"""
Atomically add ``value`` onto the array at location given by indices.
"""
...
@over
def atomic_add(a: FabricArray[Any], i: int32, j: int32, k: int32, value: Any):
"""
Atomically add ``value`` onto the array at location given by indices.
"""
...
@over
def atomic_add(a: FabricArray[Any], i: int32, j: int32, k: int32, l: int32, value: Any):
"""
Atomically add ``value`` onto the array at location given by indices.
"""
...
@over
def atomic_add(a: IndexedFabricArray[Any], i: int32, value: Any):
"""
Atomically add ``value`` onto the array at location given by index.
"""
...
@over
def atomic_add(a: IndexedFabricArray[Any], i: int32, j: int32, value: Any):
"""
Atomically add ``value`` onto the array at location given by indices.
"""
...
@over
def atomic_add(a: IndexedFabricArray[Any], i: int32, j: int32, k: int32, value: Any):
"""
Atomically add ``value`` onto the array at location given by indices.
"""
...
@over
def atomic_add(a: IndexedFabricArray[Any], i: int32, j: int32, k: int32, l: int32, value: Any):
"""
Atomically add ``value`` onto the array at location given by indices.
"""
...
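# Example (hedged sketch; names are illustrative): building a histogram, where
# many threads may increment the same bin concurrently; the atomic guarantees
# no updates are lost.
@wp.kernel
def _example_histogram(values: wp.array(dtype=int), bins: wp.array(dtype=int)):
    tid = wp.tid()
    wp.atomic_add(bins, values[tid], 1)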
@over
def atomic_sub(a: Array[Any], i: int32, value: Any):
"""
Atomically subtract ``value`` from the array at location given by index.
"""
...
@over
def atomic_sub(a: Array[Any], i: int32, j: int32, value: Any):
"""
Atomically subtract ``value`` from the array at location given by indices.
"""
...
@over
def atomic_sub(a: Array[Any], i: int32, j: int32, k: int32, value: Any):
"""
Atomically subtract ``value`` from the array at location given by indices.
"""
...
@over
def atomic_sub(a: Array[Any], i: int32, j: int32, k: int32, l: int32, value: Any):
"""
Atomically subtract ``value`` from the array at location given by indices.
"""
...
@over
def atomic_sub(a: FabricArray[Any], i: int32, value: Any):
"""
Atomically subtract ``value`` from the array at location given by index.
"""
...
@over
def atomic_sub(a: FabricArray[Any], i: int32, j: int32, value: Any):
"""
Atomically subtract ``value`` from the array at location given by indices.
"""
...
@over
def atomic_sub(a: FabricArray[Any], i: int32, j: int32, k: int32, value: Any):
"""
Atomically subtract ``value`` from the array at location given by indices.
"""
...
@over
def atomic_sub(a: FabricArray[Any], i: int32, j: int32, k: int32, l: int32, value: Any):
"""
Atomically subtract ``value`` from the array at location given by indices.
"""
...
@over
def atomic_sub(a: IndexedFabricArray[Any], i: int32, value: Any):
"""
Atomically subtract ``value`` from the array at location given by index.
"""
...
@over
def atomic_sub(a: IndexedFabricArray[Any], i: int32, j: int32, value: Any):
"""
Atomically subtract ``value`` from the array at location given by indices.
"""
...
@over
def atomic_sub(a: IndexedFabricArray[Any], i: int32, j: int32, k: int32, value: Any):
"""
Atomically subtract ``value`` from the array at location given by indices.
"""
...
@over
def atomic_sub(a: IndexedFabricArray[Any], i: int32, j: int32, k: int32, l: int32, value: Any):
"""
Atomically subtract ``value`` from the array at location given by indices.
"""
...
@over
def atomic_min(a: Array[Any], i: int32, value: Any):
"""
Compute the minimum of ``value`` and ``array[index]`` and atomically update the array. Note that for vectors and matrices the operation is only atomic on a per-component basis.
"""
...
@over
def atomic_min(a: Array[Any], i: int32, j: int32, value: Any):
"""
Compute the minimum of ``value`` and ``array[index]`` and atomically update the array. Note that for vectors and matrices the operation is only atomic on a per-component basis.
"""
...
@over
def atomic_min(a: Array[Any], i: int32, j: int32, k: int32, value: Any):
"""
Compute the minimum of ``value`` and ``array[index]`` and atomically update the array. Note that for vectors and matrices the operation is only atomic on a per-component basis.
"""
...
@over
def atomic_min(a: Array[Any], i: int32, j: int32, k: int32, l: int32, value: Any):
"""
Compute the minimum of ``value`` and ``array[index]`` and atomically update the array. Note that for vectors and matrices the operation is only atomic on a per-component basis.
"""
...
@over
def atomic_min(a: FabricArray[Any], i: int32, value: Any):
"""
Compute the minimum of ``value`` and ``array[index]`` and atomically update the array. Note that for vectors and matrices the operation is only atomic on a per-component basis.
"""
...
@over
def atomic_min(a: FabricArray[Any], i: int32, j: int32, value: Any):
"""
Compute the minimum of ``value`` and ``array[index]`` and atomically update the array. Note that for vectors and matrices the operation is only atomic on a per-component basis.
"""
...
@over
def atomic_min(a: FabricArray[Any], i: int32, j: int32, k: int32, value: Any):
"""
Compute the minimum of ``value`` and ``array[index]`` and atomically update the array. Note that for vectors and matrices the operation is only atomic on a per-component basis.
"""
...
@over
def atomic_min(a: FabricArray[Any], i: int32, j: int32, k: int32, l: int32, value: Any):
"""
Compute the minimum of ``value`` and ``array[index]`` and atomically update the array. Note that for vectors and matrices the operation is only atomic on a per-component basis.
"""
...
@over
def atomic_min(a: IndexedFabricArray[Any], i: int32, value: Any):
"""
Compute the minimum of ``value`` and ``array[index]`` and atomically update the array. Note that for vectors and matrices the operation is only atomic on a per-component basis.
"""
...
@over
def atomic_min(a: IndexedFabricArray[Any], i: int32, j: int32, value: Any):
"""
Compute the minimum of ``value`` and ``array[index]`` and atomically update the array. Note that for vectors and matrices the operation is only atomic on a per-component basis.
"""
...
@over
def atomic_min(a: IndexedFabricArray[Any], i: int32, j: int32, k: int32, value: Any):
"""
Compute the minimum of ``value`` and ``array[index]`` and atomically update the array. Note that for vectors and matrices the operation is only atomic on a per-component basis.
"""
...
@over
def atomic_min(a: IndexedFabricArray[Any], i: int32, j: int32, k: int32, l: int32, value: Any):
"""
Compute the minimum of ``value`` and ``array[index]`` and atomically update the array. Note that for vectors and matrices the operation is only atomic on a per-component basis.
"""
...
@over
def atomic_max(a: Array[Any], i: int32, value: Any):
"""
Compute the maximum of ``value`` and ``array[index]`` and atomically update the array. Note that for vectors and matrices the operation is only atomic on a per-component basis.
"""
...
@over
def atomic_max(a: Array[Any], i: int32, j: int32, value: Any):
"""
Compute the maximum of ``value`` and ``array[index]`` and atomically update the array. Note that for vectors and matrices the operation is only atomic on a per-component basis.
"""
...
@over
def atomic_max(a: Array[Any], i: int32, j: int32, k: int32, value: Any):
"""
Compute the maximum of ``value`` and ``array[index]`` and atomically update the array. Note that for vectors and matrices the operation is only atomic on a per-component basis.
"""
...
@over
def atomic_max(a: Array[Any], i: int32, j: int32, k: int32, l: int32, value: Any):
"""
Compute the maximum of ``value`` and ``array[index]`` and atomically update the array. Note that for vectors and matrices the operation is only atomic on a per-component basis.
"""
...
@over
def atomic_max(a: FabricArray[Any], i: int32, value: Any):
"""
Compute the maximum of ``value`` and ``array[index]`` and atomically update the array. Note that for vectors and matrices the operation is only atomic on a per-component basis.
"""
...
@over
def atomic_max(a: FabricArray[Any], i: int32, j: int32, value: Any):
"""
Compute the maximum of ``value`` and ``array[index]`` and atomically update the array. Note that for vectors and matrices the operation is only atomic on a per-component basis.
"""
...
@over
def atomic_max(a: FabricArray[Any], i: int32, j: int32, k: int32, value: Any):
"""
Compute the maximum of ``value`` and ``array[index]`` and atomically update the array. Note that for vectors and matrices the operation is only atomic on a per-component basis.
"""
...
@over
def atomic_max(a: FabricArray[Any], i: int32, j: int32, k: int32, l: int32, value: Any):
"""
Compute the maximum of ``value`` and ``array[index]`` and atomically update the array. Note that for vectors and matrices the operation is only atomic on a per-component basis.
"""
...
@over
def atomic_max(a: IndexedFabricArray[Any], i: int32, value: Any):
"""
Compute the maximum of ``value`` and ``array[index]`` and atomically update the array. Note that for vectors and matrices the operation is only atomic on a per-component basis.
"""
...
@over
def atomic_max(a: IndexedFabricArray[Any], i: int32, j: int32, value: Any):
"""
Compute the maximum of ``value`` and ``array[index]`` and atomically update the array. Note that for vectors and matrices the operation is only atomic on a per-component basis.
"""
...
@over
def atomic_max(a: IndexedFabricArray[Any], i: int32, j: int32, k: int32, value: Any):
"""
Compute the maximum of ``value`` and ``array[index]`` and atomically update the array. Note that for vectors and matrices the operation is only atomic on a per-component basis.
"""
...
@over
def atomic_max(a: IndexedFabricArray[Any], i: int32, j: int32, k: int32, l: int32, value: Any):
"""
Compute the maximum of ``value`` and ``array[index]`` and atomically update the array. Note that for vectors and matrices the operation is only atomic on a per-component basis.
"""
...
@over
def lerp(a: Float, b: Float, t: Float) -> Float:
"""
Linearly interpolate two values a and b using factor t, computed as ``a*(1-t) + b*t``
"""
...
@over
def lerp(a: Vector[Any, Float], b: Vector[Any, Float], t: Float) -> Vector[Any, Float]:
"""
Linearly interpolate two values a and b using factor t, computed as ``a*(1-t) + b*t``
"""
...
@over
def lerp(a: Matrix[Any, Any, Float], b: Matrix[Any, Any, Float], t: Float) -> Matrix[Any, Any, Float]:
"""
Linearly interpolate two values a and b using factor t, computed as ``a*(1-t) + b*t``
"""
...
@over
def lerp(a: Quaternion[Float], b: Quaternion[Float], t: Float) -> Quaternion[Float]:
"""
Linearly interpolate two values a and b using factor t, computed as ``a*(1-t) + b*t``
"""
...
@over
def lerp(a: Transformation[Float], b: Transformation[Float], t: Float) -> Transformation[Float]:
"""
Linearly interpolate two values a and b using factor t, computed as ``a*(1-t) + b*t``
"""
...
@over
def smoothstep(edge0: Float, edge1: Float, x: Float) -> Float:
"""
Smoothly interpolate between two values edge0 and edge1 using a factor x, and return a result between 0 and 1 using a cubic Hermite interpolation after clamping
"""
...
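# Example (hedged sketch; names are illustrative): easing a blend factor with
# ``smoothstep`` before using it to ``lerp`` between two vector fields.
@wp.kernel
def _example_blend(
    a: wp.array(dtype=wp.vec3), b: wp.array(dtype=wp.vec3), t: float, out: wp.array(dtype=wp.vec3)
):
    tid = wp.tid()
    s = wp.smoothstep(0.0, 1.0, t)
    out[tid] = wp.lerp(a[tid], b[tid], s)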
@over
def expect_near(arg1: Float, arg2: Float, tolerance: Float):
"""
Prints an error to stdout if ``arg1`` and ``arg2`` differ by more than ``tolerance`` in magnitude
"""
...
@over
def expect_near(arg1: vec3f, arg2: vec3f, tolerance: float32):
"""
Prints an error to stdout if any corresponding elements of ``arg1`` and ``arg2`` differ by more than ``tolerance`` in magnitude
"""
...
@over
def lower_bound(arr: Array[Scalar], value: Scalar) -> int:
"""
Search a sorted array for the first element greater than or equal to ``value``.
"""
...
@over
def lower_bound(arr: Array[Scalar], arr_begin: int32, arr_end: int32, value: Scalar) -> int:
"""
Search the sorted array range [arr_begin, arr_end) for the first element greater than or equal to ``value``.
"""
...
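# Example (hedged sketch; names are illustrative): binning samples against
# sorted bucket edges with a binary search.
@wp.kernel
def _example_bucketize(
    edges: wp.array(dtype=float), x: wp.array(dtype=float), bucket: wp.array(dtype=int)
):
    tid = wp.tid()
    bucket[tid] = wp.lower_bound(edges, x[tid])  # index of first edge >= x[tid]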
@over
def add(x: Scalar, y: Scalar) -> Scalar:
""" """
...
@over
def add(x: Vector[Any, Scalar], y: Vector[Any, Scalar]) -> Vector[Any, Scalar]:
""" """
...
@over
def add(x: Quaternion[Scalar], y: Quaternion[Scalar]) -> Quaternion[Scalar]:
""" """
...
@over
def add(x: Matrix[Any, Any, Scalar], y: Matrix[Any, Any, Scalar]) -> Matrix[Any, Any, Scalar]:
""" """
...
@over
def add(x: Transformation[Scalar], y: Transformation[Scalar]) -> Transformation[Scalar]:
""" """
...
@over
def sub(x: Scalar, y: Scalar) -> Scalar:
""" """
...
@over
def sub(x: Vector[Any, Scalar], y: Vector[Any, Scalar]) -> Vector[Any, Scalar]:
""" """
...
@over
def sub(x: Matrix[Any, Any, Scalar], y: Matrix[Any, Any, Scalar]) -> Matrix[Any, Any, Scalar]:
""" """
...
@over
def sub(x: Quaternion[Scalar], y: Quaternion[Scalar]) -> Quaternion[Scalar]:
""" """
...
@over
def sub(x: Transformation[Scalar], y: Transformation[Scalar]) -> Transformation[Scalar]:
""" """
...
@over
def bit_and(x: Int, y: Int) -> Int:
""" """
...
@over
def bit_or(x: Int, y: Int) -> Int:
""" """
...
@over
def bit_xor(x: Int, y: Int) -> Int:
""" """
...
@over
def lshift(x: Int, y: Int) -> Int:
""" """
...
@over
def rshift(x: Int, y: Int) -> Int:
""" """
...
@over
def invert(x: Int) -> Int:
""" """
...
@over
def mul(x: Scalar, y: Scalar) -> Scalar:
""" """
...
@over
def mul(x: Vector[Any, Scalar], y: Scalar) -> Vector[Any, Scalar]:
""" """
...
@over
def mul(x: Scalar, y: Vector[Any, Scalar]) -> Vector[Any, Scalar]:
""" """
...
@over
def mul(x: Quaternion[Scalar], y: Scalar) -> Quaternion[Scalar]:
""" """
...
@over
def mul(x: Scalar, y: Quaternion[Scalar]) -> Quaternion[Scalar]:
""" """
...
@over
def mul(x: Quaternion[Scalar], y: Quaternion[Scalar]) -> Quaternion[Scalar]:
""" """
...
@over
def mul(x: Scalar, y: Matrix[Any, Any, Scalar]) -> Matrix[Any, Any, Scalar]:
""" """
...
@over
def mul(x: Matrix[Any, Any, Scalar], y: Scalar) -> Matrix[Any, Any, Scalar]:
""" """
...
@over
def mul(x: Matrix[Any, Any, Scalar], y: Vector[Any, Scalar]) -> Vector[Any, Scalar]:
""" """
...
@over
def mul(x: Matrix[Any, Any, Scalar], y: Matrix[Any, Any, Scalar]):
""" """
...
@over
def mul(x: Transformation[Scalar], y: Transformation[Scalar]) -> Transformation[Scalar]:
""" """
...
@over
def mul(x: Scalar, y: Transformation[Scalar]) -> Transformation[Scalar]:
""" """
...
@over
def mul(x: Transformation[Scalar], y: Scalar) -> Transformation[Scalar]:
""" """
...
@over
def mod(x: Scalar, y: Scalar) -> Scalar:
""" """
...
@over
def div(x: Scalar, y: Scalar) -> Scalar:
""" """
...
@over
def div(x: Vector[Any, Scalar], y: Scalar) -> Vector[Any, Scalar]:
""" """
...
@over
def div(x: Matrix[Any, Any, Scalar], y: Scalar) -> Matrix[Any, Any, Scalar]:
""" """
...
@over
def div(x: Quaternion[Scalar], y: Scalar) -> Quaternion[Scalar]:
""" """
...
@over
def floordiv(x: Scalar, y: Scalar) -> Scalar:
""" """
...
@over
def pos(x: Scalar) -> Scalar:
""" """
...
@over
def pos(x: Vector[Any, Scalar]) -> Vector[Any, Scalar]:
""" """
...
@over
def pos(x: Quaternion[Scalar]) -> Quaternion[Scalar]:
""" """
...
@over
def pos(x: Matrix[Any, Any, Scalar]) -> Matrix[Any, Any, Scalar]:
""" """
...
@over
def neg(x: Scalar) -> Scalar:
""" """
...
@over
def neg(x: Vector[Any, Scalar]) -> Vector[Any, Scalar]:
""" """
...
@over
def neg(x: Quaternion[Scalar]) -> Quaternion[Scalar]:
""" """
...
@over
def neg(x: Matrix[Any, Any, Scalar]) -> Matrix[Any, Any, Scalar]:
""" """
...
@over
def unot(b: bool) -> bool:
""" """
...
@over
def unot(b: int8) -> bool:
""" """
...
@over
def unot(b: uint8) -> bool:
""" """
...
@over
def unot(b: int16) -> bool:
""" """
...
@over
def unot(b: uint16) -> bool:
""" """
...
@over
def unot(b: int32) -> bool:
""" """
...
@over
def unot(b: uint32) -> bool:
""" """
...
@over
def unot(b: int64) -> bool:
""" """
...
@over
def unot(b: uint64) -> bool:
""" """
...
@over
def unot(a: Array[Any]) -> bool:
""" """
...
| warp-main | warp/stubs.py |
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import ast
import ctypes
import hashlib
import inspect
import os
import platform
import sys
import types
from copy import copy as shallowcopy
from types import ModuleType
from typing import Any, Callable, Dict, List, Mapping, Optional, Sequence, Tuple, Union
import numpy as np
import warp
import warp.build
import warp.codegen
import warp.config
# represents either a built-in or user-defined function
def create_value_func(type):
def value_func(args, kwds, templates):
return type
return value_func
def get_function_args(func):
"""Ensures that all function arguments are annotated and returns a dictionary mapping from argument name to its type."""
argspec = inspect.getfullargspec(func)
# use source-level argument annotations
if len(argspec.annotations) < len(argspec.args):
raise RuntimeError(f"Incomplete argument annotations on function {func.__qualname__}")
return argspec.annotations
class Function:
def __init__(
self,
func,
key,
namespace,
input_types=None,
value_func=None,
template_func=None,
module=None,
variadic=False,
initializer_list_func=None,
export=False,
doc="",
group="",
hidden=False,
skip_replay=False,
missing_grad=False,
generic=False,
native_func=None,
defaults=None,
custom_replay_func=None,
skip_forward_codegen=False,
skip_reverse_codegen=False,
custom_reverse_num_input_args=-1,
custom_reverse_mode=False,
overloaded_annotations=None,
code_transformers=[],
skip_adding_overload=False,
):
self.func = func # points to Python function decorated with @wp.func, may be None for builtins
self.key = key
self.namespace = namespace
self.value_func = value_func # a function that takes a list of args and a list of templates and returns the value type, e.g.: load(array, index) returns the type of value being loaded
self.template_func = template_func
self.input_types = {}
self.export = export
self.doc = doc
self.group = group
self.module = module
self.variadic = variadic # function can take an arbitrary number of inputs, e.g.: printf()
self.defaults = defaults
# Function instance for a custom implementation of the replay pass
self.custom_replay_func = custom_replay_func
self.custom_grad_func = None
if initializer_list_func is None:
self.initializer_list_func = lambda x, y: False
else:
self.initializer_list_func = (
initializer_list_func # True if the arguments should be emitted as an initializer list in the c++ code
)
self.hidden = hidden # function will not be listed in docs
self.skip_replay = (
skip_replay # whether or not operation will be performed during the forward replay in the backward pass
)
self.missing_grad = missing_grad # whether or not builtin is missing a corresponding adjoint
self.generic = generic
# allow registering builtin functions with a different name in Python from the native code
if native_func is None:
self.native_func = key
else:
self.native_func = native_func
if func:
# user-defined function
# generic and concrete overload lookups by type signature
self.user_templates = {}
self.user_overloads = {}
# user defined (Python) function
self.adj = warp.codegen.Adjoint(
func,
is_user_function=True,
skip_forward_codegen=skip_forward_codegen,
skip_reverse_codegen=skip_reverse_codegen,
custom_reverse_num_input_args=custom_reverse_num_input_args,
custom_reverse_mode=custom_reverse_mode,
overload_annotations=overloaded_annotations,
transformers=code_transformers,
)
# record input types
for name, type in self.adj.arg_types.items():
if name == "return":
self.value_func = create_value_func(type)
else:
self.input_types[name] = type
else:
# builtin function
# embedded linked list of all overloads
# the builtin_functions dictionary holds
# the list head for a given key (func name)
self.overloads = []
# builtin (native) function, canonicalize argument types
for k, v in input_types.items():
self.input_types[k] = warp.types.type_to_warp(v)
# cache mangled name
if self.is_simple():
self.mangled_name = self.mangle()
else:
self.mangled_name = None
if not skip_adding_overload:
self.add_overload(self)
# add to current module
if module:
module.register_function(self, skip_adding_overload)
def __call__(self, *args, **kwargs):
# handles calling a builtin (native) function
# as if it was a Python function, i.e.: from
# within the CPython interpreter rather than
# from within a kernel (experimental).
if self.is_builtin() and self.mangled_name:
# store last error during overload resolution
error = None
for f in self.overloads:
if f.generic:
continue
# try and find builtin in the warp.dll
if not hasattr(warp.context.runtime.core, f.mangled_name):
raise RuntimeError(
f"Couldn't find function {self.key} with mangled name {f.mangled_name} in the Warp native library"
)
try:
# try and pack args into what the function expects
params = []
for i, (arg_name, arg_type) in enumerate(f.input_types.items()):
a = args[i]
# try to convert to a value type (vec3, mat33, etc)
if issubclass(arg_type, ctypes.Array):
# wrap the arg_type (which is an ctypes.Array) in a structure
# to ensure parameter is passed to the .dll by value rather than reference
class ValueArg(ctypes.Structure):
_fields_ = [("value", arg_type)]
x = ValueArg()
# force conversion to ndarray first (handles tuple / list, Gf.Vec3 case)
if isinstance(a, ctypes.Array) is False:
# assume the caller wants the float32 version of the function so we don't just
# grab an overload for a random data type:
if arg_type._type_ != ctypes.c_float:
raise RuntimeError(
f"Error calling function '{f.key}', parameter for argument '{arg_name}' does not have c_float type."
)
a = np.array(a)
# flatten to 1D array
v = a.flatten()
if len(v) != arg_type._length_:
raise RuntimeError(
f"Error calling function '{f.key}', parameter for argument '{arg_name}' has length {len(v)}, but expected {arg_type._length_}. Could not convert parameter to {arg_type}."
)
for i in range(arg_type._length_):
x.value[i] = v[i]
else:
# already a built-in type, check it matches
if not warp.types.types_equal(type(a), arg_type):
raise RuntimeError(
f"Error calling function '{f.key}', parameter for argument '{arg_name}' has type '{type(a)}' but expected '{arg_type}'"
)
x.value = a
params.append(x)
else:
try:
# try to pack as a scalar type
params.append(arg_type._type_(a))
except Exception:
raise RuntimeError(
f"Error calling function {f.key}, unable to pack function parameter type {type(a)} for param {arg_name}, expected {arg_type}"
)
# returns the corresponding ctype for a scalar or vector warp type
def type_ctype(dtype):
if dtype == float:
return ctypes.c_float
elif dtype == int:
return ctypes.c_int32
elif issubclass(dtype, ctypes.Array):
return dtype
elif issubclass(dtype, ctypes.Structure):
return dtype
else:
# scalar type
return dtype._type_
value_type = type_ctype(f.value_func(None, None, None))
# construct return value (passed by address)
ret = value_type()
ret_addr = ctypes.c_void_p(ctypes.addressof(ret))
params.append(ret_addr)
c_func = getattr(warp.context.runtime.core, f.mangled_name)
c_func(*params)
if issubclass(value_type, ctypes.Array) or issubclass(value_type, ctypes.Structure):
# return vector types as ctypes
return ret
else:
# return scalar types as int/float
return ret.value
except Exception as e:
# couldn't pack values to match this overload
# store error and move onto the next one
error = e
continue
# overload resolution or call failed
# raise the last exception encountered
if error:
raise error
else:
raise RuntimeError(f"Error calling function '{f.key}'.")
elif hasattr(self, "user_overloads") and len(self.user_overloads):
# user-defined function with overloads
if len(kwargs):
raise RuntimeError(
f"Error calling function '{self.key}', keyword arguments are not supported for user-defined overloads."
)
# try and find a matching overload
for f in self.user_overloads.values():
if len(f.input_types) != len(args):
continue
template_types = list(f.input_types.values())
arg_names = list(f.input_types.keys())
try:
# attempt to unify argument types with function template types
warp.types.infer_argument_types(args, template_types, arg_names)
return f.func(*args)
except Exception:
continue
raise RuntimeError(f"Error calling function '{self.key}', no overload found for arguments {args}")
else:
# user-defined function with no overloads
if self.func is None:
raise RuntimeError(f"Error calling function '{self.key}', function is undefined")
# this function has no overloads, call it like a plain Python function
return self.func(*args, **kwargs)
def is_builtin(self):
return self.func is None
def is_simple(self):
if self.variadic:
return False
# only export simple types that don't use arrays
for k, v in self.input_types.items():
if isinstance(v, warp.array) or v == Any or v == Callable or v == Tuple:
return False
return_type = ""
try:
# todo: construct a default value for each of the functions args
# so we can generate the return type for overloaded functions
return_type = type_str(self.value_func(None, None, None))
except Exception:
return False
if return_type.startswith("Tuple"):
return False
return True
def mangle(self):
# builds a mangled name for the C-exported
# function, e.g.: builtin_normalize_vec3()
name = "builtin_" + self.key
types = []
for t in self.input_types.values():
types.append(t.__name__)
return "_".join([name, *types])
def add_overload(self, f):
if self.is_builtin():
# todo: note that it is an error to add two functions
# with the exact same signature, as this would cause errors
# at compile time. We should check here if there
# is a previously created function with the same signature
self.overloads.append(f)
# make sure variadic overloads appear last so non variadic
# ones are matched first:
self.overloads.sort(key=lambda f: f.variadic)
else:
# get function signature based on the input types
sig = warp.types.get_signature(
f.input_types.values(), func_name=f.key, arg_names=list(f.input_types.keys())
)
# check if generic
if warp.types.is_generic_signature(sig):
if sig in self.user_templates:
raise RuntimeError(
f"Duplicate generic function overload {self.key} with arguments {f.input_types.values()}"
)
self.user_templates[sig] = f
else:
if sig in self.user_overloads:
raise RuntimeError(
f"Duplicate function overload {self.key} with arguments {f.input_types.values()}"
)
self.user_overloads[sig] = f
def get_overload(self, arg_types):
assert not self.is_builtin()
sig = warp.types.get_signature(arg_types, func_name=self.key)
f = self.user_overloads.get(sig)
if f is not None:
return f
else:
for f in self.user_templates.values():
if len(f.input_types) != len(arg_types):
continue
# try to match the given types to the function template types
template_types = list(f.input_types.values())
args_matched = True
for i in range(len(arg_types)):
if not warp.types.type_matches_template(arg_types[i], template_types[i]):
args_matched = False
break
if args_matched:
# instantiate this function with the specified argument types
arg_names = f.input_types.keys()
overload_annotations = dict(zip(arg_names, arg_types))
ovl = shallowcopy(f)
ovl.adj = warp.codegen.Adjoint(f.func, overload_annotations)
ovl.input_types = overload_annotations
ovl.value_func = None
self.user_overloads[sig] = ovl
return ovl
# failed to find overload
return None
def __repr__(self):
inputs_str = ", ".join([f"{k}: {v.__name__}" for k, v in self.input_types.items()])
return f"<Function {self.key}({inputs_str})>"
class KernelHooks:
def __init__(self, forward, backward):
self.forward = forward
self.backward = backward
# caches source and compiled entry points for a kernel (will be populated after module loads)
class Kernel:
def __init__(self, func, key, module, options=None, code_transformers=[]):
self.func = func
self.module = module
self.key = key
self.options = {} if options is None else options
self.adj = warp.codegen.Adjoint(func, transformers=code_transformers)
# check if generic
self.is_generic = False
for arg_type in self.adj.arg_types.values():
if warp.types.type_is_generic(arg_type):
self.is_generic = True
break
# unique signature (used to differentiate instances of generic kernels during codegen)
self.sig = ""
# known overloads for generic kernels, indexed by type signature
self.overloads = {}
# argument indices by name
self.arg_indices = dict((a.label, i) for i, a in enumerate(self.adj.args))
if module:
module.register_kernel(self)
def infer_argument_types(self, args):
template_types = list(self.adj.arg_types.values())
if len(args) != len(template_types):
raise RuntimeError(f"Invalid number of arguments for kernel {self.key}")
arg_names = list(self.adj.arg_types.keys())
return warp.types.infer_argument_types(args, template_types, arg_names)
def add_overload(self, arg_types):
if len(arg_types) != len(self.adj.arg_types):
raise RuntimeError(f"Invalid number of arguments for kernel {self.key}")
arg_names = list(self.adj.arg_types.keys())
template_types = list(self.adj.arg_types.values())
# make sure all argument types are concrete and match the kernel parameters
for i in range(len(arg_types)):
if not warp.types.type_matches_template(arg_types[i], template_types[i]):
if warp.types.type_is_generic(arg_types[i]):
raise TypeError(
f"Kernel {self.key} argument '{arg_names[i]}' cannot be generic, got {arg_types[i]}"
)
else:
raise TypeError(
f"Kernel {self.key} argument '{arg_names[i]}' type mismatch: expected {template_types[i]}, got {arg_types[i]}"
)
# get a type signature from the given argument types
sig = warp.types.get_signature(arg_types, func_name=self.key)
if sig in self.overloads:
raise RuntimeError(
f"Duplicate overload for kernel {self.key}, an overload with the given arguments already exists"
)
overload_annotations = dict(zip(arg_names, arg_types))
# instantiate this kernel with the given argument types
ovl = shallowcopy(self)
ovl.adj = warp.codegen.Adjoint(self.func, overload_annotations)
ovl.is_generic = False
ovl.overloads = {}
ovl.sig = sig
self.overloads[sig] = ovl
self.module.unload()
return ovl
def get_overload(self, arg_types):
sig = warp.types.get_signature(arg_types, func_name=self.key)
ovl = self.overloads.get(sig)
if ovl is not None:
return ovl
else:
return self.add_overload(arg_types)
def get_mangled_name(self):
if self.sig:
return f"{self.key}_{self.sig}"
else:
return self.key
# ----------------------
# decorator to register function, @func
def func(f):
name = warp.codegen.make_full_qualified_name(f)
m = get_module(f.__module__)
func = Function(
func=f, key=name, namespace="", module=m, value_func=None
) # value_type not known yet, will be inferred during Adjoint.build()
# return the top of the list of overloads for this key
return m.functions[name]
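# Example usage (hedged sketch): a function declared with ``@wp.func`` can be
# called from kernels and from other Warp functions; its return type is
# inferred when the enclosing module is built, e.g.:
#
#     @wp.func
#     def sqr(x: float):
#         return x * x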
def func_grad(forward_fn):
"""
Decorator to register a custom gradient function for a given forward function.
The function signature must correspond to one of the function overloads in the following way:
the first part of the input arguments are the original input variables with the same types as their
corresponding arguments in the original function, and the second part of the input arguments are the
adjoint variables of the output variables (if available) of the original function with the same types as the
output variables. The function must not return anything.
"""
def wrapper(grad_fn):
generic = any(warp.types.type_is_generic(x) for x in forward_fn.input_types.values())
if generic:
raise RuntimeError(
f"Cannot define custom grad definition for {forward_fn.key} since functions with generic input arguments are not yet supported."
)
reverse_args = {}
reverse_args.update(forward_fn.input_types)
# create temporary Adjoint instance to analyze the function signature
adj = warp.codegen.Adjoint(
grad_fn, skip_forward_codegen=True, skip_reverse_codegen=False, transformers=forward_fn.adj.transformers
)
from warp.types import types_equal
grad_args = adj.args
grad_sig = warp.types.get_signature([arg.type for arg in grad_args], func_name=forward_fn.key)
generic = any(warp.types.type_is_generic(x.type) for x in grad_args)
if generic:
raise RuntimeError(
f"Cannot define custom grad definition for {forward_fn.key} since the provided grad function has generic input arguments."
)
def match_function(f):
# check whether the function overload f matches the signature of the provided gradient function
if not hasattr(f.adj, "return_var"):
f.adj.build(None)
expected_args = list(f.input_types.items())
if f.adj.return_var is not None:
expected_args += [(f"adj_ret_{var.label}", var.type) for var in f.adj.return_var]
if len(grad_args) != len(expected_args):
return False
if any(not types_equal(a.type, exp_type) for a, (_, exp_type) in zip(grad_args, expected_args)):
return False
return True
def add_custom_grad(f: Function):
# register custom gradient function
f.custom_grad_func = Function(
grad_fn,
key=f.key,
namespace=f.namespace,
input_types=reverse_args,
value_func=None,
module=f.module,
template_func=f.template_func,
skip_forward_codegen=True,
custom_reverse_mode=True,
custom_reverse_num_input_args=len(f.input_types),
skip_adding_overload=False,
code_transformers=f.adj.transformers,
)
f.adj.skip_reverse_codegen = True
if hasattr(forward_fn, "user_overloads") and len(forward_fn.user_overloads):
# find matching overload for which this grad function is defined
for sig, f in forward_fn.user_overloads.items():
if not grad_sig.startswith(sig):
continue
if match_function(f):
add_custom_grad(f)
return
raise RuntimeError(
f"No function overload found for gradient function {grad_fn.__qualname__} for function {forward_fn.key}"
)
else:
# resolve return variables
forward_fn.adj.build(None)
expected_args = list(forward_fn.input_types.items())
if forward_fn.adj.return_var is not None:
expected_args += [(f"adj_ret_{var.label}", var.type) for var in forward_fn.adj.return_var]
# check if the signature matches this function
if match_function(forward_fn):
add_custom_grad(forward_fn)
else:
raise RuntimeError(
f"Gradient function {grad_fn.__qualname__} for function {forward_fn.key} has an incorrect signature. The arguments must match the "
"forward function arguments plus the adjoint variables corresponding to the return variables:"
f"\n{', '.join(map(lambda nt: f'{nt[0]}: {nt[1].__name__}', expected_args))}"
)
return wrapper
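# A minimal custom-gradient sketch (hypothetical function names; assumes the
# wp.adjoint accumulator available inside gradient bodies). The grad function
# receives the forward inputs followed by the adjoint of each return value:
#
#   @wp.func
#   def square(x: float) -> float:
#       return x * x
#
#   @wp.func_grad(square)
#   def adj_square(x: float, adj_ret: float):
#       wp.adjoint[x] += 2.0 * x * adj_ret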
def func_replay(forward_fn):
"""
Decorator to register a custom replay function for a given forward function.
    The replay function is called in place of the forward function during the forward phase of the
    backward pass (replay mode); by default, the forward function itself is used.
    The provided function must match the signature of one of the forward function's overloads.
"""
def wrapper(replay_fn):
generic = any(warp.types.type_is_generic(x) for x in forward_fn.input_types.values())
if generic:
raise RuntimeError(
f"Cannot define custom replay definition for {forward_fn.key} since functions with generic input arguments are not yet supported."
)
args = get_function_args(replay_fn)
arg_types = list(args.values())
generic = any(warp.types.type_is_generic(x) for x in arg_types)
if generic:
raise RuntimeError(
f"Cannot define custom replay definition for {forward_fn.key} since the provided replay function has generic input arguments."
)
f = forward_fn.get_overload(arg_types)
if f is None:
inputs_str = ", ".join([f"{k}: {v.__name__}" for k, v in args.items()])
raise RuntimeError(
f"Could not find forward definition of function {forward_fn.key} that matches custom replay definition with arguments:\n{inputs_str}"
)
f.custom_replay_func = Function(
replay_fn,
key=f"replay_{f.key}",
namespace=f.namespace,
input_types=f.input_types,
value_func=f.value_func,
module=f.module,
template_func=f.template_func,
skip_reverse_codegen=True,
skip_adding_overload=True,
code_transformers=f.adj.transformers,
)
return wrapper
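# A minimal custom-replay sketch (hypothetical names, reusing the square()
# example above). The replay body runs during the forward phase of the
# backward pass and must match an existing forward overload:
#
#   @wp.func_replay(square)
#   def replay_square(x: float) -> float:
#       return x * x  # e.g. recompute rather than load saved state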
# decorator to register kernel, @kernel
# enable_backward can be used to override adjoint code generation per kernel
def kernel(f=None, *, enable_backward=None):
def wrapper(f, *args, **kwargs):
options = {}
if enable_backward is not None:
options["enable_backward"] = enable_backward
m = get_module(f.__module__)
k = Kernel(
func=f,
key=warp.codegen.make_full_qualified_name(f),
module=m,
options=options,
)
return k
if f is None:
# Arguments were passed to the decorator.
return wrapper
return wrapper(f)
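# A minimal usage sketch (hypothetical kernel; `a` is an array of length n):
#
#   @wp.kernel
#   def scale(a: wp.array(dtype=float), s: float):
#       tid = wp.tid()
#       a[tid] = a[tid] * s
#
#   wp.launch(scale, dim=n, inputs=[a, 2.0])
#
# Passing enable_backward=False skips adjoint code generation for kernels
# that never participate in differentiation.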
# decorator to register struct, @struct
def struct(c):
m = get_module(c.__module__)
s = warp.codegen.Struct(cls=c, key=warp.codegen.make_full_qualified_name(c), module=m)
return s
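# A minimal usage sketch (hypothetical struct):
#
#   @wp.struct
#   class Particle:
#       pos: wp.vec3
#       mass: float
#
# Struct types can be used as kernel arguments and as array dtypes,
# e.g. wp.zeros(n, dtype=Particle).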
# overload a kernel with the given argument types
def overload(kernel, arg_types=None):
if isinstance(kernel, Kernel):
# handle cases where user calls us directly, e.g. wp.overload(kernel, [args...])
if not kernel.is_generic:
raise RuntimeError(f"Only generic kernels can be overloaded. Kernel {kernel.key} is not generic")
if isinstance(arg_types, list):
arg_list = arg_types
elif isinstance(arg_types, dict):
# substitute named args
arg_list = [a.type for a in kernel.adj.args]
for arg_name, arg_type in arg_types.items():
idx = kernel.arg_indices.get(arg_name)
if idx is None:
raise RuntimeError(f"Invalid argument name '{arg_name}' in overload of kernel {kernel.key}")
arg_list[idx] = arg_type
elif arg_types is None:
arg_list = []
else:
raise TypeError("Kernel overload types must be given in a list or dict")
# return new kernel overload
return kernel.add_overload(arg_list)
elif isinstance(kernel, types.FunctionType):
# handle cases where user calls us as a function decorator (@wp.overload)
# ensure this function name corresponds to a kernel
fn = kernel
module = get_module(fn.__module__)
kernel = module.kernels.get(fn.__name__)
if kernel is None:
raise RuntimeError(f"Failed to find a kernel named '{fn.__name__}' in module {fn.__module__}")
if not kernel.is_generic:
raise RuntimeError(f"Only generic kernels can be overloaded. Kernel {kernel.key} is not generic")
# ensure the function is defined without a body, only ellipsis (...), pass, or a string expression
        # TODO: should we allow defining a new body for kernel overloads?
source = inspect.getsource(fn)
tree = ast.parse(source)
assert isinstance(tree, ast.Module)
assert isinstance(tree.body[0], ast.FunctionDef)
func_body = tree.body[0].body
for node in func_body:
if isinstance(node, ast.Pass):
continue
elif isinstance(node, ast.Expr) and isinstance(node.value, (ast.Str, ast.Ellipsis)):
continue
raise RuntimeError(
"Illegal statement in kernel overload definition. Only pass, ellipsis (...), comments, or docstrings are allowed"
)
# ensure all arguments are annotated
argspec = inspect.getfullargspec(fn)
if len(argspec.annotations) < len(argspec.args):
raise RuntimeError(f"Incomplete argument annotations on kernel overload {fn.__name__}")
# get type annotation list
arg_list = []
for arg_name, arg_type in argspec.annotations.items():
if arg_name != "return":
arg_list.append(arg_type)
# add new overload, but we must return the original kernel from @wp.overload decorator!
kernel.add_overload(arg_list)
return kernel
else:
raise RuntimeError("wp.overload() called with invalid argument!")
builtin_functions = {}
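# add_builtin() registers a native function that kernels can call (and that is
# exposed on the warp module when export=True). A minimal sketch with a fixed
# return type; the key and its native implementation are hypothetical:
#
#   add_builtin(
#       "wrap_angle",
#       input_types={"theta": float},
#       value_type=float,
#       doc="Wrap an angle to the range [-pi, pi]",
#       group="Scalar Math",
#   )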
def add_builtin(
key,
input_types={},
value_type=None,
value_func=None,
template_func=None,
doc="",
namespace="wp::",
variadic=False,
initializer_list_func=None,
export=True,
group="Other",
hidden=False,
skip_replay=False,
missing_grad=False,
native_func=None,
defaults=None,
):
# wrap simple single-type functions with a value_func()
if value_func is None:
def value_func(args, kwds, templates):
return value_type
if initializer_list_func is None:
def initializer_list_func(args, templates):
return False
if defaults is None:
defaults = {}
# Add specialized versions of this builtin if it's generic by matching arguments against
# hard coded types. We do this so you can use hard coded warp types outside kernels:
generic = any(warp.types.type_is_generic(x) for x in input_types.values())
if generic and export:
        # get a list of existing generic vector types (this also includes matrix
        # and quaternion types) so we can match arguments against them:
generic_vtypes = [x for x in warp.types.vector_types if hasattr(x, "_wp_generic_type_str_")]
# deduplicate identical types:
def typekey(t):
return f"{t._wp_generic_type_str_}_{t._wp_type_params_}"
typedict = {typekey(t): t for t in generic_vtypes}
generic_vtypes = [typedict[k] for k in sorted(typedict.keys())]
# collect the parent type names of all the generic arguments:
        def generic_names(arg_type_list):
            for t in arg_type_list:
                if hasattr(t, "_wp_generic_type_str_"):
                    yield t._wp_generic_type_str_
                elif warp.types.type_is_generic_scalar(t):
                    yield t.__name__
genericset = set(generic_names(input_types.values()))
# for each of those type names, get a list of all hard coded types derived
# from them:
def derived(name):
if name == "Float":
return warp.types.float_types
elif name == "Scalar":
return warp.types.scalar_types
elif name == "Int":
return warp.types.int_types
return [x for x in generic_vtypes if x._wp_generic_type_str_ == name]
gtypes = {k: derived(k) for k in genericset}
# find the scalar data types supported by all the arguments by intersecting
# sets:
def scalar_type(t):
if t in warp.types.scalar_types:
return t
return [p for p in t._wp_type_params_ if p in warp.types.scalar_types][0]
scalartypes = [{scalar_type(x) for x in gtypes[k]} for k in gtypes.keys()]
if scalartypes:
scalartypes = scalartypes.pop().intersection(*scalartypes)
scalartypes = list(scalartypes)
scalartypes.sort(key=str)
# generate function calls for each of these scalar types:
for stype in scalartypes:
# find concrete types for this scalar type (eg if the scalar type is float32
# this dict will look something like this:
# {"vec":[wp.vec2,wp.vec3,wp.vec4], "mat":[wp.mat22,wp.mat33,wp.mat44]})
consistenttypes = {k: [x for x in v if scalar_type(x) == stype] for k, v in gtypes.items()}
def typelist(param):
if warp.types.type_is_generic_scalar(param):
return [stype]
if hasattr(param, "_wp_generic_type_str_"):
                    matches = consistenttypes[param._wp_generic_type_str_]
                    return [x for x in matches if warp.types.types_equal(param, x, match_generic=True)]
return [param]
            # generate function calls for all combinations of these argument types
import itertools
typelists = [typelist(param) for param in input_types.values()]
for argtypes in itertools.product(*typelists):
# Some of these argument lists won't work, eg if the function is mul(), we won't be
# able to do a matrix vector multiplication for a mat22 and a vec3, so we call value_func
# on the generated argument list and skip generation if it fails.
# This also gives us the return type, which we keep for later:
try:
return_type = value_func([warp.codegen.Var("", t) for t in argtypes], {}, [])
except Exception:
continue
# The return_type might just be vector_t(length=3,dtype=wp.float32), so we've got to match that
# in the list of hard coded types so it knows it's returning one of them:
if hasattr(return_type, "_wp_generic_type_str_"):
return_type_match = [
x
for x in generic_vtypes
if x._wp_generic_type_str_ == return_type._wp_generic_type_str_
and x._wp_type_params_ == return_type._wp_type_params_
]
if not return_type_match:
continue
return_type = return_type_match[0]
# finally we can generate a function call for these concrete types:
add_builtin(
key,
input_types=dict(zip(input_types.keys(), argtypes)),
value_type=return_type,
doc=doc,
namespace=namespace,
variadic=variadic,
initializer_list_func=initializer_list_func,
export=export,
group=group,
hidden=True,
skip_replay=skip_replay,
missing_grad=missing_grad,
)
func = Function(
func=None,
key=key,
namespace=namespace,
input_types=input_types,
value_func=value_func,
template_func=template_func,
variadic=variadic,
initializer_list_func=initializer_list_func,
export=export,
doc=doc,
group=group,
hidden=hidden,
skip_replay=skip_replay,
missing_grad=missing_grad,
generic=generic,
native_func=native_func,
defaults=defaults,
)
if key in builtin_functions:
builtin_functions[key].add_overload(func)
else:
builtin_functions[key] = func
# export means the function will be added to the `warp` module namespace
# so that users can call it directly from the Python interpreter
if export is True:
if hasattr(warp, key):
# check that we haven't already created something at this location
# if it's just an overload stub for auto-complete then overwrite it
if getattr(warp, key).__name__ != "_overload_dummy":
raise RuntimeError(
f"Trying to register builtin function '{key}' that would overwrite existing object."
)
setattr(warp, key, func)
# global dictionary of modules
user_modules = {}
def get_module(name):
# some modules might be manually imported using `importlib` without being
# registered into `sys.modules`
parent = sys.modules.get(name, None)
parent_loader = None if parent is None else parent.__loader__
if name in user_modules:
# check if the Warp module was created using a different loader object
# if so, we assume the file has changed and we recreate the module to
# clear out old kernels / functions
if user_modules[name].loader is not parent_loader:
old_module = user_modules[name]
# Unload the old module and recursively unload all of its dependents.
# This ensures that dependent modules will be re-hashed and reloaded on next launch.
# The visited set tracks modules already visited to avoid circular references.
def unload_recursive(module, visited):
module.unload()
visited.add(module)
for d in module.dependents:
if d not in visited:
unload_recursive(d, visited)
unload_recursive(old_module, visited=set())
# clear out old kernels, funcs, struct definitions
old_module.kernels = {}
old_module.functions = {}
old_module.constants = []
old_module.structs = {}
old_module.loader = parent_loader
return user_modules[name]
else:
# else Warp module didn't exist yet, so create a new one
user_modules[name] = warp.context.Module(name, parent_loader)
return user_modules[name]
class ModuleBuilder:
def __init__(self, module, options):
self.functions = {}
self.structs = {}
self.options = options
self.module = module
# build all functions declared in the module
for func in module.functions.values():
for f in func.user_overloads.values():
self.build_function(f)
if f.custom_replay_func is not None:
self.build_function(f.custom_replay_func)
# build all kernel entry points
for kernel in module.kernels.values():
if not kernel.is_generic:
self.build_kernel(kernel)
else:
for k in kernel.overloads.values():
self.build_kernel(k)
def build_struct_recursive(self, struct: warp.codegen.Struct):
structs = []
stack = [struct]
while stack:
s = stack.pop()
if s not in structs:
structs.append(s)
for var in s.vars.values():
if isinstance(var.type, warp.codegen.Struct):
stack.append(var.type)
elif isinstance(var.type, warp.types.array) and isinstance(var.type.dtype, warp.codegen.Struct):
stack.append(var.type.dtype)
# Build them in reverse to generate a correct dependency order.
for s in reversed(structs):
self.build_struct(s)
def build_struct(self, struct):
self.structs[struct] = None
def build_kernel(self, kernel):
kernel.adj.build(self)
if kernel.adj.return_var is not None:
if kernel.adj.return_var.ctype() != "void":
raise TypeError(f"Error, kernels can't have return values, got: {kernel.adj.return_var}")
def build_function(self, func):
if func in self.functions:
return
else:
func.adj.build(self)
# complete the function return type after we have analyzed it (inferred from return statement in ast)
if not func.value_func:
def wrap(adj):
def value_type(args, kwds, templates):
if adj.return_var is None or len(adj.return_var) == 0:
return None
if len(adj.return_var) == 1:
return adj.return_var[0].type
else:
return [v.type for v in adj.return_var]
return value_type
func.value_func = wrap(func.adj)
# use dict to preserve import order
self.functions[func] = None
def codegen(self, device):
source = ""
# code-gen structs
for struct in self.structs.keys():
source += warp.codegen.codegen_struct(struct)
# code-gen all imported functions
for func in self.functions.keys():
source += warp.codegen.codegen_func(
func.adj, c_func_name=func.native_func, device=device, options=self.options
)
for kernel in self.module.kernels.values():
# each kernel gets an entry point in the module
if not kernel.is_generic:
source += warp.codegen.codegen_kernel(kernel, device=device, options=self.options)
source += warp.codegen.codegen_module(kernel, device=device)
else:
for k in kernel.overloads.values():
source += warp.codegen.codegen_kernel(k, device=device, options=self.options)
source += warp.codegen.codegen_module(k, device=device)
# add headers
if device == "cpu":
source = warp.codegen.cpu_module_header + source
else:
source = warp.codegen.cuda_module_header + source
return source
# -----------------------------------------------------
# stores all functions and kernels for a Python module
# and creates a content hash used to validate the build cache
class Module:
def __init__(self, name, loader):
self.name = name
self.loader = loader
self.kernels = {}
self.functions = {}
self.constants = []
self.structs = {}
self.cpu_module = None
self.cuda_modules = {} # module lookup by CUDA context
self.cpu_build_failed = False
self.cuda_build_failed = False
self.options = {
"max_unroll": 16,
"enable_backward": warp.config.enable_backward,
"fast_math": False,
"cuda_output": None, # supported values: "ptx", "cubin", or None (automatic)
"mode": warp.config.mode,
}
# kernel hook lookup per device
# hooks are stored with the module so they can be easily cleared when the module is reloaded.
# -> See ``Module.get_kernel_hooks()``
self.kernel_hooks = {}
# Module dependencies are determined by scanning each function
# and kernel for references to external functions and structs.
#
# When a referenced module is modified, all of its dependents need to be reloaded
# on the next launch. To detect this, a module's hash recursively includes
# all of its references.
# -> See ``Module.hash_module()``
#
# The dependency mechanism works for both static and dynamic (runtime) modifications.
# When a module is reloaded at runtime, we recursively unload all of its
# dependents, so that they will be re-hashed and reloaded on the next launch.
# -> See ``get_module()``
self.references = set() # modules whose content we depend on
self.dependents = set() # modules that depend on our content
# Since module hashing is recursive, we improve performance by caching the hash of the
# module contents (kernel source, function source, and struct source).
# After all kernels, functions, and structs are added to the module (usually at import time),
# the content hash doesn't change.
# -> See ``Module.hash_module_recursive()``
self.content_hash = None
def register_struct(self, struct):
self.structs[struct.key] = struct
        # force a reload of the module on next launch
self.unload()
def register_kernel(self, kernel):
self.kernels[kernel.key] = kernel
self.find_references(kernel.adj)
        # force a reload of the module on next launch
self.unload()
def register_function(self, func, skip_adding_overload=False):
if func.key not in self.functions:
self.functions[func.key] = func
else:
            # Check whether the new function's signature matches one that has
            # already been registered. If so, we simply override it, as Python
            # would; otherwise we register it as a new overload.
func_existing = self.functions[func.key]
sig = warp.types.get_signature(
func.input_types.values(),
func_name=func.key,
arg_names=list(func.input_types.keys()),
)
sig_existing = warp.types.get_signature(
func_existing.input_types.values(),
func_name=func_existing.key,
arg_names=list(func_existing.input_types.keys()),
)
if sig == sig_existing:
self.functions[func.key] = func
elif not skip_adding_overload:
func_existing.add_overload(func)
self.find_references(func.adj)
        # force a reload of the module on next launch
self.unload()
# collect all referenced functions / structs
# given the AST of a function or kernel
def find_references(self, adj):
def add_ref(ref):
if ref is not self:
self.references.add(ref)
ref.dependents.add(self)
# scan for function calls
for node in ast.walk(adj.tree):
if isinstance(node, ast.Call):
try:
# try to resolve the function
func, _ = adj.resolve_path(node.func)
# if this is a user-defined function, add a module reference
if isinstance(func, warp.context.Function) and func.module is not None:
add_ref(func.module)
except Exception:
# Lookups may fail for builtins, but that's ok.
# Lookups may also fail for functions in this module that haven't been imported yet,
# and that's ok too (not an external reference).
pass
# scan for structs
for arg in adj.args:
if isinstance(arg.type, warp.codegen.Struct) and arg.type.module is not None:
add_ref(arg.type.module)
def hash_module(self):
def get_annotations(obj: Any) -> Mapping[str, Any]:
"""Alternative to `inspect.get_annotations()` for Python 3.9 and older."""
# See https://docs.python.org/3/howto/annotations.html#accessing-the-annotations-dict-of-an-object-in-python-3-9-and-older
if isinstance(obj, type):
return obj.__dict__.get("__annotations__", {})
return getattr(obj, "__annotations__", {})
def get_type_name(type_hint):
if isinstance(type_hint, warp.codegen.Struct):
return get_type_name(type_hint.cls)
return type_hint
def hash_recursive(module, visited):
# Hash this module, including all referenced modules recursively.
# The visited set tracks modules already visited to avoid circular references.
# check if we need to update the content hash
if not module.content_hash:
# recompute content hash
ch = hashlib.sha256()
# struct source
for struct in module.structs.values():
s = ",".join(
"{}: {}".format(name, get_type_name(type_hint))
for name, type_hint in get_annotations(struct.cls).items()
)
ch.update(bytes(s, "utf-8"))
# functions source
for func in module.functions.values():
s = func.adj.source
ch.update(bytes(s, "utf-8"))
# kernel source
for kernel in module.kernels.values():
ch.update(bytes(kernel.adj.source, "utf-8"))
# for generic kernels the Python source is always the same,
# but we hash the type signatures of all the overloads
if kernel.is_generic:
for sig in sorted(kernel.overloads.keys()):
ch.update(bytes(sig, "utf-8"))
module.content_hash = ch.digest()
h = hashlib.sha256()
# content hash
h.update(module.content_hash)
# configuration parameters
for k in sorted(module.options.keys()):
s = f"{k}={module.options[k]}"
h.update(bytes(s, "utf-8"))
# ensure to trigger recompilation if flags affecting kernel compilation are changed
if warp.config.verify_fp:
h.update(bytes("verify_fp", "utf-8"))
h.update(bytes(warp.config.mode, "utf-8"))
# compile-time constants (global)
if warp.types._constant_hash:
h.update(warp.types._constant_hash.digest())
# recurse on references
visited.add(module)
sorted_deps = sorted(module.references, key=lambda m: m.name)
for dep in sorted_deps:
if dep not in visited:
dep_hash = hash_recursive(dep, visited)
h.update(dep_hash)
return h.digest()
return hash_recursive(self, visited=set())
def load(self, device):
from warp.utils import ScopedTimer
device = get_device(device)
if device.is_cpu:
# check if already loaded
if self.cpu_module:
return True
# avoid repeated build attempts
if self.cpu_build_failed:
return False
if not warp.is_cpu_available():
raise RuntimeError("Failed to build CPU module because no CPU buildchain was found")
else:
# check if already loaded
if device.context in self.cuda_modules:
return True
# avoid repeated build attempts
if self.cuda_build_failed:
return False
if not warp.is_cuda_available():
raise RuntimeError("Failed to build CUDA module because CUDA is not available")
with ScopedTimer(f"Module {self.name} load on device '{device}'", active=not warp.config.quiet):
build_path = warp.build.kernel_bin_dir
gen_path = warp.build.kernel_gen_dir
if not os.path.exists(build_path):
os.makedirs(build_path)
if not os.path.exists(gen_path):
os.makedirs(gen_path)
module_name = "wp_" + self.name
module_path = os.path.join(build_path, module_name)
module_hash = self.hash_module()
builder = ModuleBuilder(self, self.options)
if device.is_cpu:
obj_path = os.path.join(build_path, module_name)
obj_path = obj_path + ".o"
cpu_hash_path = module_path + ".cpu.hash"
# check cache
if warp.config.cache_kernels and os.path.isfile(cpu_hash_path) and os.path.isfile(obj_path):
with open(cpu_hash_path, "rb") as f:
cache_hash = f.read()
if cache_hash == module_hash:
runtime.llvm.load_obj(obj_path.encode("utf-8"), module_name.encode("utf-8"))
self.cpu_module = module_name
return True
# build
try:
cpp_path = os.path.join(gen_path, module_name + ".cpp")
# write cpp sources
cpp_source = builder.codegen("cpu")
                    with open(cpp_path, "w") as cpp_file:
                        cpp_file.write(cpp_source)
# build object code
with ScopedTimer("Compile x86", active=warp.config.verbose):
warp.build.build_cpu(
obj_path,
cpp_path,
mode=self.options["mode"],
fast_math=self.options["fast_math"],
verify_fp=warp.config.verify_fp,
)
# update cpu hash
with open(cpu_hash_path, "wb") as f:
f.write(module_hash)
# load the object code
runtime.llvm.load_obj(obj_path.encode("utf-8"), module_name.encode("utf-8"))
self.cpu_module = module_name
except Exception as e:
self.cpu_build_failed = True
raise (e)
elif device.is_cuda:
# determine whether to use PTX or CUBIN
if device.is_cubin_supported:
# get user preference specified either per module or globally
preferred_cuda_output = self.options.get("cuda_output") or warp.config.cuda_output
if preferred_cuda_output is not None:
use_ptx = preferred_cuda_output == "ptx"
else:
# determine automatically: older drivers may not be able to handle PTX generated using newer
# CUDA Toolkits, in which case we fall back on generating CUBIN modules
use_ptx = runtime.driver_version >= runtime.toolkit_version
else:
# CUBIN not an option, must use PTX (e.g. CUDA Toolkit too old)
use_ptx = True
if use_ptx:
output_arch = min(device.arch, warp.config.ptx_target_arch)
output_path = module_path + f".sm{output_arch}.ptx"
else:
output_arch = device.arch
output_path = module_path + f".sm{output_arch}.cubin"
cuda_hash_path = module_path + f".sm{output_arch}.hash"
# check cache
if warp.config.cache_kernels and os.path.isfile(cuda_hash_path) and os.path.isfile(output_path):
with open(cuda_hash_path, "rb") as f:
cache_hash = f.read()
if cache_hash == module_hash:
cuda_module = warp.build.load_cuda(output_path, device)
if cuda_module is not None:
self.cuda_modules[device.context] = cuda_module
return True
# build
try:
cu_path = os.path.join(gen_path, module_name + ".cu")
# write cuda sources
cu_source = builder.codegen("cuda")
                    with open(cu_path, "w") as cu_file:
                        cu_file.write(cu_source)
# generate PTX or CUBIN
with ScopedTimer("Compile CUDA", active=warp.config.verbose):
warp.build.build_cuda(
cu_path,
output_arch,
output_path,
config=self.options["mode"],
fast_math=self.options["fast_math"],
verify_fp=warp.config.verify_fp,
)
# update cuda hash
with open(cuda_hash_path, "wb") as f:
f.write(module_hash)
# load the module
cuda_module = warp.build.load_cuda(output_path, device)
if cuda_module is not None:
self.cuda_modules[device.context] = cuda_module
else:
raise Exception("Failed to load CUDA module")
except Exception as e:
self.cuda_build_failed = True
raise (e)
return True
def unload(self):
if self.cpu_module:
runtime.llvm.unload_obj(self.cpu_module.encode("utf-8"))
self.cpu_module = None
# need to unload the CUDA module from all CUDA contexts where it is loaded
# note: we ensure that this doesn't change the current CUDA context
if self.cuda_modules:
saved_context = runtime.core.cuda_context_get_current()
for context, module in self.cuda_modules.items():
runtime.core.cuda_unload_module(context, module)
runtime.core.cuda_context_set_current(saved_context)
self.cuda_modules = {}
# clear kernel hooks
self.kernel_hooks = {}
# clear content hash
self.content_hash = None
# lookup and cache kernel entry points based on name, called after compilation / module load
def get_kernel_hooks(self, kernel, device):
# get all hooks for this device
device_hooks = self.kernel_hooks.get(device.context)
if device_hooks is None:
self.kernel_hooks[device.context] = device_hooks = {}
# look up this kernel
hooks = device_hooks.get(kernel)
if hooks is not None:
return hooks
name = kernel.get_mangled_name()
if device.is_cpu:
func = ctypes.CFUNCTYPE(None)
forward = func(
runtime.llvm.lookup(self.cpu_module.encode("utf-8"), (name + "_cpu_forward").encode("utf-8"))
)
backward = func(
runtime.llvm.lookup(self.cpu_module.encode("utf-8"), (name + "_cpu_backward").encode("utf-8"))
)
else:
cu_module = self.cuda_modules[device.context]
forward = runtime.core.cuda_get_kernel(
device.context, cu_module, (name + "_cuda_kernel_forward").encode("utf-8")
)
backward = runtime.core.cuda_get_kernel(
device.context, cu_module, (name + "_cuda_kernel_backward").encode("utf-8")
)
hooks = KernelHooks(forward, backward)
device_hooks[kernel] = hooks
return hooks
# -------------------------------------------
# execution context
# a simple allocator
# TODO: use a pooled allocator to avoid hitting the system allocator
class Allocator:
def __init__(self, device):
self.device = device
def alloc(self, size_in_bytes, pinned=False):
if self.device.is_cuda:
if self.device.is_capturing:
raise RuntimeError(f"Cannot allocate memory on device {self} while graph capture is active")
return runtime.core.alloc_device(self.device.context, size_in_bytes)
elif self.device.is_cpu:
if pinned:
return runtime.core.alloc_pinned(size_in_bytes)
else:
return runtime.core.alloc_host(size_in_bytes)
def free(self, ptr, size_in_bytes, pinned=False):
if self.device.is_cuda:
if self.device.is_capturing:
raise RuntimeError(f"Cannot free memory on device {self} while graph capture is active")
return runtime.core.free_device(self.device.context, ptr)
elif self.device.is_cpu:
if pinned:
return runtime.core.free_pinned(ptr)
else:
return runtime.core.free_host(ptr)
class ContextGuard:
def __init__(self, device):
self.device = device
def __enter__(self):
if self.device.is_cuda:
runtime.core.cuda_context_push_current(self.device.context)
elif is_cuda_available():
self.saved_context = runtime.core.cuda_context_get_current()
def __exit__(self, exc_type, exc_value, traceback):
if self.device.is_cuda:
runtime.core.cuda_context_pop_current()
elif is_cuda_available():
runtime.core.cuda_context_set_current(self.saved_context)
class Stream:
def __init__(self, device=None, **kwargs):
self.owner = False
# we can't use get_device() if called during init, but we can use an explicit Device arg
if runtime is not None:
device = runtime.get_device(device)
elif not isinstance(device, Device):
raise RuntimeError(
"A device object is required when creating a stream before or during Warp initialization"
)
if not device.is_cuda:
raise RuntimeError(f"Device {device} is not a CUDA device")
# we pass cuda_stream through kwargs because cuda_stream=None is actually a valid value (CUDA default stream)
if "cuda_stream" in kwargs:
self.cuda_stream = kwargs["cuda_stream"]
else:
self.cuda_stream = device.runtime.core.cuda_stream_create(device.context)
if not self.cuda_stream:
raise RuntimeError(f"Failed to create stream on device {device}")
self.owner = True
self.device = device
def __del__(self):
if self.owner:
runtime.core.cuda_stream_destroy(self.device.context, self.cuda_stream)
def record_event(self, event=None):
if event is None:
event = Event(self.device)
elif event.device != self.device:
raise RuntimeError(
f"Event from device {event.device} cannot be recorded on stream from device {self.device}"
)
runtime.core.cuda_event_record(self.device.context, event.cuda_event, self.cuda_stream)
return event
def wait_event(self, event):
runtime.core.cuda_stream_wait_event(self.device.context, self.cuda_stream, event.cuda_event)
def wait_stream(self, other_stream, event=None):
if event is None:
event = Event(other_stream.device)
runtime.core.cuda_stream_wait_stream(
self.device.context, self.cuda_stream, other_stream.cuda_stream, event.cuda_event
)
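# A minimal cross-stream synchronization sketch (hypothetical streams):
#
#   s1 = wp.Stream("cuda:0")
#   s2 = wp.Stream("cuda:0")
#   ...                  # kernels launched on s1
#   s2.wait_stream(s1)   # s2 will not run past work already submitted to s1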
class Event:
# event creation flags
class Flags:
DEFAULT = 0x0
BLOCKING_SYNC = 0x1
DISABLE_TIMING = 0x2
def __init__(self, device=None, cuda_event=None, enable_timing=False):
self.owner = False
device = get_device(device)
if not device.is_cuda:
raise RuntimeError(f"Device {device} is not a CUDA device")
self.device = device
if cuda_event is not None:
self.cuda_event = cuda_event
else:
flags = Event.Flags.DEFAULT
if not enable_timing:
flags |= Event.Flags.DISABLE_TIMING
self.cuda_event = runtime.core.cuda_event_create(device.context, flags)
if not self.cuda_event:
raise RuntimeError(f"Failed to create event on device {device}")
self.owner = True
def __del__(self):
if self.owner:
runtime.core.cuda_event_destroy(self.device.context, self.cuda_event)
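# A minimal event sketch (hypothetical streams s1, s2): record an event on one
# stream and make another wait on it, without blocking the host:
#
#   ev = s1.record_event()   # timing is disabled by default
#   s2.wait_event(ev)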
class Device:
def __init__(self, runtime, alias, ordinal=-1, is_primary=False, context=None):
self.runtime = runtime
self.alias = alias
self.ordinal = ordinal
self.is_primary = is_primary
# context can be None to avoid acquiring primary contexts until the device is used
self._context = context
# if the device context is not primary, it cannot be None
if ordinal != -1 and not is_primary:
assert context is not None
# streams will be created when context is acquired
self._stream = None
self.null_stream = None
# indicates whether CUDA graph capture is active for this device
self.is_capturing = False
self.allocator = Allocator(self)
self.context_guard = ContextGuard(self)
if self.ordinal == -1:
# CPU device
self.name = platform.processor() or "CPU"
self.arch = 0
self.is_uva = False
self.is_cubin_supported = False
self.is_mempool_supported = False
# TODO: add more device-specific dispatch functions
self.memset = runtime.core.memset_host
self.memtile = runtime.core.memtile_host
elif ordinal >= 0 and ordinal < runtime.core.cuda_device_get_count():
# CUDA device
self.name = runtime.core.cuda_device_get_name(ordinal).decode()
self.arch = runtime.core.cuda_device_get_arch(ordinal)
self.is_uva = runtime.core.cuda_device_is_uva(ordinal)
# check whether our NVRTC can generate CUBINs for this architecture
self.is_cubin_supported = self.arch in runtime.nvrtc_supported_archs
self.is_mempool_supported = runtime.core.cuda_device_is_memory_pool_supported(ordinal)
# Warn the user of a possible misconfiguration of their system
if not self.is_mempool_supported:
warp.utils.warn(
f"Support for stream ordered memory allocators was not detected on device {ordinal}. "
"This can prevent the use of graphs and/or result in poor performance. "
"Is the UVM driver enabled?"
)
# initialize streams unless context acquisition is postponed
if self._context is not None:
self.init_streams()
# TODO: add more device-specific dispatch functions
self.memset = lambda ptr, value, size: runtime.core.memset_device(self.context, ptr, value, size)
self.memtile = lambda ptr, src, srcsize, reps: runtime.core.memtile_device(
self.context, ptr, src, srcsize, reps
)
else:
raise RuntimeError(f"Invalid device ordinal ({ordinal})'")
def init_streams(self):
# create a stream for asynchronous work
self.stream = Stream(self)
# CUDA default stream for some synchronous operations
self.null_stream = Stream(self, cuda_stream=None)
@property
def is_cpu(self):
return self.ordinal < 0
@property
def is_cuda(self):
return self.ordinal >= 0
@property
def context(self):
if self._context is not None:
return self._context
elif self.is_primary:
# acquire primary context on demand
self._context = self.runtime.core.cuda_device_primary_context_retain(self.ordinal)
if self._context is None:
raise RuntimeError(f"Failed to acquire primary context for device {self}")
self.runtime.context_map[self._context] = self
# initialize streams
self.init_streams()
return self._context
@property
def has_context(self):
return self._context is not None
@property
def stream(self):
if self.context:
return self._stream
else:
raise RuntimeError(f"Device {self} is not a CUDA device")
@stream.setter
def stream(self, s):
if self.is_cuda:
if s.device != self:
raise RuntimeError(f"Stream from device {s.device} cannot be used on device {self}")
self._stream = s
self.runtime.core.cuda_context_set_stream(self.context, s.cuda_stream)
else:
raise RuntimeError(f"Device {self} is not a CUDA device")
@property
def has_stream(self):
return self._stream is not None
def __str__(self):
return self.alias
def __repr__(self):
return f"'{self.alias}'"
def __eq__(self, other):
if self is other:
return True
elif isinstance(other, Device):
return self.context == other.context
elif isinstance(other, str):
if other == "cuda":
return self == self.runtime.get_current_cuda_device()
else:
return other == self.alias
else:
return False
def make_current(self):
if self.context is not None:
self.runtime.core.cuda_context_set_current(self.context)
def can_access(self, other):
other = self.runtime.get_device(other)
if self.context == other.context:
return True
elif self.context is not None and other.context is not None:
return bool(self.runtime.core.cuda_context_can_access_peer(self.context, other.context))
else:
return False
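# A peer-access sketch (hypothetical device aliases and arrays):
#
#   if wp.get_device("cuda:0").can_access("cuda:1"):
#       wp.copy(dst_on_cuda1, src_on_cuda0)   # direct peer-to-peer copy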
""" Meta-type for arguments that can be resolved to a concrete Device.
"""
Devicelike = Union[Device, str, None]
class Graph:
def __init__(self, device: Device, exec: ctypes.c_void_p):
self.device = device
self.exec = exec
def __del__(self):
# use CUDA context guard to avoid side effects during garbage collection
with self.device.context_guard:
runtime.core.cuda_graph_destroy(self.device.context, self.exec)
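# A minimal graph-capture sketch, assuming the module-level helpers
# wp.capture_begin() / wp.capture_end() / wp.capture_launch() defined
# elsewhere in this file:
#
#   wp.capture_begin()
#   wp.launch(my_kernel, dim=n, inputs=[a])   # recorded, not executed
#   graph = wp.capture_end()
#   wp.capture_launch(graph)                  # replay the captured work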
class Runtime:
def __init__(self):
bin_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "bin")
if os.name == "nt":
            if sys.version_info >= (3, 8):
                # Python 3.8+ requires DLL search paths to be added explicitly
                os.add_dll_directory(bin_path)
else:
# Python < 3.8 we add dll directory to path
os.environ["PATH"] = bin_path + os.pathsep + os.environ["PATH"]
warp_lib = os.path.join(bin_path, "warp.dll")
llvm_lib = os.path.join(bin_path, "warp-clang.dll")
elif sys.platform == "darwin":
warp_lib = os.path.join(bin_path, "libwarp.dylib")
llvm_lib = os.path.join(bin_path, "libwarp-clang.dylib")
else:
warp_lib = os.path.join(bin_path, "warp.so")
llvm_lib = os.path.join(bin_path, "warp-clang.so")
self.core = self.load_dll(warp_lib)
if llvm_lib and os.path.exists(llvm_lib):
self.llvm = self.load_dll(llvm_lib)
# setup c-types for warp-clang.dll
self.llvm.lookup.restype = ctypes.c_uint64
else:
self.llvm = None
# setup c-types for warp.dll
self.core.alloc_host.argtypes = [ctypes.c_size_t]
self.core.alloc_host.restype = ctypes.c_void_p
self.core.alloc_pinned.argtypes = [ctypes.c_size_t]
self.core.alloc_pinned.restype = ctypes.c_void_p
self.core.alloc_device.argtypes = [ctypes.c_void_p, ctypes.c_size_t]
self.core.alloc_device.restype = ctypes.c_void_p
self.core.float_to_half_bits.argtypes = [ctypes.c_float]
self.core.float_to_half_bits.restype = ctypes.c_uint16
self.core.half_bits_to_float.argtypes = [ctypes.c_uint16]
self.core.half_bits_to_float.restype = ctypes.c_float
self.core.free_host.argtypes = [ctypes.c_void_p]
self.core.free_host.restype = None
self.core.free_pinned.argtypes = [ctypes.c_void_p]
self.core.free_pinned.restype = None
self.core.free_device.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
self.core.free_device.restype = None
self.core.memset_host.argtypes = [ctypes.c_void_p, ctypes.c_int, ctypes.c_size_t]
self.core.memset_host.restype = None
self.core.memset_device.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int, ctypes.c_size_t]
self.core.memset_device.restype = None
self.core.memtile_host.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_size_t, ctypes.c_size_t]
self.core.memtile_host.restype = None
self.core.memtile_device.argtypes = [
ctypes.c_void_p,
ctypes.c_void_p,
ctypes.c_void_p,
ctypes.c_size_t,
ctypes.c_size_t,
]
self.core.memtile_device.restype = None
self.core.memcpy_h2h.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_size_t]
self.core.memcpy_h2h.restype = None
self.core.memcpy_h2d.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_size_t]
self.core.memcpy_h2d.restype = None
self.core.memcpy_d2h.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_size_t]
self.core.memcpy_d2h.restype = None
self.core.memcpy_d2d.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_size_t]
self.core.memcpy_d2d.restype = None
self.core.memcpy_peer.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_size_t]
self.core.memcpy_peer.restype = None
self.core.array_copy_host.argtypes = [
ctypes.c_void_p,
ctypes.c_void_p,
ctypes.c_int,
ctypes.c_int,
ctypes.c_int,
]
self.core.array_copy_host.restype = ctypes.c_size_t
self.core.array_copy_device.argtypes = [
ctypes.c_void_p,
ctypes.c_void_p,
ctypes.c_void_p,
ctypes.c_int,
ctypes.c_int,
ctypes.c_int,
]
self.core.array_copy_device.restype = ctypes.c_size_t
self.core.array_fill_host.argtypes = [ctypes.c_void_p, ctypes.c_int, ctypes.c_void_p, ctypes.c_int]
self.core.array_fill_host.restype = None
self.core.array_fill_device.argtypes = [
ctypes.c_void_p,
ctypes.c_void_p,
ctypes.c_int,
ctypes.c_void_p,
ctypes.c_int,
]
self.core.array_fill_device.restype = None
self.core.array_sum_double_host.argtypes = [
ctypes.c_uint64,
ctypes.c_uint64,
ctypes.c_int,
ctypes.c_int,
ctypes.c_int,
]
self.core.array_sum_float_host.argtypes = [
ctypes.c_uint64,
ctypes.c_uint64,
ctypes.c_int,
ctypes.c_int,
ctypes.c_int,
]
self.core.array_sum_double_device.argtypes = [
ctypes.c_uint64,
ctypes.c_uint64,
ctypes.c_int,
ctypes.c_int,
ctypes.c_int,
]
self.core.array_sum_float_device.argtypes = [
ctypes.c_uint64,
ctypes.c_uint64,
ctypes.c_int,
ctypes.c_int,
ctypes.c_int,
]
self.core.array_inner_double_host.argtypes = [
ctypes.c_uint64,
ctypes.c_uint64,
ctypes.c_uint64,
ctypes.c_int,
ctypes.c_int,
ctypes.c_int,
ctypes.c_int,
]
self.core.array_inner_float_host.argtypes = [
ctypes.c_uint64,
ctypes.c_uint64,
ctypes.c_uint64,
ctypes.c_int,
ctypes.c_int,
ctypes.c_int,
ctypes.c_int,
]
self.core.array_inner_double_device.argtypes = [
ctypes.c_uint64,
ctypes.c_uint64,
ctypes.c_uint64,
ctypes.c_int,
ctypes.c_int,
ctypes.c_int,
ctypes.c_int,
]
self.core.array_inner_float_device.argtypes = [
ctypes.c_uint64,
ctypes.c_uint64,
ctypes.c_uint64,
ctypes.c_int,
ctypes.c_int,
ctypes.c_int,
ctypes.c_int,
]
self.core.array_scan_int_host.argtypes = [ctypes.c_uint64, ctypes.c_uint64, ctypes.c_int, ctypes.c_bool]
self.core.array_scan_float_host.argtypes = [ctypes.c_uint64, ctypes.c_uint64, ctypes.c_int, ctypes.c_bool]
self.core.array_scan_int_device.argtypes = [ctypes.c_uint64, ctypes.c_uint64, ctypes.c_int, ctypes.c_bool]
self.core.array_scan_float_device.argtypes = [ctypes.c_uint64, ctypes.c_uint64, ctypes.c_int, ctypes.c_bool]
self.core.radix_sort_pairs_int_host.argtypes = [ctypes.c_uint64, ctypes.c_uint64, ctypes.c_int]
self.core.radix_sort_pairs_int_device.argtypes = [ctypes.c_uint64, ctypes.c_uint64, ctypes.c_int]
self.core.runlength_encode_int_host.argtypes = [
ctypes.c_uint64,
ctypes.c_uint64,
ctypes.c_uint64,
ctypes.c_uint64,
ctypes.c_int,
]
self.core.runlength_encode_int_device.argtypes = [
ctypes.c_uint64,
ctypes.c_uint64,
ctypes.c_uint64,
ctypes.c_uint64,
ctypes.c_int,
]
self.core.bvh_create_host.restype = ctypes.c_uint64
self.core.bvh_create_host.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int]
self.core.bvh_create_device.restype = ctypes.c_uint64
self.core.bvh_create_device.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int]
self.core.bvh_destroy_host.argtypes = [ctypes.c_uint64]
self.core.bvh_destroy_device.argtypes = [ctypes.c_uint64]
self.core.bvh_refit_host.argtypes = [ctypes.c_uint64]
self.core.bvh_refit_device.argtypes = [ctypes.c_uint64]
self.core.mesh_create_host.restype = ctypes.c_uint64
self.core.mesh_create_host.argtypes = [
warp.types.array_t,
warp.types.array_t,
warp.types.array_t,
ctypes.c_int,
ctypes.c_int,
ctypes.c_int,
]
self.core.mesh_create_device.restype = ctypes.c_uint64
self.core.mesh_create_device.argtypes = [
ctypes.c_void_p,
warp.types.array_t,
warp.types.array_t,
warp.types.array_t,
ctypes.c_int,
ctypes.c_int,
ctypes.c_int,
]
self.core.mesh_destroy_host.argtypes = [ctypes.c_uint64]
self.core.mesh_destroy_device.argtypes = [ctypes.c_uint64]
self.core.mesh_refit_host.argtypes = [ctypes.c_uint64]
self.core.mesh_refit_device.argtypes = [ctypes.c_uint64]
self.core.hash_grid_create_host.argtypes = [ctypes.c_int, ctypes.c_int, ctypes.c_int]
self.core.hash_grid_create_host.restype = ctypes.c_uint64
self.core.hash_grid_destroy_host.argtypes = [ctypes.c_uint64]
self.core.hash_grid_update_host.argtypes = [ctypes.c_uint64, ctypes.c_float, ctypes.c_void_p, ctypes.c_int]
self.core.hash_grid_reserve_host.argtypes = [ctypes.c_uint64, ctypes.c_int]
self.core.hash_grid_create_device.argtypes = [ctypes.c_void_p, ctypes.c_int, ctypes.c_int, ctypes.c_int]
self.core.hash_grid_create_device.restype = ctypes.c_uint64
self.core.hash_grid_destroy_device.argtypes = [ctypes.c_uint64]
self.core.hash_grid_update_device.argtypes = [ctypes.c_uint64, ctypes.c_float, ctypes.c_void_p, ctypes.c_int]
self.core.hash_grid_reserve_device.argtypes = [ctypes.c_uint64, ctypes.c_int]
self.core.cutlass_gemm.argtypes = [
ctypes.c_int,
ctypes.c_int,
ctypes.c_int,
ctypes.c_int,
ctypes.c_char_p,
ctypes.c_void_p,
ctypes.c_void_p,
ctypes.c_void_p,
ctypes.c_void_p,
ctypes.c_float,
ctypes.c_float,
ctypes.c_bool,
ctypes.c_bool,
ctypes.c_bool,
ctypes.c_int,
]
        self.core.cutlass_gemm.restype = ctypes.c_bool
self.core.volume_create_host.argtypes = [ctypes.c_void_p, ctypes.c_uint64]
self.core.volume_create_host.restype = ctypes.c_uint64
self.core.volume_get_buffer_info_host.argtypes = [
ctypes.c_uint64,
ctypes.POINTER(ctypes.c_void_p),
ctypes.POINTER(ctypes.c_uint64),
]
self.core.volume_get_tiles_host.argtypes = [
ctypes.c_uint64,
ctypes.POINTER(ctypes.c_void_p),
ctypes.POINTER(ctypes.c_uint64),
]
self.core.volume_destroy_host.argtypes = [ctypes.c_uint64]
self.core.volume_create_device.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_uint64]
self.core.volume_create_device.restype = ctypes.c_uint64
self.core.volume_f_from_tiles_device.argtypes = [
ctypes.c_void_p,
ctypes.c_void_p,
ctypes.c_int,
ctypes.c_float,
ctypes.c_float,
ctypes.c_float,
ctypes.c_float,
ctypes.c_float,
ctypes.c_bool,
]
self.core.volume_f_from_tiles_device.restype = ctypes.c_uint64
self.core.volume_v_from_tiles_device.argtypes = [
ctypes.c_void_p,
ctypes.c_void_p,
ctypes.c_int,
ctypes.c_float,
ctypes.c_float,
ctypes.c_float,
ctypes.c_float,
ctypes.c_float,
ctypes.c_float,
ctypes.c_float,
ctypes.c_bool,
]
self.core.volume_v_from_tiles_device.restype = ctypes.c_uint64
self.core.volume_i_from_tiles_device.argtypes = [
ctypes.c_void_p,
ctypes.c_void_p,
ctypes.c_int,
ctypes.c_float,
ctypes.c_int,
ctypes.c_float,
ctypes.c_float,
ctypes.c_float,
ctypes.c_bool,
]
self.core.volume_i_from_tiles_device.restype = ctypes.c_uint64
self.core.volume_get_buffer_info_device.argtypes = [
ctypes.c_uint64,
ctypes.POINTER(ctypes.c_void_p),
ctypes.POINTER(ctypes.c_uint64),
]
self.core.volume_get_tiles_device.argtypes = [
ctypes.c_uint64,
ctypes.POINTER(ctypes.c_void_p),
ctypes.POINTER(ctypes.c_uint64),
]
self.core.volume_destroy_device.argtypes = [ctypes.c_uint64]
self.core.volume_get_voxel_size.argtypes = [
ctypes.c_uint64,
ctypes.POINTER(ctypes.c_float),
ctypes.POINTER(ctypes.c_float),
ctypes.POINTER(ctypes.c_float),
]
bsr_matrix_from_triplets_argtypes = [
ctypes.c_int,
ctypes.c_int,
ctypes.c_int,
ctypes.c_int,
ctypes.c_uint64,
ctypes.c_uint64,
ctypes.c_uint64,
ctypes.c_uint64,
ctypes.c_uint64,
ctypes.c_uint64,
]
self.core.bsr_matrix_from_triplets_float_host.argtypes = bsr_matrix_from_triplets_argtypes
self.core.bsr_matrix_from_triplets_double_host.argtypes = bsr_matrix_from_triplets_argtypes
self.core.bsr_matrix_from_triplets_float_device.argtypes = bsr_matrix_from_triplets_argtypes
self.core.bsr_matrix_from_triplets_double_device.argtypes = bsr_matrix_from_triplets_argtypes
self.core.bsr_matrix_from_triplets_float_host.restype = ctypes.c_int
self.core.bsr_matrix_from_triplets_double_host.restype = ctypes.c_int
self.core.bsr_matrix_from_triplets_float_device.restype = ctypes.c_int
self.core.bsr_matrix_from_triplets_double_device.restype = ctypes.c_int
bsr_transpose_argtypes = [
ctypes.c_int,
ctypes.c_int,
ctypes.c_int,
ctypes.c_int,
ctypes.c_int,
ctypes.c_uint64,
ctypes.c_uint64,
ctypes.c_uint64,
ctypes.c_uint64,
ctypes.c_uint64,
ctypes.c_uint64,
]
self.core.bsr_transpose_float_host.argtypes = bsr_transpose_argtypes
self.core.bsr_transpose_double_host.argtypes = bsr_transpose_argtypes
self.core.bsr_transpose_float_device.argtypes = bsr_transpose_argtypes
self.core.bsr_transpose_double_device.argtypes = bsr_transpose_argtypes
self.core.is_cuda_enabled.argtypes = None
self.core.is_cuda_enabled.restype = ctypes.c_int
self.core.is_cuda_compatibility_enabled.argtypes = None
self.core.is_cuda_compatibility_enabled.restype = ctypes.c_int
self.core.is_cutlass_enabled.argtypes = None
self.core.is_cutlass_enabled.restype = ctypes.c_int
self.core.cuda_driver_version.argtypes = None
self.core.cuda_driver_version.restype = ctypes.c_int
self.core.cuda_toolkit_version.argtypes = None
self.core.cuda_toolkit_version.restype = ctypes.c_int
self.core.nvrtc_supported_arch_count.argtypes = None
self.core.nvrtc_supported_arch_count.restype = ctypes.c_int
self.core.nvrtc_supported_archs.argtypes = [ctypes.POINTER(ctypes.c_int)]
self.core.nvrtc_supported_archs.restype = None
self.core.cuda_device_get_count.argtypes = None
self.core.cuda_device_get_count.restype = ctypes.c_int
self.core.cuda_device_primary_context_retain.argtypes = [ctypes.c_int]
self.core.cuda_device_primary_context_retain.restype = ctypes.c_void_p
self.core.cuda_device_get_name.argtypes = [ctypes.c_int]
self.core.cuda_device_get_name.restype = ctypes.c_char_p
self.core.cuda_device_get_arch.argtypes = [ctypes.c_int]
self.core.cuda_device_get_arch.restype = ctypes.c_int
self.core.cuda_device_is_uva.argtypes = [ctypes.c_int]
self.core.cuda_device_is_uva.restype = ctypes.c_int
self.core.cuda_context_get_current.argtypes = None
self.core.cuda_context_get_current.restype = ctypes.c_void_p
self.core.cuda_context_set_current.argtypes = [ctypes.c_void_p]
self.core.cuda_context_set_current.restype = None
self.core.cuda_context_push_current.argtypes = [ctypes.c_void_p]
self.core.cuda_context_push_current.restype = None
self.core.cuda_context_pop_current.argtypes = None
self.core.cuda_context_pop_current.restype = None
self.core.cuda_context_create.argtypes = [ctypes.c_int]
self.core.cuda_context_create.restype = ctypes.c_void_p
self.core.cuda_context_destroy.argtypes = [ctypes.c_void_p]
self.core.cuda_context_destroy.restype = None
self.core.cuda_context_synchronize.argtypes = [ctypes.c_void_p]
self.core.cuda_context_synchronize.restype = None
self.core.cuda_context_check.argtypes = [ctypes.c_void_p]
self.core.cuda_context_check.restype = ctypes.c_uint64
self.core.cuda_context_get_device_ordinal.argtypes = [ctypes.c_void_p]
self.core.cuda_context_get_device_ordinal.restype = ctypes.c_int
self.core.cuda_context_is_primary.argtypes = [ctypes.c_void_p]
self.core.cuda_context_is_primary.restype = ctypes.c_int
self.core.cuda_context_get_stream.argtypes = [ctypes.c_void_p]
self.core.cuda_context_get_stream.restype = ctypes.c_void_p
self.core.cuda_context_set_stream.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
self.core.cuda_context_set_stream.restype = None
self.core.cuda_context_can_access_peer.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
self.core.cuda_context_can_access_peer.restype = ctypes.c_int
self.core.cuda_stream_create.argtypes = [ctypes.c_void_p]
self.core.cuda_stream_create.restype = ctypes.c_void_p
self.core.cuda_stream_destroy.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
self.core.cuda_stream_destroy.restype = None
self.core.cuda_stream_synchronize.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
self.core.cuda_stream_synchronize.restype = None
self.core.cuda_stream_wait_event.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p]
self.core.cuda_stream_wait_event.restype = None
self.core.cuda_stream_wait_stream.argtypes = [
ctypes.c_void_p,
ctypes.c_void_p,
ctypes.c_void_p,
ctypes.c_void_p,
]
self.core.cuda_stream_wait_stream.restype = None
self.core.cuda_event_create.argtypes = [ctypes.c_void_p, ctypes.c_uint]
self.core.cuda_event_create.restype = ctypes.c_void_p
self.core.cuda_event_destroy.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
self.core.cuda_event_destroy.restype = None
self.core.cuda_event_record.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p]
self.core.cuda_event_record.restype = None
self.core.cuda_graph_begin_capture.argtypes = [ctypes.c_void_p]
self.core.cuda_graph_begin_capture.restype = None
self.core.cuda_graph_end_capture.argtypes = [ctypes.c_void_p]
self.core.cuda_graph_end_capture.restype = ctypes.c_void_p
self.core.cuda_graph_launch.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
self.core.cuda_graph_launch.restype = None
self.core.cuda_graph_destroy.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
self.core.cuda_graph_destroy.restype = None
self.core.cuda_compile_program.argtypes = [
ctypes.c_char_p,
ctypes.c_int,
ctypes.c_char_p,
ctypes.c_bool,
ctypes.c_bool,
ctypes.c_bool,
ctypes.c_bool,
ctypes.c_char_p,
]
self.core.cuda_compile_program.restype = ctypes.c_size_t
self.core.cuda_load_module.argtypes = [ctypes.c_void_p, ctypes.c_char_p]
self.core.cuda_load_module.restype = ctypes.c_void_p
self.core.cuda_unload_module.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
self.core.cuda_unload_module.restype = None
self.core.cuda_get_kernel.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_char_p]
self.core.cuda_get_kernel.restype = ctypes.c_void_p
self.core.cuda_launch_kernel.argtypes = [
ctypes.c_void_p,
ctypes.c_void_p,
ctypes.c_size_t,
ctypes.POINTER(ctypes.c_void_p),
]
self.core.cuda_launch_kernel.restype = ctypes.c_size_t
self.core.cuda_graphics_map.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
self.core.cuda_graphics_map.restype = None
self.core.cuda_graphics_unmap.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
self.core.cuda_graphics_unmap.restype = None
self.core.cuda_graphics_device_ptr_and_size.argtypes = [
ctypes.c_void_p,
ctypes.c_void_p,
ctypes.POINTER(ctypes.c_uint64),
ctypes.POINTER(ctypes.c_size_t),
]
self.core.cuda_graphics_device_ptr_and_size.restype = None
self.core.cuda_graphics_register_gl_buffer.argtypes = [ctypes.c_void_p, ctypes.c_uint32, ctypes.c_uint]
self.core.cuda_graphics_register_gl_buffer.restype = ctypes.c_void_p
self.core.cuda_graphics_unregister_resource.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
self.core.cuda_graphics_unregister_resource.restype = None
self.core.init.restype = ctypes.c_int
error = self.core.init()
if error != 0:
raise Exception("Warp initialization failed")
self.device_map = {} # device lookup by alias
self.context_map = {} # device lookup by context
# register CPU device
cpu_name = platform.processor()
if not cpu_name:
cpu_name = "CPU"
self.cpu_device = Device(self, "cpu")
self.device_map["cpu"] = self.cpu_device
self.context_map[None] = self.cpu_device
cuda_device_count = self.core.cuda_device_get_count()
if cuda_device_count > 0:
# get CUDA Toolkit and driver versions
self.toolkit_version = self.core.cuda_toolkit_version()
self.driver_version = self.core.cuda_driver_version()
# get all architectures supported by NVRTC
num_archs = self.core.nvrtc_supported_arch_count()
if num_archs > 0:
archs = (ctypes.c_int * num_archs)()
self.core.nvrtc_supported_archs(archs)
self.nvrtc_supported_archs = list(archs)
else:
self.nvrtc_supported_archs = []
# register CUDA devices
self.cuda_devices = []
self.cuda_primary_devices = []
for i in range(cuda_device_count):
alias = f"cuda:{i}"
device = Device(self, alias, ordinal=i, is_primary=True)
self.cuda_devices.append(device)
self.cuda_primary_devices.append(device)
self.device_map[alias] = device
# set default device
if cuda_device_count > 0:
if self.core.cuda_context_get_current() is not None:
self.set_default_device("cuda")
else:
self.set_default_device("cuda:0")
else:
# CUDA not available
self.set_default_device("cpu")
# initialize kernel cache
warp.build.init_kernel_cache(warp.config.kernel_cache_dir)
# print device and version information
if not warp.config.quiet:
print(f"Warp {warp.config.version} initialized:")
if cuda_device_count > 0:
toolkit_version = (self.toolkit_version // 1000, (self.toolkit_version % 1000) // 10)
driver_version = (self.driver_version // 1000, (self.driver_version % 1000) // 10)
print(
f" CUDA Toolkit: {toolkit_version[0]}.{toolkit_version[1]}, Driver: {driver_version[0]}.{driver_version[1]}"
)
else:
if self.core.is_cuda_enabled():
# Warp was compiled with CUDA support, but no devices are available
print(" CUDA devices not available")
else:
# Warp was compiled without CUDA support
print(" CUDA support not enabled in this build")
print(" Devices:")
print(f' "{self.cpu_device.alias}" | {self.cpu_device.name}')
for cuda_device in self.cuda_devices:
print(f' "{cuda_device.alias}" | {cuda_device.name} (sm_{cuda_device.arch})')
print(f" Kernel cache: {warp.config.kernel_cache_dir}")
# CUDA compatibility check
if cuda_device_count > 0 and not self.core.is_cuda_compatibility_enabled():
if self.driver_version < self.toolkit_version:
print("******************************************************************")
print("* WARNING: *")
print("* Warp was compiled without CUDA compatibility support *")
print("* (quick build). The CUDA Toolkit version used to build *")
print("* Warp is not fully supported by the current driver. *")
print("* Some CUDA functionality may not work correctly! *")
print("* Update the driver or rebuild Warp without the --quick flag. *")
print("******************************************************************")
# global tape
self.tape = None
def load_dll(self, dll_path):
try:
            if sys.version_info >= (3, 8):
dll = ctypes.CDLL(dll_path, winmode=0)
else:
dll = ctypes.CDLL(dll_path)
except OSError:
raise RuntimeError(f"Failed to load the shared library '{dll_path}'")
return dll
def get_device(self, ident: Devicelike = None) -> Device:
if isinstance(ident, Device):
return ident
elif ident is None:
return self.default_device
elif isinstance(ident, str):
if ident == "cuda":
return self.get_current_cuda_device()
else:
return self.device_map[ident]
else:
raise RuntimeError(f"Unable to resolve device from argument of type {type(ident)}")
def set_default_device(self, ident: Devicelike):
self.default_device = self.get_device(ident)
def get_current_cuda_device(self):
current_context = self.core.cuda_context_get_current()
if current_context is not None:
current_device = self.context_map.get(current_context)
if current_device is not None:
# this is a known device
return current_device
elif self.core.cuda_context_is_primary(current_context):
# this is a primary context that we haven't used yet
ordinal = self.core.cuda_context_get_device_ordinal(current_context)
device = self.cuda_devices[ordinal]
self.context_map[current_context] = device
return device
else:
# this is an unseen non-primary context, register it as a new device with a unique alias
alias = f"cuda!{current_context:x}"
return self.map_cuda_device(alias, current_context)
elif self.default_device.is_cuda:
return self.default_device
elif self.cuda_devices:
return self.cuda_devices[0]
else:
raise RuntimeError("CUDA is not available")
def rename_device(self, device, alias):
del self.device_map[device.alias]
device.alias = alias
self.device_map[alias] = device
return device
def map_cuda_device(self, alias, context=None) -> Device:
if context is None:
context = self.core.cuda_context_get_current()
if context is None:
raise RuntimeError(f"Unable to determine CUDA context for device alias '{alias}'")
# check if this alias already exists
if alias in self.device_map:
device = self.device_map[alias]
if context == device.context:
# device already exists with the same alias, that's fine
return device
else:
raise RuntimeError(f"Device alias '{alias}' already exists")
# check if this context already has an associated Warp device
if context in self.context_map:
# rename the device
device = self.context_map[context]
return self.rename_device(device, alias)
else:
# it's an unmapped context
# get the device ordinal
ordinal = self.core.cuda_context_get_device_ordinal(context)
# check if this is a primary context (we could get here if it's a device that hasn't been used yet)
if self.core.cuda_context_is_primary(context):
# rename the device
device = self.cuda_primary_devices[ordinal]
return self.rename_device(device, alias)
else:
# create a new Warp device for this context
device = Device(self, alias, ordinal=ordinal, is_primary=False, context=context)
self.device_map[alias] = device
self.context_map[context] = device
self.cuda_devices.append(device)
return device
def unmap_cuda_device(self, alias):
device = self.device_map.get(alias)
# make sure the alias refers to a CUDA device
if device is None or not device.is_cuda:
raise RuntimeError(f"Invalid CUDA device alias '{alias}'")
del self.device_map[alias]
del self.context_map[device.context]
self.cuda_devices.remove(device)
def verify_cuda_device(self, device: Devicelike = None):
if warp.config.verify_cuda:
device = runtime.get_device(device)
if not device.is_cuda:
return
err = self.core.cuda_context_check(device.context)
if err != 0:
raise RuntimeError(f"CUDA error detected: {err}")
def assert_initialized():
assert runtime is not None, "Warp not initialized, call wp.init() before use"
# global entry points
def is_cpu_available():
return runtime.llvm
def is_cuda_available():
return get_cuda_device_count() > 0
def is_device_available(device):
return device in get_devices()
def get_devices() -> List[Device]:
"""Returns a list of devices supported in this environment."""
assert_initialized()
devices = []
if is_cpu_available():
devices.append(runtime.cpu_device)
for cuda_device in runtime.cuda_devices:
devices.append(cuda_device)
return devices
def get_cuda_device_count() -> int:
"""Returns the number of CUDA devices supported in this environment."""
assert_initialized()
return len(runtime.cuda_devices)
def get_cuda_device(ordinal: Union[int, None] = None) -> Device:
"""Returns the CUDA device with the given ordinal or the current CUDA device if ordinal is None."""
assert_initialized()
if ordinal is None:
return runtime.get_current_cuda_device()
else:
return runtime.cuda_devices[ordinal]
def get_cuda_devices() -> List[Device]:
"""Returns a list of CUDA devices supported in this environment."""
assert_initialized()
return runtime.cuda_devices
def get_preferred_device() -> Device:
"""Returns the preferred compute device, CUDA if available and CPU otherwise."""
assert_initialized()
if is_cuda_available():
return runtime.cuda_devices[0]
elif is_cpu_available():
return runtime.cpu_device
else:
return None
def get_device(ident: Devicelike = None) -> Device:
"""Returns the device identified by the argument."""
assert_initialized()
return runtime.get_device(ident)
def set_device(ident: Devicelike):
"""Sets the target device identified by the argument."""
assert_initialized()
device = runtime.get_device(ident)
runtime.set_default_device(device)
device.make_current()
def map_cuda_device(alias: str, context: ctypes.c_void_p = None) -> Device:
"""Assign a device alias to a CUDA context.
This function can be used to create a wp.Device for an external CUDA context.
    If a wp.Device already exists for the given context, its alias will change to the given value.
Args:
alias: A unique string to identify the device.
context: A CUDA context pointer (CUcontext). If None, the currently bound CUDA context will be used.
Returns:
The associated wp.Device.
"""
assert_initialized()
return runtime.map_cuda_device(alias, context)
def unmap_cuda_device(alias: str):
"""Remove a CUDA device with the given alias."""
assert_initialized()
runtime.unmap_cuda_device(alias)
def get_stream(device: Devicelike = None) -> Stream:
"""Return the stream currently used by the given device"""
return get_device(device).stream
def set_stream(stream, device: Devicelike = None):
"""Set the stream to be used by the given device.
    If this is an external stream, the caller is responsible for guaranteeing the lifetime of the stream.
Consider using wp.ScopedStream instead.
"""
get_device(device).stream = stream
def record_event(event: Event = None):
"""Record a CUDA event on the current stream.
Args:
event: Event to record. If None, a new Event will be created.
Returns:
The recorded event.
"""
return get_stream().record_event(event)
def wait_event(event: Event):
"""Make the current stream wait for a CUDA event.
Args:
event: Event to wait for.
"""
get_stream().wait_event(event)
def wait_stream(stream: Stream, event: Event = None):
"""Make the current stream wait for another CUDA stream to complete its work.
    Args:
        stream: Stream to wait for.
        event: Event to be used. If None, a new Event will be created.
"""
get_stream().wait_stream(stream, event=event)
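# Example (illustrative sketch, not part of the library): ordering work across two
# streams with an event. Assumes wp.init() has been called, a CUDA device exists,
# and `kernel_a`/`kernel_b` are hypothetical @wp.kernel functions over an array `x`.
#
#     stream_a = wp.Stream("cuda:0")
#     stream_b = wp.Stream("cuda:0")
#     with wp.ScopedStream(stream_a):
#         wp.launch(kernel_a, dim=n, inputs=[x])
#         ev = wp.record_event()          # marks completion of kernel_a on stream_a
#     with wp.ScopedStream(stream_b):
#         wp.wait_event(ev)               # kernel_b will not start before kernel_a ends
#         wp.launch(kernel_b, dim=n, inputs=[x])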
class RegisteredGLBuffer:
"""
Helper object to register a GL buffer with CUDA so that it can be mapped to a Warp array.
"""
# Specifies no hints about how this resource will be used.
# It is therefore assumed that this resource will be
# read from and written to by CUDA. This is the default value.
NONE = 0x00
# Specifies that CUDA will not write to this resource.
READ_ONLY = 0x01
# Specifies that CUDA will not read from this resource and will write over the
# entire contents of the resource, so none of the data previously
# stored in the resource will be preserved.
WRITE_DISCARD = 0x02
def __init__(self, gl_buffer_id: int, device: Devicelike = None, flags: int = NONE):
"""Create a new RegisteredGLBuffer object.
Args:
gl_buffer_id: The OpenGL buffer id (GLuint).
device: The device to register the buffer with. If None, the current device will be used.
flags: A combination of the flags constants.
"""
self.gl_buffer_id = gl_buffer_id
self.device = get_device(device)
self.context = self.device.context
self.resource = runtime.core.cuda_graphics_register_gl_buffer(self.context, gl_buffer_id, flags)
def __del__(self):
runtime.core.cuda_graphics_unregister_resource(self.context, self.resource)
def map(self, dtype, shape) -> warp.array:
"""Map the OpenGL buffer to a Warp array.
Args:
dtype: The type of each element in the array.
shape: The shape of the array.
Returns:
A Warp array object representing the mapped OpenGL buffer.
"""
runtime.core.cuda_graphics_map(self.context, self.resource)
ptr = ctypes.c_uint64(0)
size = ctypes.c_size_t(0)
runtime.core.cuda_graphics_device_ptr_and_size(
self.context, self.resource, ctypes.byref(ptr), ctypes.byref(size)
)
return warp.array(ptr=ptr.value, dtype=dtype, shape=shape, device=self.device, owner=False)
def unmap(self):
"""Unmap the OpenGL buffer."""
runtime.core.cuda_graphics_unmap(self.context, self.resource)
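# Example (illustrative sketch, not part of the library): writing into an OpenGL
# buffer from a Warp kernel. `gl_buffer_id` is assumed to be a valid GLuint created
# by the host application, `n` its element count, and `update_points` a hypothetical
# @wp.kernel.
#
#     buf = RegisteredGLBuffer(gl_buffer_id, device="cuda:0", flags=RegisteredGLBuffer.WRITE_DISCARD)
#     points = buf.map(dtype=warp.vec3, shape=(n,))      # zero-copy view of the GL buffer
#     warp.launch(update_points, dim=n, inputs=[points])
#     buf.unmap()                                        # release before GL uses the buffer again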
def zeros(
shape: Tuple = None,
dtype=float,
device: Devicelike = None,
requires_grad: bool = False,
pinned: bool = False,
**kwargs,
) -> warp.array:
"""Return a zero-initialized array
Args:
shape: Array dimensions
dtype: Type of each element, e.g.: warp.vec3, warp.mat33, etc
device: Device that array will live on
requires_grad: Whether the array will be tracked for back propagation
pinned: Whether the array uses pinned host memory (only applicable to CPU arrays)
Returns:
A warp.array object representing the allocation
"""
arr = empty(shape=shape, dtype=dtype, device=device, requires_grad=requires_grad, pinned=pinned, **kwargs)
# use the CUDA default stream for synchronous behaviour with other streams
with warp.ScopedStream(arr.device.null_stream):
arr.zero_()
return arr
def zeros_like(
src: warp.array, device: Devicelike = None, requires_grad: bool = None, pinned: bool = None
) -> warp.array:
"""Return a zero-initialized array with the same type and dimension of another array
Args:
src: The template array to use for shape, data type, and device
device: The device where the new array will be created (defaults to src.device)
requires_grad: Whether the array will be tracked for back propagation
pinned: Whether the array uses pinned host memory (only applicable to CPU arrays)
Returns:
A warp.array object representing the allocation
"""
arr = empty_like(src, device=device, requires_grad=requires_grad, pinned=pinned)
arr.zero_()
return arr
def full(
shape: Tuple = None,
value=0,
dtype=Any,
device: Devicelike = None,
requires_grad: bool = False,
pinned: bool = False,
**kwargs,
) -> warp.array:
"""Return an array with all elements initialized to the given value
Args:
shape: Array dimensions
value: Element value
dtype: Type of each element, e.g.: float, warp.vec3, warp.mat33, etc
device: Device that array will live on
requires_grad: Whether the array will be tracked for back propagation
pinned: Whether the array uses pinned host memory (only applicable to CPU arrays)
Returns:
A warp.array object representing the allocation
"""
if dtype == Any:
# determine dtype from value
value_type = type(value)
if value_type == int:
dtype = warp.int32
elif value_type == float:
dtype = warp.float32
elif value_type in warp.types.scalar_types or hasattr(value_type, "_wp_scalar_type_"):
dtype = value_type
elif isinstance(value, warp.codegen.StructInstance):
dtype = value._cls
elif hasattr(value, "__len__"):
# a sequence, assume it's a vector or matrix value
try:
# try to convert to a numpy array first
na = np.array(value, copy=False)
except Exception as e:
raise ValueError(f"Failed to interpret the value as a vector or matrix: {e}")
# determine the scalar type
scalar_type = warp.types.np_dtype_to_warp_type.get(na.dtype)
if scalar_type is None:
raise ValueError(f"Failed to convert {na.dtype} to a Warp data type")
# determine if vector or matrix
if na.ndim == 1:
dtype = warp.types.vector(na.size, scalar_type)
elif na.ndim == 2:
dtype = warp.types.matrix(na.shape, scalar_type)
else:
raise ValueError("Values with more than two dimensions are not supported")
else:
raise ValueError(f"Invalid value type for Warp array: {value_type}")
arr = empty(shape=shape, dtype=dtype, device=device, requires_grad=requires_grad, pinned=pinned, **kwargs)
# use the CUDA default stream for synchronous behaviour with other streams
with warp.ScopedStream(arr.device.null_stream):
arr.fill_(value)
return arr
def full_like(
src: warp.array, value: Any, device: Devicelike = None, requires_grad: bool = None, pinned: bool = None
) -> warp.array:
"""Return an array with all elements initialized to the given value with the same type and dimension of another array
Args:
src: The template array to use for shape, data type, and device
value: Element value
device: The device where the new array will be created (defaults to src.device)
requires_grad: Whether the array will be tracked for back propagation
pinned: Whether the array uses pinned host memory (only applicable to CPU arrays)
Returns:
A warp.array object representing the allocation
"""
arr = empty_like(src, device=device, requires_grad=requires_grad, pinned=pinned)
arr.fill_(value)
return arr
def clone(src: warp.array, device: Devicelike = None, requires_grad: bool = None, pinned: bool = None) -> warp.array:
"""Clone an existing array, allocates a copy of the src memory
Args:
src: The source array to copy
device: The device where the new array will be created (defaults to src.device)
requires_grad: Whether the array will be tracked for back propagation
pinned: Whether the array uses pinned host memory (only applicable to CPU arrays)
Returns:
A warp.array object representing the allocation
"""
arr = empty_like(src, device=device, requires_grad=requires_grad, pinned=pinned)
warp.copy(arr, src)
return arr
def empty(
shape: Tuple = None,
dtype=float,
device: Devicelike = None,
requires_grad: bool = False,
pinned: bool = False,
**kwargs,
) -> warp.array:
"""Returns an uninitialized array
Args:
shape: Array dimensions
dtype: Type of each element, e.g.: `warp.vec3`, `warp.mat33`, etc
device: Device that array will live on
requires_grad: Whether the array will be tracked for back propagation
pinned: Whether the array uses pinned host memory (only applicable to CPU arrays)
Returns:
A warp.array object representing the allocation
"""
# backwards compatibility for case where users called wp.empty(n=length, ...)
if "n" in kwargs:
shape = (kwargs["n"],)
del kwargs["n"]
# ensure shape is specified, even if creating a zero-sized array
if shape is None:
shape = 0
return warp.array(shape=shape, dtype=dtype, device=device, requires_grad=requires_grad, pinned=pinned, **kwargs)
def empty_like(
src: warp.array, device: Devicelike = None, requires_grad: bool = None, pinned: bool = None
) -> warp.array:
"""Return an uninitialized array with the same type and dimension of another array
Args:
src: The template array to use for shape, data type, and device
device: The device where the new array will be created (defaults to src.device)
requires_grad: Whether the array will be tracked for back propagation
pinned: Whether the array uses pinned host memory (only applicable to CPU arrays)
Returns:
A warp.array object representing the allocation
"""
if device is None:
device = src.device
if requires_grad is None:
if hasattr(src, "requires_grad"):
requires_grad = src.requires_grad
else:
requires_grad = False
if pinned is None:
if hasattr(src, "pinned"):
pinned = src.pinned
else:
pinned = False
arr = empty(shape=src.shape, dtype=src.dtype, device=device, requires_grad=requires_grad, pinned=pinned)
return arr
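# Example (illustrative sketch): the construction helpers above share a common set of
# conventions; dtype may be inferred from a fill value, and the *_like variants copy
# shape, dtype, and device from a template array. Assumes wp.init() has been called.
#
#     a = wp.zeros(1024, dtype=wp.vec3, device="cuda:0")   # zero-initialized
#     b = wp.full((16, 16), value=wp.vec3(0.0, 1.0, 0.0))  # dtype inferred from value
#     c = wp.clone(a, device="cpu")                        # copy to another device
#     d = wp.empty_like(b)                                 # uninitialized, same shape/dtype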
def from_numpy(
arr: np.ndarray,
dtype: Optional[type] = None,
shape: Optional[Sequence[int]] = None,
device: Optional[Devicelike] = None,
requires_grad: bool = False,
) -> warp.array:
if dtype is None:
base_type = warp.types.np_dtype_to_warp_type.get(arr.dtype)
if base_type is None:
raise RuntimeError("Unsupported NumPy data type '{}'.".format(arr.dtype))
dim_count = len(arr.shape)
if dim_count == 2:
dtype = warp.types.vector(length=arr.shape[1], dtype=base_type)
elif dim_count == 3:
dtype = warp.types.matrix(shape=(arr.shape[1], arr.shape[2]), dtype=base_type)
else:
dtype = base_type
return warp.array(
data=arr,
dtype=dtype,
shape=shape,
owner=False,
device=device,
requires_grad=requires_grad,
)
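# Example (illustrative sketch): when no dtype is given, from_numpy infers it from
# the input's dimensionality, so a 2D float32 array becomes a 1D array of vec3.
#
#     na = np.zeros((100, 3), dtype=np.float32)
#     wa = wp.from_numpy(na)    # warp.array of wp.vec3f with shape (100,)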
# given a kernel destination argument type and a value convert
# to a c-type that can be passed to a kernel
def pack_arg(kernel, arg_type, arg_name, value, device, adjoint=False):
if warp.types.is_array(arg_type):
if value is None:
# allow for NULL arrays
return arg_type.__ctype__()
else:
# check for array type
# - in forward passes, array types have to match
# - in backward passes, indexed array gradients are regular arrays
if adjoint:
array_matches = type(value) == warp.array
else:
array_matches = type(value) == type(arg_type)
if not array_matches:
adj = "adjoint " if adjoint else ""
raise RuntimeError(
f"Error launching kernel '{kernel.key}', {adj}argument '{arg_name}' expects an array of type {type(arg_type)}, but passed value has type {type(value)}."
)
# check subtype
if not warp.types.types_equal(value.dtype, arg_type.dtype):
adj = "adjoint " if adjoint else ""
raise RuntimeError(
f"Error launching kernel '{kernel.key}', {adj}argument '{arg_name}' expects an array with dtype={arg_type.dtype} but passed array has dtype={value.dtype}."
)
# check dimensions
if value.ndim != arg_type.ndim:
adj = "adjoint " if adjoint else ""
raise RuntimeError(
f"Error launching kernel '{kernel.key}', {adj}argument '{arg_name}' expects an array with {arg_type.ndim} dimension(s) but the passed array has {value.ndim} dimension(s)."
)
# check device
# if a.device != device and not device.can_access(a.device):
if value.device != device:
raise RuntimeError(
f"Error launching kernel '{kernel.key}', trying to launch on device='{device}', but input array for argument '{arg_name}' is on device={value.device}."
)
return value.__ctype__()
elif isinstance(arg_type, warp.codegen.Struct):
assert value is not None
return value.__ctype__()
# try to convert to a value type (vec3, mat33, etc)
elif issubclass(arg_type, ctypes.Array):
if warp.types.types_equal(type(value), arg_type):
return value
else:
# try constructing the required value from the argument (handles tuple / list, Gf.Vec3 case)
try:
return arg_type(value)
except Exception:
raise ValueError(f"Failed to convert argument for param {arg_name} to {type_str(arg_type)}")
elif isinstance(value, bool):
return ctypes.c_bool(value)
elif isinstance(value, arg_type):
try:
# try to pack as a scalar type
if arg_type is warp.types.float16:
return arg_type._type_(warp.types.float_to_half_bits(value.value))
else:
return arg_type._type_(value.value)
except Exception:
raise RuntimeError(
"Error launching kernel, unable to pack kernel parameter type "
f"{type(value)} for param {arg_name}, expected {arg_type}"
)
else:
try:
# try to pack as a scalar type
if arg_type is warp.types.float16:
return arg_type._type_(warp.types.float_to_half_bits(value))
else:
return arg_type._type_(value)
except Exception as e:
print(e)
raise RuntimeError(
"Error launching kernel, unable to pack kernel parameter type "
f"{type(value)} for param {arg_name}, expected {arg_type}"
)
# represents all data required for a kernel launch
# so that launches can be replayed quickly, use `wp.launch(..., record_cmd=True)`
class Launch:
def __init__(self, kernel, device, hooks=None, params=None, params_addr=None, bounds=None):
# if not specified look up hooks
if not hooks:
module = kernel.module
if not module.load(device):
return
hooks = module.get_kernel_hooks(kernel, device)
# if not specified set a zero bound
if not bounds:
bounds = warp.types.launch_bounds_t(0)
# if not specified then build a list of default value params for args
if not params:
params = []
params.append(bounds)
for a in kernel.adj.args:
if isinstance(a.type, warp.types.array):
params.append(a.type.__ctype__())
elif isinstance(a.type, warp.codegen.Struct):
params.append(a.type().__ctype__())
else:
params.append(pack_arg(kernel, a.type, a.label, 0, device, False))
kernel_args = [ctypes.c_void_p(ctypes.addressof(x)) for x in params]
kernel_params = (ctypes.c_void_p * len(kernel_args))(*kernel_args)
params_addr = kernel_params
self.kernel = kernel
self.hooks = hooks
self.params = params
self.params_addr = params_addr
self.device = device
self.bounds = bounds
def set_dim(self, dim):
self.bounds = warp.types.launch_bounds_t(dim)
# launch bounds always at index 0
self.params[0] = self.bounds
# for CUDA kernels we need to update the address to each arg
if self.params_addr:
self.params_addr[0] = ctypes.c_void_p(ctypes.addressof(self.bounds))
# set kernel param at an index, will convert to ctype as necessary
def set_param_at_index(self, index, value):
arg_type = self.kernel.adj.args[index].type
arg_name = self.kernel.adj.args[index].label
carg = pack_arg(self.kernel, arg_type, arg_name, value, self.device, False)
self.params[index + 1] = carg
# for CUDA kernels we need to update the address to each arg
if self.params_addr:
self.params_addr[index + 1] = ctypes.c_void_p(ctypes.addressof(carg))
# set kernel param at an index without any type conversion
# args must be passed as ctypes or basic int / float types
def set_param_at_index_from_ctype(self, index, value):
if isinstance(value, ctypes.Structure):
# not sure how to directly assign struct->struct without reallocating using ctypes
self.params[index + 1] = value
# for CUDA kernels we need to update the address to each arg
if self.params_addr:
self.params_addr[index + 1] = ctypes.c_void_p(ctypes.addressof(value))
else:
self.params[index + 1].__init__(value)
# set kernel param by argument name
def set_param_by_name(self, name, value):
for i, arg in enumerate(self.kernel.adj.args):
if arg.label == name:
self.set_param_at_index(i, value)
# set kernel param by argument name with no type conversions
def set_param_by_name_from_ctype(self, name, value):
# lookup argument index
for i, arg in enumerate(self.kernel.adj.args):
if arg.label == name:
self.set_param_at_index_from_ctype(i, value)
# set all params
def set_params(self, values):
for i, v in enumerate(values):
self.set_param_at_index(i, v)
# set all params without performing type-conversions
def set_params_from_ctypes(self, values):
for i, v in enumerate(values):
self.set_param_at_index_from_ctype(i, v)
def launch(self) -> Any:
if self.device.is_cpu:
self.hooks.forward(*self.params)
else:
runtime.core.cuda_launch_kernel(self.device.context, self.hooks.forward, self.bounds.size, self.params_addr)
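# Example (illustrative sketch): recording a launch once and replaying it cheaply.
# `scale` is a hypothetical @wp.kernel with signature (x: wp.array(dtype=float), s: float).
#
#     cmd = wp.launch(scale, dim=n, inputs=[x, 1.0], record_cmd=True)
#     for s in (0.5, 2.0, 4.0):
#         cmd.set_param_by_name("s", s)   # update one argument without re-packing the rest
#         cmd.launch()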
def launch(
kernel,
dim: Tuple[int],
inputs: List,
outputs: List = [],
adj_inputs: List = [],
adj_outputs: List = [],
device: Devicelike = None,
stream: Stream = None,
adjoint=False,
record_tape=True,
record_cmd=False,
):
"""Launch a Warp kernel on the target device
Kernel launches are asynchronous with respect to the calling Python thread.
Args:
kernel: The name of a Warp kernel function, decorated with the ``@wp.kernel`` decorator
dim: The number of threads to launch the kernel, can be an integer, or a Tuple of ints with max of 4 dimensions
inputs: The input parameters to the kernel
outputs: The output parameters (optional)
adj_inputs: The adjoint inputs (optional)
adj_outputs: The adjoint outputs (optional)
device: The device to launch on (optional)
stream: The stream to launch on (optional)
adjoint: Whether to run forward or backward pass (typically use False)
record_tape: When true the launch will be recorded the global wp.Tape() object when present
record_cmd: When True the launch will be returned as a ``Launch`` command object, the launch will not occur until the user calls ``cmd.launch()``
"""
assert_initialized()
# if stream is specified, use the associated device
if stream is not None:
device = stream.device
else:
device = runtime.get_device(device)
# check function is a Kernel
    if not isinstance(kernel, Kernel):
raise RuntimeError("Error launching kernel, can only launch functions decorated with @wp.kernel.")
# debugging aid
if warp.config.print_launches:
print(f"kernel: {kernel.key} dim: {dim} inputs: {inputs} outputs: {outputs} device: {device}")
# construct launch bounds
bounds = warp.types.launch_bounds_t(dim)
if bounds.size > 0:
# first param is the number of threads
params = []
params.append(bounds)
# converts arguments to kernel's expected ctypes and packs into params
def pack_args(args, params, adjoint=False):
for i, a in enumerate(args):
arg_type = kernel.adj.args[i].type
arg_name = kernel.adj.args[i].label
params.append(pack_arg(kernel, arg_type, arg_name, a, device, adjoint))
fwd_args = inputs + outputs
adj_args = adj_inputs + adj_outputs
if (len(fwd_args)) != (len(kernel.adj.args)):
raise RuntimeError(
f"Error launching kernel '{kernel.key}', passed {len(fwd_args)} arguments but kernel requires {len(kernel.adj.args)}."
)
# if it's a generic kernel, infer the required overload from the arguments
if kernel.is_generic:
fwd_types = kernel.infer_argument_types(fwd_args)
kernel = kernel.get_overload(fwd_types)
# delay load modules, including new overload if needed
module = kernel.module
if not module.load(device):
return
# late bind
hooks = module.get_kernel_hooks(kernel, device)
pack_args(fwd_args, params)
pack_args(adj_args, params, adjoint=True)
# run kernel
if device.is_cpu:
if adjoint:
if hooks.backward is None:
raise RuntimeError(
f"Failed to find backward kernel '{kernel.key}' from module '{kernel.module.name}' for device '{device}'"
)
hooks.backward(*params)
else:
if hooks.forward is None:
raise RuntimeError(
f"Failed to find forward kernel '{kernel.key}' from module '{kernel.module.name}' for device '{device}'"
)
if record_cmd:
launch = Launch(
kernel=kernel, hooks=hooks, params=params, params_addr=None, bounds=bounds, device=device
)
return launch
else:
hooks.forward(*params)
else:
kernel_args = [ctypes.c_void_p(ctypes.addressof(x)) for x in params]
kernel_params = (ctypes.c_void_p * len(kernel_args))(*kernel_args)
with warp.ScopedStream(stream):
if adjoint:
if hooks.backward is None:
raise RuntimeError(
f"Failed to find backward kernel '{kernel.key}' from module '{kernel.module.name}' for device '{device}'"
)
runtime.core.cuda_launch_kernel(device.context, hooks.backward, bounds.size, kernel_params)
else:
if hooks.forward is None:
raise RuntimeError(
f"Failed to find forward kernel '{kernel.key}' from module '{kernel.module.name}' for device '{device}'"
)
if record_cmd:
launch = Launch(
kernel=kernel,
hooks=hooks,
params=params,
params_addr=kernel_params,
bounds=bounds,
device=device,
)
return launch
else:
# launch
runtime.core.cuda_launch_kernel(device.context, hooks.forward, bounds.size, kernel_params)
try:
runtime.verify_cuda_device(device)
except Exception as e:
print(f"Error launching kernel: {kernel.key} on device {device}")
raise e
# record on tape if one is active
if runtime.tape and record_tape:
runtime.tape.record_launch(kernel, dim, inputs, outputs, device)
def synchronize():
"""Manually synchronize the calling CPU thread with any outstanding CUDA work on all devices
This method allows the host application code to ensure that any kernel launches
or memory copies have completed.
"""
if is_cuda_available():
# save the original context to avoid side effects
saved_context = runtime.core.cuda_context_get_current()
# TODO: only synchronize devices that have outstanding work
for device in runtime.cuda_devices:
# avoid creating primary context if the device has not been used yet
if device.has_context:
if device.is_capturing:
raise RuntimeError(f"Cannot synchronize device {device} while graph capture is active")
runtime.core.cuda_context_synchronize(device.context)
# restore the original context to avoid side effects
runtime.core.cuda_context_set_current(saved_context)
def synchronize_device(device: Devicelike = None):
"""Manually synchronize the calling CPU thread with any outstanding CUDA work on the specified device
This method allows the host application code to ensure that any kernel launches
or memory copies have completed.
Args:
device: Device to synchronize. If None, synchronize the current CUDA device.
"""
device = runtime.get_device(device)
if device.is_cuda:
if device.is_capturing:
raise RuntimeError(f"Cannot synchronize device {device} while graph capture is active")
runtime.core.cuda_context_synchronize(device.context)
def synchronize_stream(stream_or_device=None):
"""Manually synchronize the calling CPU thread with any outstanding CUDA work on the specified stream.
Args:
stream_or_device: `wp.Stream` or a device. If the argument is a device, synchronize the device's current stream.
"""
if isinstance(stream_or_device, Stream):
stream = stream_or_device
else:
stream = runtime.get_device(stream_or_device).stream
runtime.core.cuda_stream_synchronize(stream.device.context, stream.cuda_stream)
def force_load(device: Union[Device, str] = None, modules: List[Module] = None):
"""Force user-defined kernels to be compiled and loaded
Args:
device: The device or list of devices to load the modules on. If None, load on all devices.
modules: List of modules to load. If None, load all imported modules.
"""
if is_cuda_available():
# save original context to avoid side effects
saved_context = runtime.core.cuda_context_get_current()
if device is None:
devices = get_devices()
else:
devices = [get_device(device)]
if modules is None:
modules = user_modules.values()
for d in devices:
for m in modules:
m.load(d)
if is_cuda_available():
# restore original context to avoid side effects
runtime.core.cuda_context_set_current(saved_context)
def load_module(
module: Union[Module, ModuleType, str] = None, device: Union[Device, str] = None, recursive: bool = False
):
"""Force user-defined module to be compiled and loaded
Args:
module: The module to load. If None, load the current module.
device: The device to load the modules on. If None, load on all devices.
recursive: Whether to load submodules. E.g., if the given module is `warp.sim`, this will also load `warp.sim.model`, `warp.sim.articulation`, etc.
Note: A module must be imported before it can be loaded by this function.
"""
if module is None:
# if module not specified, use the module that called us
module = inspect.getmodule(inspect.stack()[1][0])
module_name = module.__name__
elif isinstance(module, Module):
module_name = module.name
elif isinstance(module, ModuleType):
module_name = module.__name__
elif isinstance(module, str):
module_name = module
else:
raise TypeError(f"Argument must be a module, got {type(module)}")
modules = []
# add the given module, if found
m = user_modules.get(module_name)
if m is not None:
modules.append(m)
# add submodules, if recursive
if recursive:
prefix = module_name + "."
for name, mod in user_modules.items():
if name.startswith(prefix):
modules.append(mod)
force_load(device=device, modules=modules)
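# Example (illustrative sketch): preloading modules to avoid compilation pauses at
# first launch, e.g. ahead of a CUDA graph capture.
#
#     import warp.sim
#     wp.load_module(warp.sim, device="cuda:0", recursive=True)  # also loads warp.sim.*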
def set_module_options(options: Dict[str, Any], module: Optional[Any] = None):
"""Set options for the current module.
Options can be used to control runtime compilation and code-generation
for the current module individually. Available options are listed below.
* **mode**: The compilation mode to use, can be "debug", or "release", defaults to the value of ``warp.config.mode``.
* **max_unroll**: The maximum fixed-size loop to unroll (default 16)
Args:
options: Set of key-value option pairs
"""
if module is None:
m = inspect.getmodule(inspect.stack()[1][0])
else:
m = module
get_module(m.__name__).options.update(options)
get_module(m.__name__).unload()
def get_module_options(module: Optional[Any] = None) -> Dict[str, Any]:
"""Returns a list of options for the current module."""
if module is None:
m = inspect.getmodule(inspect.stack()[1][0])
else:
m = module
return get_module(m.__name__).options
def capture_begin(device: Devicelike = None, stream=None, force_module_load=True):
"""Begin capture of a CUDA graph
Captures all subsequent kernel launches and memory operations on CUDA devices.
This can be used to record large numbers of kernels and replay them with low-overhead.
Args:
device: The device to capture on, if None the current CUDA device will be used
stream: The CUDA stream to capture on
        force_module_load: Whether to force loading of all kernels before capture; in general it is better to use :func:`~warp.load_module()` to selectively load kernels.
"""
    if warp.config.verify_cuda:
raise RuntimeError("Cannot use CUDA error verification during graph capture")
if stream is not None:
device = stream.device
else:
device = runtime.get_device(device)
if not device.is_cuda:
raise RuntimeError("Must be a CUDA device")
if force_module_load:
force_load(device)
device.is_capturing = True
with warp.ScopedStream(stream):
runtime.core.cuda_graph_begin_capture(device.context)
def capture_end(device: Devicelike = None, stream=None) -> Graph:
"""Ends the capture of a CUDA graph
Returns:
A handle to a CUDA graph object that can be launched with :func:`~warp.capture_launch()`
"""
if stream is not None:
device = stream.device
else:
device = runtime.get_device(device)
if not device.is_cuda:
raise RuntimeError("Must be a CUDA device")
with warp.ScopedStream(stream):
graph = runtime.core.cuda_graph_end_capture(device.context)
device.is_capturing = False
if graph is None:
raise RuntimeError(
"Error occurred during CUDA graph capture. This could be due to an unintended allocation or CPU/GPU synchronization event."
)
else:
return Graph(device, graph)
def capture_launch(graph: Graph, stream: Stream = None):
"""Launch a previously captured CUDA graph
Args:
graph: A Graph as returned by :func:`~warp.capture_end()`
stream: A Stream to launch the graph on (optional)
"""
if stream is not None:
if stream.device != graph.device:
raise RuntimeError(f"Cannot launch graph from device {graph.device} on stream from device {stream.device}")
device = stream.device
else:
device = graph.device
with warp.ScopedStream(stream):
runtime.core.cuda_graph_launch(device.context, graph.exec)
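# Example (illustrative sketch): capturing a fixed sequence of launches into a CUDA
# graph and replaying it with low overhead. `step` is a hypothetical @wp.kernel and
# the relevant modules are assumed to be loaded already.
#
#     wp.capture_begin(device="cuda:0")
#     for _ in range(num_substeps):
#         wp.launch(step, dim=n, inputs=[state])
#     graph = wp.capture_end(device="cuda:0")
#     for _ in range(num_frames):
#         wp.capture_launch(graph)    # replays all captured launches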
def copy(
dest: warp.array, src: warp.array, dest_offset: int = 0, src_offset: int = 0, count: int = 0, stream: Stream = None
):
"""Copy array contents from src to dest
Args:
dest: Destination array, must be at least as big as source buffer
src: Source array
dest_offset: Element offset in the destination array
src_offset: Element offset in the source array
count: Number of array elements to copy (will copy all elements if set to 0)
stream: The stream on which to perform the copy (optional)
"""
if not warp.types.is_array(src) or not warp.types.is_array(dest):
raise RuntimeError("Copy source and destination must be arrays")
# backwards compatibility, if count is zero then copy entire src array
if count <= 0:
count = src.size
if count == 0:
return
# copying non-contiguous arrays requires that they are on the same device
if not (src.is_contiguous and dest.is_contiguous) and src.device != dest.device:
if dest.is_contiguous:
# make a contiguous copy of the source array
src = src.contiguous()
else:
# make a copy of the source array on the destination device
src = src.to(dest.device)
if src.is_contiguous and dest.is_contiguous:
bytes_to_copy = count * warp.types.type_size_in_bytes(src.dtype)
src_size_in_bytes = src.size * warp.types.type_size_in_bytes(src.dtype)
dst_size_in_bytes = dest.size * warp.types.type_size_in_bytes(dest.dtype)
src_offset_in_bytes = src_offset * warp.types.type_size_in_bytes(src.dtype)
dst_offset_in_bytes = dest_offset * warp.types.type_size_in_bytes(dest.dtype)
src_ptr = src.ptr + src_offset_in_bytes
dst_ptr = dest.ptr + dst_offset_in_bytes
        if src_offset_in_bytes + bytes_to_copy > src_size_in_bytes:
            raise RuntimeError(
                f"Copying {bytes_to_copy} bytes from source offset {src_offset_in_bytes} would overrun the source buffer ({src_size_in_bytes} bytes)"
            )
        if dst_offset_in_bytes + bytes_to_copy > dst_size_in_bytes:
            raise RuntimeError(
                f"Copying {bytes_to_copy} bytes to destination offset {dst_offset_in_bytes} would overrun the destination buffer ({dst_size_in_bytes} bytes)"
            )
if src.device.is_cpu and dest.device.is_cpu:
runtime.core.memcpy_h2h(dst_ptr, src_ptr, bytes_to_copy)
else:
# figure out the CUDA context/stream for the copy
if stream is not None:
copy_device = stream.device
elif dest.device.is_cuda:
copy_device = dest.device
else:
copy_device = src.device
with warp.ScopedStream(stream):
if src.device.is_cpu and dest.device.is_cuda:
runtime.core.memcpy_h2d(copy_device.context, dst_ptr, src_ptr, bytes_to_copy)
elif src.device.is_cuda and dest.device.is_cpu:
runtime.core.memcpy_d2h(copy_device.context, dst_ptr, src_ptr, bytes_to_copy)
elif src.device.is_cuda and dest.device.is_cuda:
if src.device == dest.device:
runtime.core.memcpy_d2d(copy_device.context, dst_ptr, src_ptr, bytes_to_copy)
else:
runtime.core.memcpy_peer(copy_device.context, dst_ptr, src_ptr, bytes_to_copy)
else:
raise RuntimeError("Unexpected source and destination combination")
else:
# handle non-contiguous and indexed arrays
if src.shape != dest.shape:
raise RuntimeError("Incompatible array shapes")
src_elem_size = warp.types.type_size_in_bytes(src.dtype)
dst_elem_size = warp.types.type_size_in_bytes(dest.dtype)
if src_elem_size != dst_elem_size:
raise RuntimeError("Incompatible array data types")
# can't copy to/from fabric arrays of arrays, because they are jagged arrays of arbitrary lengths
# TODO?
if (
isinstance(src, (warp.fabricarray, warp.indexedfabricarray))
and src.ndim > 1
or isinstance(dest, (warp.fabricarray, warp.indexedfabricarray))
and dest.ndim > 1
):
raise RuntimeError("Copying to/from Fabric arrays of arrays is not supported")
src_desc = src.__ctype__()
dst_desc = dest.__ctype__()
src_ptr = ctypes.pointer(src_desc)
dst_ptr = ctypes.pointer(dst_desc)
src_type = warp.types.array_type_id(src)
dst_type = warp.types.array_type_id(dest)
if src.device.is_cuda:
with warp.ScopedStream(stream):
runtime.core.array_copy_device(src.device.context, dst_ptr, src_ptr, dst_type, src_type, src_elem_size)
else:
runtime.core.array_copy_host(dst_ptr, src_ptr, dst_type, src_type, src_elem_size)
# copy gradient, if needed
if hasattr(src, "grad") and src.grad is not None and hasattr(dest, "grad") and dest.grad is not None:
copy(dest.grad, src.grad, stream=stream)
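# Example (illustrative sketch): copying a contiguous range of elements between
# arrays on different devices using element offsets.
#
#     src = wp.full(100, value=1.0, dtype=float, device="cuda:0")
#     dst = wp.zeros(100, dtype=float, device="cpu")
#     wp.copy(dst, src, dest_offset=10, src_offset=0, count=50)   # dst[10:60] = src[0:50]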
def type_str(t):
if t is None:
return "None"
elif t == Any:
return "Any"
elif t == Callable:
return "Callable"
elif t == Tuple[int, int]:
return "Tuple[int, int]"
elif isinstance(t, int):
return str(t)
elif isinstance(t, List):
return "Tuple[" + ", ".join(map(type_str, t)) + "]"
elif isinstance(t, warp.array):
return f"Array[{type_str(t.dtype)}]"
elif isinstance(t, warp.indexedarray):
return f"IndexedArray[{type_str(t.dtype)}]"
elif isinstance(t, warp.fabricarray):
return f"FabricArray[{type_str(t.dtype)}]"
elif isinstance(t, warp.indexedfabricarray):
return f"IndexedFabricArray[{type_str(t.dtype)}]"
elif hasattr(t, "_wp_generic_type_str_"):
generic_type = t._wp_generic_type_str_
# for concrete vec/mat types use the short name
if t in warp.types.vector_types:
return t.__name__
# for generic vector / matrix type use a Generic type hint
if generic_type == "vec_t":
# return f"Vector"
return f"Vector[{type_str(t._wp_type_params_[0])},{type_str(t._wp_scalar_type_)}]"
elif generic_type == "quat_t":
# return f"Quaternion"
return f"Quaternion[{type_str(t._wp_scalar_type_)}]"
elif generic_type == "mat_t":
# return f"Matrix"
return f"Matrix[{type_str(t._wp_type_params_[0])},{type_str(t._wp_type_params_[1])},{type_str(t._wp_scalar_type_)}]"
elif generic_type == "transform_t":
# return f"Transformation"
return f"Transformation[{type_str(t._wp_scalar_type_)}]"
else:
raise TypeError("Invalid vector or matrix dimensions")
else:
return t.__name__
def print_function(f, file, noentry=False):
"""Writes a function definition to a file for use in reST documentation
Args:
f: The function being written
file: The file object for output
noentry: If True, then the :noindex: and :nocontentsentry: directive
options will be added
Returns:
A bool indicating True if f was written to file
"""
if f.hidden:
return False
args = ", ".join(f"{k}: {type_str(v)}" for k, v in f.input_types.items())
return_type = ""
try:
# todo: construct a default value for each of the functions args
# so we can generate the return type for overloaded functions
return_type = " -> " + type_str(f.value_func(None, None, None))
except Exception:
pass
print(f".. function:: {f.key}({args}){return_type}", file=file)
if noentry:
print(" :noindex:", file=file)
print(" :nocontentsentry:", file=file)
print("", file=file)
if f.doc != "":
if not f.missing_grad:
print(f" {f.doc}", file=file)
else:
print(f" {f.doc} [1]_", file=file)
print("", file=file)
print(file=file)
return True
def print_builtins(file):
header = (
"..\n"
" Autogenerated File - Do not edit. Run build_docs.py to generate.\n"
"\n"
".. functions:\n"
".. currentmodule:: warp\n"
"\n"
"Kernel Reference\n"
"================"
)
print(header, file=file)
# type definitions of all functions by group
print("\nScalar Types", file=file)
print("------------", file=file)
for t in warp.types.scalar_types:
print(f".. class:: {t.__name__}", file=file)
print("\n\nVector Types", file=file)
print("------------", file=file)
for t in warp.types.vector_types:
print(f".. class:: {t.__name__}", file=file)
print("\nGeneric Types", file=file)
print("-------------", file=file)
print(".. class:: Int", file=file)
print(".. class:: Float", file=file)
print(".. class:: Scalar", file=file)
print(".. class:: Vector", file=file)
print(".. class:: Matrix", file=file)
print(".. class:: Quaternion", file=file)
print(".. class:: Transformation", file=file)
print(".. class:: Array", file=file)
# build dictionary of all functions by group
groups = {}
for k, f in builtin_functions.items():
# build dict of groups
if f.group not in groups:
groups[f.group] = []
# append all overloads to the group
for o in f.overloads:
groups[f.group].append(o)
# Keep track of what function names have been written
written_functions = {}
for k, g in groups.items():
print("\n", file=file)
print(k, file=file)
print("---------------", file=file)
for f in g:
if f.key in written_functions:
# Add :noindex: + :nocontentsentry: since Sphinx gets confused
print_function(f, file=file, noentry=True)
else:
if print_function(f, file=file):
written_functions[f.key] = []
# footnotes
print(".. rubric:: Footnotes", file=file)
print(".. [1] Note: function gradients not implemented for backpropagation.", file=file)
def export_stubs(file):
"""Generates stub file for auto-complete of builtin functions"""
import textwrap
print(
"# Autogenerated file, do not edit, this file provides stubs for builtins autocomplete in VSCode, PyCharm, etc",
file=file,
)
print("", file=file)
print("from typing import Any", file=file)
print("from typing import Tuple", file=file)
print("from typing import Callable", file=file)
print("from typing import TypeVar", file=file)
print("from typing import Generic", file=file)
print("from typing import overload as over", file=file)
print(file=file)
# type hints, these need to be mirrored into the stubs file
print('Length = TypeVar("Length", bound=int)', file=file)
print('Rows = TypeVar("Rows", bound=int)', file=file)
print('Cols = TypeVar("Cols", bound=int)', file=file)
print('DType = TypeVar("DType")', file=file)
print('Int = TypeVar("Int")', file=file)
print('Float = TypeVar("Float")', file=file)
print('Scalar = TypeVar("Scalar")', file=file)
print("Vector = Generic[Length, Scalar]", file=file)
print("Matrix = Generic[Rows, Cols, Scalar]", file=file)
print("Quaternion = Generic[Float]", file=file)
print("Transformation = Generic[Float]", file=file)
print("Array = Generic[DType]", file=file)
# prepend __init__.py
with open(os.path.join(os.path.dirname(file.name), "__init__.py")) as header_file:
# strip comment lines
lines = [line for line in header_file if not line.startswith("#")]
header = "".join(lines)
print(header, file=file)
print(file=file)
for k, g in builtin_functions.items():
for f in g.overloads:
args = ", ".join(f"{k}: {type_str(v)}" for k, v in f.input_types.items())
return_str = ""
if f.export is False or f.hidden is True: # or f.generic:
continue
try:
# todo: construct a default value for each of the functions args
# so we can generate the return type for overloaded functions
return_type = f.value_func(None, None, None)
if return_type:
return_str = " -> " + type_str(return_type)
except Exception:
pass
print("@over", file=file)
print(f"def {f.key}({args}){return_str}:", file=file)
print(' """', file=file)
print(textwrap.indent(text=f.doc, prefix=" "), file=file)
print(' """', file=file)
print(" ...\n\n", file=file)
def export_builtins(file):
def ctype_str(t):
if isinstance(t, int):
return "int"
elif isinstance(t, float):
return "float"
else:
return t.__name__
for k, g in builtin_functions.items():
for f in g.overloads:
if f.export is False or f.generic:
continue
simple = True
for k, v in f.input_types.items():
if isinstance(v, warp.array) or v == Any or v == Callable or v == Tuple:
simple = False
break
# only export simple types that don't use arrays
# or templated types
if not simple or f.variadic:
continue
args = ", ".join(f"{ctype_str(v)} {k}" for k, v in f.input_types.items())
params = ", ".join(f.input_types.keys())
return_type = ""
try:
# todo: construct a default value for each of the functions args
# so we can generate the return type for overloaded functions
return_type = ctype_str(f.value_func(None, None, None))
except Exception:
continue
if return_type.startswith("Tuple"):
continue
if args == "":
print(
f"WP_API void {f.mangled_name}({return_type}* ret) {{ *ret = wp::{f.key}({params}); }}", file=file
)
elif return_type == "None":
print(f"WP_API void {f.mangled_name}({args}) {{ wp::{f.key}({params}); }}", file=file)
else:
print(
f"WP_API void {f.mangled_name}({args}, {return_type}* ret) {{ *ret = wp::{f.key}({params}); }}",
file=file,
)
# initialize global runtime
runtime = None
def init():
"""Initialize the Warp runtime. This function must be called before any other API call. If an error occurs an exception will be raised."""
global runtime
if runtime is None:
runtime = Runtime()
| warp-main | warp/context.py |
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import cProfile
import math
import sys
import timeit
import warnings
from typing import Any, Tuple, Union
import numpy as np
import warp as wp
import warp.types
def warp_showwarning(message, category, filename, lineno, file=None, line=None):
"""Version of warnings.showwarning that always prints to sys.stdout."""
msg = warnings.WarningMessage(message, category, filename, lineno, sys.stdout, line)
warnings._showwarnmsg_impl(msg)
def warn(message, category=None, stacklevel=1):
with warnings.catch_warnings():
warnings.simplefilter("default") # Change the filter in this process
warnings.showwarning = warp_showwarning
warnings.warn(message, category, stacklevel + 1) # Increment stacklevel by 1 since we are in a wrapper
def length(a):
return np.linalg.norm(a)
def length_sq(a):
return np.dot(a, a)
def cross(a, b):
return np.array((a[1] * b[2] - a[2] * b[1], a[2] * b[0] - a[0] * b[2], a[0] * b[1] - a[1] * b[0]), dtype=np.float32)
# NumPy has no built-in normalize() function, so define one
def normalize(v):
norm = np.linalg.norm(v)
if norm == 0.0:
return v
return v / norm
def skew(v):
return np.array([[0, -v[2], v[1]], [v[2], 0, -v[0]], [-v[1], v[0], 0]])
# math utils
# def quat(i, j, k, w):
# return np.array([i, j, k, w])
def quat_identity():
return np.array((0.0, 0.0, 0.0, 1.0))
def quat_inverse(q):
return np.array((-q[0], -q[1], -q[2], q[3]))
def quat_from_axis_angle(axis, angle):
v = normalize(np.array(axis))
half = angle * 0.5
w = math.cos(half)
sin_theta_over_two = math.sin(half)
v *= sin_theta_over_two
return np.array((v[0], v[1], v[2], w))
def quat_to_axis_angle(quat):
w2 = quat[3] * quat[3]
if w2 > 1 - 1e-7:
return np.zeros(3), 0.0
angle = 2 * np.arccos(quat[3])
xyz = quat[:3] / np.sqrt(1 - w2)
return xyz, angle
# quat_rotate a vector
def quat_rotate(q, x):
x = np.array(x)
axis = np.array((q[0], q[1], q[2]))
return x * (2.0 * q[3] * q[3] - 1.0) + np.cross(axis, x) * q[3] * 2.0 + axis * np.dot(axis, x) * 2.0
# multiply two quats
def quat_multiply(a, b):
return np.array(
(
a[3] * b[0] + b[3] * a[0] + a[1] * b[2] - b[1] * a[2],
a[3] * b[1] + b[3] * a[1] + a[2] * b[0] - b[2] * a[0],
a[3] * b[2] + b[3] * a[2] + a[0] * b[1] - b[0] * a[1],
a[3] * b[3] - a[0] * b[0] - a[1] * b[1] - a[2] * b[2],
)
)
# convert to mat33
def quat_to_matrix(q):
c1 = quat_rotate(q, np.array((1.0, 0.0, 0.0)))
c2 = quat_rotate(q, np.array((0.0, 1.0, 0.0)))
c3 = quat_rotate(q, np.array((0.0, 0.0, 1.0)))
return np.array([c1, c2, c3]).T
def quat_rpy(roll, pitch, yaw):
cy = math.cos(yaw * 0.5)
sy = math.sin(yaw * 0.5)
cr = math.cos(roll * 0.5)
sr = math.sin(roll * 0.5)
cp = math.cos(pitch * 0.5)
sp = math.sin(pitch * 0.5)
w = cy * cr * cp + sy * sr * sp
x = cy * sr * cp - sy * cr * sp
y = cy * cr * sp + sy * sr * cp
z = sy * cr * cp - cy * sr * sp
return (x, y, z, w)
def quat_from_matrix(m):
tr = m[0, 0] + m[1, 1] + m[2, 2]
h = 0.0
if tr >= 0.0:
h = math.sqrt(tr + 1.0)
w = 0.5 * h
h = 0.5 / h
x = (m[2, 1] - m[1, 2]) * h
y = (m[0, 2] - m[2, 0]) * h
z = (m[1, 0] - m[0, 1]) * h
else:
i = 0
if m[1, 1] > m[0, 0]:
i = 1
if m[2, 2] > m[i, i]:
i = 2
if i == 0:
h = math.sqrt((m[0, 0] - (m[1, 1] + m[2, 2])) + 1.0)
x = 0.5 * h
h = 0.5 / h
y = (m[0, 1] + m[1, 0]) * h
z = (m[2, 0] + m[0, 2]) * h
w = (m[2, 1] - m[1, 2]) * h
elif i == 1:
h = math.sqrt((m[1, 1] - (m[2, 2] + m[0, 0])) + 1.0)
y = 0.5 * h
h = 0.5 / h
z = (m[1, 2] + m[2, 1]) * h
x = (m[0, 1] + m[1, 0]) * h
w = (m[0, 2] - m[2, 0]) * h
elif i == 2:
h = math.sqrt((m[2, 2] - (m[0, 0] + m[1, 1])) + 1.0)
z = 0.5 * h
h = 0.5 / h
x = (m[2, 0] + m[0, 2]) * h
y = (m[1, 2] + m[2, 1]) * h
w = (m[1, 0] - m[0, 1]) * h
return normalize(np.array([x, y, z, w]))
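# Example (illustrative sketch): quat_from_matrix inverts quat_to_matrix up to the
# sign of the quaternion, which a round trip makes easy to check.
#
#     q = quat_from_axis_angle((0.0, 1.0, 0.0), 0.5)
#     q2 = quat_from_matrix(quat_to_matrix(q))
#     assert np.allclose(abs(np.dot(q, q2)), 1.0, atol=1.0e-6)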
@wp.func
def quat_between_vectors(a: wp.vec3, b: wp.vec3) -> wp.quat:
"""
Compute the quaternion that rotates vector a to vector b
"""
a = wp.normalize(a)
b = wp.normalize(b)
c = wp.cross(a, b)
d = wp.dot(a, b)
q = wp.quat(c[0], c[1], c[2], 1.0 + d)
return wp.normalize(q)
# rigid body transform
# def transform(x, r):
# return (np.array(x), np.array(r))
def transform_identity():
return wp.transform(np.array((0.0, 0.0, 0.0)), quat_identity())
# se(3) -> SE(3), Park & Lynch pg. 105, screw in [w, v] normalized form
def transform_exp(s, angle):
w = np.array(s[0:3])
v = np.array(s[3:6])
if length(w) < 1.0:
r = quat_identity()
else:
r = quat_from_axis_angle(w, angle)
t = v * angle + (1.0 - math.cos(angle)) * np.cross(w, v) + (angle - math.sin(angle)) * np.cross(w, np.cross(w, v))
return (t, r)
def transform_inverse(t):
q_inv = quat_inverse(t.q)
return wp.transform(-quat_rotate(q_inv, t.p), q_inv)
def transform_vector(t, v):
return quat_rotate(t.q, v)
def transform_point(t, p):
return np.array(t.p) + quat_rotate(t.q, p)
def transform_multiply(t, u):
return wp.transform(quat_rotate(t.q, u.p) + t.p, quat_multiply(t.q, u.q))
# flatten a transform in (p, q) format to a 7-vector
def transform_flatten(t):
return np.array([*t.p, *t.q])
# expand a 7-vec to a tuple of arrays
def transform_expand(t):
return wp.transform(np.array(t[0:3]), np.array(t[3:7]))
# convert an array of transforms to an array of 7-vecs
def transform_flatten_list(xforms):
    return [transform_flatten(t) for t in xforms]
def transform_expand_list(xforms):
    return [transform_expand(t) for t in xforms]
def transform_inertia(m, I, p, q):
"""
Transforms the inertia tensor described by the given mass and 3x3 inertia
matrix to a new frame described by the given position and orientation.
"""
R = quat_to_matrix(q)
# Steiner's theorem
return R @ I @ R.T + m * (np.dot(p, p) * np.eye(3) - np.outer(p, p))
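# Example (illustrative sketch): for a point mass the parallel-axis term is all that
# remains, so translating a unit mass by p = (0, 1, 0) yields diag(1, 0, 1).
#
#     I_new = transform_inertia(1.0, np.zeros((3, 3)), np.array((0.0, 1.0, 0.0)), quat_identity())
#     assert np.allclose(I_new, np.diag((1.0, 0.0, 1.0)))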
# spatial operators
# AdT
def spatial_adjoint(t):
R = quat_to_matrix(t.q)
w = skew(t.p)
A = np.zeros((6, 6))
A[0:3, 0:3] = R
A[3:6, 0:3] = np.dot(w, R)
A[3:6, 3:6] = R
return A
# (AdT)^-T
def spatial_adjoint_dual(t):
R = quat_to_matrix(t.q)
w = skew(t.p)
A = np.zeros((6, 6))
A[0:3, 0:3] = R
A[0:3, 3:6] = np.dot(w, R)
A[3:6, 3:6] = R
return A
# AdT*s
def transform_twist(t_ab, s_b):
return np.dot(spatial_adjoint(t_ab), s_b)
# AdT^{-T}*s
def transform_wrench(t_ab, f_b):
return np.dot(spatial_adjoint_dual(t_ab), f_b)
# transform spatial inertia (6x6) in b frame to a frame
def transform_spatial_inertia(t_ab, I_b):
t_ba = transform_inverse(t_ab)
# todo: write specialized method
I_a = np.dot(np.dot(spatial_adjoint(t_ba).T, I_b), spatial_adjoint(t_ba))
return I_a
def translate_twist(p_ab, s_b):
w = s_b[0:3]
v = np.cross(p_ab, s_b[0:3]) + s_b[3:6]
return np.array((*w, *v))
def translate_wrench(p_ab, s_b):
w = s_b[0:3] + np.cross(p_ab, s_b[3:6])
v = s_b[3:6]
return np.array((*w, *v))
# def spatial_vector(v=(0.0, 0.0, 0.0, 0.0, 0.0, 0.0)):
# return np.array(v)
# ad_V pg. 289 L&P, pg. 25 Featherstone
def spatial_cross(a, b):
w = np.cross(a[0:3], b[0:3])
v = np.cross(a[3:6], b[0:3]) + np.cross(a[0:3], b[3:6])
return np.array((*w, *v))
# ad_V^T pg. 290 L&P, pg. 25 Featherstone, note this does not include the sign flip in the definition
def spatial_cross_dual(a, b):
w = np.cross(a[0:3], b[0:3]) + np.cross(a[3:6], b[3:6])
v = np.cross(a[0:3], b[3:6])
return np.array((*w, *v))
def spatial_dot(a, b):
return np.dot(a, b)
def spatial_outer(a, b):
return np.outer(a, b)
# def spatial_matrix():
# return np.zeros((6, 6))
def spatial_matrix_from_inertia(I, m):
    G = np.zeros((6, 6))  # the spatial_matrix() helper above is commented out, build the 6x6 directly
G[0:3, 0:3] = I
G[3, 3] = m
G[4, 4] = m
G[5, 5] = m
return G
# solves x = I^(-1)b
def spatial_solve(I, b):
return np.dot(np.linalg.inv(I), b)
# helper to retrieve the body angular velocity from a twist v_s in se(3)
def get_body_angular_velocity(v_s):
return v_s[0:3]
# helper to compute the velocity of a point p on a body given its spatial twist v_s
def get_body_linear_velocity(v_s, p):
dpdt = v_s[3:6] + np.cross(v_s[0:3], p)
return dpdt
# helper to build a body twist given the angular and linear velocity of
# the center of mass specified in the world frame, returns the body
# twist with respect to the origin (v_s)
def get_body_twist(w_m, v_m, p_m):
lin = v_m + np.cross(p_m, w_m)
return (*w_m, *lin)
def array_scan(in_array, out_array, inclusive=True):
if in_array.device != out_array.device:
raise RuntimeError("Array storage devices do not match")
if in_array.size != out_array.size:
raise RuntimeError("Array storage sizes do not match")
if in_array.dtype != out_array.dtype:
raise RuntimeError("Array data types do not match")
from warp.context import runtime
if in_array.device.is_cpu:
if in_array.dtype == wp.int32:
runtime.core.array_scan_int_host(in_array.ptr, out_array.ptr, in_array.size, inclusive)
elif in_array.dtype == wp.float32:
runtime.core.array_scan_float_host(in_array.ptr, out_array.ptr, in_array.size, inclusive)
else:
raise RuntimeError("Unsupported data type")
elif in_array.device.is_cuda:
if in_array.dtype == wp.int32:
runtime.core.array_scan_int_device(in_array.ptr, out_array.ptr, in_array.size, inclusive)
elif in_array.dtype == wp.float32:
runtime.core.array_scan_float_device(in_array.ptr, out_array.ptr, in_array.size, inclusive)
else:
raise RuntimeError("Unsupported data type")
def radix_sort_pairs(keys, values, count: int):
if keys.device != values.device:
raise RuntimeError("Array storage devices do not match")
if keys.size < 2 * count or values.size < 2 * count:
raise RuntimeError("Array storage must be large enough to contain 2*count elements")
from warp.context import runtime
if keys.device.is_cpu:
if keys.dtype == wp.int32 and values.dtype == wp.int32:
runtime.core.radix_sort_pairs_int_host(keys.ptr, values.ptr, count)
else:
raise RuntimeError("Unsupported data type")
elif keys.device.is_cuda:
if keys.dtype == wp.int32 and values.dtype == wp.int32:
runtime.core.radix_sort_pairs_int_device(keys.ptr, values.ptr, count)
else:
raise RuntimeError("Unsupported data type")
def runlength_encode(values, run_values, run_lengths, run_count=None, value_count=None):
if run_values.device != values.device or run_lengths.device != values.device:
raise RuntimeError("Array storage devices do not match")
if value_count is None:
value_count = values.size
if run_values.size < value_count or run_lengths.size < value_count:
raise RuntimeError("Output array storage sizes must be at least equal to value_count")
if values.dtype != run_values.dtype:
raise RuntimeError("values and run_values data types do not match")
if run_lengths.dtype != wp.int32:
raise RuntimeError("run_lengths array must be of type int32")
# User can provide a device output array for storing the number of runs
# For convenience, if no such array is provided, number of runs is returned on host
if run_count is None:
host_return = True
run_count = wp.empty(shape=(1,), dtype=int, device=values.device)
else:
host_return = False
if run_count.device != values.device:
raise RuntimeError("run_count storage devices does not match other arrays")
if run_count.dtype != wp.int32:
raise RuntimeError("run_count array must be of type int32")
from warp.context import runtime
if values.device.is_cpu:
if values.dtype == wp.int32:
runtime.core.runlength_encode_int_host(
values.ptr, run_values.ptr, run_lengths.ptr, run_count.ptr, value_count
)
else:
raise RuntimeError("Unsupported data type")
elif values.device.is_cuda:
if values.dtype == wp.int32:
runtime.core.runlength_encode_int_device(
values.ptr, run_values.ptr, run_lengths.ptr, run_count.ptr, value_count
)
else:
raise RuntimeError("Unsupported data type")
if host_return:
return int(run_count.numpy()[0])
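# Hedged usage sketch for runlength_encode using the convenience host-return path:
#
# values = wp.array([7, 7, 1, 1, 1, 4], dtype=wp.int32, device="cpu")
# run_values = wp.empty(shape=(values.size,), dtype=wp.int32, device="cpu")
# run_lengths = wp.empty(shape=(values.size,), dtype=wp.int32, device="cpu")
# num_runs = runlength_encode(values, run_values, run_lengths)
# # expected: num_runs == 3, run_values[:3] == [7, 1, 4], run_lengths[:3] == [2, 3, 1]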
def array_sum(values, out=None, value_count=None, axis=None):
if value_count is None:
if axis is None:
value_count = values.size
else:
value_count = values.shape[axis]
if axis is None:
output_shape = (1,)
else:
def output_dim(ax, dim):
return 1 if ax == axis else dim
output_shape = tuple(output_dim(ax, dim) for ax, dim in enumerate(values.shape))
type_length = wp.types.type_length(values.dtype)
scalar_type = wp.types.type_scalar_type(values.dtype)
# User can provide a device output array for storing the result
# For convenience, if no such array is provided, the sum is returned on host
if out is None:
host_return = True
out = wp.empty(shape=output_shape, dtype=values.dtype, device=values.device)
else:
host_return = False
if out.device != values.device:
raise RuntimeError("out storage device should match values array")
if out.dtype != values.dtype:
raise RuntimeError(f"out array should have type {values.dtype.__name__}")
if out.shape != output_shape:
raise RuntimeError(f"out array should have shape {output_shape}")
from warp.context import runtime
if values.device.is_cpu:
if scalar_type == wp.float32:
native_func = runtime.core.array_sum_float_host
elif scalar_type == wp.float64:
native_func = runtime.core.array_sum_double_host
else:
raise RuntimeError("Unsupported data type")
elif values.device.is_cuda:
if scalar_type == wp.float32:
native_func = runtime.core.array_sum_float_device
elif scalar_type == wp.float64:
native_func = runtime.core.array_sum_double_device
else:
raise RuntimeError("Unsupported data type")
if axis is None:
stride = wp.types.type_size_in_bytes(values.dtype)
native_func(values.ptr, out.ptr, value_count, stride, type_length)
if host_return:
return out.numpy()[0]
else:
stride = values.strides[axis]
for idx in np.ndindex(output_shape):
out_offset = sum(i * s for i, s in zip(idx, out.strides))
val_offset = sum(i * s for i, s in zip(idx, values.strides))
native_func(
values.ptr + val_offset,
out.ptr + out_offset,
value_count,
stride,
type_length,
)
if host_return:
return out
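# Illustrative sketch for array_sum, full reduction and per-axis reduction
# (assumes numpy imported as np):
#
# a = wp.array(np.ones((4, 3), dtype=np.float32), dtype=wp.float32, device="cpu")
# total = array_sum(a)            # expected: 12.0, returned on host
# per_col = array_sum(a, axis=0)  # expected: array of shape (1, 3) filled with 4.0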
def array_inner(a, b, out=None, count=None, axis=None):
if a.size != b.size:
raise RuntimeError("Array storage sizes do not match")
if a.device != b.device:
raise RuntimeError("Array storage sizes do not match")
if a.dtype != b.dtype:
raise RuntimeError("Array data types do not match")
if count is None:
if axis is None:
count = a.size
else:
count = a.shape[axis]
if axis is None:
output_shape = (1,)
else:
def output_dim(ax, dim):
return 1 if ax == axis else dim
output_shape = tuple(output_dim(ax, dim) for ax, dim in enumerate(a.shape))
type_length = wp.types.type_length(a.dtype)
scalar_type = wp.types.type_scalar_type(a.dtype)
# User can provide a device output array for storing the result
# For convenience, if no such array is provided, the inner product is returned on host
if out is None:
host_return = True
out = wp.empty(shape=output_shape, dtype=scalar_type, device=a.device)
else:
host_return = False
if out.device != a.device:
raise RuntimeError("out storage device should match values array")
if out.dtype != scalar_type:
raise RuntimeError(f"out array should have type {scalar_type.__name__}")
if out.shape != output_shape:
raise RuntimeError(f"out array should have shape {output_shape}")
from warp.context import runtime
if a.device.is_cpu:
if scalar_type == wp.float32:
native_func = runtime.core.array_inner_float_host
elif scalar_type == wp.float64:
native_func = runtime.core.array_inner_double_host
else:
raise RuntimeError("Unsupported data type")
elif a.device.is_cuda:
if scalar_type == wp.float32:
native_func = runtime.core.array_inner_float_device
elif scalar_type == wp.float64:
native_func = runtime.core.array_inner_double_device
else:
raise RuntimeError("Unsupported data type")
if axis is None:
stride_a = wp.types.type_size_in_bytes(a.dtype)
stride_b = wp.types.type_size_in_bytes(b.dtype)
native_func(a.ptr, b.ptr, out.ptr, count, stride_a, stride_b, type_length)
if host_return:
return out.numpy()[0]
else:
stride_a = a.strides[axis]
stride_b = b.strides[axis]
for idx in np.ndindex(output_shape):
out_offset = sum(i * s for i, s in zip(idx, out.strides))
a_offset = sum(i * s for i, s in zip(idx, a.strides))
b_offset = sum(i * s for i, s in zip(idx, b.strides))
native_func(
a.ptr + a_offset,
b.ptr + b_offset,
out.ptr + out_offset,
count,
stride_a,
stride_b,
type_length,
)
if host_return:
return out
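# Hedged usage sketch for array_inner (dot product, returned on host):
#
# a = wp.array([1.0, 2.0, 3.0], dtype=wp.float32, device="cpu")
# b = wp.array([4.0, 5.0, 6.0], dtype=wp.float32, device="cpu")
# print(array_inner(a, b))  # expected: 32.0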
_copy_kernel_cache = dict()
def array_cast(in_array, out_array, count=None):
def make_copy_kernel(dest_dtype, src_dtype):
import re
import warp.context
def copy_kernel(
dest: Any,
src: Any,
):
dest[wp.tid()] = dest_dtype(src[wp.tid()])
module = wp.get_module(copy_kernel.__module__)
key = f"{copy_kernel.__name__}_{warp.context.type_str(src_dtype)}_{warp.context.type_str(dest_dtype)}"
key = re.sub("[^0-9a-zA-Z_]+", "", key)
if key not in _copy_kernel_cache:
_copy_kernel_cache[key] = wp.Kernel(func=copy_kernel, key=key, module=module)
return _copy_kernel_cache[key]
if in_array.device != out_array.device:
raise RuntimeError("Array storage devices do not match")
in_array_data_shape = getattr(in_array.dtype, "_shape_", ())
out_array_data_shape = getattr(out_array.dtype, "_shape_", ())
if in_array.ndim != out_array.ndim or in_array_data_shape != out_array_data_shape:
# Number of dimensions or data type shape do not match.
# Flatten arrays and do cast at the scalar level
in_array = in_array.flatten()
out_array = out_array.flatten()
in_array_data_length = warp.types.type_length(in_array.dtype)
out_array_data_length = warp.types.type_length(out_array.dtype)
in_array_scalar_type = wp.types.type_scalar_type(in_array.dtype)
out_array_scalar_type = wp.types.type_scalar_type(out_array.dtype)
in_array = wp.array(
data=None,
ptr=in_array.ptr,
capacity=in_array.capacity,
owner=False,
device=in_array.device,
dtype=in_array_scalar_type,
shape=in_array.shape[0] * in_array_data_length,
)
out_array = wp.array(
data=None,
ptr=out_array.ptr,
capacity=out_array.capacity,
owner=False,
device=out_array.device,
dtype=out_array_scalar_type,
shape=out_array.shape[0] * out_array_data_length,
)
if count is not None:
count *= in_array_data_length
if count is None:
count = in_array.size
if in_array.ndim == 1:
dim = count
elif count < in_array.size:
raise RuntimeError("Partial cast is not supported for arrays with more than one dimension")
else:
dim = in_array.shape
if in_array.dtype == out_array.dtype:
# Same data type, can simply copy
wp.copy(dest=out_array, src=in_array, count=count)
else:
copy_kernel = make_copy_kernel(src_dtype=in_array.dtype, dest_dtype=out_array.dtype)
wp.launch(kernel=copy_kernel, dim=dim, inputs=[out_array, in_array], device=out_array.device)
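# Hedged usage sketch for array_cast (element-wise int32 -> float64 conversion):
#
# src = wp.array([1, 2, 3], dtype=wp.int32, device="cpu")
# dst = wp.empty(shape=(3,), dtype=wp.float64, device="cpu")
# array_cast(src, dst)
# print(dst.numpy())  # expected: [1. 2. 3.]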
# code snippet for invoking cProfile
# cp = cProfile.Profile()
# cp.enable()
# for i in range(1000):
# self.state = self.integrator.forward(self.model, self.state, self.sim_dt)
# cp.disable()
# cp.print_stats(sort='tottime')
# exit(0)
# helper kernels for initializing NVDB volumes from a dense array
@wp.kernel
def copy_dense_volume_to_nano_vdb_v(volume: wp.uint64, values: wp.array(dtype=wp.vec3, ndim=3)):
i, j, k = wp.tid()
wp.volume_store_v(volume, i, j, k, values[i, j, k])
@wp.kernel
def copy_dense_volume_to_nano_vdb_f(volume: wp.uint64, values: wp.array(dtype=wp.float32, ndim=3)):
i, j, k = wp.tid()
wp.volume_store_f(volume, i, j, k, values[i, j, k])
@wp.kernel
def copy_dense_volume_to_nano_vdb_i(volume: wp.uint64, values: wp.array(dtype=wp.int32, ndim=3)):
i, j, k = wp.tid()
wp.volume_store_i(volume, i, j, k, values[i, j, k])
# represents an edge between v0, v1 with connected faces f0, f1, and opposite vertices o0 and o1
# winding is such that the first tri can be reconstructed as {v0, v1, o0}, and the second tri as {v1, v0, o1}
class MeshEdge:
def __init__(self, v0, v1, o0, o1, f0, f1):
self.v0 = v0 # vertex 0
self.v1 = v1 # vertex 1
self.o0 = o0 # first opposite vertex
self.o1 = o1 # second opposite vertex
self.f0 = f0 # index of the first tri
self.f1 = f1 # index of the second tri
class MeshAdjacency:
def __init__(self, indices, num_tris):
# map edges (v0, v1) to faces (f0, f1)
self.edges = {}
self.indices = indices
for index, tri in enumerate(indices):
self.add_edge(tri[0], tri[1], tri[2], index)
self.add_edge(tri[1], tri[2], tri[0], index)
self.add_edge(tri[2], tri[0], tri[1], index)
def add_edge(self, i0, i1, o, f): # edge vertices i0, i1; opposite vertex o; index f of the incident triangle
key = (min(i0, i1), max(i0, i1))
edge = None
if key in self.edges:
edge = self.edges[key]
if edge.f1 != -1:
print("Detected non-manifold edge")
return
else:
# update other side of the edge
edge.o1 = o
edge.f1 = f
else:
# create new edge with opposite yet to be filled
edge = MeshEdge(i0, i1, o, -1, f, -1)
self.edges[key] = edge
def opposite_vertex(self, edge):
pass
def mem_report():
def _mem_report(tensors, mem_type):
"""Print the selected tensors of type
There are two major storage types in our major concern:
- GPU: tensors transferred to CUDA devices
- CPU: tensors remaining on the system memory (usually unimportant)
Args:
- tensors: the tensors of specified type
- mem_type: 'CPU' or 'GPU' in current implementation"""
total_numel = 0
total_mem = 0
visited_data = []
for tensor in tensors:
if tensor.is_sparse:
continue
# a data_ptr indicates a memory block allocated
data_ptr = tensor.storage().data_ptr()
if data_ptr in visited_data:
continue
visited_data.append(data_ptr)
numel = tensor.storage().size()
total_numel += numel
element_size = tensor.storage().element_size()
mem = numel * element_size / 1024 / 1024 # convert bytes to MBytes
total_mem += mem
element_type = type(tensor).__name__
size = tuple(tensor.size())
# print('%s\t\t%s\t\t%.2f' % (
# element_type,
# size,
# mem) )
print("Type: %s Total Tensors: %d \tUsed Memory Space: %.2f MBytes" % (mem_type, total_numel, total_mem))
import gc
import torch
gc.collect()
LEN = 65
objects = gc.get_objects()
# print('%s\t%s\t\t\t%s' %('Element type', 'Size', 'Used MEM(MBytes)') )
tensors = [obj for obj in objects if torch.is_tensor(obj)]
cuda_tensors = [t for t in tensors if t.is_cuda]
host_tensors = [t for t in tensors if not t.is_cuda]
_mem_report(cuda_tensors, "GPU")
_mem_report(host_tensors, "CPU")
print("=" * LEN)
def lame_parameters(E, nu):
l = (E * nu) / ((1.0 + nu) * (1.0 - 2.0 * nu))
mu = E / (2.0 * (1.0 + nu))
return (l, mu)
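# Hedged example: steel-like inputs E = 200 GPa, nu = 0.3 yield
# lambda ~= 115.4 GPa and mu ~= 76.9 GPa.
#
# l, mu = lame_parameters(200.0e9, 0.3)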
class ScopedDevice:
def __init__(self, device):
self.device = wp.get_device(device)
def __enter__(self):
# save the previous default device
self.saved_device = self.device.runtime.default_device
# make this the default device
self.device.runtime.default_device = self.device
# make it the current CUDA device so that device alias "cuda" will evaluate to this device
self.device.context_guard.__enter__()
return self.device
def __exit__(self, exc_type, exc_value, traceback):
# restore original CUDA context
self.device.context_guard.__exit__(exc_type, exc_value, traceback)
# restore original target device
self.device.runtime.default_device = self.saved_device
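# Hedged usage sketch for ScopedDevice (assumes a CUDA-capable device is present):
#
# with ScopedDevice("cuda:0"):
#     a = wp.zeros(10, dtype=float)  # allocated on cuda:0 without an explicit device=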
class ScopedStream:
def __init__(self, stream):
self.stream = stream
if stream is not None:
self.device = stream.device
self.device_scope = ScopedDevice(self.device)
def __enter__(self):
if self.stream is not None:
self.device_scope.__enter__()
self.saved_stream = self.device.stream
self.device.stream = self.stream
return self.stream
def __exit__(self, exc_type, exc_value, traceback):
if self.stream is not None:
self.device.stream = self.saved_stream
self.device_scope.__exit__(exc_type, exc_value, traceback)
# timer utils
class ScopedTimer:
indent = -1
enabled = True
def __init__(
self,
name,
active=True,
print=True,
detailed=False,
dict=None,
use_nvtx=False,
color="rapids",
synchronize=False,
):
"""Context manager object for a timer
Parameters:
name (str): Name of timer
active (bool): Enables this timer
print (bool): At context manager exit, print elapsed time to sys.stdout
detailed (bool): Collects additional profiling data using cProfile and calls ``print_stats()`` at context exit
dict (dict): A dictionary of lists to which the elapsed time will be appended using ``name`` as a key
use_nvtx (bool): If true, timing functionality is replaced by an NVTX range
color (int or str): ARGB value (e.g. 0x00FFFF) or color name (e.g. 'cyan') associated with the NVTX range
synchronize (bool): Synchronize the CPU thread with any outstanding CUDA work to return accurate GPU timings
Attributes:
elapsed (float): The duration of the ``with`` block used with this object
"""
self.name = name
self.active = active and self.enabled
self.print = print
self.detailed = detailed
self.dict = dict
self.use_nvtx = use_nvtx
self.color = color
self.synchronize = synchronize
self.elapsed = 0.0
if self.dict is not None:
if name not in self.dict:
self.dict[name] = []
def __enter__(self):
if self.active:
if self.synchronize:
wp.synchronize()
if self.use_nvtx:
import nvtx
self.nvtx_range_id = nvtx.start_range(self.name, color=self.color)
return
self.start = timeit.default_timer()
ScopedTimer.indent += 1
if self.detailed:
self.cp = cProfile.Profile()
self.cp.clear()
self.cp.enable()
return self
def __exit__(self, exc_type, exc_value, traceback):
if self.active:
if self.synchronize:
wp.synchronize()
if self.use_nvtx:
import nvtx
nvtx.end_range(self.nvtx_range_id)
return
if self.detailed:
self.cp.disable()
self.cp.print_stats(sort="tottime")
self.elapsed = (timeit.default_timer() - self.start) * 1000.0
if self.dict is not None:
self.dict[self.name].append(self.elapsed)
indent = ""
for i in range(ScopedTimer.indent):
indent += "\t"
if self.print:
print("{}{} took {:.2f} ms".format(indent, self.name, self.elapsed))
ScopedTimer.indent -= 1
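# Illustrative sketch for ScopedTimer, accumulating elapsed times into a dict:
#
# timings = {}
# with ScopedTimer("step", print=False, dict=timings):
#     wp.synchronize()
# print(timings["step"])  # list containing one elapsed time, in milliseconds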
| warp-main | warp/utils.py |
import warp as wp
import warp.types
import warp.utils
from typing import Tuple, Any, Union
_struct_cache = dict()
class BsrMatrix:
"""Untyped base class for BSR and CSR matrices.
Should not be constructed directly but through functions such as :func:`bsr_zeros`.
Attributes:
nrow (int): Number of rows of blocks
ncol (int): Number of columns of blocks
nnz (int): Number of non-zero blocks: equal to `offsets[-1]`, cached on host for convenience
offsets (wp.array(dtype=int)): Array of size at least 1 + nrows containing start and end offsets of blocks in each row
columns (wp.array(dtype=int)): Array of size at least equal to nnz containing block column indices
values (wp.array(dtype=dtype)): Array of size at least equal to nnz containing block values
"""
@property
def scalar_type(self) -> type:
"""Scalar type for each of the blocks' coefficients. FOr CSR matrices, this is equal to the block type"""
return warp.types.type_scalar_type(self.values.dtype)
@property
def block_shape(self) -> Tuple[int, int]:
"""Shape of the individual blocks"""
return getattr(self.values.dtype, "_shape_", (1, 1))
@property
def block_size(self) -> Tuple[int, int]:
"""Size of the individual blocks, i.e. number of rows per block times number of columsn per block"""
return warp.types.type_length(self.values.dtype)
@property
def shape(self) -> Tuple[int, int]:
"""Shape of the matrix, i.e. number of rows/columns of blocks times number of rows/columsn per block"""
block_shape = self.block_shape
return (self.nrow * block_shape[0], self.ncol * block_shape[1])
def bsr_matrix_t(dtype: type):
dtype = wp.types.type_to_warp(dtype)
class BsrMatrixTyped(BsrMatrix):
nrow: int
"""Number of rows of blocks"""
ncol: int
"""Number of columns of blocks"""
nnz: int
"""Number of non-zero blocks: equal to offsets[-1], cached on host for convenience"""
offsets: wp.array(dtype=int)
"""Array of size at least 1 + nrows"""
columns: wp.array(dtype=int)
"""Array of size at least equal to nnz"""
values: wp.array(dtype=dtype)
module = wp.get_module(BsrMatrix.__module__)
if hasattr(dtype, "_shape_"):
type_str = f"{warp.types.type_scalar_type(dtype).__name__}_{dtype._shape_[0]}_{dtype._shape_[1]}"
else:
type_str = dtype.__name__
key = f"{BsrMatrix.__qualname__}_{type_str}"
if key not in _struct_cache:
_struct_cache[key] = wp.codegen.Struct(
cls=BsrMatrixTyped,
key=key,
module=module,
)
return _struct_cache[key]
def bsr_zeros(
rows_of_blocks: int, cols_of_blocks: int, block_type: type, device: wp.context.Devicelike = None
) -> BsrMatrix:
"""
Constructs an empty BSR or CSR matrix with the given shape
"""
bsr = bsr_matrix_t(block_type)()
bsr.nrow = rows_of_blocks
bsr.ncol = cols_of_blocks
bsr.nnz = 0
bsr.columns = wp.empty(shape=(0,), dtype=int, device=device)
bsr.values = wp.empty(shape=(0,), dtype=block_type, device=device)
bsr.offsets = wp.zeros(shape=(bsr.nrow + 1,), dtype=int, device=device)
return bsr
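# Hedged usage sketch: an empty 4x4 matrix of 2x2 float blocks.
#
# A = bsr_zeros(4, 4, block_type=wp.mat22, device="cpu")
# print(A.shape)        # expected: (8, 8)
# print(A.block_shape)  # expected: (2, 2)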
def _bsr_ensure_fits(bsr: BsrMatrix, nrow: int = None, nnz: int = None):
if nrow is None:
nrow = bsr.nrow
if nnz is None:
nnz = bsr.nnz
if bsr.offsets.size < nrow + 1:
bsr.offsets = wp.empty(shape=(nrow + 1,), dtype=int, device=bsr.offsets.device)
if bsr.columns.size < nnz:
bsr.columns = wp.empty(shape=(nnz,), dtype=int, device=bsr.columns.device)
if bsr.values.size < nnz:
bsr.values = wp.empty(shape=(nnz,), dtype=bsr.values.dtype, device=bsr.values.device)
def bsr_set_from_triplets(
dest: BsrMatrix,
rows: wp.array(dtype=int),
columns: wp.array(dtype=int),
values: wp.array(dtype=Any),
):
"""
Fills a BSR matrix `dest` with values defined by COO triplets `rows`, `columns`, `values`.
Values must be either one-dimensional with data type identical to the `dest` matrix block type,
or a 3d array with data type equal to the `dest` matrix scalar type.
Previous blocks of `dest` are discarded.
"""
if values.device != columns.device or values.device != rows.device or values.device != dest.values.device:
raise ValueError("All arguments must reside on the same device")
if values.shape[0] != rows.shape[0] or values.shape[0] != columns.shape[0]:
raise ValueError("All triplet arrays must have the same length")
# Accept either array1d(dtype) or contiguous array3d(scalar_type) as values
if values.ndim == 1:
if values.dtype != dest.values.dtype:
raise ValueError("Values array type must correspond to that of dest matrix")
elif values.ndim == 3:
if values.shape[1:] != dest.block_shape:
raise ValueError(
f"Last two dimensions in values array ({values.shape[1:]}) shoudl correspond to matrix block shape {(dest.block_shape)})"
)
if warp.types.type_scalar_type(values.dtype) != dest.scalar_type:
raise ValueError("Scalar type of values array should correspond to that of matrix")
if not values.is_contiguous:
raise ValueError("Multi-dimensional values array should be contiguous")
else:
raise ValueError("Number of dimension for values array should be 1 or 3")
nnz = rows.shape[0]
# Increase dest array sizes if needed
_bsr_ensure_fits(dest, nnz=nnz)
device = dest.values.device
scalar_type = dest.scalar_type
from warp.context import runtime
native_func = None
if device.is_cpu:
if scalar_type == wp.float32:
native_func = runtime.core.bsr_matrix_from_triplets_float_host
elif scalar_type == wp.float64:
native_func = runtime.core.bsr_matrix_from_triplets_double_host
else:
if scalar_type == wp.float32:
native_func = runtime.core.bsr_matrix_from_triplets_float_device
elif scalar_type == wp.float64:
native_func = runtime.core.bsr_matrix_from_triplets_double_device
if not native_func:
raise NotImplementedError(f"bsr_from_triplets not implemented for scalar type {scalar_type}")
dest.nnz = native_func(
dest.block_shape[0],
dest.block_shape[1],
dest.nrow,
nnz,
rows.ptr,
columns.ptr,
values.ptr,
dest.offsets.ptr,
dest.columns.ptr,
dest.values.ptr,
)
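# Hedged usage sketch: a scalar (CSR) matrix assembled from COO triplets;
# this sketch assumes the native routine sums duplicate (row, column) entries.
#
# rows = wp.array([0, 1, 1], dtype=int, device="cpu")
# cols = wp.array([1, 0, 0], dtype=int, device="cpu")
# vals = wp.array([1.0, 2.0, 3.0], dtype=wp.float32, device="cpu")
# A = bsr_zeros(2, 2, block_type=wp.float32, device="cpu")
# bsr_set_from_triplets(A, rows, cols, vals)
# # expected: A.nnz == 2, with entries (0, 1) -> 1.0 and (1, 0) -> 5.0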
def bsr_assign(dest: BsrMatrix, src: BsrMatrix):
"""Copies the content of the `src` matrix to `dest`, possibly casting the block values."""
if dest.values.device != src.values.device:
raise ValueError("Source and destination matrices must reside on the same device")
if dest.block_shape != src.block_shape:
raise ValueError("Source and destination matrices must have the same block shape")
dest.nrow = src.nrow
dest.ncol = src.ncol
dest.nnz = src.nnz
_bsr_ensure_fits(dest)
wp.copy(dest=dest.offsets, src=src.offsets, count=src.nrow + 1)
if src.nnz > 0:
wp.copy(dest=dest.columns, src=src.columns, count=src.nnz)
warp.utils.array_cast(out_array=dest.values, in_array=src.values, count=src.nnz)
def bsr_copy(A: BsrMatrix, scalar_type=None):
"""Returns a copy of matrix A, possibly casting values to a new scalar type"""
if scalar_type is None:
block_type = A.values.dtype
elif A.block_shape == (1, 1):
block_type = scalar_type
else:
block_type = wp.types.matrix(shape=A.block_shape, dtype=scalar_type)
copy = bsr_zeros(rows_of_blocks=A.nrow, cols_of_blocks=A.ncol, block_type=block_type, device=A.values.device)
bsr_assign(dest=copy, src=A)
return copy
def bsr_set_transpose(dest: BsrMatrix, src: BsrMatrix):
"""Assigns the transposed matrix `src` to matrix `dest`"""
if dest.values.device != src.values.device:
raise ValueError("All arguments must reside on the same device")
if dest.scalar_type != src.scalar_type:
raise ValueError("All arguments must have the same scalar type")
if src.block_shape == (1, 1):
transpose_block_shape = (1, 1)
else:
transpose_block_shape = src.block_shape[::-1]
if dest.block_shape != transpose_block_shape:
raise ValueError(f"Destination block shape must be {transpose_block_shape}")
dest.nrow = src.ncol
dest.ncol = src.nrow
dest.nnz = src.nnz
# Increase dest array sizes if needed
_bsr_ensure_fits(dest)
from warp.context import runtime
native_func = None
if dest.values.device.is_cpu:
if dest.scalar_type == wp.float32:
native_func = runtime.core.bsr_transpose_float_host
elif dest.scalar_type == wp.float64:
native_func = runtime.core.bsr_transpose_double_host
else:
if dest.scalar_type == wp.float32:
native_func = runtime.core.bsr_transpose_float_device
elif dest.scalar_type == wp.float64:
native_func = runtime.core.bsr_transpose_double_device
if not native_func:
raise NotImplementedError(f"bsr_set_transpose not implemented for scalar type {dest.scalar_type}")
native_func(
src.block_shape[0],
src.block_shape[1],
src.nrow,
src.ncol,
src.nnz,
src.offsets.ptr,
src.columns.ptr,
src.values.ptr,
dest.offsets.ptr,
dest.columns.ptr,
dest.values.ptr,
)
def bsr_transposed(A: BsrMatrix):
"""Returns a copy of the transposed matrix `A`"""
if A.block_shape == (1, 1):
block_type = A.values.dtype
else:
block_type = wp.types.matrix(shape=A.block_shape[::-1], dtype=A.scalar_type)
transposed = bsr_zeros(rows_of_blocks=A.ncol, cols_of_blocks=A.nrow, block_type=block_type, device=A.values.device)
bsr_set_transpose(dest=transposed, src=A)
return transposed
@wp.kernel
def _bsr_get_diag_kernel(
A_offsets: wp.array(dtype=int),
A_columns: wp.array(dtype=int),
A_values: wp.array(dtype=Any),
out: wp.array(dtype=Any),
):
row = wp.tid()
beg = A_offsets[row]
end = A_offsets[row + 1]
diag = wp.lower_bound(A_columns, beg, end, row)
if A_columns[diag] == row:
out[row] = A_values[diag]
def bsr_get_diag(A: BsrMatrix, out: wp.array = None):
"""Returns the block diagonal of a square sparse matrix"""
if A.nrow != A.ncol:
raise ValueError("bsr_get_diag is only available for square sparse matrices")
if out is None:
out = wp.zeros(shape=(A.nrow,), dtype=A.values.dtype, device=A.values.device)
else:
if out.dtype != A.values.dtype:
raise ValueError(f"Output array must have type {A.values.dtype}")
if out.device != A.values.device:
raise ValueError(f"Output array must reside on device {A.values.device}")
if out.shape[0] < A.nrow:
raise ValueError(f"Output array must be of length at least {A.nrow}")
wp.launch(
kernel=_bsr_get_diag_kernel, dim=A.nrow, device=A.values.device, inputs=[A.offsets, A.columns, A.values, out]
)
return out
@wp.kernel
def _bsr_set_diag_kernel(
A_offsets: wp.array(dtype=int),
A_columns: wp.array(dtype=int),
):
row = wp.tid()
A_offsets[row + 1] = row + 1
A_columns[row] = row
if row == 0:
A_offsets[0] = 0
def bsr_set_diag(A: BsrMatrix, diag: wp.array):
"""Sets A as a block-diagonal square matrix"""
A.nrow = diag.shape[0]
A.ncol = diag.shape[0]
A.nnz = diag.shape[0]
A.values = diag
if A.columns.size < A.nrow:
A.columns = wp.empty(shape=(A.nrow,), dtype=int, device=diag.device)
if A.offsets.size < A.nrow + 1:
A.offsets = wp.empty(shape=(A.nrow + 1,), dtype=int, device=diag.device)
wp.launch(kernel=_bsr_set_diag_kernel, dim=A.nrow, device=A.values.device, inputs=[A.offsets, A.columns])
def bsr_diag(diag: wp.array):
"""Creates a square block-diagonal BSR matrix from the values array `diag`"""
A = bsr_zeros(rows_of_blocks=diag.shape[0], cols_of_blocks=diag.shape[0], block_type=diag.dtype, device=diag.device)
bsr_set_diag(A, diag)
return A
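# Illustrative sketch: a block-diagonal matrix from three 2x2 identity blocks
# (assumes numpy imported as np):
#
# blocks = wp.array(np.tile(np.eye(2, dtype=np.float32), (3, 1, 1)), dtype=wp.mat22, device="cpu")
# D = bsr_diag(blocks)  # 3x3 blocks of 2x2 -> overall shape (6, 6)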
@wp.kernel
def _bsr_get_block_row(dest_offset: int, bsr_offsets: wp.array(dtype=int), rows: wp.array(dtype=int)):
i = wp.tid()
row = wp.lower_bound(bsr_offsets, i + 1) - 1
rows[dest_offset + i] = row
@wp.kernel
def _bsr_axpy_add_block(
src_offset: int,
scale: Any,
rows: wp.array(dtype=int),
cols: wp.array(dtype=int),
dst_offsets: wp.array(dtype=int),
dst_columns: wp.array(dtype=int),
src_values: wp.array(dtype=Any),
dst_values: wp.array(dtype=Any),
):
i = wp.tid()
row = rows[i + src_offset]
col = cols[i + src_offset]
beg = dst_offsets[row]
end = dst_offsets[row + 1]
block = wp.lower_bound(dst_columns, beg, end, col)
dst_values[block] = dst_values[block] + scale * src_values[i]
def bsr_axpy(x: BsrMatrix, y: BsrMatrix, alpha: float = 1.0, beta: float = 1.0):
"""
Performs the operation `y := alpha * x + beta * y` on BSR matrices `x` and `y`
"""
if y is None:
y = bsr_zeros(x.nrow, x.ncol, block_type=x.values.dtype, device=x.values.device)
beta = 0.0
device = y.values.device
if x.values.device != y.values.device:
raise ValueError("All arguments must reside on the same device")
if x.scalar_type != y.scalar_type or x.block_shape != y.block_shape:
raise ValueError("Matrices must have the same block type")
if x.nrow != y.nrow or x.ncol != y.ncol:
raise ValueError("Matrices must have the same number of rows and columns")
alpha = y.scalar_type(alpha)
beta = y.scalar_type(beta)
sum_nnz = x.nnz + y.nnz
sum_rows = wp.empty(shape=(sum_nnz), dtype=int, device=device)
sum_cols = wp.empty(shape=(sum_nnz), dtype=int, device=device)
if y.nnz > 0:
wp.copy(sum_cols, y.columns, 0, 0, y.nnz)
wp.launch(kernel=_bsr_get_block_row, device=device, dim=y.nnz, inputs=[0, y.offsets, sum_rows])
if x.nnz > 0:
wp.copy(sum_cols, x.columns, y.nnz, 0, x.nnz)
wp.launch(kernel=_bsr_get_block_row, device=device, dim=x.nnz, inputs=[y.nnz, x.offsets, sum_rows])
# Increase dest array sizes if needed
if y.columns.shape[0] < sum_nnz:
y.columns = wp.empty(shape=(sum_nnz,), dtype=int, device=device)
from warp.context import runtime
if device.is_cpu:
native_func = runtime.core.bsr_matrix_from_triplets_float_host
else:
native_func = runtime.core.bsr_matrix_from_triplets_float_device
sum_nnz = native_func(
y.block_shape[0],
y.block_shape[1],
y.nrow,
sum_nnz,
sum_rows.ptr,
sum_cols.ptr,
0,
y.offsets.ptr,
y.columns.ptr,
0,
)
sum_values = wp.zeros(shape=(sum_nnz,), dtype=y.values.dtype, device=device)
wp.launch(
kernel=_bsr_axpy_add_block,
device=device,
dim=y.nnz,
inputs=[0, beta, sum_rows, sum_cols, y.offsets, y.columns, y.values, sum_values],
)
wp.launch(
kernel=_bsr_axpy_add_block,
device=device,
dim=x.nnz,
inputs=[y.nnz, alpha, sum_rows, sum_cols, y.offsets, y.columns, x.values, sum_values],
)
y.values = sum_values
y.nnz = sum_nnz
return y
@wp.kernel
def _bsr_mm_count_coeffs(
z_nnz: int,
x_offsets: wp.array(dtype=int),
x_columns: wp.array(dtype=int),
y_offsets: wp.array(dtype=int),
counts: wp.array(dtype=int),
):
row = wp.tid()
count = int(0)
x_beg = x_offsets[row]
x_end = x_offsets[row + 1]
for x_block in range(x_beg, x_end):
x_col = x_columns[x_block]
count += y_offsets[x_col + 1] - y_offsets[x_col]
counts[row + 1] = count
if row == 0:
counts[0] = z_nnz
@wp.kernel
def _bsr_mm_list_coeffs(
x_offsets: wp.array(dtype=int),
x_columns: wp.array(dtype=int),
y_offsets: wp.array(dtype=int),
y_columns: wp.array(dtype=int),
mm_offsets: wp.array(dtype=int),
mm_rows: wp.array(dtype=int),
mm_cols: wp.array(dtype=int),
):
row = wp.tid()
mm_block = mm_offsets[row]
x_beg = x_offsets[row]
x_end = x_offsets[row + 1]
for x_block in range(x_beg, x_end):
x_col = x_columns[x_block]
y_beg = y_offsets[x_col]
y_end = y_offsets[x_col + 1]
for y_block in range(y_beg, y_end):
mm_cols[mm_block] = y_columns[y_block]
mm_rows[mm_block] = row
mm_block += 1
@wp.kernel
def _bsr_mm_compute_values(
alpha: Any,
x_offsets: wp.array(dtype=int),
x_columns: wp.array(dtype=int),
x_values: wp.array(dtype=Any),
y_offsets: wp.array(dtype=int),
y_columns: wp.array(dtype=int),
y_values: wp.array(dtype=Any),
mm_offsets: wp.array(dtype=int),
mm_cols: wp.array(dtype=int),
mm_values: wp.array(dtype=Any),
):
row = wp.tid()
mm_beg = mm_offsets[row]
mm_end = mm_offsets[row + 1]
x_beg = x_offsets[row]
x_end = x_offsets[row + 1]
for x_block in range(x_beg, x_end):
x_col = x_columns[x_block]
ax_val = alpha * x_values[x_block]
y_beg = y_offsets[x_col]
y_end = y_offsets[x_col + 1]
for y_block in range(y_beg, y_end):
mm_block = wp.lower_bound(mm_cols, mm_beg, mm_end, y_columns[y_block])
mm_values[mm_block] = mm_values[mm_block] + ax_val * y_values[y_block]
_pinned_temp_count_buffer = {}
def _get_pinned_temp_count_buffer(device):
device = str(device)
if device not in _pinned_temp_count_buffer:
_pinned_temp_count_buffer[device] = wp.empty(shape=(1,), dtype=int, pinned=True, device="cpu")
return _pinned_temp_count_buffer[device]
def bsr_mm(x: BsrMatrix, y: BsrMatrix, z: BsrMatrix = None, alpha: float = 1.0, beta: float = 0.0):
"""
Performs the operation `z := alpha * x * y + beta * z` on BSR matrices `x`, `y` and `z`
"""
if z is None:
z_block_shape = (x.block_shape[0], y.block_shape[1])
if z_block_shape == (1, 1):
z_block_type = x.scalar_type
else:
z_block_type = wp.types.matrix(shape=z_block_shape, dtype=x.scalar_type)
z = bsr_zeros(x.nrow, y.ncol, block_type=z_block_type, device=x.values.device)
beta = 0.0
if x.values.device != y.values.device or x.values.device != z.values.device:
raise ValueError("All arguments must reside on the same device")
if x.scalar_type != y.scalar_type or x.scalar_type != z.scalar_type:
raise ValueError("Matrices must have the same scalar type")
if x.block_shape[0] != z.block_shape[0] or y.block_shape[1] != z.block_shape[1]:
raise ValueError("Incompatible blocks sizes for matrix multiplication")
if x.nrow != z.nrow or z.ncol != y.ncol:
raise ValueError("Incompatible number of rows/columns for matrix multiplication")
device = z.values.device
alpha = z.scalar_type(alpha)
beta = z.scalar_type(beta)
# Prefix sum of number of (unmerged) mm blocks per row
mm_row_counts = wp.empty(shape=(z.nrow + 1,), dtype=int, device=device)
wp.launch(
kernel=_bsr_mm_count_coeffs,
device=device,
dim=z.nrow,
inputs=[z.nnz, x.offsets, x.columns, y.offsets, mm_row_counts],
)
warp.utils.array_scan(mm_row_counts, mm_row_counts)
# Get back total counts on host
if device.is_cuda:
mm_tot_count = _get_pinned_temp_count_buffer(device)
wp.copy(dest=mm_tot_count, src=mm_row_counts, src_offset=z.nrow, count=1)
wp.synchronize_stream(wp.get_stream())
mm_nnz = int(mm_tot_count.numpy()[0])
else:
mm_nnz = int(mm_row_counts.numpy()[z.nrow])
mm_rows = wp.empty(shape=(mm_nnz), dtype=int, device=device)
mm_cols = wp.empty(shape=(mm_nnz), dtype=int, device=device)
# Copy z rows columns
wp.copy(mm_cols, z.columns, 0, 0, z.nnz)
wp.launch(kernel=_bsr_get_block_row, device=device, dim=z.nnz, inputs=[0, z.offsets, mm_rows])
# Fill unmerged mm blocks rows and columns
wp.launch(
kernel=_bsr_mm_list_coeffs,
device=device,
dim=z.nrow,
inputs=[x.offsets, x.columns, y.offsets, y.columns, mm_row_counts, mm_rows, mm_cols],
)
# Increase dest array sizes if needed
if z.columns.shape[0] < mm_nnz:
z.columns = wp.empty(shape=(mm_nnz,), dtype=int, device=device)
from warp.context import runtime
if device.is_cpu:
native_func = runtime.core.bsr_matrix_from_triplets_float_host
else:
native_func = runtime.core.bsr_matrix_from_triplets_float_device
mm_nnz = native_func(
z.block_shape[0],
z.block_shape[1],
z.nrow,
mm_nnz,
mm_rows.ptr,
mm_cols.ptr,
0,
z.offsets.ptr,
z.columns.ptr,
0,
)
mm_values = wp.zeros(shape=(mm_nnz,), dtype=z.values.dtype, device=device)
# Copy blocks from z
wp.launch(
kernel=_bsr_axpy_add_block,
device=device,
dim=z.nnz,
inputs=[0, beta, mm_rows, mm_cols, z.offsets, z.columns, z.values, mm_values],
)
# Update z to point to result blocks
z.values = mm_values
z.nnz = mm_nnz
# Add mm blocks to z values
if z.block_shape == (1, 1) and x.block_shape != (1, 1):
# Result block type is scalar, but operands are matrices
# Cast result to (1x1) matrix to perform multiplication
mm_values = mm_values.view(wp.types.matrix(shape=(1, 1), dtype=z.scalar_type))
wp.launch(
kernel=_bsr_mm_compute_values,
device=device,
dim=z.nrow,
inputs=[alpha, x.offsets, x.columns, x.values, y.offsets, y.columns, y.values, z.offsets, z.columns, mm_values],
)
return z
@wp.kernel
def _bsr_mv_kernel(
alpha: Any,
A_offsets: wp.array(dtype=int),
A_columns: wp.array(dtype=int),
A_values: wp.array(dtype=Any),
x: wp.array(dtype=Any),
beta: Any,
y: wp.array(dtype=Any),
):
row = wp.tid()
beg = A_offsets[row]
end = A_offsets[row + 1]
yr = y[row]
v = yr - yr # workaround to construct a zero of the correct type
for block in range(beg, end):
v = v + A_values[block] * x[A_columns[block]]
y[row] = beta * yr + alpha * v
def bsr_mv(A: BsrMatrix, x: wp.array, y: wp.array, alpha: float = 1.0, beta: float = 0.0):
"""
Naive implementation of sparse matrix-vector product, `y := alpha * A * x + beta * y`.
"""
alpha = A.scalar_type(alpha)
beta = A.scalar_type(beta)
# if A.scalar_type != x.dtype or A.scalar_type != y.dtype:
# raise ValueError("A, x and y must have the same data types")
if A.values.device != x.device or A.values.device != y.device:
raise ValueError("A, x and y must reide on the same device")
if x.shape[0] != A.ncol:
raise ValueError("Number of columns of A must match number of rows of x")
if y.shape[0] != A.nrow:
raise ValueError("Number of rows of A must match number of rows of y")
# Promote scalar vectors to length-1 vecs
block_shape = A.block_shape
if block_shape != (1, 1):
if block_shape[0] == 1:
if y.dtype == A.scalar_type:
y = y.view(dtype=wp.vec(length=1, dtype=A.scalar_type))
if block_shape[1] == 1:
if x.dtype == A.scalar_type:
x = x.view(dtype=wp.vec(length=1, dtype=A.scalar_type))
wp.launch(
kernel=_bsr_mv_kernel,
device=A.values.device,
dim=A.nrow,
inputs=[alpha, A.offsets, A.columns, A.values, x, beta, y],
)
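# Hedged usage sketch for bsr_mv with a scalar (CSR) matrix A of shape (2, 2),
# e.g. the one assembled in the triplet sketch above:
#
# x = wp.array([1.0, 2.0], dtype=wp.float32, device="cpu")
# y = wp.zeros(2, dtype=wp.float32, device="cpu")
# bsr_mv(A, x, y, alpha=1.0, beta=0.0)  # y := A @ x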
| warp-main | warp/sparse.py |
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import ctypes
import numpy
import warp
# return the warp device corresponding to a torch device
def device_from_torch(torch_device):
"""Return the warp device corresponding to a torch device."""
return warp.get_device(str(torch_device))
def device_to_torch(wp_device):
"""Return the torch device corresponding to a warp device."""
device = warp.get_device(wp_device)
if device.is_cpu or device.is_primary:
return str(device)
elif device.is_cuda and device.is_uva:
# it's not a primary context, but torch can access the data ptr directly thanks to UVA
return f"cuda:{device.ordinal}"
raise RuntimeError(f"Warp device {device} is not compatible with torch")
def dtype_from_torch(torch_dtype):
"""Return the Warp dtype corresponding to a torch dtype."""
# initialize lookup table on first call to defer torch import
if dtype_from_torch.type_map is None:
import torch
dtype_from_torch.type_map = {
torch.float64: warp.float64,
torch.float32: warp.float32,
torch.float16: warp.float16,
torch.int64: warp.int64,
torch.int32: warp.int32,
torch.int16: warp.int16,
torch.int8: warp.int8,
torch.uint8: warp.uint8,
torch.bool: warp.bool,
# currently unsupported by Warp
# torch.bfloat16:
# torch.complex64:
# torch.complex128:
}
warp_dtype = dtype_from_torch.type_map.get(torch_dtype)
if warp_dtype is not None:
return warp_dtype
else:
raise TypeError(f"Invalid or unsupported data type: {torch_dtype}")
dtype_from_torch.type_map = None
def dtype_is_compatible(torch_dtype, warp_dtype):
"""Evaluates whether the given torch dtype is compatible with the given warp dtype."""
# initialize lookup table on first call to defer torch import
if dtype_is_compatible.compatible_sets is None:
import torch
dtype_is_compatible.compatible_sets = {
torch.float64: {warp.float64},
torch.float32: {warp.float32},
torch.float16: {warp.float16},
# allow aliasing integer tensors as signed or unsigned integer arrays
torch.int64: {warp.int64, warp.uint64},
torch.int32: {warp.int32, warp.uint32},
torch.int16: {warp.int16, warp.uint16},
torch.int8: {warp.int8, warp.uint8},
torch.uint8: {warp.uint8, warp.int8},
torch.bool: {warp.bool, warp.uint8, warp.int8},
# currently unsupported by Warp
# torch.bfloat16:
# torch.complex64:
# torch.complex128:
}
compatible_set = dtype_is_compatible.compatible_sets.get(torch_dtype)
if compatible_set is not None:
if hasattr(warp_dtype, "_wp_scalar_type_"):
return warp_dtype._wp_scalar_type_ in compatible_set
else:
return warp_dtype in compatible_set
else:
raise TypeError(f"Invalid or unsupported data type: {torch_dtype}")
dtype_is_compatible.compatible_sets = None
# wrap a torch tensor to a wp array, data is not copied
def from_torch(t, dtype=None, requires_grad=None, grad=None):
"""Wrap a PyTorch tensor to a Warp array without copying the data.
Args:
t (torch.Tensor): The torch tensor to wrap.
dtype (warp.dtype, optional): The target data type of the resulting Warp array. Defaults to the tensor value type mapped to a Warp array value type.
requires_grad (bool, optional): Whether the resulting array should wrap the tensor's gradient, if it exists (the grad tensor will be allocated otherwise). Defaults to the tensor's `requires_grad` value.
grad (torch.Tensor or warp.array, optional): An existing gradient to attach to the resulting array; a torch tensor is wrapped without copying.
Returns:
warp.array: The wrapped array.
"""
if dtype is None:
dtype = dtype_from_torch(t.dtype)
elif not dtype_is_compatible(t.dtype, dtype):
raise RuntimeError(f"Incompatible data types: {t.dtype} and {dtype}")
# get size of underlying data type to compute strides
ctype_size = ctypes.sizeof(dtype._type_)
shape = tuple(t.shape)
strides = tuple(s * ctype_size for s in t.stride())
# if target is a vector or matrix type
# then check if trailing dimensions match
# the target type and update the shape
if hasattr(dtype, "_shape_"):
dtype_shape = dtype._shape_
dtype_dims = len(dtype._shape_)
if dtype_dims > len(shape) or dtype_shape != shape[-dtype_dims:]:
raise RuntimeError(
f"Could not convert Torch tensor with shape {shape} to Warp array with dtype={dtype}, ensure that source inner shape is {dtype_shape}"
)
# ensure the inner strides are contiguous
stride = ctype_size
for i in range(dtype_dims):
if strides[-i - 1] != stride:
raise RuntimeError(
f"Could not convert Torch tensor with shape {shape} to Warp array with dtype={dtype}, because the source inner strides are not contiguous"
)
stride *= dtype_shape[-i - 1]
shape = tuple(shape[:-dtype_dims]) or (1,)
strides = tuple(strides[:-dtype_dims]) or (ctype_size,)
requires_grad = t.requires_grad if requires_grad is None else requires_grad
if grad is not None:
if not isinstance(grad, warp.array):
import torch
if isinstance(grad, torch.Tensor):
grad = from_torch(grad, dtype=dtype)
else:
raise ValueError(f"Invalid gradient type: {type(grad)}")
elif requires_grad:
# wrap the tensor gradient, allocate if necessary
if t.grad is None:
# allocate a zero-filled gradient tensor if it doesn't exist
import torch
t.grad = torch.zeros_like(t, requires_grad=False)
grad = from_torch(t.grad, dtype=dtype)
a = warp.types.array(
ptr=t.data_ptr(),
dtype=dtype,
shape=shape,
strides=strides,
device=device_from_torch(t.device),
copy=False,
owner=False,
grad=grad,
requires_grad=requires_grad,
)
# save a reference to the source tensor, otherwise it will be deallocated
a._tensor = t
return a
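# Hedged usage sketch for from_torch (assumes torch is installed):
#
# import torch
# t = torch.zeros(8, 3, dtype=torch.float32)
# a = from_torch(t, dtype=warp.vec3)  # zero-copy view: a warp array of 8 vec3 elements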
def to_torch(a, requires_grad=None):
"""
Convert a Warp array to a PyTorch tensor without copying the data.
Args:
a (warp.array): The Warp array to convert.
requires_grad (bool, optional): Whether the resulting tensor should convert the array's gradient, if it exists, to a grad tensor. Defaults to the array's `requires_grad` value.
Returns:
torch.Tensor: The converted tensor.
"""
import torch
if requires_grad is None:
requires_grad = a.requires_grad
# Torch does not support structured arrays
if isinstance(a.dtype, warp.codegen.Struct):
raise RuntimeError("Cannot convert structured Warp arrays to Torch.")
if a.device.is_cpu:
# Torch has an issue wrapping CPU objects
# that support the __array_interface__ protocol
# in this case we need to workaround by going
# to an ndarray first, see https://pearu.github.io/array_interface_pytorch.html
t = torch.as_tensor(numpy.asarray(a))
t.requires_grad = requires_grad
if requires_grad and a.requires_grad:
t.grad = torch.as_tensor(numpy.asarray(a.grad))
return t
elif a.device.is_cuda:
# Torch does support the __cuda_array_interface__
# correctly, but we must be sure to maintain a reference
# to the owning object to prevent memory allocs going out of scope
t = torch.as_tensor(a, device=device_to_torch(a.device))
t.requires_grad = requires_grad
if requires_grad and a.requires_grad:
t.grad = torch.as_tensor(a.grad, device=device_to_torch(a.device))
return t
else:
raise RuntimeError("Unsupported device")
def stream_from_torch(stream_or_device=None):
"""Convert from a PyTorch CUDA stream to a Warp.Stream."""
import torch
if isinstance(stream_or_device, torch.cuda.Stream):
stream = stream_or_device
else:
# assume arg is a torch device
stream = torch.cuda.current_stream(stream_or_device)
device = device_from_torch(stream.device)
warp_stream = warp.Stream(device, cuda_stream=stream.cuda_stream)
# save a reference to the source stream, otherwise it may be destroyed
warp_stream._torch_stream = stream
return warp_stream
def stream_to_torch(stream_or_device=None):
"""Convert from a Warp.Stream to a PyTorch CUDA stream."""
import torch
if isinstance(stream_or_device, warp.Stream):
stream = stream_or_device
else:
# assume arg is a warp device
stream = warp.get_device(stream_or_device).stream
device = device_to_torch(stream.device)
torch_stream = torch.cuda.ExternalStream(stream.cuda_stream, device=device)
# save a reference to the source stream, otherwise it may be destroyed
torch_stream._warp_stream = stream
return torch_stream
| warp-main | warp/torch.py |
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
from __future__ import annotations
import ast
import builtins
import ctypes
import inspect
import re
import sys
import textwrap
import types
from typing import Any, Callable, Mapping
import warp.config
from warp.types import *
# map operator to function name
builtin_operators = {}
# see https://www.ics.uci.edu/~pattis/ICS-31/lectures/opexp.pdf for a
# nice overview of python operators
builtin_operators[ast.Add] = "add"
builtin_operators[ast.Sub] = "sub"
builtin_operators[ast.Mult] = "mul"
builtin_operators[ast.MatMult] = "mul"
builtin_operators[ast.Div] = "div"
builtin_operators[ast.FloorDiv] = "floordiv"
builtin_operators[ast.Pow] = "pow"
builtin_operators[ast.Mod] = "mod"
builtin_operators[ast.UAdd] = "pos"
builtin_operators[ast.USub] = "neg"
builtin_operators[ast.Not] = "unot"
builtin_operators[ast.Gt] = ">"
builtin_operators[ast.Lt] = "<"
builtin_operators[ast.GtE] = ">="
builtin_operators[ast.LtE] = "<="
builtin_operators[ast.Eq] = "=="
builtin_operators[ast.NotEq] = "!="
builtin_operators[ast.BitAnd] = "bit_and"
builtin_operators[ast.BitOr] = "bit_or"
builtin_operators[ast.BitXor] = "bit_xor"
builtin_operators[ast.Invert] = "invert"
builtin_operators[ast.LShift] = "lshift"
builtin_operators[ast.RShift] = "rshift"
def get_annotations(obj: Any) -> Mapping[str, Any]:
"""Alternative to `inspect.get_annotations()` for Python 3.9 and older."""
# See https://docs.python.org/3/howto/annotations.html#accessing-the-annotations-dict-of-an-object-in-python-3-9-and-older
if isinstance(obj, type):
return obj.__dict__.get("__annotations__", {})
return getattr(obj, "__annotations__", {})
def struct_instance_repr_recursive(inst: StructInstance, depth: int) -> str:
indent = "\t"
if inst._cls.ctype._fields_ == [("_dummy_", ctypes.c_int)]:
return f"{inst._cls.key}()"
lines = []
lines.append(f"{inst._cls.key}(")
for field_name, _ in inst._cls.ctype._fields_:
if field_name == "_dummy_":
continue
field_value = getattr(inst, field_name, None)
if isinstance(field_value, StructInstance):
field_value = struct_instance_repr_recursive(field_value, depth + 1)
lines.append(f"{indent * (depth + 1)}{field_name}={field_value},")
lines.append(f"{indent * depth})")
return "\n".join(lines)
class StructInstance:
def __init__(self, cls: Struct, ctype):
super().__setattr__("_cls", cls)
# maintain a c-types object for the top-level instance the struct
if not ctype:
super().__setattr__("_ctype", cls.ctype())
else:
super().__setattr__("_ctype", ctype)
# create Python attributes for each of the struct's variables
for field, var in cls.vars.items():
if isinstance(var.type, warp.codegen.Struct):
self.__dict__[field] = StructInstance(var.type, getattr(self._ctype, field))
elif isinstance(var.type, warp.types.array):
self.__dict__[field] = None
else:
self.__dict__[field] = var.type()
def __setattr__(self, name, value):
if name not in self._cls.vars:
raise RuntimeError(f"Trying to set Warp struct attribute that does not exist {name}")
var = self._cls.vars[name]
# update our ctype flat copy
if isinstance(var.type, array):
if value is None:
# create array with null pointer
setattr(self._ctype, name, array_t())
else:
# wp.array
assert isinstance(value, array)
assert types_equal(
value.dtype, var.type.dtype
), "assign to struct member variable {} failed, expected type {}, got type {}".format(
name, type_repr(var.type.dtype), type_repr(value.dtype)
)
setattr(self._ctype, name, value.__ctype__())
elif isinstance(var.type, Struct):
# assign structs by-value, otherwise we would have problematic cases transferring ownership
# of the underlying ctypes data between shared Python struct instances
if not isinstance(value, StructInstance):
raise RuntimeError(
f"Trying to assign a non-structure value to a struct attribute with type: {self._cls.key}"
)
# destination attribute on self
dest = getattr(self, name)
if dest._cls.key is not value._cls.key:
raise RuntimeError(
f"Trying to assign a structure of type {value._cls.key} to an attribute of {self._cls.key}"
)
# update all nested ctype vars by deep copy
for n in dest._cls.vars:
setattr(dest, n, getattr(value, n))
# early return to avoid updating our Python StructInstance
return
elif issubclass(var.type, ctypes.Array):
# vector/matrix type, e.g. vec3
if value is None:
setattr(self._ctype, name, var.type())
elif types_equal(type(value), var.type):
setattr(self._ctype, name, value)
else:
# conversion from list/tuple, ndarray, etc.
setattr(self._ctype, name, var.type(value))
else:
# primitive type
if value is None:
# zero initialize
setattr(self._ctype, name, var.type._type_())
else:
if hasattr(value, "_type_"):
# assigning warp type value (e.g.: wp.float32)
value = value.value
# float16 needs conversion to uint16 bits
if var.type == warp.float16:
setattr(self._ctype, name, float_to_half_bits(value))
else:
setattr(self._ctype, name, value)
# update Python instance
super().__setattr__(name, value)
def __ctype__(self):
return self._ctype
def __repr__(self):
return struct_instance_repr_recursive(self, 0)
# type description used in numpy structured arrays
def numpy_dtype(self):
return self._cls.numpy_dtype()
# value usable in numpy structured arrays of .numpy_dtype(), e.g. (42, 13.37, [1.0, 2.0, 3.0])
def numpy_value(self):
npvalue = []
for name, var in self._cls.vars.items():
# get the attribute value
value = getattr(self._ctype, name)
if isinstance(var.type, array):
# array_t
npvalue.append(value.numpy_value())
elif isinstance(var.type, Struct):
# nested struct
npvalue.append(value.numpy_value())
elif issubclass(var.type, ctypes.Array):
if len(var.type._shape_) == 1:
# vector
npvalue.append(list(value))
else:
# matrix
npvalue.append([list(row) for row in value])
else:
# scalar
if var.type == warp.float16:
npvalue.append(half_bits_to_float(value))
else:
npvalue.append(value)
return tuple(npvalue)
class Struct:
def __init__(self, cls, key, module):
self.cls = cls
self.module = module
self.key = key
self.vars = {}
annotations = get_annotations(self.cls)
for label, type in annotations.items():
self.vars[label] = Var(label, type)
fields = []
for label, var in self.vars.items():
if isinstance(var.type, array):
fields.append((label, array_t))
elif isinstance(var.type, Struct):
fields.append((label, var.type.ctype))
elif issubclass(var.type, ctypes.Array):
fields.append((label, var.type))
else:
fields.append((label, var.type._type_))
class StructType(ctypes.Structure):
# if struct is empty, add a dummy field to avoid launch errors on CPU device ("ffi_prep_cif failed")
_fields_ = fields or [("_dummy_", ctypes.c_int)]
self.ctype = StructType
# create default constructor (zero-initialize)
self.default_constructor = warp.context.Function(
func=None,
key=self.key,
namespace="",
value_func=lambda *_: self,
input_types={},
initializer_list_func=lambda *_: False,
native_func=make_full_qualified_name(self.cls),
)
# build a constructor that takes each param as a value
input_types = {label: var.type for label, var in self.vars.items()}
self.value_constructor = warp.context.Function(
func=None,
key=self.key,
namespace="",
value_func=lambda *_: self,
input_types=input_types,
initializer_list_func=lambda *_: False,
native_func=make_full_qualified_name(self.cls),
)
self.default_constructor.add_overload(self.value_constructor)
if module:
module.register_struct(self)
def __call__(self):
"""
This function returns s = StructInstance(self).
s uses self.cls as a template.
To enable autocomplete on s, we inherit from self.cls.
For example,
@wp.struct
class A:
# annotations
...
The type annotations are inherited in A(), allowing autocomplete in kernels
"""
# return StructInstance(self)
class NewStructInstance(self.cls, StructInstance):
def __init__(inst):
StructInstance.__init__(inst, self, None)
return NewStructInstance()
def initializer(self):
return self.default_constructor
# return structured NumPy dtype, including field names, formats, and offsets
def numpy_dtype(self):
names = []
formats = []
offsets = []
for name, var in self.vars.items():
names.append(name)
offsets.append(getattr(self.ctype, name).offset)
if isinstance(var.type, array):
# array_t
formats.append(array_t.numpy_dtype())
elif isinstance(var.type, Struct):
# nested struct
formats.append(var.type.numpy_dtype())
elif issubclass(var.type, ctypes.Array):
scalar_typestr = type_typestr(var.type._wp_scalar_type_)
if len(var.type._shape_) == 1:
# vector
formats.append(f"{var.type._length_}{scalar_typestr}")
else:
# matrix
formats.append(f"{var.type._shape_}{scalar_typestr}")
else:
# scalar
formats.append(type_typestr(var.type))
return {"names": names, "formats": formats, "offsets": offsets, "itemsize": ctypes.sizeof(self.ctype)}
# constructs a Warp struct instance from a pointer to the ctype
def from_ptr(self, ptr):
if not ptr:
raise RuntimeError("NULL pointer exception")
# create a new struct instance
instance = self()
for name, var in self.vars.items():
offset = getattr(self.ctype, name).offset
if isinstance(var.type, array):
# We could reconstruct wp.array from array_t, but it's problematic.
# There's no guarantee that the original wp.array is still allocated and
# no easy way to make a backref.
# Instead, we just create a stub annotation, which is not a fully usable array object.
setattr(instance, name, array(dtype=var.type.dtype, ndim=var.type.ndim))
elif isinstance(var.type, Struct):
# nested struct
value = var.type.from_ptr(ptr + offset)
setattr(instance, name, value)
elif issubclass(var.type, ctypes.Array):
# vector/matrix
value = var.type.from_ptr(ptr + offset)
setattr(instance, name, value)
else:
# scalar
cvalue = ctypes.cast(ptr + offset, ctypes.POINTER(var.type._type_)).contents
if var.type == warp.float16:
setattr(instance, name, half_bits_to_float(cvalue))
else:
setattr(instance, name, cvalue.value)
return instance
def compute_type_str(base_name, template_params):
if template_params is None or len(template_params) == 0:
return base_name
else:
def param2str(p):
if isinstance(p, int):
return str(p)
return p.__name__
return f"{base_name}<{','.join(map(param2str, template_params))}>"
class Var:
def __init__(self, label, type, requires_grad=False, constant=None, prefix=True, is_adjoint=False):
# convert built-in types to wp types
if type == float:
type = float32
elif type == int:
type = int32
self.label = label
self.type = type
self.requires_grad = requires_grad
self.constant = constant
self.prefix = prefix
self.is_adjoint = is_adjoint
def __str__(self):
return self.label
def ctype(self):
if is_array(self.type):
if hasattr(self.type.dtype, "_wp_generic_type_str_"):
dtypestr = compute_type_str(self.type.dtype._wp_generic_type_str_, self.type.dtype._wp_type_params_)
elif isinstance(self.type.dtype, Struct):
dtypestr = make_full_qualified_name(self.type.dtype.cls)
else:
dtypestr = str(self.type.dtype.__name__)
classstr = type(self.type).__name__
return f"{classstr}_t<{dtypestr}>"
elif isinstance(self.type, Struct):
return make_full_qualified_name(self.type.cls)
elif hasattr(self.type, "_wp_generic_type_str_"):
return compute_type_str(self.type._wp_generic_type_str_, self.type._wp_type_params_)
else:
return str(self.type.__name__)
def emit(self, prefix: str = "var"):
if self.prefix:
return f"{prefix}_{self.label}"
else:
return self.label
class Block:
# Represents a basic block of instructions, e.g.: list
# of straight line instructions inside a for-loop or conditional
def __init__(self):
# list of statements inside this block
self.body_forward = []
self.body_replay = []
self.body_reverse = []
# list of vars declared in this block
self.vars = []
class Adjoint:
# Source code transformer, this class takes a Python function and
# generates forward and backward SSA forms of the function instructions
def __init__(
adj,
func,
overload_annotations=None,
is_user_function=False,
skip_forward_codegen=False,
skip_reverse_codegen=False,
custom_reverse_mode=False,
custom_reverse_num_input_args=-1,
transformers: List[ast.NodeTransformer] = [],
):
adj.func = func
adj.is_user_function = is_user_function
# whether the generation of the forward code is skipped for this function
adj.skip_forward_codegen = skip_forward_codegen
# whether the generation of the adjoint code is skipped for this function
adj.skip_reverse_codegen = skip_reverse_codegen
# build AST from function object
adj.source = inspect.getsource(func)
# get source code lines and line number where function starts
adj.raw_source, adj.fun_lineno = inspect.getsourcelines(func)
# keep track of line number in function code
adj.lineno = None
# ensures that indented class methods can be parsed as kernels
adj.source = textwrap.dedent(adj.source)
# extract name of source file
adj.filename = inspect.getsourcefile(func) or "unknown source file"
# build AST and apply node transformers
adj.tree = ast.parse(adj.source)
adj.transformers = transformers
for transformer in transformers:
adj.tree = transformer.visit(adj.tree)
adj.fun_name = adj.tree.body[0].name
# whether the forward code shall be used for the reverse pass and a custom
# function signature is applied to the reverse version of the function
adj.custom_reverse_mode = custom_reverse_mode
# the number of function arguments that pertain to the forward function
# input arguments (i.e. the number of arguments that are not adjoint arguments)
adj.custom_reverse_num_input_args = custom_reverse_num_input_args
# parse argument types
argspec = inspect.getfullargspec(func)
# ensure all arguments are annotated
if overload_annotations is None:
# use source-level argument annotations
if len(argspec.annotations) < len(argspec.args):
raise RuntimeError(f"Incomplete argument annotations on function {adj.fun_name}")
adj.arg_types = argspec.annotations
else:
# use overload argument annotations
for arg_name in argspec.args:
if arg_name not in overload_annotations:
raise RuntimeError(f"Incomplete overload annotations for function {adj.fun_name}")
adj.arg_types = overload_annotations.copy()
adj.args = []
for name, type in adj.arg_types.items():
# skip return hint
if name == "return":
continue
# add variable for argument
arg = Var(name, type, False)
adj.args.append(arg)
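# Rough usage sketch (hypothetical, for illustration): for a function like
#
#   def my_func(a: float32, b: float32):
#       return a * b
#
# Adjoint(my_func) parses the source with inspect/ast and records one Var
# per annotated argument; build() below then walks the AST and emits the
# forward and reverse (adjoint) statements in SSA form.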
# generate function ssa form and adjoint
def build(adj, builder):
adj.builder = builder
adj.symbols = {} # map from symbols to adjoint variables
adj.variables = [] # list of local variables (in order)
adj.return_var = None # return type for function or kernel
adj.loop_symbols = [] # symbols at the start of each loop
# blocks
adj.blocks = [Block()]
adj.loop_blocks = []
# holds current indent level
adj.prefix = ""
# used to generate new label indices
adj.label_count = 0
# update symbol map for each argument
for a in adj.args:
adj.symbols[a.label] = a
# recursively evaluate function body
try:
adj.eval(adj.tree.body[0])
except Exception as e:
try:
lineno = adj.lineno + adj.fun_lineno
line = adj.source.splitlines()[adj.lineno]
msg = f'Error while parsing function "{adj.fun_name}" at {adj.filename}:{lineno}:\n{line}\n'
ex, data, traceback = sys.exc_info()
e = ex("".join([msg] + list(data.args))).with_traceback(traceback)
finally:
raise e
if builder is not None:
for a in adj.args:
if isinstance(a.type, Struct):
builder.build_struct_recursive(a.type)
elif isinstance(a.type, warp.types.array) and isinstance(a.type.dtype, Struct):
builder.build_struct_recursive(a.type.dtype)
# code generation methods
def format_template(adj, template, input_vars, output_var):
# output var is always the 0th index
args = [output_var] + input_vars
s = template.format(*args)
return s
# generates a list of formatted args
def format_args(adj, prefix, args):
arg_strs = []
for a in args:
if type(a) == warp.context.Function:
# functions don't have a var_ prefix so strip it off here
if prefix == "var":
arg_strs.append(a.key)
else:
arg_strs.append(f"{prefix}_{a.key}")
elif isinstance(a, Var):
arg_strs.append(a.emit(prefix))
else:
arg_strs.append(f"{prefix}_{a}")
return arg_strs
# generates argument string for a forward function call
def format_forward_call_args(adj, args, use_initializer_list):
arg_str = ", ".join(adj.format_args("var", args))
if use_initializer_list:
return "{{{}}}".format(arg_str)
return arg_str
# generates argument string for a reverse function call
def format_reverse_call_args(
adj, args, args_out, non_adjoint_args, non_adjoint_outputs, use_initializer_list, has_output_args=True
):
formatted_var = adj.format_args("var", args)
formatted_out = []
if has_output_args and len(args_out) > 1:
formatted_out = adj.format_args("var", args_out)
formatted_var_adj = adj.format_args(
"&adj" if use_initializer_list else "adj", [a for i, a in enumerate(args) if i not in non_adjoint_args]
)
formatted_out_adj = adj.format_args("adj", [a for i, a in enumerate(args_out) if i not in non_adjoint_outputs])
if len(formatted_var_adj) == 0 and len(formatted_out_adj) == 0:
# there are no adjoint arguments, so we don't need to call the reverse function
return None
if use_initializer_list:
var_str = "{{{}}}".format(", ".join(formatted_var))
out_str = "{{{}}}".format(", ".join(formatted_out))
adj_str = "{{{}}}".format(", ".join(formatted_var_adj))
out_adj_str = ", ".join(formatted_out_adj)
if len(args_out) > 1:
arg_str = ", ".join([var_str, out_str, adj_str, out_adj_str])
else:
arg_str = ", ".join([var_str, adj_str, out_adj_str])
else:
arg_str = ", ".join(formatted_var + formatted_out + formatted_var_adj + formatted_out_adj)
return arg_str
def indent(adj):
adj.prefix = adj.prefix + " "
def dedent(adj):
adj.prefix = adj.prefix[:-4]
def begin_block(adj):
b = Block()
# give block a unique id
b.label = adj.label_count
adj.label_count += 1
adj.blocks.append(b)
return b
def end_block(adj):
return adj.blocks.pop()
def add_var(adj, type=None, constant=None, name=None):
if name is None:
index = len(adj.variables)
name = str(index)
# allocate new variable
v = Var(name, type=type, constant=constant)
adj.variables.append(v)
adj.blocks[-1].vars.append(v)
return v
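# Sketch (illustrative): unnamed locals are numbered in creation order, so
# three successive adj.add_var(float32) calls yield variables that emit as
# var_0, var_1 and var_2 in the generated C++.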
# append a statement to the forward pass
def add_forward(adj, statement, replay=None, skip_replay=False):
adj.blocks[-1].body_forward.append(adj.prefix + statement)
if not skip_replay:
if replay:
# if custom replay specified then output it
adj.blocks[-1].body_replay.append(adj.prefix + replay)
else:
# by default just replay the original statement
adj.blocks[-1].body_replay.append(adj.prefix + statement)
# append a statement to the reverse pass
def add_reverse(adj, statement):
adj.blocks[-1].body_reverse.append(adj.prefix + statement)
def add_constant(adj, n):
output = adj.add_var(type=type(n), constant=n)
return output
def add_comp(adj, op_strings, left, comps):
output = adj.add_var(builtins.bool)
s = "var_" + str(output) + " = " + ("(" * len(comps)) + "var_" + str(left) + " "
for op, comp in zip(op_strings, comps):
s += op + " var_" + str(comp) + ") "
s = s.rstrip() + ";"
adj.add_forward(s)
return output
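# Illustrative lowering: a chained comparison `a < b < c` is folded
# left-to-right into something like
#   var_2 = ((var_a < var_b) < var_c);
# i.e. the opening parentheses are emitted up front and one is closed per
# comparator, matching the string construction above.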
def add_bool_op(adj, op_string, exprs):
output = adj.add_var(builtins.bool)
command = (
"var_" + str(output) + " = " + (" " + op_string + " ").join(["var_" + str(expr) for expr in exprs]) + ";"
)
adj.add_forward(command)
return output
def add_call(adj, func, args, min_outputs=None, templates=[], kwds=None):
# if func is overloaded then perform overload resolution here
# we validate argument types before they go to generated native code
resolved_func = None
if func.is_builtin():
for f in func.overloads:
match = True
# skip type checking for variadic functions
if not f.variadic:
# check that argument counts are compatible (there may be default args)
if len(f.input_types) < len(args):
match = False
continue
# check argument types equal
for i, (arg_name, arg_type) in enumerate(f.input_types.items()):
# if arg type registered as Any, treat as
# template allowing any type to match
if arg_type == Any:
continue
# handle function refs as a special case
if arg_type == Callable and type(args[i]) is warp.context.Function:
continue
# look for default values for missing args
if i >= len(args):
if arg_name not in f.defaults:
match = False
break
else:
# otherwise check arg type matches input variable type
if not types_equal(arg_type, args[i].type, match_generic=True):
match = False
break
# check output dimensions match expectations
if min_outputs:
try:
value_type = f.value_func(args, kwds, templates)
if len(value_type) != min_outputs:
match = False
continue
except Exception:
# value func may fail if the user has given
# incorrect args, so we need to catch this
match = False
continue
# found a match, use it
if match:
resolved_func = f
break
else:
# user-defined function
arg_types = [a.type for a in args]
resolved_func = func.get_overload(arg_types)
if resolved_func is None:
arg_types = []
for x in args:
if isinstance(x, Var):
# shorten Warp primitive type names
if isinstance(x.type, list):
if len(x.type) != 1:
raise Exception("Argument must not be the result from a multi-valued function")
arg_type = x.type[0]
else:
arg_type = x.type
if arg_type.__module__ == "warp.types":
arg_types.append(arg_type.__name__)
else:
arg_types.append(arg_type.__module__ + "." + arg_type.__name__)
if isinstance(x, warp.context.Function):
arg_types.append("function")
raise Exception(
f"Couldn't find function overload for '{func.key}' that matched inputs with types: [{', '.join(arg_types)}]"
)
else:
func = resolved_func
# push any default values onto args
for i, (arg_name, arg_type) in enumerate(func.input_types.items()):
if i >= len(args):
if arg_name in func.defaults:
const = adj.add_constant(func.defaults[arg_name])
args.append(const)
else:
match = False
break
# if it is a user-function then build it recursively
if not func.is_builtin():
adj.builder.build_function(func)
# evaluate the function type based on inputs
value_type = func.value_func(args, kwds, templates)
func_name = compute_type_str(func.native_func, templates)
use_initializer_list = func.initializer_list_func(args, templates)
if value_type is None:
# handles expression (zero output) functions, e.g.: void do_something();
forward_call = "{}{}({});".format(
func.namespace, func_name, adj.format_forward_call_args(args, use_initializer_list)
)
replay_call = forward_call
if func.custom_replay_func is not None:
replay_call = "{}replay_{}({});".format(
func.namespace, func_name, adj.format_forward_call_args(args, use_initializer_list)
)
if func.skip_replay:
adj.add_forward(forward_call, replay="// " + replay_call)
else:
adj.add_forward(forward_call, replay=replay_call)
if not func.missing_grad and len(args):
arg_str = adj.format_reverse_call_args(args, [], {}, {}, use_initializer_list)
if arg_str is not None:
reverse_call = "{}adj_{}({});".format(func.namespace, func.native_func, arg_str)
adj.add_reverse(reverse_call)
return None
elif not isinstance(value_type, list) or len(value_type) == 1:
# handle simple function (one output)
if isinstance(value_type, list):
value_type = value_type[0]
output = adj.add_var(value_type)
forward_call = "var_{} = {}{}({});".format(
output, func.namespace, func_name, adj.format_forward_call_args(args, use_initializer_list)
)
replay_call = forward_call
if func.custom_replay_func is not None:
replay_call = "var_{} = {}replay_{}({});".format(
output, func.namespace, func_name, adj.format_forward_call_args(args, use_initializer_list)
)
if func.skip_replay:
adj.add_forward(forward_call, replay="// " + replay_call)
else:
adj.add_forward(forward_call, replay=replay_call)
if not func.missing_grad and len(args):
arg_str = adj.format_reverse_call_args(args, [output], {}, {}, use_initializer_list)
if arg_str is not None:
reverse_call = "{}adj_{}({});".format(func.namespace, func.native_func, arg_str)
adj.add_reverse(reverse_call)
return output
else:
# handle multiple value functions
output = [adj.add_var(v) for v in value_type]
forward_call = "{}{}({});".format(
func.namespace, func_name, adj.format_forward_call_args(args + output, use_initializer_list)
)
adj.add_forward(forward_call)
if not func.missing_grad and len(args):
arg_str = adj.format_reverse_call_args(
args, output, {}, {}, use_initializer_list, has_output_args=func.custom_grad_func is None
)
if arg_str is not None:
reverse_call = "{}adj_{}({});".format(func.namespace, func.native_func, arg_str)
adj.add_reverse(reverse_call)
if len(output) == 1:
return output[0]
return output
def add_return(adj, var):
if var is None or len(var) == 0:
adj.add_forward("return;", "goto label{};".format(adj.label_count))
elif len(var) == 1:
adj.add_forward("return var_{};".format(var[0]), "goto label{};".format(adj.label_count))
adj.add_reverse("adj_" + str(var[0]) + " += adj_ret;")
else:
for i, v in enumerate(var):
adj.add_forward("ret_{} = var_{};".format(i, v))
adj.add_reverse("adj_{} += adj_ret_{};".format(v, i))
adj.add_forward("return;", "goto label{};".format(adj.label_count))
adj.add_reverse("label{}:;".format(adj.label_count))
adj.label_count += 1
# define an if statement
def begin_if(adj, cond):
adj.add_forward("if (var_{}) {{".format(cond))
adj.add_reverse("}")
adj.indent()
def end_if(adj, cond):
adj.dedent()
adj.add_forward("}")
adj.add_reverse(f"if (var_{cond}) {{")
def begin_else(adj, cond):
adj.add_forward(f"if (!var_{cond}) {{")
adj.add_reverse("}")
adj.indent()
def end_else(adj, cond):
adj.dedent()
adj.add_forward("}")
adj.add_reverse(f"if (!var_{cond}) {{")
# define a for-loop
def begin_for(adj, iter):
cond_block = adj.begin_block()
adj.loop_blocks.append(cond_block)
adj.add_forward(f"for_start_{cond_block.label}:;")
adj.indent()
# evaluate cond
adj.add_forward(f"if (iter_cmp(var_{iter}) == 0) goto for_end_{cond_block.label};")
# evaluate iter
val = adj.add_call(warp.context.builtin_functions["iter_next"], [iter])
adj.begin_block()
return val
def end_for(adj, iter):
body_block = adj.end_block()
cond_block = adj.end_block()
adj.loop_blocks.pop()
####################
# forward pass
for i in cond_block.body_forward:
adj.blocks[-1].body_forward.append(i)
for i in body_block.body_forward:
adj.blocks[-1].body_forward.append(i)
adj.add_forward(f"goto for_start_{cond_block.label};", skip_replay=True)
adj.dedent()
adj.add_forward(f"for_end_{cond_block.label}:;", skip_replay=True)
####################
# reverse pass
reverse = []
# reverse iterator
reverse.append(adj.prefix + f"var_{iter} = wp::iter_reverse(var_{iter});")
for i in cond_block.body_forward:
reverse.append(i)
# zero adjoints
for i in body_block.vars:
if isinstance(i.type, Struct):
reverse.append(adj.prefix + f"\tadj_{i} = {i.ctype()}{{}};")
else:
reverse.append(adj.prefix + f"\tadj_{i} = {i.ctype()}(0);")
# replay
for i in body_block.body_replay:
reverse.append(i)
# reverse
for i in reversed(body_block.body_reverse):
reverse.append(i)
reverse.append(adj.prefix + f"\tgoto for_start_{cond_block.label};")
reverse.append(adj.prefix + f"for_end_{cond_block.label}:;")
adj.blocks[-1].body_reverse.extend(reversed(reverse))
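# Sketch of the emitted reverse pass for a dynamic loop (illustrative;
# label numbers are hypothetical):
#   var_i = wp::iter_reverse(var_i);
#   for_start_0:;
#       if (iter_cmp(var_i) == 0) goto for_end_0;
#       ... zero local adjoints, replay forward body, run reverse body ...
#       goto for_start_0;
#   for_end_0:;
# The list is assembled back-to-front and flipped with reversed() because
# body_reverse itself is traversed in reverse order at codegen time.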
# define a while loop
def begin_while(adj, cond):
# evaluate condition in its own block
# so we can control replay
cond_block = adj.begin_block()
adj.loop_blocks.append(cond_block)
cond_block.body_forward.append(f"while_start_{cond_block.label}:;")
c = adj.eval(cond)
cond_block.body_forward.append(f"if ((var_{c}) == false) goto while_end_{cond_block.label};")
# begin a block around the loop body
adj.begin_block()
adj.indent()
def end_while(adj):
adj.dedent()
body_block = adj.end_block()
cond_block = adj.end_block()
adj.loop_blocks.pop()
####################
# forward pass
for i in cond_block.body_forward:
adj.blocks[-1].body_forward.append(i)
for i in body_block.body_forward:
adj.blocks[-1].body_forward.append(i)
adj.blocks[-1].body_forward.append(f"goto while_start_{cond_block.label};")
adj.blocks[-1].body_forward.append(f"while_end_{cond_block.label}:;")
####################
# reverse pass
reverse = []
# cond
for i in cond_block.body_forward:
reverse.append(i)
# zero adjoints of local vars
for i in body_block.vars:
if isinstance(i.type, Struct):
reverse.append(f"adj_{i} = {i.ctype()}{{}};")
else:
reverse.append(f"adj_{i} = {i.ctype()}(0);")
# replay
for i in body_block.body_replay:
reverse.append(i)
# reverse
for i in reversed(body_block.body_reverse):
reverse.append(i)
reverse.append(f"goto while_start_{cond_block.label};")
reverse.append(f"while_end_{cond_block.label}:;")
# output
adj.blocks[-1].body_reverse.extend(reversed(reverse))
def emit_FunctionDef(adj, node):
for f in node.body:
adj.eval(f)
def emit_If(adj, node):
if len(node.body) == 0:
return None
# eval condition
cond = adj.eval(node.test)
# save symbol map
symbols_prev = adj.symbols.copy()
# eval body
adj.begin_if(cond)
for stmt in node.body:
adj.eval(stmt)
adj.end_if(cond)
# detect existing symbols with conflicting definitions (variables assigned inside the branch)
# and resolve with a phi (select) function
for sym, var1 in symbols_prev.items():
var2 = adj.symbols[sym]
if var1 != var2:
# insert a phi function that selects var1, var2 based on cond
out = adj.add_call(warp.context.builtin_functions["select"], [cond, var1, var2])
adj.symbols[sym] = out
symbols_prev = adj.symbols.copy()
# evaluate 'else' statement as if (!cond)
if len(node.orelse) > 0:
adj.begin_else(cond)
for stmt in node.orelse:
adj.eval(stmt)
adj.end_else(cond)
# detect existing symbols with conflicting definitions (variables assigned inside the else)
# and resolve with a phi (select) function
for sym, var1 in symbols_prev.items():
var2 = adj.symbols[sym]
if var1 != var2:
# insert a phi function that selects var1, var2 based on cond
# note the reversed order of vars since we want to use !cond as our select
out = adj.add_call(warp.context.builtin_functions["select"], [cond, var2, var1])
adj.symbols[sym] = out
def emit_Compare(adj, node):
# node.left, node.ops (list of ops), node.comparators (things to compare to)
# e.g. (left ops[0] node.comparators[0]) ops[1] node.comparators[1]
left = adj.eval(node.left)
comps = [adj.eval(comp) for comp in node.comparators]
op_strings = [builtin_operators[type(op)] for op in node.ops]
return adj.add_comp(op_strings, left, comps)
def emit_BoolOp(adj, node):
# op, expr list values
op = node.op
if isinstance(op, ast.And):
func = "&&"
elif isinstance(op, ast.Or):
func = "||"
else:
raise KeyError("Op {} is not supported".format(op))
return adj.add_bool_op(func, [adj.eval(expr) for expr in node.values])
def emit_Name(adj, node):
# lookup symbol, if it has already been assigned to a variable then return the existing mapping
if node.id in adj.symbols:
return adj.symbols[node.id]
# try and resolve the name using the function's globals context (used to lookup constants + functions)
obj = adj.func.__globals__.get(node.id)
if obj is None:
# Lookup constant in captured contents
capturedvars = dict(
zip(adj.func.__code__.co_freevars, [c.cell_contents for c in (adj.func.__closure__ or [])])
)
obj = capturedvars.get(str(node.id), None)
if obj is None:
raise KeyError("Referencing undefined symbol: " + str(node.id))
if warp.types.is_value(obj):
# evaluate constant
out = adj.add_constant(obj)
adj.symbols[node.id] = out
return out
# the named object is either a function, class name, or module
# pass it back to the caller for processing
return obj
def emit_Attribute(adj, node):
try:
val = adj.eval(node.value)
if isinstance(val, types.ModuleType) or isinstance(val, type):
out = getattr(val, node.attr)
if warp.types.is_value(out):
return adj.add_constant(out)
return out
# create a Var that points to the struct attribute, i.e.: directly generates `struct.attr` when used
attr_name = val.label + "." + node.attr
attr_type = val.type.vars[node.attr].type
return Var(attr_name, attr_type)
except KeyError:
raise RuntimeError(f"Error, `{node.attr}` is not an attribute of '{val.label}' ({val.type})")
def emit_String(adj, node):
# string constant
return adj.add_constant(node.s)
def emit_Num(adj, node):
# lookup constant, if it has already been assigned then return existing var
key = (node.n, type(node.n))
if key in adj.symbols:
return adj.symbols[key]
else:
out = adj.add_constant(node.n)
adj.symbols[key] = out
return out
def emit_NameConstant(adj, node):
if node.value is True:
return adj.add_constant(True)
elif node.value is False:
return adj.add_constant(False)
elif node.value is None:
raise TypeError("None type unsupported")
def emit_Constant(adj, node):
if isinstance(node, ast.Str):
return adj.emit_String(node)
elif isinstance(node, ast.Num):
return adj.emit_Num(node)
else:
assert isinstance(node, ast.NameConstant)
return adj.emit_NameConstant(node)
def emit_BinOp(adj, node):
# evaluate binary operator arguments
left = adj.eval(node.left)
right = adj.eval(node.right)
name = builtin_operators[type(node.op)]
func = warp.context.builtin_functions[name]
return adj.add_call(func, [left, right])
def emit_UnaryOp(adj, node):
# evaluate unary op arguments
arg = adj.eval(node.operand)
name = builtin_operators[type(node.op)]
func = warp.context.builtin_functions[name]
return adj.add_call(func, [arg])
def materialize_redefinitions(adj, symbols):
# detect symbols with conflicting definitions (assigned inside the for loop)
for sym, var1 in symbols.items():
var2 = adj.symbols[sym]
if var1 != var2:
if warp.config.verbose and not adj.custom_reverse_mode:
lineno = adj.lineno + adj.fun_lineno
line = adj.source.splitlines()[adj.lineno]
msg = f'Warning: detected mutated variable {sym} during a dynamic for-loop in function "{adj.fun_name}" at {adj.filename}:{lineno}: this is a non-differentiable operation.\n{line}\n'
print(msg)
if var1.constant is not None:
raise Exception(
"Error mutating a constant {} inside a dynamic loop, use the following syntax: pi = float(3.141) to declare a dynamic variable".format(
sym
)
)
# overwrite the old variable value (violates SSA)
adj.add_call(warp.context.builtin_functions["copy"], [var1, var2])
# reset the symbol to point to the original variable
adj.symbols[sym] = var1
def emit_While(adj, node):
adj.begin_while(node.test)
adj.loop_symbols.append(adj.symbols.copy())
# eval body
for s in node.body:
adj.eval(s)
adj.materialize_redefinitions(adj.loop_symbols[-1])
adj.loop_symbols.pop()
adj.end_while()
def is_num(adj, a):
# simple constant
if isinstance(a, ast.Num):
return True
# expression of form -constant
elif isinstance(a, ast.UnaryOp) and isinstance(a.op, ast.USub) and isinstance(a.operand, ast.Num):
return True
else:
# try and resolve the expression to an object
# e.g.: wp.constant in the globals scope
obj, path = adj.resolve_path(a)
if warp.types.is_int(obj):
return True
else:
return False
def eval_num(adj, a):
if isinstance(a, ast.Num):
return a.n
elif isinstance(a, ast.UnaryOp) and isinstance(a.op, ast.USub) and isinstance(a.operand, ast.Num):
return -a.operand.n
else:
# try and resolve the expression to an object
# e.g.: wp.constant in the globals scope
obj, path = adj.resolve_path(a)
if warp.types.is_int(obj):
return obj
else:
return False
# detects whether a loop contains a break (or continue) statement
def contains_break(adj, body):
for s in body:
if isinstance(s, ast.Break):
return True
elif isinstance(s, ast.Continue):
return True
elif isinstance(s, ast.If):
if adj.contains_break(s.body):
return True
if adj.contains_break(s.orelse):
return True
else:
# note that nested for or while loops containing a break statement
# do not affect the current loop
pass
return False
# returns a constant range() if unrollable, otherwise None
def get_unroll_range(adj, loop):
if not isinstance(loop.iter, ast.Call) or not isinstance(loop.iter.func, ast.Name) or loop.iter.func.id != "range":
return None
for a in loop.iter.args:
# if all range() arguments are numeric constants we will unroll
# note that this only handles trivial constants, it will not unroll
# constant compile-time expressions e.g.: range(0, 3*2)
if not adj.is_num(a):
return None
# range(end)
if len(loop.iter.args) == 1:
start = 0
end = adj.eval_num(loop.iter.args[0])
step = 1
# range(start, end)
elif len(loop.iter.args) == 2:
start = adj.eval_num(loop.iter.args[0])
end = adj.eval_num(loop.iter.args[1])
step = 1
# range(start, end, step)
elif len(loop.iter.args) == 3:
start = adj.eval_num(loop.iter.args[0])
end = adj.eval_num(loop.iter.args[1])
step = adj.eval_num(loop.iter.args[2])
# test if we're above max unroll count
max_iters = abs(end - start) // abs(step)
max_unroll = adj.builder.options["max_unroll"]
if max_iters > max_unroll:
if warp.config.verbose:
print(
f"Warning: fixed-size loop count of {max_iters} is larger than the module 'max_unroll' limit of {max_unroll}, will generate dynamic loop."
)
return None
if adj.contains_break(loop.body):
if warp.config.verbose:
print("Warning: 'break' or 'continue' found in loop body, will generate dynamic loop.")
return None
# unroll
return range(start, end, step)
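# For example (illustrative): `for i in range(3):` has constant arguments
# and no break/continue, so range(0, 3, 1) is returned and the body is
# emitted three times; `for i in range(n):` returns None and a dynamic
# loop is generated instead.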
def emit_For(adj, node):
# try and unroll simple range() statements that use constant args
unroll_range = adj.get_unroll_range(node)
if unroll_range:
for i in unroll_range:
const_iter = adj.add_constant(i)
var_iter = adj.add_call(warp.context.builtin_functions["int"], [const_iter])
adj.symbols[node.target.id] = var_iter
# eval body
for s in node.body:
adj.eval(s)
# otherwise generate a dynamic loop
else:
# evaluate the Iterable
iter = adj.eval(node.iter)
adj.symbols[node.target.id] = adj.begin_for(iter)
# for loops should be side-effect free, here we store a copy
adj.loop_symbols.append(adj.symbols.copy())
# eval body
for s in node.body:
adj.eval(s)
adj.materialize_redefinitions(adj.loop_symbols[-1])
adj.loop_symbols.pop()
adj.end_for(iter)
def emit_Break(adj, node):
adj.materialize_redefinitions(adj.loop_symbols[-1])
adj.add_forward(f"goto for_end_{adj.loop_blocks[-1].label};")
def emit_Continue(adj, node):
adj.materialize_redefinitions(adj.loop_symbols[-1])
adj.add_forward(f"goto for_start_{adj.loop_blocks[-1].label};")
def emit_Expr(adj, node):
return adj.eval(node.value)
def emit_Call(adj, node):
# try and lookup function in globals by
# resolving path (e.g.: module.submodule.attr)
func, path = adj.resolve_path(node.func)
templates = []
if isinstance(func, warp.context.Function) is False:
if len(path) == 0:
raise RuntimeError(f"Unrecognized syntax for function call, path not valid: '{node.func}'")
attr = path[-1]
caller = func
func = None
# try and lookup function name in builtins (e.g.: using `dot` directly without wp prefix)
if attr in warp.context.builtin_functions:
func = warp.context.builtin_functions[attr]
# vector class type e.g.: wp.vec3f constructor
if func is None and hasattr(caller, "_wp_generic_type_str_"):
templates = caller._wp_type_params_
func = warp.context.builtin_functions.get(caller._wp_constructor_)
# scalar class type e.g.: wp.int8 constructor
if func is None and hasattr(caller, "__name__") and caller.__name__ in warp.context.builtin_functions:
func = warp.context.builtin_functions.get(caller.__name__)
# struct constructor
if func is None and isinstance(caller, Struct):
adj.builder.build_struct_recursive(caller)
func = caller.initializer()
if func is None:
raise RuntimeError(
f"Could not find function {'.'.join(path)} as a built-in or user-defined function. Note that user functions must be annotated with a @wp.func decorator to be called from a kernel."
)
args = []
# eval all arguments
for arg in node.args:
var = adj.eval(arg)
args.append(var)
# eval all keyword args
def kwval(kw):
if isinstance(kw.value, ast.Num):
return kw.value.n
elif isinstance(kw.value, ast.Tuple):
return tuple(adj.eval_num(e) for e in kw.value.elts)
else:
return adj.resolve_path(kw.value)[0]
kwds = {kw.arg: kwval(kw) for kw in node.keywords}
# get expected return count, e.g.: for multi-assignment
min_outputs = None
if hasattr(node, "expects"):
min_outputs = node.expects
# add var with value type from the function
out = adj.add_call(func=func, args=args, kwds=kwds, templates=templates, min_outputs=min_outputs)
return out
def emit_Index(adj, node):
# the ast.Index node appears in 3.7 versions
# when performing array slices, e.g.: x = arr[i]
# but in version 3.8 and higher it does not appear
return adj.eval(node.value)
def emit_Subscript(adj, node):
if hasattr(node.value, "attr") and node.value.attr == "adjoint":
# handle adjoint of a variable, i.e. wp.adjoint[var]
var = adj.eval(node.slice)
var_name = var.label
var = Var(f"adj_{var_name}", type=var.type, constant=None, prefix=False, is_adjoint=True)
adj.symbols[var.label] = var
return var
target = adj.eval(node.value)
indices = []
if isinstance(node.slice, ast.Tuple):
# handles the x[i,j] case (Python 3.8.x upward)
for arg in node.slice.elts:
var = adj.eval(arg)
indices.append(var)
elif isinstance(node.slice, ast.Index) and isinstance(node.slice.value, ast.Tuple):
# handles the x[i,j] case (Python 3.7.x)
for arg in node.slice.value.elts:
var = adj.eval(arg)
indices.append(var)
else:
# simple expression, e.g.: x[i]
var = adj.eval(node.slice)
indices.append(var)
if is_array(target.type):
if len(indices) == target.type.ndim:
# handles array loads (where each dimension has an index specified)
out = adj.add_call(warp.context.builtin_functions["load"], [target, *indices])
else:
# handles array views (fewer indices than dimensions)
out = adj.add_call(warp.context.builtin_functions["view"], [target, *indices])
else:
# handles non-array type indexing, e.g: vec3, mat33, etc
out = adj.add_call(warp.context.builtin_functions["index"], [target, *indices])
out.is_adjoint = target.is_adjoint
return out
def emit_Assign(adj, node):
# handle the case where we are assigning multiple output variables
if isinstance(node.targets[0], ast.Tuple):
# record the expected number of outputs on the node
# we do this so we can decide which function to
# call based on the number of expected outputs
if isinstance(node.value, ast.Call):
node.value.expects = len(node.targets[0].elts)
# evaluate values
if isinstance(node.value, ast.Tuple):
out = [adj.eval(v) for v in node.value.elts]
else:
out = adj.eval(node.value)
names = []
for v in node.targets[0].elts:
if isinstance(v, ast.Name):
names.append(v.id)
else:
raise RuntimeError(
"Multiple return functions can only assign to simple variables, e.g.: x, y = func()"
)
if len(names) != len(out):
raise RuntimeError(
"Multiple return functions need to receive all their output values, incorrect number of values to unpack (expected {}, got {})".format(
len(out), len(names)
)
)
for name, rhs in zip(names, out):
if name in adj.symbols:
if not types_equal(rhs.type, adj.symbols[name].type):
raise TypeError(
"Error, assigning to existing symbol {} ({}) with different type ({})".format(
name, adj.symbols[name].type, rhs.type
)
)
adj.symbols[name] = rhs
return out
# handles the case where we are assigning to an array index (e.g.: arr[i] = 2.0)
elif isinstance(node.targets[0], ast.Subscript):
if hasattr(node.targets[0].value, "attr") and node.targets[0].value.attr == "adjoint":
# handle adjoint of a variable, i.e. wp.adjoint[var]
src_var = adj.eval(node.targets[0].slice)
var = Var(f"adj_{src_var.label}", type=src_var.type, constant=None, prefix=False)
adj.symbols[var.label] = var
value = adj.eval(node.value)
adj.add_forward(f"{var.emit()} = {value.emit()};")
return var
target = adj.eval(node.targets[0].value)
value = adj.eval(node.value)
slice = node.targets[0].slice
indices = []
if isinstance(slice, ast.Tuple):
# handles the x[i, j] case (Python 3.8.x upward)
for arg in slice.elts:
var = adj.eval(arg)
indices.append(var)
elif isinstance(slice, ast.Index) and isinstance(slice.value, ast.Tuple):
# handles the x[i, j] case (Python 3.7.x)
for arg in slice.value.elts:
var = adj.eval(arg)
indices.append(var)
else:
# simple expression, e.g.: x[i]
var = adj.eval(slice)
indices.append(var)
if is_array(target.type):
adj.add_call(warp.context.builtin_functions["store"], [target, *indices, value])
elif type_is_vector(target.type) or type_is_matrix(target.type):
adj.add_call(warp.context.builtin_functions["indexset"], [target, *indices, value])
if warp.config.verbose and not adj.custom_reverse_mode:
lineno = adj.lineno + adj.fun_lineno
line = adj.source.splitlines()[adj.lineno]
node_source = adj.get_node_source(node.targets[0].value)
print(
f"Warning: mutating {node_source} in function {adj.fun_name} at {adj.filename}:{lineno}: this is a non-differentiable operation.\n{line}\n"
)
else:
raise RuntimeError("Can only subscript assign array, vector, and matrix types")
return var
elif isinstance(node.targets[0], ast.Name):
# symbol name
name = node.targets[0].id
# evaluate rhs
rhs = adj.eval(node.value)
# check type matches if symbol already defined
if name in adj.symbols:
if not types_equal(rhs.type, adj.symbols[name].type):
raise TypeError(
"Error, assigning to existing symbol {} ({}) with different type ({})".format(
name, adj.symbols[name].type, rhs.type
)
)
# handle simple assignment case (a = b), where we generate a value copy rather than reference
if isinstance(node.value, ast.Name):
out = adj.add_var(rhs.type)
adj.add_call(warp.context.builtin_functions["copy"], [out, rhs])
else:
out = rhs
# update symbol map (assumes lhs is a Name node)
adj.symbols[name] = out
return out
elif isinstance(node.targets[0], ast.Attribute):
rhs = adj.eval(node.value)
attr = adj.emit_Attribute(node.targets[0])
adj.add_call(warp.context.builtin_functions["copy"], [attr, rhs])
if warp.config.verbose and not adj.custom_reverse_mode:
lineno = adj.lineno + adj.fun_lineno
line = adj.source.splitlines()[adj.lineno]
msg = f'Warning: detected mutated struct {attr.label} during function "{adj.fun_name}" at {adj.filename}:{lineno}: this is a non-differentiable operation.\n{line}\n'
print(msg)
else:
raise RuntimeError("Error, unsupported assignment statement.")
def emit_Return(adj, node):
if node.value is None:
var = None
elif isinstance(node.value, ast.Tuple):
var = tuple(adj.eval(arg) for arg in node.value.elts)
else:
var = (adj.eval(node.value),)
if adj.return_var is not None:
old_ctypes = tuple(v.ctype() for v in adj.return_var)
new_ctypes = tuple(v.ctype() for v in var)
if old_ctypes != new_ctypes:
raise TypeError(
f"Error, function returned different types, previous: [{', '.join(old_ctypes)}], new [{', '.join(new_ctypes)}]"
)
else:
adj.return_var = var
adj.add_return(var)
def emit_AugAssign(adj, node):
# convert inplace operations (+=, -=, etc) to ssa form, e.g.: c = a + b
left = adj.eval(node.target)
if left.is_adjoint:
# replace augassign with assignment statement + binary op
new_node = ast.Assign(targets=[node.target], value=ast.BinOp(node.target, node.op, node.value))
adj.eval(new_node)
return
right = adj.eval(node.value)
# lookup
name = builtin_operators[type(node.op)]
func = warp.context.builtin_functions[name]
out = adj.add_call(func, [left, right])
# update symbol map
adj.symbols[node.target.id] = out
def emit_Tuple(adj, node):
# LHS for expressions, such as i, j, k = 1, 2, 3
for elem in node.elts:
adj.eval(elem)
def emit_Pass(adj, node):
pass
def eval(adj, node):
if hasattr(node, "lineno"):
adj.set_lineno(node.lineno - 1)
node_visitors = {
ast.FunctionDef: Adjoint.emit_FunctionDef,
ast.If: Adjoint.emit_If,
ast.Compare: Adjoint.emit_Compare,
ast.BoolOp: Adjoint.emit_BoolOp,
ast.Name: Adjoint.emit_Name,
ast.Attribute: Adjoint.emit_Attribute,
ast.Str: Adjoint.emit_String, # Deprecated in 3.8; use Constant
ast.Num: Adjoint.emit_Num, # Deprecated in 3.8; use Constant
ast.NameConstant: Adjoint.emit_NameConstant, # Deprecated in 3.8; use Constant
ast.Constant: Adjoint.emit_Constant,
ast.BinOp: Adjoint.emit_BinOp,
ast.UnaryOp: Adjoint.emit_UnaryOp,
ast.While: Adjoint.emit_While,
ast.For: Adjoint.emit_For,
ast.Break: Adjoint.emit_Break,
ast.Continue: Adjoint.emit_Continue,
ast.Expr: Adjoint.emit_Expr,
ast.Call: Adjoint.emit_Call,
ast.Index: Adjoint.emit_Index, # Deprecated in 3.8; Use the index value directly instead.
ast.Subscript: Adjoint.emit_Subscript,
ast.Assign: Adjoint.emit_Assign,
ast.Return: Adjoint.emit_Return,
ast.AugAssign: Adjoint.emit_AugAssign,
ast.Tuple: Adjoint.emit_Tuple,
ast.Pass: Adjoint.emit_Pass,
}
emit_node = node_visitors.get(type(node))
if emit_node is not None:
if adj.is_user_function:
if hasattr(node, "value") and hasattr(node.value, "func") and hasattr(node.value.func, "attr"):
if node.value.func.attr == "tid":
lineno = adj.lineno + adj.fun_lineno
line = adj.source.splitlines()[adj.lineno]
warp.utils.warn(
"Calling wp.tid() from a @wp.func is deprecated and will be removed in a future Warp "
"version. Instead, obtain the indices from a @wp.kernel and pass them as "
f"arguments to this function {adj.fun_name}, {adj.filename}:{lineno}:\n{line}\n",
PendingDeprecationWarning,
stacklevel=2,
)
return emit_node(adj, node)
else:
raise Exception("Error, ast node of type {} not supported".format(type(node)))
# helper to evaluate expressions of the form
# obj1.obj2.obj3.attr in the function's global scope
def resolve_path(adj, node):
modules = []
while isinstance(node, ast.Attribute):
modules.append(node.attr)
node = node.value
if isinstance(node, ast.Name):
modules.append(node.id)
# reverse list since ast presents it in backward order
path = [*reversed(modules)]
if len(path) == 0:
return None, path
# try and evaluate object path
try:
# Look up the closure info and append it to adj.func.__globals__
# in case you want to define a kernel inside a function and refer
# to variables you've declared inside that function:
extract_contents = (
lambda contents: contents
if isinstance(contents, warp.context.Function) or not callable(contents)
else contents
)
capturedvars = dict(
zip(
adj.func.__code__.co_freevars,
[extract_contents(c.cell_contents) for c in (adj.func.__closure__ or [])],
)
)
vars_dict = {**adj.func.__globals__, **capturedvars}
func = eval(".".join(path), vars_dict)
return func, path
except Exception:
pass
# I added this so people can, e.g., do this kind of thing
# in a kernel:
# v = vec3(0.0,0.2,0.4)
# vec3 is now an alias and is not in warp.context.builtin_functions.
# This means it can't be directly looked up in Adjoint.add_call, and
# needs to be looked up by digging some information out of the
# python object it actually came from.
# Before this fix, resolve_path was returning None, as the
# "vec3" symbol is not available. In this situation I'm assuming
# it's a member of the warp module and trying to look it up:
try:
evalstr = ".".join(["warp"] + path)
func = eval(evalstr, {"warp": warp})
return func, path
except Exception:
return None, path
# annotate generated code with the original source code line
def set_lineno(adj, lineno):
if adj.lineno is None or adj.lineno != lineno:
line = lineno + adj.fun_lineno
source = adj.raw_source[lineno].strip().ljust(80 - len(adj.prefix), " ")
adj.add_forward(f"// {source} <L {line}>")
adj.add_reverse(f"// adj: {source} <L {line}>")
adj.lineno = lineno
def get_node_source(adj, node):
# return the Python code corresponding to the given AST node
return ast.get_source_segment("".join(adj.raw_source), node)
# ----------------
# code generation
cpu_module_header = """
#define WP_NO_CRT
#include "builtin.h"
// avoid namespacing of float type for casting to float type, this is to avoid wp::float(x), which is not valid in C++
#define float(x) cast_float(x)
#define adj_float(x, adj_x, adj_ret) adj_cast_float(x, adj_x, adj_ret)
#define int(x) cast_int(x)
#define adj_int(x, adj_x, adj_ret) adj_cast_int(x, adj_x, adj_ret)
using namespace wp;
"""
cuda_module_header = """
#define WP_NO_CRT
#include "builtin.h"
// avoid namespacing of float type for casting to float type, this is to avoid wp::float(x), which is not valid in C++
#define float(x) cast_float(x)
#define adj_float(x, adj_x, adj_ret) adj_cast_float(x, adj_x, adj_ret)
#define int(x) cast_int(x)
#define adj_int(x, adj_x, adj_ret) adj_cast_int(x, adj_x, adj_ret)
using namespace wp;
"""
struct_template = """
struct {name}
{{
{struct_body}
CUDA_CALLABLE {name}({forward_args})
{forward_initializers}
{{
}}
CUDA_CALLABLE {name}& operator += (const {name}&) {{ return *this; }}
}};
static CUDA_CALLABLE void adj_{name}({reverse_args})
{{
{reverse_body}}}
CUDA_CALLABLE void adj_atomic_add({name}* p, {name} t)
{{
{atomic_add_body}}}
"""
cpu_forward_function_template = """
// {filename}:{lineno}
static {return_type} {name}(
{forward_args})
{{
{forward_body}}}
"""
cpu_reverse_function_template = """
// {filename}:{lineno}
static void adj_{name}(
{reverse_args})
{{
{reverse_body}}}
"""
cuda_forward_function_template = """
// {filename}:{lineno}
static CUDA_CALLABLE {return_type} {name}(
{forward_args})
{{
{forward_body}}}
"""
cuda_reverse_function_template = """
// {filename}:{lineno}
static CUDA_CALLABLE void adj_{name}(
{reverse_args})
{{
{reverse_body}}}
"""
cuda_kernel_template = """
extern "C" __global__ void {name}_cuda_kernel_forward(
{forward_args})
{{
size_t _idx = grid_index();
if (_idx >= dim.size)
return;
set_launch_bounds(dim);
{forward_body}}}
extern "C" __global__ void {name}_cuda_kernel_backward(
{reverse_args})
{{
size_t _idx = grid_index();
if (_idx >= dim.size)
return;
set_launch_bounds(dim);
{reverse_body}}}
"""
cpu_kernel_template = """
void {name}_cpu_kernel_forward(
{forward_args})
{{
{forward_body}}}
void {name}_cpu_kernel_backward(
{reverse_args})
{{
{reverse_body}}}
"""
cpu_module_template = """
extern "C" {{
// Python CPU entry points
WP_API void {name}_cpu_forward(
{forward_args})
{{
set_launch_bounds(dim);
for (size_t i=0; i < dim.size; ++i)
{{
s_threadIdx = i;
{name}_cpu_kernel_forward(
{forward_params});
}}
}}
WP_API void {name}_cpu_backward(
{reverse_args})
{{
set_launch_bounds(dim);
for (size_t i=0; i < dim.size; ++i)
{{
s_threadIdx = i;
{name}_cpu_kernel_backward(
{reverse_params});
}}
}}
}} // extern C
"""
cuda_module_header_template = """
extern "C" {{
// Python CUDA entry points
WP_API void {name}_cuda_forward(
void* stream,
{forward_args});
WP_API void {name}_cuda_backward(
void* stream,
{reverse_args});
}} // extern C
"""
cpu_module_header_template = """
extern "C" {{
// Python CPU entry points
WP_API void {name}_cpu_forward(
{forward_args});
WP_API void {name}_cpu_backward(
{reverse_args});
}} // extern C
"""
# converts a constant Python value to equivalent C-repr
def constant_str(value):
value_type = type(value)
if value_type == bool or value_type == builtins.bool:
if value:
return "true"
else:
return "false"
elif value_type == str:
# ensure constant strings are correctly escaped
return '"' + str(value.encode("unicode-escape").decode()) + '"'
elif isinstance(value, ctypes.Array):
if value_type._wp_scalar_type_ == float16:
# special case for float16, which is stored as uint16 in the ctypes.Array
from warp.context import runtime
scalar_value = runtime.core.half_bits_to_float
else:
scalar_value = lambda x: x
# list of scalar initializer values
initlist = []
for i in range(value._length_):
x = ctypes.Array.__getitem__(value, i)
initlist.append(str(scalar_value(x)))
dtypestr = f"wp::initializer_array<{value._length_},wp::{value._wp_scalar_type_.__name__}>"
# construct value from initializer array, e.g. wp::initializer_array<4,wp::float32>{1.0, 2.0, 3.0, 4.0}
return f"{dtypestr}{{{', '.join(initlist)}}}"
elif value_type in warp.types.scalar_types:
# make sure we emit the value of objects, e.g. uint32
return str(value.value)
else:
# otherwise just convert constant to string
return str(value)
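# Illustrative conversions (assuming Warp's scalar types):
#   constant_str(True)             -> "true"
#   constant_str("hi")             -> '"hi"'
#   constant_str(warp.uint32(7))   -> "7"    (unwrapped via .value)
# ctypes vector/matrix values become wp::initializer_array<N,T>{...}.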
def indent(args, stops=1):
sep = ",\n"
for i in range(stops):
sep += " "
# return sep + args.replace(", ", "," + sep)
return sep.join(args)
# generates a C function name based on the python function name
def make_full_qualified_name(func):
if not isinstance(func, str):
func = func.__qualname__
return re.sub("[^0-9a-zA-Z_]+", "", func.replace(".", "__"))
def codegen_struct(struct, device="cpu", indent_size=4):
name = make_full_qualified_name(struct.cls)
body = []
indent_block = " " * indent_size
for label, var in struct.vars.items():
body.append(var.ctype() + " " + label + ";\n")
forward_args = []
reverse_args = []
forward_initializers = []
reverse_body = []
atomic_add_body = []
# forward args
for label, var in struct.vars.items():
forward_args.append(f"{var.ctype()} const& {label} = {{}}")
reverse_args.append(f"{var.ctype()} const&")
atomic_add_body.append(f"{indent_block}adj_atomic_add(&p->{label}, t.{label});\n")
prefix = f"{indent_block}," if forward_initializers else ":"
forward_initializers.append(f"{indent_block}{prefix} {label}{{{label}}}\n")
# reverse args
for label, var in struct.vars.items():
reverse_args.append(var.ctype() + " & adj_" + label)
if is_array(var.type):
reverse_body.append(f"{indent_block}adj_{label} = adj_ret.{label};\n")
else:
reverse_body.append(f"{indent_block}adj_{label} += adj_ret.{label};\n")
reverse_args.append(name + " & adj_ret")
return struct_template.format(
name=name,
struct_body="".join([indent_block + l for l in body]),
forward_args=indent(forward_args),
forward_initializers="".join(forward_initializers),
reverse_args=indent(reverse_args),
reverse_body="".join(reverse_body),
atomic_add_body="".join(atomic_add_body),
)
def codegen_func_forward_body(adj, device="cpu", indent=4):
body = []
indent_block = " " * indent
for f in adj.blocks[0].body_forward:
body += [f + "\n"]
return "".join([indent_block + l for l in body])
def codegen_func_forward(adj, func_type="kernel", device="cpu"):
s = ""
# primal vars
s += " //---------\n"
s += " // primal vars\n"
for var in adj.variables:
if var.constant is None:
s += f" {var.ctype()} {var.emit()};\n"
else:
s += f" const {var.ctype()} {var.emit()} = {constant_str(var.constant)};\n"
# forward pass
s += " //---------\n"
s += " // forward\n"
if device == "cpu":
s += codegen_func_forward_body(adj, device=device, indent=4)
elif device == "cuda":
if func_type == "kernel":
s += codegen_func_forward_body(adj, device=device, indent=8)
else:
s += codegen_func_forward_body(adj, device=device, indent=4)
return s
def codegen_func_reverse_body(adj, device="cpu", indent=4):
body = []
indent_block = " " * indent
# forward pass
body += ["//---------\n"]
body += ["// forward\n"]
for f in adj.blocks[0].body_replay:
body += [f + "\n"]
# reverse pass
body += ["//---------\n"]
body += ["// reverse\n"]
for l in reversed(adj.blocks[0].body_reverse):
body += [l + "\n"]
body += ["return;\n"]
return "".join([indent_block + l for l in body])
def codegen_func_reverse(adj, func_type="kernel", device="cpu"):
s = ""
# primal vars
s += " //---------\n"
s += " // primal vars\n"
for var in adj.variables:
if var.constant is None:
s += f" {var.ctype()} {var.emit()};\n"
else:
s += f" const {var.ctype()} {var.emit()} = {constant_str(var.constant)};\n"
# dual vars
s += " //---------\n"
s += " // dual vars\n"
for var in adj.variables:
if isinstance(var.type, Struct):
s += f" {var.ctype()} {var.emit('adj')};\n"
else:
s += f" {var.ctype()} {var.emit('adj')}(0);\n"
if device == "cpu":
s += codegen_func_reverse_body(adj, device=device, indent=4)
elif device == "cuda":
if func_type == "kernel":
s += codegen_func_reverse_body(adj, device=device, indent=8)
else:
s += codegen_func_reverse_body(adj, device=device, indent=4)
else:
raise ValueError("Device {} not supported for codegen".format(device))
return s
def codegen_func(adj, c_func_name: str, device="cpu", options={}):
# forward header
if adj.return_var is not None and len(adj.return_var) == 1:
return_type = adj.return_var[0].ctype()
else:
return_type = "void"
has_multiple_outputs = adj.return_var is not None and len(adj.return_var) != 1
forward_args = []
reverse_args = []
# forward args
for i, arg in enumerate(adj.args):
s = f"{arg.ctype()} {arg.emit()}"
forward_args.append(s)
if not adj.custom_reverse_mode or i < adj.custom_reverse_num_input_args:
reverse_args.append(s)
if has_multiple_outputs:
for i, arg in enumerate(adj.return_var):
forward_args.append(arg.ctype() + " & ret_" + str(i))
reverse_args.append(arg.ctype() + " & ret_" + str(i))
# reverse args
for i, arg in enumerate(adj.args):
if adj.custom_reverse_mode and i >= adj.custom_reverse_num_input_args:
break
# indexed array gradients are regular arrays
if isinstance(arg.type, indexedarray):
_arg = Var(arg.label, array(dtype=arg.type.dtype, ndim=arg.type.ndim))
reverse_args.append(_arg.ctype() + " & adj_" + arg.label)
else:
reverse_args.append(arg.ctype() + " & adj_" + arg.label)
if has_multiple_outputs:
for i, arg in enumerate(adj.return_var):
reverse_args.append(arg.ctype() + " & adj_ret_" + str(i))
elif return_type != "void":
reverse_args.append(return_type + " & adj_ret")
# custom output reverse args (user-declared)
if adj.custom_reverse_mode:
for arg in adj.args[adj.custom_reverse_num_input_args :]:
reverse_args.append(f"{arg.ctype()} & {arg.emit()}")
if device == "cpu":
forward_template = cpu_forward_function_template
reverse_template = cpu_reverse_function_template
elif device == "cuda":
forward_template = cuda_forward_function_template
reverse_template = cuda_reverse_function_template
else:
raise ValueError("Device {} is not supported".format(device))
# codegen body
forward_body = codegen_func_forward(adj, func_type="function", device=device)
s = ""
if not adj.skip_forward_codegen:
s += forward_template.format(
name=c_func_name,
return_type=return_type,
forward_args=indent(forward_args),
forward_body=forward_body,
filename=adj.filename,
lineno=adj.fun_lineno,
)
if not adj.skip_reverse_codegen:
if adj.custom_reverse_mode:
reverse_body = "\t// user-defined adjoint code\n" + forward_body
else:
if options.get("enable_backward", True):
reverse_body = codegen_func_reverse(adj, func_type="function", device=device)
else:
reverse_body = '\t// reverse mode disabled (module option "enable_backward" is False)\n'
s += reverse_template.format(
name=c_func_name,
return_type=return_type,
reverse_args=indent(reverse_args),
forward_body=forward_body,
reverse_body=reverse_body,
filename=adj.filename,
lineno=adj.fun_lineno,
)
return s
def codegen_kernel(kernel, device, options):
# Update the module's options with the ones defined on the kernel, if any.
options = dict(options)
options.update(kernel.options)
adj = kernel.adj
forward_args = ["launch_bounds_t dim"]
reverse_args = ["launch_bounds_t dim"]
# forward args
for arg in adj.args:
forward_args.append(arg.ctype() + " var_" + arg.label)
reverse_args.append(arg.ctype() + " var_" + arg.label)
# reverse args
for arg in adj.args:
# indexed array gradients are regular arrays
if isinstance(arg.type, indexedarray):
_arg = Var(arg.label, array(dtype=arg.type.dtype, ndim=arg.type.ndim))
reverse_args.append(_arg.ctype() + " adj_" + arg.label)
else:
reverse_args.append(arg.ctype() + " adj_" + arg.label)
# codegen body
forward_body = codegen_func_forward(adj, func_type="kernel", device=device)
if options["enable_backward"]:
reverse_body = codegen_func_reverse(adj, func_type="kernel", device=device)
else:
reverse_body = ""
if device == "cpu":
template = cpu_kernel_template
elif device == "cuda":
template = cuda_kernel_template
else:
raise ValueError("Device {} is not supported".format(device))
s = template.format(
name=kernel.get_mangled_name(),
forward_args=indent(forward_args),
reverse_args=indent(reverse_args),
forward_body=forward_body,
reverse_body=reverse_body,
)
return s
def codegen_module(kernel, device="cpu"):
if device != "cpu":
return ""
adj = kernel.adj
# build forward signature
forward_args = ["launch_bounds_t dim"]
forward_params = ["dim"]
for arg in adj.args:
if hasattr(arg.type, "_wp_generic_type_str_"):
# vectors and matrices are passed from Python by pointer
forward_args.append(f"const {arg.ctype()}* var_" + arg.label)
forward_params.append(f"*var_{arg.label}")
else:
forward_args.append(f"{arg.ctype()} var_{arg.label}")
forward_params.append("var_" + arg.label)
# build reverse signature
reverse_args = [*forward_args]
reverse_params = [*forward_params]
for arg in adj.args:
if isinstance(arg.type, indexedarray):
# indexed array gradients are regular arrays
_arg = Var(arg.label, array(dtype=arg.type.dtype, ndim=arg.type.ndim))
reverse_args.append(f"const {_arg.ctype()} adj_{arg.label}")
reverse_params.append(f"adj_{_arg.label}")
elif hasattr(arg.type, "_wp_generic_type_str_"):
# vectors and matrices are passed from Python by pointer
reverse_args.append(f"const {arg.ctype()}* adj_{arg.label}")
reverse_params.append(f"*adj_{arg.label}")
else:
reverse_args.append(f"{arg.ctype()} adj_{arg.label}")
reverse_params.append(f"adj_{arg.label}")
s = cpu_module_template.format(
name=kernel.get_mangled_name(),
forward_args=indent(forward_args),
reverse_args=indent(reverse_args),
forward_params=indent(forward_params, 3),
reverse_params=indent(reverse_params, 3),
)
return s
| warp-main | warp/codegen.py |
#
# \file generator.py
#
# \brief Generates the CUTLASS Library's instances
#
import enum
import os.path
import shutil
import functools
import operator
import collections
from library import *
###################################################################################################
#
# Data structure modeling a GEMM operation
#
###################################################################################################
#
class GemmOperation:
#
def __init__(self, gemm_kind, arch, tile_description, A, B, C, element_epilogue, \
epilogue_functor = EpilogueFunctor.LinearCombination, swizzling_functor = SwizzlingFunctor.Identity8):
self.operation_kind = OperationKind.Gemm
self.arch = arch
self.tile_description = tile_description
self.gemm_kind = gemm_kind
self.A = A
self.B = B
self.C = C
self.element_epilogue = element_epilogue
self.epilogue_functor = epilogue_functor
self.swizzling_functor = swizzling_functor
#
def is_complex(self):
complex_operators = [
MathOperation.multiply_add_complex,
MathOperation.multiply_add_complex_gaussian,
MathOperation.multiply_add_complex_fast_f32
]
return self.tile_description.math_instruction.math_operation in complex_operators
#
def is_planar_complex(self):
return self.gemm_kind in (GemmKind.PlanarComplex, GemmKind.PlanarComplexArray)
#
def accumulator_type(self):
accum = self.tile_description.math_instruction.element_accumulator
if self.is_complex():
return get_complex_from_real(accum)
return accum
#
def short_math_name(self):
if self.tile_description.math_instruction.math_operation == MathOperation.multiply_add_complex_gaussian:
return "g%s" % ShortDataTypeNames[self.accumulator_type()]
return ShortDataTypeNames[self.accumulator_type()]
#
def core_name(self):
''' The basic operation kind is prefixed with a letter indicating the accumulation type. '''
inst_shape = ''
inst_operation = ''
intermediate_type = ''
math_operations_map = {
MathOperation.xor_popc: 'xor',
}
if self.tile_description.math_instruction.opcode_class == OpcodeClass.TensorOp or \
self.tile_description.math_instruction.opcode_class == OpcodeClass.WmmaTensorOp:
math_op = self.tile_description.math_instruction.math_operation
math_op_string = math_operations_map.get(math_op, '')
inst_shape = "%d%d%d" % tuple(self.tile_description.math_instruction.instruction_shape)
inst_shape += math_op_string
if self.tile_description.math_instruction.element_a != self.A.element and \
self.tile_description.math_instruction.element_a != self.tile_description.math_instruction.element_accumulator:
intermediate_type = DataTypeNames[self.tile_description.math_instruction.element_a]
return "%s%s%s%s" % (self.short_math_name(), inst_shape, intermediate_type, GemmKindNames[self.gemm_kind])
#
def extended_name(self):
''' Append data types if they differ from compute type. '''
if self.is_complex():
extended_name = "${core_name}"
else:
if self.C.element != self.tile_description.math_instruction.element_accumulator and \
self.A.element != self.tile_description.math_instruction.element_accumulator:
extended_name = "${element_c}_${core_name}_${element_a}"
elif self.C.element == self.tile_description.math_instruction.element_accumulator and \
self.A.element != self.tile_description.math_instruction.element_accumulator:
extended_name = "${core_name}_${element_a}"
else:
extended_name = "${core_name}"
extended_name = SubstituteTemplate(extended_name, {
'element_a': DataTypeNames[self.A.element],
'element_c': DataTypeNames[self.C.element],
'core_name': self.core_name()
})
return extended_name
#
def layout_name(self):
if self.is_complex() or self.is_planar_complex():
return "%s%s" % (
ShortComplexLayoutNames[(self.A.layout, self.A.complex_transform)],
ShortComplexLayoutNames[(self.B.layout, self.B.complex_transform)]
)
return "%s%s" % (ShortLayoutTypeNames[self.A.layout], ShortLayoutTypeNames[self.B.layout])
#
def procedural_name(self):
''' The full procedural name indicates architecture, extended name, tile size, and layout. '''
threadblock = self.tile_description.procedural_name()
opcode_class_name = OpcodeClassNames[self.tile_description.math_instruction.opcode_class]
alignment = max([self.A.alignment, self.B.alignment, self.C.alignment])
return SubstituteTemplate(
"cutlass_${opcode_class}_${extended_name}_${threadblock}_${layout}_align${alignment}",
{
'opcode_class': opcode_class_name,
'extended_name': self.extended_name(),
'threadblock': threadblock,
'layout': self.layout_name(),
'alignment': "%d" % self.A.alignment,
}
)
#
def configuration_name(self):
''' The full procedural name indicates architecture, extended name, tile size, and layout. '''
return self.procedural_name()
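# Illustrative example (hypothetical field values): an SM80 f16 TensorOp
# GEMM with a 128x128x32 threadblock, TN layouts and alignment 8 produces
# a procedural name of the form
#   cutlass_tensorop_h16816gemm_128x128_32x3_tn_align8
# with each segment supplied by the template substitution above.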
###################################################################################################
#
# Data structure modeling a grouped GEMM operation
#
###################################################################################################
#
class GroupedGemmOperation(GemmOperation):
#
def __init__(self, gemm_kind, arch, tile_description, A, B, C, element_epilogue, \
epilogue_functor = EpilogueFunctor.LinearCombination, swizzling_functor = SwizzlingFunctor.Identity8, \
scheduler_mode = GroupScheduleMode.Device):
super().__init__(gemm_kind, arch, tile_description, A, B, C, element_epilogue, \
epilogue_functor, swizzling_functor)
self.scheduler_mode = scheduler_mode
#
def procedural_name(self):
''' The full procedural name indicates architecture, extended name, tile size, and layout. '''
base = super().procedural_name()
return SubstituteTemplate(
base + "_schedule${schedule}",
{
'schedule': ShortGroupScheduleModeNames[self.scheduler_mode]
})
###################################################################################################
#
# Emits single instances of a CUTLASS device-wide operator
#
###################################################################################################
#
class EmitGemmInstance:
''' Responsible for emitting a CUTLASS template definition'''
def __init__(self, operation_suffix = ''):
self.operation_suffix = operation_suffix
self.includes = []
self.gemm_template = """
// Gemm operator ${operation_name}
using Operation_${operation_name} = cutlass::gemm::device::Gemm<
${element_a}, ${layout_a},
${element_b}, ${layout_b},
${element_c}, ${layout_c},
${element_accumulator},
${opcode_class},
${arch},
cutlass::gemm::GemmShape<${threadblock_shape_m}, ${threadblock_shape_n}, ${threadblock_shape_k}>,
cutlass::gemm::GemmShape<${warp_shape_m}, ${warp_shape_n}, ${warp_shape_k}>,
cutlass::gemm::GemmShape<${instruction_shape_m}, ${instruction_shape_n}, ${instruction_shape_k}>,
${epilogue_functor}<
${element_c},
${epilogue_vector_length},
${element_accumulator},
${element_epilogue}
>,
${swizzling_functor},
${stages},
${align_a},
${align_b},
false,
${math_operation}
${residual}
>;
"""
self.gemm_complex_template = """
// Gemm operator ${operation_name}
using Operation_${operation_name} = cutlass::gemm::device::GemmComplex<
${element_a}, ${layout_a},
${element_b}, ${layout_b},
${element_c}, ${layout_c},
${element_accumulator},
${opcode_class},
${arch},
cutlass::gemm::GemmShape<${threadblock_shape_m}, ${threadblock_shape_n}, ${threadblock_shape_k}>,
cutlass::gemm::GemmShape<${warp_shape_m}, ${warp_shape_n}, ${warp_shape_k}>,
cutlass::gemm::GemmShape<${instruction_shape_m}, ${instruction_shape_n}, ${instruction_shape_k}>,
${epilogue_functor}<
${element_c},
${epilogue_vector_length},
${element_accumulator},
${element_epilogue}
>,
${swizzling_functor},
${stages},
${transform_a},
${transform_b},
${math_operation}
${residual}
>;
"""
#
def instance_template(self):
return """
${compile_guard_start}
manifest.append(new ${gemm_kind}<Operation_${operation_name}>("${operation_name}"));
${compile_guard_end}
"""
#
def emit(self, operation):
warp_shape = [operation.tile_description.threadblock_shape[idx] // operation.tile_description.warp_count[idx] for idx in range(3)]
    epilogue_vector_length = min(operation.C.alignment * DataTypeSize[operation.C.element], 128) // DataTypeSize[operation.C.element]
residual = ''
values = {
'operation_name': operation.procedural_name(),
'element_a': DataTypeTag[operation.A.element],
'layout_a': LayoutTag[operation.A.layout],
'element_b': DataTypeTag[operation.B.element],
'layout_b': LayoutTag[operation.B.layout],
'element_c': DataTypeTag[operation.C.element],
'layout_c': LayoutTag[operation.C.layout],
'element_accumulator': DataTypeTag[operation.accumulator_type()],
'opcode_class': OpcodeClassTag[operation.tile_description.math_instruction.opcode_class],
'arch': "cutlass::arch::Sm%d" % operation.arch,
'threadblock_shape_m': str(operation.tile_description.threadblock_shape[0]),
'threadblock_shape_n': str(operation.tile_description.threadblock_shape[1]),
'threadblock_shape_k': str(operation.tile_description.threadblock_shape[2]),
'warp_shape_m': str(warp_shape[0]),
'warp_shape_n': str(warp_shape[1]),
'warp_shape_k': str(warp_shape[2]),
'instruction_shape_m': str(operation.tile_description.math_instruction.instruction_shape[0]),
'instruction_shape_n': str(operation.tile_description.math_instruction.instruction_shape[1]),
'instruction_shape_k': str(operation.tile_description.math_instruction.instruction_shape[2]),
'epilogue_vector_length': str(epilogue_vector_length),
'element_epilogue': str(DataTypeTag[operation.element_epilogue]),
'epilogue_functor': EpilogueFunctorTag[operation.epilogue_functor],
'swizzling_functor': SwizzlingFunctorTag[operation.swizzling_functor],
'stages': str(operation.tile_description.stages),
'align_a': str(operation.A.alignment),
'align_b': str(operation.B.alignment),
'transform_a': ComplexTransformTag[operation.A.complex_transform],
'transform_b': ComplexTransformTag[operation.B.complex_transform],
'math_operation': MathOperationTag[operation.tile_description.math_instruction.math_operation],
'residual': residual
}
template = self.gemm_complex_template if operation.is_complex() else self.gemm_template
return SubstituteTemplate(template, values)
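# Usage sketch (assumes a fully-populated GemmOperation instance 'operation'):
#
#   emitter = EmitGemmInstance()
#   source = emitter.emit(operation)   # C++ type-alias text for the kernel
#   wrapper = SubstituteTemplate(emitter.instance_template(), {
#       'operation_name': operation.procedural_name(),
#       'gemm_kind': 'GemmOperation',
#       'compile_guard_start': '', 'compile_guard_end': ''})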
###################################################################################################
class EmitSparseGemmInstance:
''' Responsible for emitting a CUTLASS template definition'''
def __init__(self, operation_suffix = ''):
self.operation_suffix = operation_suffix
self.includes = []
self.gemm_template = """
// Gemm operator ${operation_name}
using Operation_${operation_name} = cutlass::gemm::device::SparseGemm<
${element_a}, ${layout_a},
${element_b}, ${layout_b},
${element_c}, ${layout_c},
${element_accumulator},
${opcode_class},
${arch},
cutlass::gemm::GemmShape<${threadblock_shape_m}, ${threadblock_shape_n}, ${threadblock_shape_k}>,
cutlass::gemm::GemmShape<${warp_shape_m}, ${warp_shape_n}, ${warp_shape_k}>,
cutlass::gemm::GemmShape<${instruction_shape_m}, ${instruction_shape_n}, ${instruction_shape_k}>,
${epilogue_functor}<
${element_c},
${epilogue_vector_length},
${element_accumulator},
${element_epilogue}
>,
${swizzling_functor},
${stages},
${align_a},
${align_b},
false,
${math_operation}
${residual}
>;
"""
#
def instance_template(self):
return """
${compile_guard_start}
manifest.append(new ${gemm_kind}<Operation_${operation_name}>("${operation_name}"));
${compile_guard_end}
"""
#
def emit(self, operation):
warp_shape = [operation.tile_description.threadblock_shape[idx] // operation.tile_description.warp_count[idx] for idx in range(3)]
    epilogue_vector_length = min(operation.C.alignment * DataTypeSize[operation.C.element], 128) // DataTypeSize[operation.C.element]
residual = ''
values = {
'operation_name': operation.procedural_name(),
'element_a': DataTypeTag[operation.A.element],
'layout_a': LayoutTag[operation.A.layout],
'element_b': DataTypeTag[operation.B.element],
'layout_b': LayoutTag[operation.B.layout],
'element_c': DataTypeTag[operation.C.element],
'layout_c': LayoutTag[operation.C.layout],
'element_accumulator': DataTypeTag[operation.accumulator_type()],
'opcode_class': OpcodeClassTag[operation.tile_description.math_instruction.opcode_class],
'arch': "cutlass::arch::Sm%d" % operation.arch,
'threadblock_shape_m': str(operation.tile_description.threadblock_shape[0]),
'threadblock_shape_n': str(operation.tile_description.threadblock_shape[1]),
'threadblock_shape_k': str(operation.tile_description.threadblock_shape[2]),
'warp_shape_m': str(warp_shape[0]),
'warp_shape_n': str(warp_shape[1]),
'warp_shape_k': str(warp_shape[2]),
'instruction_shape_m': str(operation.tile_description.math_instruction.instruction_shape[0]),
'instruction_shape_n': str(operation.tile_description.math_instruction.instruction_shape[1]),
'instruction_shape_k': str(operation.tile_description.math_instruction.instruction_shape[2]),
'epilogue_vector_length': str(epilogue_vector_length),
'element_epilogue': str(DataTypeTag[operation.element_epilogue]),
'epilogue_functor': EpilogueFunctorTag[operation.epilogue_functor],
'swizzling_functor': SwizzlingFunctorTag[operation.swizzling_functor],
'stages': str(operation.tile_description.stages),
'align_a': str(operation.A.alignment),
'align_b': str(operation.B.alignment),
'transform_a': ComplexTransformTag[operation.A.complex_transform],
'transform_b': ComplexTransformTag[operation.B.complex_transform],
'math_operation': MathOperationTag[operation.tile_description.math_instruction.math_operation],
'residual': residual
}
template = self.gemm_template
return SubstituteTemplate(template, values)
###################################################################################################
#
class EmitGemmUniversalInstance:
''' Responsible for emitting a CUTLASS template definition'''
def __init__(self, operation_suffix = ''):
self.operation_suffix = operation_suffix
self.includes = [
"cutlass/cutlass.h",
"cutlass/numeric_types.h",
"cutlass/arch/arch.h",
"cutlass/arch/mma.h",
"cutlass/layout/matrix.h",
"cutlass/gemm/device/gemm.h",
"cutlass/gemm/device/gemm_universal_adapter.h",
"cutlass/gemm/kernel/default_gemm_universal.h",
]
self.builtin_epilogue_functor_template = """
${epilogue_functor}<
${element_c},
${epilogue_vector_length},
${element_accumulator},
${element_epilogue}
>
"""
self.gemm_template = """
// Gemm operator ${operation_name}
using ${operation_name}_base =
typename cutlass::gemm::kernel::DefaultGemmUniversal<
${element_b}, ${layout_b}, ${transform_b}, ${align_b}, // transposed B operand
${element_a}, ${layout_a}, ${transform_a}, ${align_a}, // transposed A operand
${element_c}, ${layout_c},
${element_accumulator},
${opcode_class},
${arch},
cutlass::gemm::GemmShape<${threadblock_shape_m}, ${threadblock_shape_n}, ${threadblock_shape_k}>,
cutlass::gemm::GemmShape<${warp_shape_m}, ${warp_shape_n}, ${warp_shape_k}>,
cutlass::gemm::GemmShape<${instruction_shape_m}, ${instruction_shape_n}, ${instruction_shape_k}>,
${epilogue_functor},
${swizzling_functor},
${stages},
${math_operation}
>::GemmKernel;
// Define named type
struct ${operation_name}${operation_suffix} :
public ${operation_name}_base { };
"""
self.gemm_template_interleaved = """
// Gemm operator ${operation_name}
using ${operation_name}_base =
typename cutlass::gemm::kernel::DefaultGemmUniversal<
${element_a}, ${layout_a}, ${transform_a}, ${align_a},
${element_b}, ${layout_b}, ${transform_b}, ${align_b},
${element_c}, ${layout_c},
${element_accumulator},
${opcode_class},
${arch},
cutlass::gemm::GemmShape<${threadblock_shape_m}, ${threadblock_shape_n}, ${threadblock_shape_k}>,
cutlass::gemm::GemmShape<${warp_shape_m}, ${warp_shape_n}, ${warp_shape_k}>,
cutlass::gemm::GemmShape<${instruction_shape_m}, ${instruction_shape_n}, ${instruction_shape_k}>,
${epilogue_functor},
${swizzling_functor},
${stages},
${math_operation}
>::GemmKernel;
// Define named type
struct ${operation_name}${operation_suffix} :
public ${operation_name}_base { };
"""
#
def instance_template(self):
return """
${compile_guard_start}
manifest.append(new ${gemm_kind}<
cutlass::gemm::device::GemmUniversalAdapter<${operation_name}>
>("${operation_name}"));
${compile_guard_end}
"""
#
def emit(self, operation):
threadblock_shape = operation.tile_description.threadblock_shape
warp_count = operation.tile_description.warp_count
warp_shape = [threadblock_shape[idx] // warp_count[idx] for idx in range(3)]
transpose_layouts = {
LayoutType.ColumnMajor: LayoutType.RowMajor,
LayoutType.RowMajor: LayoutType.ColumnMajor
}
    if operation.A.layout in transpose_layouts and \
      operation.B.layout in transpose_layouts and \
      operation.C.layout in transpose_layouts:
instance_layout_A = transpose_layouts[operation.A.layout]
instance_layout_B = transpose_layouts[operation.B.layout]
instance_layout_C = transpose_layouts[operation.C.layout]
gemm_template = self.gemm_template
else:
instance_layout_A, instance_layout_B, instance_layout_C = \
(operation.A.layout, operation.B.layout, operation.C.layout)
gemm_template = self.gemm_template_interleaved
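    # Note: the first template swaps A and B and transposes every layout,
    # emitting the kernel as C^T = B^T x A^T so that one kernel formulation
    # can serve the opposite C layout.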
#
# Support built-in epilogue functors or user-defined functions
if isinstance(operation.epilogue_functor, enum.Enum):
epilogue_vector_length = \
min(operation.C.alignment * DataTypeSize[operation.C.element], 128) // DataTypeSize[operation.C.element]
values = {
'epilogue_vector_length': str(epilogue_vector_length),
'element_epilogue': str(DataTypeTag[operation.element_epilogue]),
'epilogue_functor': EpilogueFunctorTag[operation.epilogue_functor],
}
epilogue_functor = SubstituteTemplate(self.builtin_epilogue_functor_template, values)
else:
      epilogue_functor = operation.epilogue_functor.emit_declaration()
#
values = {
'operation_name': operation.procedural_name(),
'operation_suffix': self.operation_suffix,
'element_a': DataTypeTag[operation.A.element],
'layout_a': LayoutTag[instance_layout_A],
'element_b': DataTypeTag[operation.B.element],
'layout_b': LayoutTag[instance_layout_B],
'element_c': DataTypeTag[operation.C.element],
'layout_c': LayoutTag[instance_layout_C],
'element_accumulator': DataTypeTag[operation.accumulator_type()],
'opcode_class': OpcodeClassTag[operation.tile_description.math_instruction.opcode_class],
'arch': "cutlass::arch::Sm%d" % operation.arch,
'threadblock_shape_m': str(operation.tile_description.threadblock_shape[0]),
'threadblock_shape_n': str(operation.tile_description.threadblock_shape[1]),
'threadblock_shape_k': str(operation.tile_description.threadblock_shape[2]),
'warp_shape_m': str(warp_shape[0]),
'warp_shape_n': str(warp_shape[1]),
'warp_shape_k': str(warp_shape[2]),
'instruction_shape_m': str(operation.tile_description.math_instruction.instruction_shape[0]),
'instruction_shape_n': str(operation.tile_description.math_instruction.instruction_shape[1]),
'instruction_shape_k': str(operation.tile_description.math_instruction.instruction_shape[2]),
'epilogue_functor': epilogue_functor,
'swizzling_functor': SwizzlingFunctorTag[operation.swizzling_functor],
'stages': str(operation.tile_description.stages),
'align_a': str(operation.A.alignment),
'align_b': str(operation.B.alignment),
'transform_a': ComplexTransformTag[operation.A.complex_transform],
'transform_b': ComplexTransformTag[operation.B.complex_transform],
'math_operation': MathOperationTag[operation.tile_description.math_instruction.math_operation]
}
return SubstituteTemplate(gemm_template, values)
###################################################################################################
#
class EmitGemmPlanarComplexInstance:
''' Responsible for emitting a CUTLASS template definition'''
def __init__(self, operation_suffix = ''):
self.operation_suffix = operation_suffix
self.includes = []
self.template = """
// Gemm operator ${operation_name}
using Operation_${operation_name} = typename cutlass::gemm::kernel::DefaultGemmPlanarComplexUniversal<
${element_a}, ${layout_a}, ${transform_a}, ${alignment_a},
${element_b}, ${layout_b}, ${transform_b}, ${alignment_b},
${element_c}, cutlass::layout::RowMajor,
${element_accumulator},
${opcode_class},
${arch},
cutlass::gemm::GemmShape<${threadblock_shape_m}, ${threadblock_shape_n}, ${threadblock_shape_k}>,
cutlass::gemm::GemmShape<${warp_shape_m}, ${warp_shape_n}, ${warp_shape_k}>,
cutlass::gemm::GemmShape<${instruction_shape_m}, ${instruction_shape_n}, ${instruction_shape_k}>,
cutlass::epilogue::thread::LinearCombinationPlanarComplex<
${element_c},
${alignment_c},
${element_accumulator},
${element_epilogue}
>,
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>,
${stages},
${math_operator}
>::GemmKernel;
struct ${operation_name} :
public Operation_${operation_name} { };
"""
#
def instance_template(self):
return """
${compile_guard_start}
manifest.append(new ${gemm_kind}<
cutlass::gemm::device::GemmUniversalAdapter<${operation_name}>
>("${operation_name}"));
${compile_guard_end}
"""
#
def emit(self, operation):
warp_shape = [operation.tile_description.threadblock_shape[idx] // operation.tile_description.warp_count[idx] for idx in range(3)]
# exchange and transpose A and B types, layouts, and complex transforms since the C layout is row-major
transposed_layout_A = TransposedLayout[operation.A.layout]
transposed_layout_B = TransposedLayout[operation.B.layout]
values = {
'operation_name': operation.procedural_name(),
'element_a': DataTypeTag[operation.B.element],
'layout_a': LayoutTag[transposed_layout_B],
'transform_a': ComplexTransformTag[operation.B.complex_transform],
'alignment_a': str(operation.B.alignment),
'element_b': DataTypeTag[operation.A.element],
'layout_b': LayoutTag[transposed_layout_A],
'transform_b': ComplexTransformTag[operation.A.complex_transform],
'alignment_b': str(operation.A.alignment),
'element_c': DataTypeTag[operation.C.element],
'layout_c': LayoutTag[operation.C.layout],
'element_accumulator': DataTypeTag[operation.tile_description.math_instruction.element_accumulator],
'opcode_class': OpcodeClassTag[operation.tile_description.math_instruction.opcode_class],
'arch': "cutlass::arch::Sm%d" % operation.arch,
'threadblock_shape_m': str(operation.tile_description.threadblock_shape[0]),
'threadblock_shape_n': str(operation.tile_description.threadblock_shape[1]),
'threadblock_shape_k': str(operation.tile_description.threadblock_shape[2]),
'warp_shape_m': str(warp_shape[0]),
'warp_shape_n': str(warp_shape[1]),
'warp_shape_k': str(warp_shape[2]),
'instruction_shape_m': str(operation.tile_description.math_instruction.instruction_shape[0]),
'instruction_shape_n': str(operation.tile_description.math_instruction.instruction_shape[1]),
'instruction_shape_k': str(operation.tile_description.math_instruction.instruction_shape[2]),
'alignment_c': str(operation.C.alignment),
'element_epilogue': str(DataTypeTag[operation.element_epilogue]),
'stages': str(operation.tile_description.stages),
'math_operator': 'cutlass::arch::OpMultiplyAdd'
}
return SubstituteTemplate(self.template, values)
###################################################################################################
#
class EmitGemmPlanarComplexArrayInstance:
''' Responsible for emitting a CUTLASS template definition'''
def __init__(self, operation_suffix = ''):
self.operation_suffix = operation_suffix
self.includes = []
self.template = """
// Gemm operator ${operation_name}
using Operation_${operation_name} = typename cutlass::gemm::kernel::DefaultGemmPlanarComplexUniversal<
${element_a}, ${layout_a}, ${transform_a}, ${alignment_a},
${element_b}, ${layout_b}, ${transform_b}, ${alignment_b},
${element_c}, cutlass::layout::RowMajor,
${element_accumulator},
${opcode_class},
${arch},
cutlass::gemm::GemmShape<${threadblock_shape_m}, ${threadblock_shape_n}, ${threadblock_shape_k}>,
cutlass::gemm::GemmShape<${warp_shape_m}, ${warp_shape_n}, ${warp_shape_k}>,
cutlass::gemm::GemmShape<${instruction_shape_m}, ${instruction_shape_n}, ${instruction_shape_k}>,
cutlass::epilogue::thread::LinearCombinationPlanarComplex<
${element_c},
${alignment_c},
${element_accumulator},
${element_epilogue}
>,
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>,
${stages},
${math_operator}
>::GemmArrayKernel;
struct ${operation_name} : public Operation_${operation_name} { };
"""
#
def instance_template(self):
return """
${compile_guard_start}
manifest.append(new ${gemm_kind}<
cutlass::gemm::device::GemmUniversalAdapter<${operation_name}>
>("${operation_name}"));
${compile_guard_end}
"""
#
def emit(self, operation):
warp_shape = [operation.tile_description.threadblock_shape[idx] // operation.tile_description.warp_count[idx] for idx in range(3)]
# exchange and transpose A and B types, layouts, and complex transforms since the C layout is row-major
transposed_layout_A = TransposedLayout[operation.A.layout]
transposed_layout_B = TransposedLayout[operation.B.layout]
values = {
'operation_name': operation.procedural_name(),
'element_a': DataTypeTag[operation.B.element],
'layout_a': LayoutTag[transposed_layout_B],
'transform_a': ComplexTransformTag[operation.B.complex_transform],
'alignment_a': str(operation.B.alignment),
'element_b': DataTypeTag[operation.A.element],
'layout_b': LayoutTag[transposed_layout_A],
'transform_b': ComplexTransformTag[operation.A.complex_transform],
'alignment_b': str(operation.A.alignment),
'element_c': DataTypeTag[operation.C.element],
'layout_c': LayoutTag[operation.C.layout],
'element_accumulator': DataTypeTag[operation.tile_description.math_instruction.element_accumulator],
'opcode_class': OpcodeClassTag[operation.tile_description.math_instruction.opcode_class],
'arch': "cutlass::arch::Sm%d" % operation.arch,
'threadblock_shape_m': str(operation.tile_description.threadblock_shape[0]),
'threadblock_shape_n': str(operation.tile_description.threadblock_shape[1]),
'threadblock_shape_k': str(operation.tile_description.threadblock_shape[2]),
'warp_shape_m': str(warp_shape[0]),
'warp_shape_n': str(warp_shape[1]),
'warp_shape_k': str(warp_shape[2]),
'instruction_shape_m': str(operation.tile_description.math_instruction.instruction_shape[0]),
'instruction_shape_n': str(operation.tile_description.math_instruction.instruction_shape[1]),
'instruction_shape_k': str(operation.tile_description.math_instruction.instruction_shape[2]),
'alignment_c': str(operation.C.alignment),
'element_epilogue': str(DataTypeTag[operation.element_epilogue]),
'stages': str(operation.tile_description.stages),
'math_operator': 'cutlass::arch::OpMultiplyAdd'
}
return SubstituteTemplate(self.template, values)
###################################################################################################
#
class EmitGemmGroupedInstance:
''' Responsible for emitting a CUTLASS template definition'''
def __init__(self, operation_suffix = ''):
self.operation_suffix = operation_suffix
self.includes = [
"cutlass/cutlass.h",
"cutlass/numeric_types.h",
"cutlass/arch/arch.h",
"cutlass/arch/mma.h",
"cutlass/layout/matrix.h",
"cutlass/gemm/device/gemm.h",
"cutlass/gemm/kernel/gemm_grouped.h",
"cutlass/gemm/kernel/default_gemm_grouped.h",
"cutlass/gemm/device/gemm_grouped.h"
]
self.builtin_epilogue_functor_template = """
${epilogue_functor}<
${element_c},
${epilogue_vector_length},
${element_accumulator},
${element_epilogue}
>
"""
self.gemm_template = """
// Gemm operator ${operation_name}
using ${operation_name}_base =
typename cutlass::gemm::kernel::DefaultGemmGrouped<
${element_a}, ${layout_a}, ${transform_a}, ${align_a},
${element_b}, ${layout_b}, ${transform_b}, ${align_b},
${element_c}, ${layout_c},
${element_accumulator},
${opcode_class},
${arch},
cutlass::gemm::GemmShape<${threadblock_shape_m}, ${threadblock_shape_n}, ${threadblock_shape_k}>,
cutlass::gemm::GemmShape<${warp_shape_m}, ${warp_shape_n}, ${warp_shape_k}>,
cutlass::gemm::GemmShape<${instruction_shape_m}, ${instruction_shape_n}, ${instruction_shape_k}>,
${epilogue_functor},
${swizzling_functor},
${stages},
${scheduler_mode},
${math_operation}
>::GemmKernel;
// Define named type
struct ${operation_name}${operation_suffix} :
public ${operation_name}_base { };
"""
#
def instance_template(self):
return """
${compile_guard_start}
manifest.append(new ${gemm_kind}<
cutlass::gemm::device::GemmGrouped<${operation_name}>
>("${operation_name}"));
${compile_guard_end}
"""
#
def emit(self, operation):
threadblock_shape = operation.tile_description.threadblock_shape
warp_count = operation.tile_description.warp_count
warp_shape = [threadblock_shape[idx] // warp_count[idx] for idx in range(3)]
transpose_layouts = {
LayoutType.ColumnMajor: LayoutType.RowMajor,
LayoutType.RowMajor: LayoutType.ColumnMajor
}
instance_layout_A, instance_layout_B, instance_layout_C = \
(operation.A.layout, operation.B.layout, operation.C.layout)
#
# Support built-in epilogue functors or user-defined functions
if isinstance(operation.epilogue_functor, enum.Enum):
epilogue_vector_length = \
min(operation.C.alignment * DataTypeSize[operation.C.element], 128) // DataTypeSize[operation.C.element]
values = {
'epilogue_vector_length': str(epilogue_vector_length),
'element_epilogue': str(DataTypeTag[operation.element_epilogue]),
'epilogue_functor': EpilogueFunctorTag[operation.epilogue_functor],
}
epilogue_functor = SubstituteTemplate(self.builtin_epilogue_functor_template, values)
else:
      epilogue_functor = operation.epilogue_functor.emit_declaration()
#
values = {
'operation_name': operation.procedural_name(),
'operation_suffix': self.operation_suffix,
'element_a': DataTypeTag[operation.A.element],
'layout_a': LayoutTag[instance_layout_A],
'element_b': DataTypeTag[operation.B.element],
'layout_b': LayoutTag[instance_layout_B],
'element_c': DataTypeTag[operation.C.element],
'layout_c': LayoutTag[instance_layout_C],
'element_accumulator': DataTypeTag[operation.accumulator_type()],
'opcode_class': OpcodeClassTag[operation.tile_description.math_instruction.opcode_class],
'arch': "cutlass::arch::Sm%d" % operation.arch,
'threadblock_shape_m': str(operation.tile_description.threadblock_shape[0]),
'threadblock_shape_n': str(operation.tile_description.threadblock_shape[1]),
'threadblock_shape_k': str(operation.tile_description.threadblock_shape[2]),
'warp_shape_m': str(warp_shape[0]),
'warp_shape_n': str(warp_shape[1]),
'warp_shape_k': str(warp_shape[2]),
'instruction_shape_m': str(operation.tile_description.math_instruction.instruction_shape[0]),
'instruction_shape_n': str(operation.tile_description.math_instruction.instruction_shape[1]),
'instruction_shape_k': str(operation.tile_description.math_instruction.instruction_shape[2]),
'epilogue_functor': epilogue_functor,
'swizzling_functor': SwizzlingFunctorTag[operation.swizzling_functor],
'stages': str(operation.tile_description.stages),
'align_a': str(operation.A.alignment),
'align_b': str(operation.B.alignment),
'transform_a': ComplexTransformTag[operation.A.complex_transform],
'transform_b': ComplexTransformTag[operation.B.complex_transform],
'scheduler_mode': GroupScheduleModeTag[operation.scheduler_mode],
'math_operation': MathOperationTag[operation.tile_description.math_instruction.math_operation]
}
return SubstituteTemplate(self.gemm_template, values)
###################################################################################################
#
# Emitters functions for all targets
#
###################################################################################################
class EmitGemmConfigurationLibrary:
def __init__(self, operation_path, configuration_name):
self.configuration_name = configuration_name
self.configuration_path = os.path.join(operation_path, "%s.cu" % configuration_name).replace('\\', '/')
self.instance_emitter = {
GemmKind.Gemm: EmitGemmInstance,
GemmKind.Sparse: EmitSparseGemmInstance,
GemmKind.Universal: EmitGemmUniversalInstance,
GemmKind.PlanarComplex: EmitGemmPlanarComplexInstance,
GemmKind.PlanarComplexArray: EmitGemmPlanarComplexArrayInstance,
GemmKind.Grouped: EmitGemmGroupedInstance
}
self.gemm_kind_wrappers = {
GemmKind.Gemm: 'GemmOperation',
GemmKind.Sparse: 'GemmSparseOperation',
GemmKind.Universal: 'GemmUniversalOperation',
GemmKind.PlanarComplex: 'GemmPlanarComplexOperation',
GemmKind.PlanarComplexArray: 'GemmPlanarComplexArrayOperation',
GemmKind.Grouped: 'GemmGroupedOperation'
}
self.wmma_guard_start = "#if defined(CUTLASS_ARCH_WMMA_SM${sm_number}_ENABLED)"
self.separator = """
///////////////////////////////////////////////////////////////////////////////////////////////////
"""
self.header_template = """
/*
Generated by gemm_operation.py - Do not edit.
*/
"""
self.initialize_function_template = """
///////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace library {
///////////////////////////////////////////////////////////////////////////////////////////////////
void initialize_${configuration_name}(Manifest &manifest) {
"""
self.epilogue_template = """
}
///////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace library
} // namespace cutlass
///////////////////////////////////////////////////////////////////////////////////////////////////
"""
def __enter__(self):
self.configuration_file = open(self.configuration_path, "w")
self.configuration_file.write(self.header_template)
self.configuration_file.write(self.separator)
self.includes = collections.OrderedDict([
("cutlass/cutlass.h", None),
("cutlass/library/library.h", None),
("cutlass/library/manifest.h", None),
("library_internal.h", None),
("gemm_operation.h", None),
("cutlass/arch/wmma.h", None),
])
self.instance_definitions = []
self.instance_wrappers = []
self.operations = []
return self
def emit(self, operation):
emitter = self.instance_emitter[operation.gemm_kind]()
for incl in emitter.includes:
self.includes[incl] = None
self.operations.append(operation)
self.instance_definitions.append(emitter.emit(operation))
self.instance_wrappers.append(SubstituteTemplate(emitter.instance_template(), {
'configuration_name': self.configuration_name,
'operation_name': operation.procedural_name(),
'gemm_kind': self.gemm_kind_wrappers[operation.gemm_kind],
'compile_guard_start': SubstituteTemplate(self.wmma_guard_start, {'sm_number': str(operation.arch)}) \
if operation.tile_description.math_instruction.opcode_class == OpcodeClass.WmmaTensorOp else "",
'compile_guard_end': "#endif" \
if operation.tile_description.math_instruction.opcode_class == OpcodeClass.WmmaTensorOp else ""
}))
def __exit__(self, exception_type, exception_value, traceback):
# Write includes
for incl, _ in self.includes.items():
include_statement = "#include \"%s\"\n" % incl
self.configuration_file.write(include_statement)
self.configuration_file.write(self.separator)
# Write instance definitions in top-level namespace
for instance_definition in self.instance_definitions:
self.configuration_file.write(instance_definition)
# Add wrapper objects within initialize() function
self.configuration_file.write(SubstituteTemplate(self.initialize_function_template, {
'configuration_name': self.configuration_name
}))
for instance_wrapper in self.instance_wrappers:
self.configuration_file.write(instance_wrapper)
self.configuration_file.write(self.epilogue_template)
self.configuration_file.close()
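# Usage sketch (hypothetical path and operation list): the emitter is a context
# manager that writes one .cu file per configuration.
#
#   with EmitGemmConfigurationLibrary('generated/gemm', 'cutlass_tensorop_s1688gemm_256x128_32x2_nt_align8') as emitter:
#     for op in operations:
#       emitter.emit(op)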
###################################################################################################
###################################################################################################
| warp-main | warp/native/cutlass/tools/library/scripts/gemm_operation.py |
#
# \file library.py
#
# \brief Data types, tags, and enumerations used to describe CUTLASS Library operations
#
import re
###################################################################################################
import enum
# The following block implements enum.auto() for Python 3.5 variants that don't include it,
# such as the default 3.5.2 on Ubuntu 16.04.
#
# https://codereview.stackexchange.com/questions/177309/reimplementing-pythons-enum-auto-for-compatibility
try:
from enum import auto as enum_auto
except ImportError:
__cutlass_library_auto_enum = 0
def enum_auto() -> int:
global __cutlass_library_auto_enum
i = __cutlass_library_auto_enum
__cutlass_library_auto_enum += 1
return i
###################################################################################################
#
class GeneratorTarget(enum.Enum):
Library = enum_auto()
#
GeneratorTargetNames = {
GeneratorTarget.Library: 'library'
}
#
###################################################################################################
#
class DataType(enum.Enum):
b1 = enum_auto()
u4 = enum_auto()
u8 = enum_auto()
u16 = enum_auto()
u32 = enum_auto()
u64 = enum_auto()
s4 = enum_auto()
s8 = enum_auto()
s16 = enum_auto()
s32 = enum_auto()
s64 = enum_auto()
f16 = enum_auto()
bf16 = enum_auto()
f32 = enum_auto()
tf32 = enum_auto()
f64 = enum_auto()
cf16 = enum_auto()
cbf16 = enum_auto()
cf32 = enum_auto()
ctf32 = enum_auto()
cf64 = enum_auto()
cs4 = enum_auto()
cs8 = enum_auto()
cs16 = enum_auto()
cs32 = enum_auto()
cs64 = enum_auto()
cu4 = enum_auto()
cu8 = enum_auto()
cu16 = enum_auto()
cu32 = enum_auto()
cu64 = enum_auto()
invalid = enum_auto()
#
ShortDataTypeNames = {
DataType.s32: 'i',
DataType.f16: 'h',
DataType.f32: 's',
DataType.f64: 'd',
DataType.cf32: 'c',
DataType.cf64: 'z',
}
#
DataTypeNames = {
DataType.b1: "b1",
DataType.u4: "u4",
DataType.u8: "u8",
DataType.u16: "u16",
DataType.u32: "u32",
DataType.u64: "u64",
DataType.s4: "s4",
DataType.s8: "s8",
DataType.s16: "s16",
DataType.s32: "s32",
DataType.s64: "s64",
DataType.f16: "f16",
DataType.bf16: "bf16",
DataType.f32: "f32",
DataType.tf32: "tf32",
DataType.f64: "f64",
DataType.cf16: "cf16",
DataType.cbf16: "cbf16",
DataType.cf32: "cf32",
DataType.ctf32: "ctf32",
DataType.cf64: "cf64",
DataType.cu4: "cu4",
DataType.cu8: "cu8",
DataType.cu16: "cu16",
DataType.cu32: "cu32",
DataType.cu64: "cu64",
DataType.cs4: "cs4",
DataType.cs8: "cs8",
DataType.cs16: "cs16",
DataType.cs32: "cs32",
DataType.cs64: "cs64",
}
DataTypeTag = {
DataType.b1: "cutlass::uint1b_t",
DataType.u4: "cutlass::uint4b_t",
DataType.u8: "uint8_t",
DataType.u16: "uint16_t",
DataType.u32: "uint32_t",
DataType.u64: "uint64_t",
DataType.s4: "cutlass::int4b_t",
DataType.s8: "int8_t",
DataType.s16: "int16_t",
DataType.s32: "int32_t",
DataType.s64: "int64_t",
DataType.f16: "cutlass::half_t",
DataType.bf16: "cutlass::bfloat16_t",
DataType.f32: "float",
DataType.tf32: "cutlass::tfloat32_t",
DataType.f64: "double",
DataType.cf16: "cutlass::complex<cutlass::half_t>",
DataType.cbf16: "cutlass::complex<cutlass::bfloat16_t>",
DataType.cf32: "cutlass::complex<float>",
DataType.ctf32: "cutlass::complex<cutlass::tfloat32_t>",
DataType.cf64: "cutlass::complex<double>",
DataType.cu4: "cutlass::complex<cutlass::uint4b_t>",
DataType.cu8: "cutlass::complex<cutlass::uint8_t>",
DataType.cu16: "cutlass::complex<cutlass::uint16_t>",
DataType.cu32: "cutlass::complex<cutlass::uint32_t>",
DataType.cu64: "cutlass::complex<cutlass::uint64_t>",
DataType.cs4: "cutlass::complex<cutlass::int4b_t>",
DataType.cs8: "cutlass::complex<cutlass::int8_t>",
DataType.cs16: "cutlass::complex<cutlass::int16_t>",
DataType.cs32: "cutlass::complex<cutlass::int32_t>",
DataType.cs64: "cutlass::complex<cutlass::int64_t>",
}
DataTypeSize = {
DataType.b1: 1,
DataType.u4: 4,
DataType.u8: 8,
DataType.u16: 16,
DataType.u32: 32,
DataType.u64: 64,
DataType.s4: 4,
DataType.s8: 8,
DataType.s16: 16,
DataType.s32: 32,
DataType.s64: 64,
DataType.f16: 16,
DataType.bf16: 16,
DataType.f32: 32,
DataType.tf32: 32,
DataType.f64: 64,
DataType.cf16: 32,
DataType.cbf16: 32,
DataType.cf32: 64,
  DataType.ctf32: 64,   # two 32-bit words, consistent with cf32/cu32/cs32 above
DataType.cf64: 128,
DataType.cu4: 8,
DataType.cu8: 16,
DataType.cu16: 32,
DataType.cu32: 64,
DataType.cu64: 128,
DataType.cs4: 8,
DataType.cs8: 16,
DataType.cs16: 32,
DataType.cs32: 64,
DataType.cs64: 128,
}
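# Worked example: the GEMM emitters size the epilogue vector as
#   min(C.alignment * DataTypeSize[C.element], 128) // DataTypeSize[C.element]
# so an f16 C tensor (16 bits) with alignment 8 yields min(8 * 16, 128) // 16 = 8
# elements, i.e. one full 128-bit vectorized access per epilogue operation.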
###################################################################################################
#
class BlasMode(enum.Enum):
symmetric = enum_auto()
hermitian = enum_auto()
#
BlasModeTag = {
BlasMode.symmetric: 'cutlass::BlasMode::kSymmetric',
BlasMode.hermitian: 'cutlass::BlasMode::kHermitian',
}
#
class ComplexTransform(enum.Enum):
none = enum_auto()
conj = enum_auto()
#
ComplexTransformTag = {
ComplexTransform.none: 'cutlass::ComplexTransform::kNone',
ComplexTransform.conj: 'cutlass::ComplexTransform::kConjugate',
}
#
RealComplexBijection = [
(DataType.f16, DataType.cf16),
(DataType.f32, DataType.cf32),
(DataType.f64, DataType.cf64),
]
#
def is_complex(data_type):
for r, c in RealComplexBijection:
if data_type == c:
return True
return False
#
def get_complex_from_real(real_type):
for r, c in RealComplexBijection:
if real_type == r:
return c
return DataType.invalid
#
def get_real_from_complex(complex_type):
for r, c in RealComplexBijection:
if complex_type == c:
return r
return DataType.invalid
#
class ComplexMultiplyOp(enum.Enum):
multiply_add = enum_auto()
gaussian = enum_auto()
###################################################################################################
#
class MathOperation(enum.Enum):
multiply_add = enum_auto()
multiply_add_saturate = enum_auto()
xor_popc = enum_auto()
multiply_add_fast_bf16 = enum_auto()
multiply_add_fast_f16 = enum_auto()
multiply_add_fast_f32 = enum_auto()
multiply_add_complex_fast_f32 = enum_auto()
multiply_add_complex = enum_auto()
multiply_add_complex_gaussian = enum_auto()
#
MathOperationTag = {
MathOperation.multiply_add: 'cutlass::arch::OpMultiplyAdd',
MathOperation.multiply_add_saturate: 'cutlass::arch::OpMultiplyAddSaturate',
MathOperation.xor_popc: 'cutlass::arch::OpXorPopc',
MathOperation.multiply_add_fast_bf16: 'cutlass::arch::OpMultiplyAddFastBF16',
MathOperation.multiply_add_fast_f16: 'cutlass::arch::OpMultiplyAddFastF16',
MathOperation.multiply_add_fast_f32: 'cutlass::arch::OpMultiplyAddFastF32',
MathOperation.multiply_add_complex_fast_f32: 'cutlass::arch::OpMultiplyAddComplexFastF32',
MathOperation.multiply_add_complex: 'cutlass::arch::OpMultiplyAddComplex',
MathOperation.multiply_add_complex_gaussian: 'cutlass::arch::OpMultiplyAddGaussianComplex',
}
###################################################################################################
#
class LayoutType(enum.Enum):
ColumnMajor = enum_auto()
RowMajor = enum_auto()
ColumnMajorInterleaved2 = enum_auto()
RowMajorInterleaved2 = enum_auto()
ColumnMajorInterleaved32 = enum_auto()
RowMajorInterleaved32 = enum_auto()
ColumnMajorInterleaved64 = enum_auto()
RowMajorInterleaved64 = enum_auto()
TensorNHWC = enum_auto()
TensorNDHWC = enum_auto()
TensorNCHW = enum_auto()
TensorNGHWC = enum_auto()
TensorNC32HW32 = enum_auto()
TensorNC64HW64 = enum_auto()
TensorC32RSK32 = enum_auto()
TensorC64RSK64 = enum_auto()
#
LayoutTag = {
LayoutType.ColumnMajor: 'cutlass::layout::ColumnMajor',
LayoutType.RowMajor: 'cutlass::layout::RowMajor',
LayoutType.ColumnMajorInterleaved2: 'cutlass::layout::ColumnMajorInterleaved<2>',
LayoutType.RowMajorInterleaved2: 'cutlass::layout::RowMajorInterleaved<2>',
LayoutType.ColumnMajorInterleaved32: 'cutlass::layout::ColumnMajorInterleaved<32>',
LayoutType.RowMajorInterleaved32: 'cutlass::layout::RowMajorInterleaved<32>',
LayoutType.ColumnMajorInterleaved64: 'cutlass::layout::ColumnMajorInterleaved<64>',
LayoutType.RowMajorInterleaved64: 'cutlass::layout::RowMajorInterleaved<64>',
LayoutType.TensorNHWC: 'cutlass::layout::TensorNHWC',
LayoutType.TensorNDHWC: 'cutlass::layout::TensorNDHWC',
LayoutType.TensorNCHW: 'cutlass::layout::TensorNCHW',
LayoutType.TensorNGHWC: 'cutlass::layout::TensorNGHWC',
LayoutType.TensorNC32HW32: 'cutlass::layout::TensorNCxHWx<32>',
LayoutType.TensorC32RSK32: 'cutlass::layout::TensorCxRSKx<32>',
LayoutType.TensorNC64HW64: 'cutlass::layout::TensorNCxHWx<64>',
LayoutType.TensorC64RSK64: 'cutlass::layout::TensorCxRSKx<64>',
}
#
TransposedLayout = {
LayoutType.ColumnMajor: LayoutType.RowMajor,
LayoutType.RowMajor: LayoutType.ColumnMajor,
LayoutType.ColumnMajorInterleaved2: LayoutType.RowMajorInterleaved2,
LayoutType.RowMajorInterleaved2: LayoutType.ColumnMajorInterleaved2,
LayoutType.ColumnMajorInterleaved32: LayoutType.RowMajorInterleaved32,
LayoutType.RowMajorInterleaved32: LayoutType.ColumnMajorInterleaved32,
LayoutType.ColumnMajorInterleaved64: LayoutType.RowMajorInterleaved64,
LayoutType.RowMajorInterleaved64: LayoutType.ColumnMajorInterleaved64,
LayoutType.TensorNHWC: LayoutType.TensorNHWC
}
#
ShortLayoutTypeNames = {
LayoutType.ColumnMajor: 'n',
LayoutType.ColumnMajorInterleaved2: 'n2',
LayoutType.ColumnMajorInterleaved32: 'n32',
LayoutType.ColumnMajorInterleaved64: 'n64',
LayoutType.RowMajor: 't',
LayoutType.RowMajorInterleaved2: 't2',
LayoutType.RowMajorInterleaved32: 't32',
LayoutType.RowMajorInterleaved64: 't64',
LayoutType.TensorNHWC: 'nhwc',
LayoutType.TensorNDHWC: 'ndhwc',
LayoutType.TensorNCHW: 'nchw',
LayoutType.TensorNGHWC: 'nghwc',
LayoutType.TensorNC32HW32: 'nc32hw32',
LayoutType.TensorNC64HW64: 'nc64hw64',
LayoutType.TensorC32RSK32: 'c32rsk32',
LayoutType.TensorC64RSK64: 'c64rsk64'
}
#
ShortComplexLayoutNames = {
(LayoutType.ColumnMajor, ComplexTransform.none): 'n',
(LayoutType.ColumnMajor, ComplexTransform.conj): 'c',
(LayoutType.RowMajor, ComplexTransform.none): 't',
(LayoutType.RowMajor, ComplexTransform.conj): 'h'
}
###################################################################################################
#
class SideMode(enum.Enum):
Left = enum_auto()
Right = enum_auto()
#
SideModeTag = {
SideMode.Left: 'cutlass::SideMode::kLeft',
SideMode.Right: 'cutlass::SideMode::kRight'
}
#
ShortSideModeNames = {
SideMode.Left: 'ls',
SideMode.Right: 'rs'
}
###################################################################################################
#
class FillMode(enum.Enum):
Lower = enum_auto()
Upper = enum_auto()
#
FillModeTag = {
FillMode.Lower: 'cutlass::FillMode::kLower',
FillMode.Upper: 'cutlass::FillMode::kUpper'
}
#
ShortFillModeNames = {
FillMode.Lower: 'l',
FillMode.Upper: 'u'
}
###################################################################################################
#
class DiagType(enum.Enum):
NonUnit = enum_auto()
Unit = enum_auto()
#
DiagTypeTag = {
DiagType.NonUnit: 'cutlass::DiagType::kNonUnit',
DiagType.Unit: 'cutlass::DiagType::kUnit'
}
#
ShortDiagTypeNames = {
DiagType.NonUnit: 'nu',
DiagType.Unit: 'un'
}
###################################################################################################
#
class OpcodeClass(enum.Enum):
Simt = enum_auto()
TensorOp = enum_auto()
WmmaTensorOp = enum_auto()
SparseTensorOp = enum_auto()
OpcodeClassNames = {
OpcodeClass.Simt: 'simt',
OpcodeClass.TensorOp: 'tensorop',
OpcodeClass.WmmaTensorOp: 'wmma_tensorop',
}
OpcodeClassTag = {
OpcodeClass.Simt: 'cutlass::arch::OpClassSimt',
OpcodeClass.TensorOp: 'cutlass::arch::OpClassTensorOp',
OpcodeClass.WmmaTensorOp: 'cutlass::arch::OpClassWmmaTensorOp',
}
###################################################################################################
#
class OperationKind(enum.Enum):
Gemm = enum_auto()
RankK = enum_auto()
Rank2K = enum_auto()
Trmm = enum_auto()
Symm = enum_auto()
Conv2d = enum_auto()
Conv3d = enum_auto()
#
OperationKindNames = {
OperationKind.Gemm: 'gemm'
, OperationKind.RankK: 'rank_k'
, OperationKind.Rank2K: 'rank_2k'
, OperationKind.Trmm: 'trmm'
, OperationKind.Symm: 'symm'
, OperationKind.Conv2d: 'conv2d'
, OperationKind.Conv3d: 'conv3d'
}
#
class Target(enum.Enum):
library = enum_auto()
#
ArchitectureNames = {
50: 'maxwell',
60: 'pascal',
61: 'pascal',
70: 'volta',
75: 'turing',
80: 'ampere',
}
#
SharedMemPerCC = {
70: 96, # 96KB of SMEM
72: 96, # 96KB of SMEM
75: 64, # 64KB of SMEM
80: 163, # 163KB of SMEM - 1KB reserved for the driver
86: 99, # 99KB of SMEM - 1KB reserved for the driver
87: 163, # 163KB of SMEM - 1KB reserved for the driver
89: 99, # 99KB of SMEM - 1KB reserved for the driver
90: 227, # 227KB of SMEM - 1KB reserved for the driver
}
###################################################################################################
#
def SubstituteTemplate(template, values):
text = template
changed = True
while changed:
changed = False
for key, value in values.items():
regex = "\\$\\{%s\\}" % key
newtext = re.sub(regex, value, text)
if newtext != text:
changed = True
text = newtext
return text
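# Example: substitution loops until the text stops changing, so a value may
# itself contain placeholders that get resolved on a later pass:
#
#   SubstituteTemplate("cutlass_${name}", {'name': '${kind}gemm', 'kind': 's'})
#   # -> 'cutlass_sgemm'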
###################################################################################################
#
class GemmKind(enum.Enum):
Gemm = enum_auto()
Sparse = enum_auto()
Universal = enum_auto()
PlanarComplex = enum_auto()
PlanarComplexArray = enum_auto()
Grouped = enum_auto()
#
GemmKindNames = {
GemmKind.Gemm: "gemm",
GemmKind.Sparse: "spgemm",
GemmKind.Universal: "gemm",
GemmKind.PlanarComplex: "gemm_planar_complex",
GemmKind.PlanarComplexArray: "gemm_planar_complex_array",
GemmKind.Grouped: "gemm_grouped"
}
#
class RankKKind(enum.Enum):
Universal = enum_auto()
#
RankKKindNames = {
RankKKind.Universal: "rank_k"
}
#
class TrmmKind(enum.Enum):
Universal = enum_auto()
#
TrmmKindNames = {
TrmmKind.Universal: "trmm"
}
#
class SymmKind(enum.Enum):
Universal = enum_auto()
#
SymmKindNames = {
SymmKind.Universal: "symm"
}
#
class EpilogueFunctor(enum.Enum):
LinearCombination = enum_auto()
LinearCombinationClamp = enum_auto()
#
EpilogueFunctorTag = {
EpilogueFunctor.LinearCombination: 'cutlass::epilogue::thread::LinearCombination',
EpilogueFunctor.LinearCombinationClamp: 'cutlass::epilogue::thread::LinearCombinationClamp',
}
#
class SwizzlingFunctor(enum.Enum):
Identity1 = enum_auto()
Identity2 = enum_auto()
Identity4 = enum_auto()
Identity8 = enum_auto()
Horizontal = enum_auto()
StridedDgradIdentity1 = enum_auto()
StridedDgradIdentity4 = enum_auto()
StridedDgradHorizontal = enum_auto()
StreamK = enum_auto()
#
SwizzlingFunctorTag = {
SwizzlingFunctor.Identity1: 'cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<1>',
SwizzlingFunctor.Identity2: 'cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<2>',
SwizzlingFunctor.Identity4: 'cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<4>',
SwizzlingFunctor.Identity8: 'cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<8>',
SwizzlingFunctor.Horizontal: 'cutlass::gemm::threadblock::GemmHorizontalThreadblockSwizzle',
SwizzlingFunctor.StridedDgradIdentity1: 'cutlass::conv::threadblock::StridedDgradIdentityThreadblockSwizzle<1>',
SwizzlingFunctor.StridedDgradIdentity4: 'cutlass::conv::threadblock::StridedDgradIdentityThreadblockSwizzle<4>',
SwizzlingFunctor.StridedDgradHorizontal: 'cutlass::conv::threadblock::StridedDgradHorizontalThreadblockSwizzle',
SwizzlingFunctor.StreamK: 'cutlass::gemm::threadblock::ThreadblockSwizzleStreamK',
}
#
class GroupScheduleMode(enum.Enum):
  Device = enum_auto()
Host = enum_auto()
#
GroupScheduleModeTag = {
GroupScheduleMode.Device: 'cutlass::gemm::kernel::GroupScheduleMode::kDeviceOnly',
GroupScheduleMode.Host: 'cutlass::gemm::kernel::GroupScheduleMode::kHostPrecompute'
}
#
ShortGroupScheduleModeNames = {
GroupScheduleMode.Device: 'Device',
GroupScheduleMode.Host: 'Host'
}
###################################################################################################
#
class ConvKind(enum.Enum):
Fprop = enum_auto()
Dgrad = enum_auto()
Wgrad = enum_auto()
#
ConvKindTag = {
ConvKind.Fprop: 'cutlass::conv::Operator::kFprop',
ConvKind.Dgrad: 'cutlass::conv::Operator::kDgrad',
ConvKind.Wgrad: 'cutlass::conv::Operator::kWgrad'
}
ConvKindNames = {
ConvKind.Fprop: 'fprop',
ConvKind.Dgrad: 'dgrad',
ConvKind.Wgrad: 'wgrad',
}
#
class IteratorAlgorithm(enum.Enum):
Analytic = enum_auto()
Optimized = enum_auto()
FixedChannels = enum_auto()
FewChannels = enum_auto()
FixedStrideDilation = enum_auto()
#
IteratorAlgorithmTag = {
IteratorAlgorithm.Analytic: 'cutlass::conv::IteratorAlgorithm::kAnalytic',
IteratorAlgorithm.Optimized: 'cutlass::conv::IteratorAlgorithm::kOptimized',
IteratorAlgorithm.FixedChannels: 'cutlass::conv::IteratorAlgorithm::kFixedChannels',
IteratorAlgorithm.FewChannels: 'cutlass::conv::IteratorAlgorithm::kFewChannels',
IteratorAlgorithm.FixedStrideDilation: 'cutlass::conv::IteratorAlgorithm::kFixedStrideDilation'
}
IteratorAlgorithmNames = {
IteratorAlgorithm.Analytic: 'analytic',
IteratorAlgorithm.Optimized: 'optimized',
IteratorAlgorithm.FixedChannels: 'fixed_channels',
IteratorAlgorithm.FewChannels: 'few_channels',
IteratorAlgorithm.FixedStrideDilation: 'fixed_stride_dilation'
}
#
class StrideSupport(enum.Enum):
Strided = enum_auto()
Unity = enum_auto()
Fixed = enum_auto()
#
StrideSupportTag = {
StrideSupport.Strided: 'cutlass::conv::StrideSupport::kStrided',
StrideSupport.Unity: 'cutlass::conv::StrideSupport::kUnity',
StrideSupport.Fixed: 'cutlass::conv::StrideSupport::kFixed'
}
StrideSupportNames = {
StrideSupport.Strided: '',
StrideSupport.Unity: 'unity_stride',
StrideSupport.Fixed: 'fixed_stride'
}
#
class GroupMode(enum.Enum):
  NoneGroup = enum_auto()       # dense convolution (G = 1)
  SingleGroup = enum_auto()     # grouped convolution (one group per CTA)
  MultipleGroup = enum_auto()   # grouped convolution (multiple groups per CTA)
  Depthwise = enum_auto()       # depthwise convolution (C == K == G)
#
GroupModeTag = {
GroupMode.NoneGroup: 'cutlass::conv::GroupMode::kNone',
GroupMode.SingleGroup: 'cutlass::conv::GroupMode::kSingleGroup',
GroupMode.MultipleGroup: 'cutlass::conv::GroupMode::kMultipleGroup',
GroupMode.Depthwise: 'cutlass::conv::GroupMode::kDepthwise',
}
GroupModeNames = {
GroupMode.NoneGroup: '',
GroupMode.SingleGroup: 'single_group',
GroupMode.MultipleGroup: 'multiple_group',
GroupMode.Depthwise: 'depthwise',
}
###################################################################################################
#
class MathInstruction:
def __init__(self, instruction_shape, element_a, element_b, element_accumulator, opcode_class, math_operation = MathOperation.multiply_add):
self.instruction_shape = instruction_shape
self.element_a = element_a
self.element_b = element_b
self.element_accumulator = element_accumulator
self.opcode_class = opcode_class
self.math_operation = math_operation
#
class TileDescription:
def __init__(self, threadblock_shape, stages, warp_count, math_instruction, min_compute, max_compute):
self.threadblock_shape = threadblock_shape
self.stages = stages
self.warp_count = warp_count
self.math_instruction = math_instruction
self.minimum_compute_capability = min_compute
self.maximum_compute_capability = max_compute
def procedural_name(self):
return "%dx%d_%dx%d" % (self.threadblock_shape[0], self.threadblock_shape[1], self.threadblock_shape[2], self.stages)
#
class Direct2dConvFixedStrideDilationTileDescription:
def __init__(self, threadblock_output_shape, filter_shape, stages, stride, dilation, warp_count, math_instruction, min_compute, max_compute):
self.threadblock_shape = [threadblock_output_shape[0]*threadblock_output_shape[1]*threadblock_output_shape[2], threadblock_output_shape[3], filter_shape[0]*filter_shape[1]]
self.threadblock_output_shape = threadblock_output_shape
self.filter_shape = filter_shape
self.stages = stages
self.warp_count = warp_count
self.stride = stride
self.dilation = dilation
self.math_instruction = math_instruction
self.minimum_compute_capability = min_compute
self.maximum_compute_capability = max_compute
def procedural_name(self):
str_name = "%dx%dx%d_%dx%dx%dx%d_%d_filter%dx%d" % (self.threadblock_shape[0],
self.threadblock_shape[1],
self.threadblock_shape[2],
self.threadblock_output_shape[0],
self.threadblock_output_shape[1],
self.threadblock_output_shape[2],
self.threadblock_output_shape[3],
self.stages,
self.filter_shape[0],
self.filter_shape[1])
# Fixed Strided and dilation
if self.stride != [-1, -1] and self.dilation != [-1, -1]:
str_name += "_stride%dx%d_dilation%dx%d" % (self.stride[0],
self.stride[1],
self.dilation[0],
self.dilation[1])
return str_name
#
class TensorDescription:
def __init__(self, element, layout, alignment = 1, complex_transform = ComplexTransform.none):
self.element = element
self.layout = layout
self.alignment = alignment
self.complex_transform = complex_transform
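# Example: TensorDescription(DataType.f16, LayoutType.ColumnMajor, 8) describes a
# column-major f16 operand accessed 8 elements (128 bits) at a time.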
#
class SymmetricTensorDescription:
def __init__(self, element, layout, fill_mode, alignment = 1, complex_transform = ComplexTransform.none, side_mode = SideMode.Left):
self.element = element
self.layout = layout
self.fill_mode = fill_mode
self.alignment = alignment
self.complex_transform = complex_transform
self.side_mode = side_mode
#
class TriangularTensorDescription:
def __init__(self, element, layout, side_mode, fill_mode, diag_type, alignment = 1, complex_transform = ComplexTransform.none):
self.element = element
self.layout = layout
self.side_mode = side_mode
self.fill_mode = fill_mode
self.diag_type = diag_type
self.alignment = alignment
self.complex_transform = complex_transform
###################################################################################################
#
def CalculateSmemUsage(operation):
cta_shape = operation.tile_description.threadblock_shape
stages = operation.tile_description.stages
if operation.operation_kind == OperationKind.Gemm and operation.gemm_kind == GemmKind.Sparse:
# Elements represented by 8 bits of metadata (based on 4:8, 2:4 or 1:2 sparsity)
if DataTypeSize[operation.A.element] == 32:
elements_per_8b_md = 2
elif DataTypeSize[operation.A.element] == 4:
elements_per_8b_md = 8
else:
elements_per_8b_md = 4
smem_per_stage = DataTypeSize[operation.A.element] * cta_shape[0] * (cta_shape[2] // 2) // 8 + \
DataTypeSize[operation.B.element] * cta_shape[1] * cta_shape[2] // 8 + \
cta_shape[0] * (cta_shape[2] // 2) // elements_per_8b_md
else:
    # A few BLAS3 operations have only an A tensor
smem_per_stage = DataTypeSize[operation.A.element] * cta_shape[0] * cta_shape[2] // 8 + \
DataTypeSize[operation.A.element] * cta_shape[1] * cta_shape[2] // 8
smem_usage = smem_per_stage * stages
return (smem_usage >> 10)
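# Worked example (dense path): f16 A and B with a 128x128x32 threadblock use
# 16 * 128 * 32 / 8 = 8192 bytes per operand per stage, so 3 stages consume
# 3 * (8192 + 8192) = 49152 bytes and the function returns 49152 >> 10 = 48 KB.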
###################################################################################################
| warp-main | warp/native/cutlass/tools/library/scripts/library.py |
#
# \file manifest.py
#
# \brief Manifest of operations and top-level emitters for the CUTLASS Library's instances
#
import enum
import os.path
import shutil
from library import *
from gemm_operation import *
from rank_k_operation import *
from rank_2k_operation import *
from trmm_operation import *
from symm_operation import *
from conv2d_operation import *
from conv3d_operation import *
###################################################################################################
class EmitOperationKindLibrary:
def __init__(self, generated_path, kind, args):
self.generated_path = generated_path
self.kind = kind
self.args = args
self.emitters = {
OperationKind.Gemm: EmitGemmConfigurationLibrary
, OperationKind.Conv2d: EmitConv2dConfigurationLibrary
, OperationKind.Conv3d: EmitConv3dConfigurationLibrary
, OperationKind.RankK: EmitRankKConfigurationLibrary
, OperationKind.Rank2K: EmitRank2KConfigurationLibrary
, OperationKind.Trmm: EmitTrmmConfigurationLibrary
, OperationKind.Symm: EmitSymmConfigurationLibrary
}
    self.configurations = []
    self.header_template = """
/*
Generated by manifest.py - Do not edit.
*/
#include "cutlass/cutlass.h"
#include "cutlass/library/library.h"
#include "cutlass/library/manifest.h"
namespace cutlass {
namespace library {
///////////////////////////////////////////////////////////////////////////////////////////////////
"""
self.entry_template = """
//
// Entry point to construct operations
//
void initialize_all_${operation_name}_operations(Manifest &manifest) {
"""
self.configuration_prototype_template = "void initialize_${configuration_name}(Manifest &manifest);\n"
self.configuration_template =" initialize_${configuration_name}(manifest);\n"
    self.epilogue_template = """
}
///////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace library
} // namespace cutlass
"""
#
def __enter__(self):
self.operation_path = os.path.join(self.generated_path, OperationKindNames[self.kind])
os.mkdir(self.operation_path)
self.top_level_path = os.path.join(self.operation_path, "all_%s_operations.cu" % OperationKindNames[self.kind])
self.top_level_file = open(self.top_level_path, "w")
self.top_level_file.write(self.header_template)
self.source_files = [self.top_level_path,]
return self
#
def emit(self, configuration_name, operations):
with self.emitters[self.kind](self.operation_path, configuration_name) as configuration_emitter:
for operation in operations:
configuration_emitter.emit(operation)
self.source_files.append(configuration_emitter.configuration_path)
self.configurations.append(configuration_name)
self.top_level_file.write(SubstituteTemplate(self.configuration_prototype_template, {'configuration_name': configuration_name} ))
#
def __exit__(self, exception_type, exception_value, traceback):
self.top_level_file.write(SubstituteTemplate(self.entry_template, {'operation_name': OperationKindNames[self.kind]}))
for configuration_name in self.configurations:
self.top_level_file.write(SubstituteTemplate(self.configuration_template, {'configuration_name': configuration_name}))
self.top_level_file.write(self.epilogue_template)
self.top_level_file.close()
class EmitInterfaceLibrary:
def __init__(self, generated_path, operation_count, args):
self.generated_path = generated_path
self.args = args
self.prototypes = []
self.fn_calls = []
self.operation_count = str(operation_count)
self.top_level_hdr_template = '''
/*
Generated by manifest.py - Do not edit.
*/
'''
self.top_level_prologue = '''
#include "cutlass/library/library.h"
#include "cutlass/library/manifest.h"
namespace cutlass {
\tnamespace library {
${prototypes}
\t\tvoid initialize_all(Manifest &manifest) {
\t\t\tmanifest.reserve(${operation_count});\n\n
${fn_calls}
\t\t\t}
\t} // namespace library
} // namespace cutlass
'''
#
def __enter__(self):
self.top_level_path = os.path.join(self.generated_path, 'initialize_all.cpp')
self.top_level_file = open(self.top_level_path, "w")
self.top_level_file.write(self.top_level_hdr_template)
self.source_files = [self.top_level_path,]
return self
#
def emit(self, operation_name):
self.prototypes.append(SubstituteTemplate(
"\t\tvoid initialize_all_${operation_kind}_operations(Manifest &manifest);",
{'operation_kind': operation_name}))
self.fn_calls.append(SubstituteTemplate(
"\t\t\tinitialize_all_${operation_kind}_operations(manifest);",
{'operation_kind': operation_name}))
#
def __exit__(self, exception_type, exception_value, traceback):
self.top_level_file.write(SubstituteTemplate(self.top_level_prologue, {'prototypes':"\n".join(self.prototypes),
'fn_calls':"\n".join(self.fn_calls),
'operation_count': self.operation_count}))
self.top_level_file.close()
###################################################################################################
###################################################################################################
class Options:
def __init__(self):
pass
###################################################################################################
#
class Manifest:
#
def __init__(self, args = None):
self.operations = {}
self.args = args
self.operation_count = 0
self.operations_by_name = {}
self.kernel_filter = ''
self.kernel_filter_list = []
self.kernel_names = []
self.operations_enabled = []
self.selected_kernels = []
self.ignore_kernel_names = []
self.compute_capabilities = [50,]
self.curr_build_dir = '.'
self.filter_by_cc = True
if self.args:
self.kernel_filter = self.args.kernels
self.curr_build_dir = args.curr_build_dir
architectures = args.architectures.split(';') if len(args.architectures) else ['50',]
self.compute_capabilities = [int(x) for x in architectures]
if args.filter_by_cc in ['false', 'False', '0']:
self.filter_by_cc = False
if args.operations == 'all':
self.operations_enabled = []
else:
operations_list = [
OperationKind.Gemm
, OperationKind.Conv2d
, OperationKind.Conv3d
, OperationKind.RankK
, OperationKind.Trmm
, OperationKind.Symm
]
self.operations_enabled = [x for x in operations_list if OperationKindNames[x] in args.operations.split(',')]
if args.kernels == 'all':
self.kernel_names = []
else:
self.kernel_names = [x for x in args.kernels.split(',') if x != '']
self.ignore_kernel_names = [x for x in args.ignore_kernels.split(',') if x != '']
if args.kernel_filter_file is None:
self.kernel_filter_list = []
else:
self.kernel_filter_list = self.get_kernel_filters(args.kernel_filter_file)
#
  def get_kernel_filters(self, kernelListFile):
if os.path.isfile(kernelListFile):
with open(kernelListFile, 'r') as fileReader:
lines = [line.rstrip() for line in fileReader if not line.startswith("#")]
lines = [re.compile(line) for line in lines if line]
return lines
else:
return []
#
  def filter_out_kernels(self, kernel_name, kernel_filter_list):
    # returns True when kernel_name matches any regex in kernel_filter_list (i.e. it is selected)
for kernel_filter_re in kernel_filter_list:
if kernel_filter_re.search(kernel_name) is not None:
return True
return False
#
def _filter_string_matches(self, filter_string, haystack):
''' Returns true if all substrings appear in the haystack in order'''
substrings = filter_string.split('*')
for sub in substrings:
idx = haystack.find(sub)
if idx < 0:
return False
haystack = haystack[idx + len(sub):]
return True
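  # Worked example (illustrative, not part of the original script): '*' acts as an
  # ordered-substring wildcard, so the pattern 'cutlass_*_f16_*_nn' matches
  # 'cutlass_tensorop_f16_s1688gemm_f16_256x128_nn' (each piece is found left to
  # right), while a reordered pattern such as 'f16_*_cutlass' does not match it.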
#
def filter(self, operation):
    ''' Returns True if the operation passes all configured filters and should be emitted '''
# filter based on compute capability
enabled = not (self.filter_by_cc)
for cc in self.compute_capabilities:
if cc >= operation.tile_description.minimum_compute_capability and \
cc <= operation.tile_description.maximum_compute_capability and \
(cc not in SharedMemPerCC or SharedMemPerCC[cc] >= CalculateSmemUsage(operation)):
enabled = True
break
if not enabled:
return False
    if len(self.operations_enabled) and operation.operation_kind not in self.operations_enabled:
return False
# eliminate duplicates
if operation.procedural_name() in self.operations_by_name.keys():
return False
# Filter based on list of valid substrings
if len(self.kernel_names):
name = operation.procedural_name()
enabled = False
# compare against the include list
for name_substr in self.kernel_names:
if self._filter_string_matches(name_substr, name):
enabled = True
break
# compare against the exclude list
for name_substr in self.ignore_kernel_names:
if self._filter_string_matches(name_substr, name):
enabled = False
break
if len(self.kernel_filter_list) > 0:
enabled = False
if self.filter_out_kernels(operation.procedural_name(), self.kernel_filter_list):
enabled = True
# todo: filter based on compute data type
return enabled
#
#
def append(self, operation):
'''
Inserts the operation.
operation_kind -> configuration_name -> []
'''
if self.filter(operation):
self.selected_kernels.append(operation.procedural_name())
self.operations_by_name[operation.procedural_name()] = operation
# add the configuration
configuration_name = operation.configuration_name()
if operation.operation_kind not in self.operations.keys():
self.operations[operation.operation_kind] = {}
if configuration_name not in self.operations[operation.operation_kind].keys():
self.operations[operation.operation_kind][configuration_name] = []
self.operations[operation.operation_kind][configuration_name].append(operation)
self.operation_count += 1
#
#
def emit(self, target = GeneratorTarget.Library):
operation_emitters = {
GeneratorTarget.Library: EmitOperationKindLibrary
}
interface_emitters = {
GeneratorTarget.Library: EmitInterfaceLibrary
}
generated_path = os.path.join(self.curr_build_dir, 'generated')
# create generated/
if os.path.exists(generated_path):
shutil.rmtree(generated_path)
os.mkdir(generated_path)
source_files = []
with interface_emitters[target](generated_path, self.operation_count, self.args) as iface_emitter:
for operation_kind, configurations in self.operations.items():
iface_emitter.emit(OperationKindNames[operation_kind])
source_files += iface_emitter.source_files
# for each operation kind, emit initializer for all configurations
for operation_kind, configurations in self.operations.items():
with operation_emitters[target](generated_path, operation_kind, self.args) as operation_kind_emitter:
for configuration_name, operations in configurations.items():
operation_kind_emitter.emit(configuration_name, operations)
source_files += operation_kind_emitter.source_files
# write the manifest.cmake file containing paths from all targets
manifest_path = os.path.join(generated_path, "manifest.cmake")
with open(manifest_path, "w") as manifest_file:
target_name = 'cutlass_library_objs'
target_text = SubstituteTemplate("""cutlass_target_sources(
${target_name}
BATCH_SOURCES ON
PRIVATE
""", { 'target_name': target_name})
manifest_file.write(target_text)
for source_file in source_files:
manifest_file.write(" %s\n" % str(source_file.replace('\\', '/')))
manifest_file.write(")")
#
###################################################################################################
| warp-main | warp/native/cutlass/tools/library/scripts/manifest.py |
#
# \file rank_k_operation.py
#
# \brief Generates the CUTLASS Library's Rank K operation instances
#
#
import enum
import os.path
import shutil
import functools
import operator
from library import *
###################################################################################################
#
# Data structure modeling a Rank K update operation
#
###################################################################################################
#
class RankKOperation:
#
def __init__(self, rank_k_kind, arch, tile_description, A, C, element_epilogue, \
epilogue_functor = EpilogueFunctor.LinearCombination, swizzling_functor = SwizzlingFunctor.Identity8, \
blas_mode = BlasMode.symmetric):
self.blas_mode = blas_mode
self.operation_kind = OperationKind.RankK
self.arch = arch
self.tile_description = tile_description
self.rank_k_kind = rank_k_kind
self.A = A
self.C = C
self.element_epilogue = element_epilogue
self.epilogue_functor = epilogue_functor
self.swizzling_functor = swizzling_functor
#
def is_complex(self):
complex_operators = [
MathOperation.multiply_add_complex,
MathOperation.multiply_add_complex_gaussian,
MathOperation.multiply_add_complex_fast_f32
]
    return self.tile_description.math_instruction.math_operation in complex_operators
#
def is_planar_complex(self):
return False
#
def accumulator_type(self):
accum = self.tile_description.math_instruction.element_accumulator
if self.is_complex():
return get_complex_from_real(accum)
return accum
#
def short_math_name(self):
if self.tile_description.math_instruction.math_operation == MathOperation.multiply_add_complex_gaussian:
return "g%s" % ShortDataTypeNames[self.accumulator_type()]
return ShortDataTypeNames[self.accumulator_type()]
#
def core_name(self):
''' The basic operation kind is prefixed with a letter indicating the accumulation type. '''
inst_shape = ''
inst_operation = ''
intermediate_type = ''
math_operations_map = {
MathOperation.xor_popc: 'xor',
}
if self.tile_description.math_instruction.opcode_class == OpcodeClass.TensorOp or \
self.tile_description.math_instruction.opcode_class == OpcodeClass.WmmaTensorOp:
math_op = self.tile_description.math_instruction.math_operation
math_op_string = math_operations_map[math_op] if math_op in math_operations_map.keys() else ''
inst_shape = "%d%d%d" % tuple(self.tile_description.math_instruction.instruction_shape)
inst_shape += math_op_string
if self.tile_description.math_instruction.element_a != self.A.element and \
self.tile_description.math_instruction.element_a != self.tile_description.math_instruction.element_accumulator:
intermediate_type = DataTypeNames[self.tile_description.math_instruction.element_a]
operation_name = 'syrk' if self.blas_mode == BlasMode.symmetric else 'herk'
return "%s%s%s%s" % (self.short_math_name(), inst_shape, intermediate_type, operation_name)
#
def extended_name(self):
''' Append data types if they differ from compute type. '''
if self.is_complex():
extended_name = "${core_name}"
else:
if self.C.element != self.tile_description.math_instruction.element_accumulator and \
self.A.element != self.tile_description.math_instruction.element_accumulator:
extended_name = "${element_c}_${core_name}_${element_a}"
elif self.C.element == self.tile_description.math_instruction.element_accumulator and \
self.A.element != self.tile_description.math_instruction.element_accumulator:
extended_name = "${core_name}_${element_a}"
else:
extended_name = "${core_name}"
extended_name = SubstituteTemplate(extended_name, {
'element_a': DataTypeNames[self.A.element],
'element_c': DataTypeNames[self.C.element],
'core_name': self.core_name()
})
return extended_name
#
def layout_name(self):
if self.is_complex() or self.is_planar_complex():
return "%s" % (
ShortComplexLayoutNames[(self.A.layout, self.A.complex_transform)]
)
return "%s" % (ShortLayoutTypeNames[self.A.layout])
#
def fill_mode_name(self):
return "%s" % (ShortFillModeNames[self.C.fill_mode])
#
def procedural_name(self):
''' The full procedural name indicates architecture, extended name, tile size, and layout. '''
threadblock = self.tile_description.procedural_name()
opcode_class_name = OpcodeClassNames[self.tile_description.math_instruction.opcode_class]
    alignment = max([self.A.alignment, self.C.alignment])  # note: the name below substitutes A.alignment only
return SubstituteTemplate(
"cutlass_${opcode_class}_${extended_name}_${threadblock}_${layout}_${fill_mode}_align${alignment}",
{
'opcode_class': opcode_class_name,
'extended_name': self.extended_name(),
'threadblock': threadblock,
'layout': self.layout_name(),
'fill_mode': self.fill_mode_name(),
'alignment': "%d" % self.A.alignment,
}
)
#
def configuration_name(self):
''' The full procedural name indicates architecture, extended name, tile size, and layout. '''
return self.procedural_name()
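  # Example (illustrative): a single-precision tensor-op SYRK instance generated by
  # this class might be named
  #   cutlass_tensorop_s1688syrk_256x128_32x3_n_l_align4
  # encoding opcode class, accumulator type and operation, threadblock tile,
  # layout, fill mode, and alignment in that order.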
###################################################################################################
#
# Emits single instances of a CUTLASS device-wide operator
#
###################################################################################################
#
class EmitRankKUniversalInstance:
''' Responsible for emitting a CUTLASS template definition'''
def __init__(self):
self.rank_k_template = """
// Rank K operator ${operation_name}
using Operation_${operation_name} =
typename cutlass::gemm::device::RankK<
${element_a}, ${layout_a},
${element_c}, ${layout_c}, ${fill_mode},
${element_accumulator},
${opcode_class},
${arch},
cutlass::gemm::GemmShape<${threadblock_shape_m}, ${threadblock_shape_n}, ${threadblock_shape_k}>,
cutlass::gemm::GemmShape<${warp_shape_m}, ${warp_shape_n}, ${warp_shape_k}>,
cutlass::gemm::GemmShape<${instruction_shape_m}, ${instruction_shape_n}, ${instruction_shape_k}>,
${epilogue_functor}<
${element_c},
${epilogue_vector_length},
${element_accumulator},
${element_epilogue}
>,
${swizzling_functor},
${stages},
${align_a},
${split_k_serial},
${math_operation}
>;
"""
self.rank_k_complex_template = """
// Rank K operator ${operation_name}
using Operation_${operation_name} =
typename cutlass::gemm::device::RankK<
${element_a}, ${layout_a},
${element_c}, ${layout_c}, ${fill_mode},
${element_accumulator},
${opcode_class},
${arch},
cutlass::gemm::GemmShape<${threadblock_shape_m}, ${threadblock_shape_n}, ${threadblock_shape_k}>,
cutlass::gemm::GemmShape<${warp_shape_m}, ${warp_shape_n}, ${warp_shape_k}>,
cutlass::gemm::GemmShape<${instruction_shape_m}, ${instruction_shape_n}, ${instruction_shape_k}>,
${epilogue_functor}<
${element_c},
${epilogue_vector_length},
${element_accumulator},
${element_epilogue}
>,
${swizzling_functor},
${stages},
${align_a},
${split_k_serial},
${math_operation},
${transform_a},
${blas_mode}
>;
"""
def emit(self, operation):
threadblock_shape = operation.tile_description.threadblock_shape
warp_count = operation.tile_description.warp_count
warp_shape = [threadblock_shape[idx] // warp_count[idx] for idx in range(3)]
epilogue_vector_length = int(min(operation.C.alignment * DataTypeSize[operation.C.element], 128) / DataTypeSize[operation.C.element])
values = {
'operation_name': operation.procedural_name(),
'element_a': DataTypeTag[operation.A.element],
'layout_a': LayoutTag[operation.A.layout],
'element_c': DataTypeTag[operation.C.element],
'layout_c': LayoutTag[operation.C.layout],
'fill_mode': FillModeTag[operation.C.fill_mode],
'element_accumulator': DataTypeTag[operation.accumulator_type()],
'opcode_class': OpcodeClassTag[operation.tile_description.math_instruction.opcode_class],
'arch': "cutlass::arch::Sm%d" % operation.arch,
'threadblock_shape_m': str(operation.tile_description.threadblock_shape[0]),
'threadblock_shape_n': str(operation.tile_description.threadblock_shape[1]),
'threadblock_shape_k': str(operation.tile_description.threadblock_shape[2]),
'warp_shape_m': str(warp_shape[0]),
'warp_shape_n': str(warp_shape[1]),
'warp_shape_k': str(warp_shape[2]),
'instruction_shape_m': str(operation.tile_description.math_instruction.instruction_shape[0]),
'instruction_shape_n': str(operation.tile_description.math_instruction.instruction_shape[1]),
'instruction_shape_k': str(operation.tile_description.math_instruction.instruction_shape[2]),
'epilogue_vector_length': str(epilogue_vector_length),
'element_epilogue': str(DataTypeTag[operation.element_epilogue]),
'epilogue_functor': EpilogueFunctorTag[operation.epilogue_functor],
'swizzling_functor': SwizzlingFunctorTag[operation.swizzling_functor],
'stages': str(operation.tile_description.stages),
'align_a': str(operation.A.alignment),
'split_k_serial': 'false',
'math_operation': MathOperationTag[operation.tile_description.math_instruction.math_operation],
'transform_a': ComplexTransformTag[operation.A.complex_transform],
'blas_mode': BlasModeTag[operation.blas_mode]
}
rank_k_template = self.rank_k_complex_template if operation.is_complex() else self.rank_k_template
return SubstituteTemplate(rank_k_template, values)
###################################################################################################
###################################################################################################
#
# Emitter functions for all targets
#
###################################################################################################
class EmitRankKConfigurationLibrary:
def __init__(self, operation_path, configuration_name):
self.configuration_name = configuration_name
self.configuration_path = os.path.join(operation_path, "%s.cu" % configuration_name).replace('\\', '/')
self.instance_emitter = {
RankKKind.Universal: EmitRankKUniversalInstance,
}
self.rank_k_kind_wrappers = {
RankKKind.Universal: 'RankKOperation',
}
self.instance_template = {
RankKKind.Universal: """
${compile_guard_start}
manifest.append(new ${rank_k_kind}<
Operation_${operation_name}
>("${operation_name}"));
${compile_guard_end}
"""
}
self.header_template = """
/*
Generated by rank_k_operation.py - Do not edit.
*/
///////////////////////////////////////////////////////////////////////////////////////////////////
#include "cutlass/cutlass.h"
#include "cutlass/library/library.h"
#include "cutlass/library/manifest.h"
#include "library_internal.h"
#include "rank_k_operation.h"
///////////////////////////////////////////////////////////////////////////////////////////////////
"""
self.initialize_function_template = """
///////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace library {
///////////////////////////////////////////////////////////////////////////////////////////////////
void initialize_${configuration_name}(Manifest &manifest) {
"""
self.epilogue_template = """
}
///////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace library
} // namespace cutlass
///////////////////////////////////////////////////////////////////////////////////////////////////
"""
def __enter__(self):
self.configuration_file = open(self.configuration_path, "w")
self.configuration_file.write(self.header_template)
self.instance_definitions = []
self.instance_wrappers = []
self.operations = []
return self
def emit(self, operation):
emitter = self.instance_emitter[operation.rank_k_kind]()
self.operations.append(operation)
self.instance_definitions.append(emitter.emit(operation))
self.instance_wrappers.append(SubstituteTemplate(self.instance_template[operation.rank_k_kind], {
'configuration_name': self.configuration_name,
'operation_name': operation.procedural_name(),
'rank_k_kind': self.rank_k_kind_wrappers[operation.rank_k_kind],
'compile_guard_start': SubstituteTemplate(self.wmma_guard_start, {'sm_number': str(operation.arch)}) \
if operation.tile_description.math_instruction.opcode_class == OpcodeClass.WmmaTensorOp else "",
'compile_guard_end': "#endif" \
if operation.tile_description.math_instruction.opcode_class == OpcodeClass.WmmaTensorOp else ""
}))
def __exit__(self, exception_type, exception_value, traceback):
# Write instance definitions in top-level namespace
for instance_definition in self.instance_definitions:
self.configuration_file.write(instance_definition)
# Add wrapper objects within initialize() function
self.configuration_file.write(SubstituteTemplate(self.initialize_function_template, {
'configuration_name': self.configuration_name
}))
for instance_wrapper in self.instance_wrappers:
self.configuration_file.write(instance_wrapper)
self.configuration_file.write(self.epilogue_template)
self.configuration_file.close()
###################################################################################################
| warp-main | warp/native/cutlass/tools/library/scripts/rank_k_operation.py |
#################################################################################################
#
# Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
# System imports
import struct
import io
import ctypes
# CUDA Python import
from cuda import cuda
from cuda import nvrtc
# CUTLASS imports
from library import *
from gemm_operation import EmitGemmUniversalInstance
#################################################################################################
#
# CUTLASS Py Runtime Components
#
#################################################################################################
#
def MaxAlignment(fmt):
align = 1
for x in fmt:
align = max(align, struct.calcsize(x))
return align
#
def AlignedOffset(offset, align):
remainder = (offset % align)
if remainder:
offset += (align - remainder)
return offset
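# Worked example (illustrative): AlignedOffset(5, 4) == 8 and AlignedOffset(8, 4) == 8;
# the offset is rounded up to the next multiple of `align` only when misaligned.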
#
def PackInteger(host_workspace, offset, value):
  fmt = "i"
  # align first, then pack at the aligned offset (mirrors PackDevicePointer below);
  # the original packed at the unaligned offset while returning the aligned size
  offset = AlignedOffset(offset, 4)
  struct.pack_into(fmt, host_workspace, offset, value)
  return offset + struct.calcsize(fmt)
#
def PackDevicePointer(host_workspace, offset, value):
fmt = "P"
offset = AlignedOffset(offset, 8)
struct.pack_into(fmt, host_workspace, offset, value)
return offset + struct.calcsize(fmt)
#
def ceil_div(a, b):
return -(a // -b)
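# Worked example (illustrative): ceil_div(7, 2) == 4 since -(7 // -2) == -(-4) == 4,
# and ceil_div(8, 2) == 4 exactly; integer ceiling division without floating point.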
#################################################################################################
#
class PitchLinearCoord:
def __init__(self, contiguous, strided):
self.contiguous = contiguous
self.strided = strided
#
class GemmCoord:
def __init__(self, m = 1, n = 1, k = 1):
self.m = m
self.n = n
self.k = k
self.fmt = "iii"
#
def ceil_div(self, rhs):
return GemmCoord(ceil_div(self.m, rhs.m), ceil_div(self.n, rhs.n), ceil_div(self.k, rhs.k))
#
def size(self):
return struct.calcsize(self.fmt)
#
def alignment(self):
return MaxAlignment(self.fmt)
#
def pack_into(self, host_workspace, offset):
offset = AlignedOffset(offset, 4)
struct.pack_into(
self.fmt,
host_workspace,
offset,
self.m, self.n, self.k)
return offset + self.size()
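  # Usage sketch (illustrative): a GemmCoord packs as three 4-byte ints ("iii"), so
  #   buf = bytearray(GemmCoord().size())              # 12 bytes
  #   end = GemmCoord(1024, 512, 64).pack_into(buf, 0)
  # writes m, n, k at offset 0 and returns end == 12.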
#
class TensorRef:
def __init__(self, pointer = None, layout = 0):
self.pointer = pointer
self.layout = layout
def __str__(self):
return "(%x, %d)" % (self.pointer._ptr, self.layout)
#################################################################################################
#
class PredicatedTileAccessIteratorDesc:
  '''
  Static description of a predicated tile access iterator: element size, advance
  rank, threadblock shape, and per-thread iteration counts and deltas.
  '''
def __init__(
self,
element_size_bits,
advance_rank,
threadblock_shape,
threadmap_iterations,
threadmap_delta):
self.element_size_bits = element_size_bits
self.advance_rank = advance_rank
self.threadblock_shape = threadblock_shape
self.threadmap_iterations = threadmap_iterations
self.threadmap_delta = threadmap_delta
#
class PredicatedTileAccessIteratorParams:
  '''
  Packed device-side Params for a predicated tile access iterator: stride plus
  precomputed pointer increments.
  '''
#
def __init__(self, desc, label):
self.desc = desc
self.label = label
self.fmt = "qqqq"
#
def size(self):
return struct.calcsize(self.fmt)
#
def alignment(self):
return MaxAlignment(self.fmt)
#
def initialize(self, host_workspace, offset, stride):
offset = AlignedOffset(offset, self.alignment())
inc_strided = stride * \
self.desc.threadmap_delta.strided * \
self.desc.element_size_bits // 8
if self.desc.advance_rank:
inc_advance = self.desc.threadblock_shape.strided * \
stride * \
self.desc.element_size_bits // 8
else:
inc_advance = self.desc.threadblock_shape.contiguous * \
self.desc.element_size_bits // 8
inc_next = inc_advance - (self.desc.threadmap_iterations.strided - 1) * \
self.desc.threadmap_delta.strided * \
stride * \
self.desc.element_size_bits // 8
struct.pack_into(
self.fmt,
host_workspace,
offset,
stride, inc_strided, inc_next, inc_advance)
return offset + self.size()
#
#################################################################################################
#
class EpilogueTileDesc:
  '''
  Extents of an epilogue tile expressed as (column, row, group, cluster, tile).
  '''
def __init__(self, column, row, group, cluster, tile):
self.column = column
self.row = row
self.group = group
self.cluster = cluster
self.tile = tile
#
class EpilogueThreadMap:
  '''
  Thread map describing how epilogue threads traverse the output tile.
  '''
def __init__(self, threads, elements_per_access, element_size_bits, shape, iterations, delta, count):
self.threads = threads
self.elements_per_access = elements_per_access
self.element_size_bits = element_size_bits
self.shape = shape
self.iterations = iterations
self.delta = delta
self.count = count
#
class EpilogueTileIteratorParams:
  '''
  Packed device-side Params for an epilogue tile iterator: stride plus the
  row/group/cluster/tile increments and advances.
  '''
#
def __init__(self, desc, label):
self.desc = desc
self.label = label
self.fmt = "qqqqqqqq"
#
def size(self):
return struct.calcsize(self.fmt)
#
def alignment(self):
return MaxAlignment(self.fmt)
#
def initialize(self, host_workspace, offset, stride):
stride = stride * self.desc.element_size_bits // 8
offset = AlignedOffset(offset, self.alignment())
increment_row = stride * self.desc.delta.row
increment_group = stride * self.desc.delta.group \
- stride * self.desc.delta.row * (self.desc.iterations.row - 1)
increment_cluster = stride * self.desc.delta.cluster \
- stride * self.desc.delta.group * (self.desc.iterations.group - 1) \
- stride * self.desc.delta.row * (self.desc.iterations.row - 1)
advance_row = stride * self.desc.shape.row
advance_group = stride * \
(self.desc.shape.group - 1) * \
self.desc.shape.row * \
self.desc.count.row
advance_cluster = stride * \
self.desc.count.group * \
self.desc.shape.group * \
self.desc.count.row * \
self.desc.shape.row
advance_tile = stride * \
self.desc.shape.group * \
self.desc.shape.row * \
self.desc.shape.cluster * \
self.desc.shape.tile
struct.pack_into(
self.fmt, \
host_workspace, \
offset, \
stride, \
increment_row, increment_group, increment_cluster, \
advance_row, advance_group, advance_cluster, advance_tile)
return offset + self.size()
#
#################################################################################################
#
# Launch configuration
#
#################################################################################################
class LaunchConfiguration:
def __init__(self, grid = [1,1,1], block = [1,1,1], smem = 0):
self.grid = grid
self.block = block
self.shared_memory_capacity = smem
#################################################################################################
#
# Functors
#
#################################################################################################
#
class Functor:
def __init__(self):
self.decl = ''
self.definition = ''
self.fmt = ''
self.identifier = ''
#
def emit_declaration(self):
return self.decl
#
def emit_definition(self):
return self.definition
#
def size(self):
'''
Size of the packed Params structure
'''
return struct.calcsize(self.fmt)
#
def alignment(self):
return MaxAlignment(self.fmt)
#
def initialize(self, host_workspace, offset, arguments):
return offset + self.size()
#################################################################################################
#
class LinearCombinationFunctorArguments:
def __init__(self, alpha = 1.0, beta = 0.0):
self.alpha = alpha
self.beta = beta
self.alpha_ptr = 0
self.beta_ptr = 0
#
class LinearCombinationFunctor(Functor):
def __init__(self):
super().__init__()
self.decl = """
cutlass::epilogue::thread::LinearCombination<
float,
1,
float,
float
>"""
self.identifier = 'linear_combination'
self.fmt = "ffPP"
#
def size(self):
'''
Size of the packed Params structure
'''
return struct.calcsize(self.fmt)
#
def alignment(self):
return MaxAlignment(self.fmt)
#
def initialize(self, host_workspace, offset, arguments):
offset = AlignedOffset(offset, self.alignment())
struct.pack_into(
self.fmt,
host_workspace, offset,
arguments.alpha, arguments.beta, arguments.alpha_ptr, arguments.beta_ptr)
return offset + self.size()
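# Usage sketch (illustrative): the "ffPP" format packs (alpha, beta, alpha_ptr,
# beta_ptr), so the default arguments serialize to two floats and two null pointers:
#   buf = bytearray(LinearCombinationFunctor().size())   # 24 bytes on 64-bit builds
#   LinearCombinationFunctor().initialize(buf, 0, LinearCombinationFunctorArguments(2.0, 0.5))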
#################################################################################################
#
# Base class for an executable operation
#
#################################################################################################
#
class ExecutableOperation:
  '''
  Base class for an executable operation: wraps a compiled kernel and marshals
  its launch configuration and packed argument buffer.
  '''
def __init__(self, operation):
self.operation = operation
self.module = None
self.kernel = None
#
def name(self):
return self.operation.procedural_name()
#
def emit(self):
return ''
#
def can_implement(self, configuration, arguments):
return False
#
def get_host_workspace_size(self, arguments):
return 0
#
def get_device_workspace_size(self, arguments):
return 0
#
def plan(self, arguments):
return LaunchConfiguration()
#
def initialize(self, host_workspace, device_workspace, launch_config, arguments, stream = cuda.CUstream(0)):
raise NotImplementedError()
#
def run(self, host_workspace, device_workspace, launch_config, stream = cuda.CUstream(0)):
cArg = (ctypes.c_char * len(host_workspace)).from_buffer(host_workspace)
packed = (ctypes.c_void_p * 1)()
packed[0] = ctypes.addressof(cArg)
err, = cuda.cuLaunchKernel(
self.kernel,
launch_config.grid[0], launch_config.grid[1], launch_config.grid[2],
launch_config.block[0], launch_config.block[1], launch_config.block[2],
launch_config.shared_memory_capacity,
stream,
packed,
0)
return err
#################################################################################################
#
class GemmArguments:
  '''
  Arguments to a GEMM: problem size, A/B/C/D tensor references, and epilogue
  functor arguments.
  '''
def __init__(self):
self.problem_size = GemmCoord(0, 0, 0)
self.A = TensorRef()
self.B = TensorRef()
self.C = TensorRef()
self.D = TensorRef()
self.output_op = LinearCombinationFunctorArguments()
#
class ThreadblockSwizzle:
def __init__(self, threadblock_shape, log_threadblock_cohort = 0):
self.threadblock_shape = threadblock_shape
self.log_threadblock_cohort = log_threadblock_cohort
def grid_tiled_shape(self, problem_size):
return GemmCoord(
ceil_div(problem_size.m, self.threadblock_shape.m),
ceil_div(problem_size.n, self.threadblock_shape.n),
1)
#
class Gemm(ExecutableOperation):
'''
GEMM manages the CUTLASS runtime components
'''
#
def __init__(self, operation):
super().__init__(operation)
self.emitter = EmitGemmUniversalInstance('_type')
self.threadblock_swizzle = ThreadblockSwizzle(GemmCoord(128, 128, 8))
self.threads = 256
self.shared_memory_capacity = (32 << 10)
self.params_A = PredicatedTileAccessIteratorParams(
PredicatedTileAccessIteratorDesc(
32,
1,
PitchLinearCoord(128, 8),
PitchLinearCoord(1, 4),
PitchLinearCoord(1, 2)), 'A')
self.params_B = PredicatedTileAccessIteratorParams(
PredicatedTileAccessIteratorDesc(
32,
1,
PitchLinearCoord(128, 8),
PitchLinearCoord(1, 4),
PitchLinearCoord(1, 2)), 'B')
self.params_C = EpilogueTileIteratorParams(
EpilogueThreadMap(
256,
1,
32,
EpilogueTileDesc(128, 1, 4, 4, 1),
EpilogueTileDesc(4, 1, 2, 1, 1),
EpilogueTileDesc(32, 1, 8, 1, 1),
EpilogueTileDesc(1, 4, 2, 1, 8)), 'C')
self.params_D = EpilogueTileIteratorParams(
EpilogueThreadMap(
256,
1,
32,
EpilogueTileDesc(128, 1, 4, 4, 1),
EpilogueTileDesc(4, 1, 2, 1, 1),
EpilogueTileDesc(32, 1, 8, 1, 1),
EpilogueTileDesc(1, 4, 2, 1, 8)), 'D')
self.output_op = LinearCombinationFunctor()
#
def emit(self):
return self.emitter.emit(self.operation)
#
def can_implement(self, configuration, arguments):
pass
#
  def get_host_workspace_size(self, arguments):
    # size in bytes of the packed Params structure for this particular kernel configuration
    return 336
#
def get_device_workspace_size(self, arguments):
return 0
#
def plan(self, arguments):
grid = self.threadblock_swizzle.grid_tiled_shape(arguments.problem_size)
return LaunchConfiguration([grid.m, grid.n, grid.k], [self.threads, 1, 1], self.shared_memory_capacity)
#
def initialize(self, host_workspace, device_workspace, launch_config, arguments, stream = cuda.CUstream(0)):
offset = 0
# Compute intermediate results
swizzle_log_tile = 0
gemm_mode = 0
batch_count = 1
gemm_k_size = arguments.problem_size.k
# Pack into the host workspace buffer
offset = arguments.problem_size.pack_into(host_workspace, offset)
grid_tiled_shape = self.threadblock_swizzle.grid_tiled_shape(arguments.problem_size)
offset = grid_tiled_shape.pack_into(host_workspace, offset)
offset = PackInteger(host_workspace, offset, swizzle_log_tile)
offset = self.params_A.initialize(host_workspace, offset, arguments.A.layout)
offset = self.params_B.initialize(host_workspace, offset, arguments.B.layout)
offset = self.params_C.initialize(host_workspace, offset, arguments.C.layout)
offset = self.params_D.initialize(host_workspace, offset, arguments.D.layout)
offset = self.output_op.initialize(host_workspace, offset, arguments.output_op)
offset = PackInteger(host_workspace, offset, gemm_mode)
offset = PackInteger(host_workspace, offset, batch_count)
offset = PackInteger(host_workspace, offset, gemm_k_size)
offset = PackDevicePointer(host_workspace, offset, int(arguments.A.pointer))
offset = PackDevicePointer(host_workspace, offset, int(arguments.B.pointer))
offset = PackDevicePointer(host_workspace, offset, int(arguments.C.pointer))
offset = PackDevicePointer(host_workspace, offset, int(arguments.D.pointer))
return offset
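# End-to-end sketch (illustrative; assumes a loaded Module and valid device
# pointers in `arguments`): a caller typically drives a Gemm as
#
#   launch = gemm.plan(arguments)
#   host_workspace = bytearray(gemm.get_host_workspace_size(arguments))
#   gemm.initialize(host_workspace, None, launch, arguments)
#   err = gemm.run(host_workspace, None, launch)
#
# mirroring the Params-structure / kernel-launch split of the native CUTLASS API.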
#################################################################################################
#
# Module represents a compilation unit
#
#################################################################################################
#
class CompilationOptions:
'''
Compilation options.
'''
#
def __init__(self, architectures = [80], include_paths = []):
self.includes = []
self.include_paths = include_paths
self.flags = ['-std=c++11', '-default-device']
self.architectures = architectures
#
def get(self):
options = []
for flag in self.flags:
options.append(bytes(str.encode(flag)))
for incl in self.include_paths:
options.append(bytes(str.encode('--include-path=%s' % incl)))
arch_list = "-arch="
for idx, arch in enumerate(self.architectures):
if idx:
arch_list += ","
arch_list += "sm_%d" % arch
options.append(bytes(str.encode(arch_list)))
return options
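# Example (illustrative values): CompilationOptions(architectures=[80, 90],
# include_paths=['/opt/cutlass/include']).get() yields, in order,
#   [b'-std=c++11', b'-default-device',
#    b'--include-path=/opt/cutlass/include', b'-arch=sm_80,sm_90']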
IncludeTemplate = r'''#include "${include}"
'''
KernelTemplate = r'''
extern "C"
__global__ void
${operation_name}(${operation_name}${operation_suffix}::Params params) {
// Dynamic shared memory base pointer
extern __shared__ int SharedStorageBase[];
// Declare pointer to dynamic shared memory.
${operation_name}${operation_suffix}::SharedStorage *shared_storage =
reinterpret_cast<${operation_name}${operation_suffix}::SharedStorage *>(SharedStorageBase);
${operation_name}${operation_suffix} op;
op(params, *shared_storage);
}
'''
#
class Module:
def __init__(self, name, operations, compilation_options):
self.name = name
self.operations = operations
self.module = None
self.log = None
self.cubin_image = None
self.source_buffer = ''
#
# Emit source
#
self.emit_()
#
# Compile
#
self.compile_(compilation_options)
#
# Load module
#
self.load_()
# Done
return
# Emit a source buffer
def emit_(self):
# 1. Includes
includes = []
for operation in self.operations:
for incl in operation.emitter.includes:
if incl not in includes:
includes.append(incl)
for incl in includes:
self.source_buffer += SubstituteTemplate(IncludeTemplate, { 'include': incl} )
# 2. Operations
for operation in self.operations:
self.source_buffer += operation.emit()
values = {
'operation_name': operation.name(),
'operation_suffix': operation.emitter.operation_suffix
}
self.source_buffer += SubstituteTemplate(KernelTemplate, values)
# Done
return
# Compile with NVRTC
def compile_(self, compilation_options):
err, program = nvrtc.nvrtcCreateProgram(
str.encode(self.source_buffer),
bytes(str.encode(self.name)),
0, [], [])
if err != nvrtc.nvrtcResult.NVRTC_SUCCESS:
raise RuntimeError('NVRTC Error: {}'.format(err))
# Compile program
options = compilation_options.get()
err, = nvrtc.nvrtcCompileProgram(program, len(options), options)
if err != nvrtc.nvrtcResult.NVRTC_SUCCESS:
error_string = 'NVRTC Error: {}\n'.format(err)
# Get log from compilation
err, logSize = nvrtc.nvrtcGetProgramLogSize(program)
if err != nvrtc.nvrtcResult.NVRTC_SUCCESS:
raise RuntimeError('NVRTC Error: {}'.format(err))
self.log = b' ' * logSize
err, = nvrtc.nvrtcGetProgramLog(program, self.log)
if err != nvrtc.nvrtcResult.NVRTC_SUCCESS:
raise RuntimeError('NVRTC Error: {}'.format(err))
raise RuntimeError(error_string + self.log.decode() + self.source_buffer)
# Get data from compilation
err, dataSize = nvrtc.nvrtcGetCUBINSize(program)
if err != nvrtc.nvrtcResult.NVRTC_SUCCESS:
raise RuntimeError('NVRTC Error: {}'.format(err))
self.cubin_image = b' ' * dataSize
err, = nvrtc.nvrtcGetCUBIN(program, self.cubin_image)
if err != nvrtc.nvrtcResult.NVRTC_SUCCESS:
raise RuntimeError('NVRTC Error: {}'.format(err))
return
#
def load_(self):
# Load data as module data
err, self.module = cuda.cuModuleLoadData(self.cubin_image)
if err != cuda.CUresult.CUDA_SUCCESS:
raise RuntimeError('Cuda Error: {}'.format(err))
# Get functions
for operation in self.operations:
err, operation.kernel = cuda.cuModuleGetFunction(
self.module,
bytes(str.encode(operation.name())))
if err != cuda.CUresult.CUDA_SUCCESS:
raise RuntimeError('Cuda Error: {}'.format(err))
operation.module = self
return
#################################################################################################
#
# Manifest represents an 'owner' for modules and operations
#
#################################################################################################
#
class Manifest:
#
def __init__(self):
self.operations = {}
self.modules = []
#
def append_module(self, module):
'''
Appends a module and takes ownership of operations used to construct it.
'''
self.modules.append(module)
for operation in module.operations:
self.operations[operation.name()] = operation
#################################################################################################
| warp-main | warp/native/cutlass/tools/library/scripts/rt.py |
#
# \file conv2d_operation.py
#
# \brief Generates the CUTLASS Library's Conv2d operation instances
#
#
import enum
import os.path
import shutil
from library import *
###################################################################################################
#
class Conv2dOperation:
#
def __init__(self, conv_kind, iterator_algorithm, arch, tile_description, A, B, C, element_epilogue, \
stride_support, epilogue_functor = EpilogueFunctor.LinearCombination, swizzling_functor = SwizzlingFunctor.Identity1, \
group_mode = GroupMode.NoneGroup):
self.operation_kind = OperationKind.Conv2d
self.arch = arch
self.tile_description = tile_description
self.conv_kind = conv_kind
self.A = A
self.B = B
self.C = C
self.element_epilogue = element_epilogue
self.epilogue_functor = epilogue_functor
self.iterator_algorithm = iterator_algorithm
self.stride_support = stride_support
self.swizzling_functor = swizzling_functor
self.group_mode = group_mode
#
def is_complex(self):
complex_operators = [
MathOperation.multiply_add_complex,
MathOperation.multiply_add_complex_gaussian
]
return self.tile_description.math_instruction.math_operation in complex_operators
#
def accumulator_type(self):
accum = self.tile_description.math_instruction.element_accumulator
if self.is_complex():
return get_complex_from_real(accum)
return accum
#
def core_name(self):
''' The basic operation kind is prefixed with a letter indicating the accumulation type. '''
intermediate_type = ''
if self.tile_description.math_instruction.opcode_class == OpcodeClass.TensorOp:
inst_shape = "%d%d%d" % tuple(self.tile_description.math_instruction.instruction_shape)
if self.tile_description.math_instruction.element_a != self.A.element and \
self.tile_description.math_instruction.element_a != self.accumulator_type():
intermediate_type = DataTypeNames[self.tile_description.math_instruction.element_a]
else:
inst_shape = ''
return "%s%s%s%s_%s" % (ShortDataTypeNames[self.accumulator_type()], \
inst_shape, intermediate_type, ConvKindNames[self.conv_kind], IteratorAlgorithmNames[self.iterator_algorithm])
#
def extended_name(self):
''' Append data types if they differ from compute type. '''
if self.C.element != self.tile_description.math_instruction.element_accumulator and \
self.A.element != self.tile_description.math_instruction.element_accumulator:
extended_name = "${element_c}_${core_name}_${element_a}"
elif self.C.element == self.tile_description.math_instruction.element_accumulator and \
self.A.element != self.tile_description.math_instruction.element_accumulator:
extended_name = "${core_name}_${element_a}"
else:
extended_name = "${core_name}"
extended_name = SubstituteTemplate(extended_name, {
'element_a': DataTypeNames[self.A.element],
'element_c': DataTypeNames[self.C.element],
'core_name': self.core_name()
})
return extended_name
#
def layout_name(self):
return "%s" % (ShortLayoutTypeNames[self.A.layout])
#
def configuration_name(self):
''' The full procedural name indicates architecture, extended name, tile size, and layout. '''
opcode_class_name = OpcodeClassNames[self.tile_description.math_instruction.opcode_class]
threadblock = self.tile_description.procedural_name()
# grouped conv
if self.group_mode != GroupMode.NoneGroup:
group_conv_name = f"{GroupModeNames[self.group_mode]}_"
else:
group_conv_name = ""
if self.stride_support == StrideSupport.Unity:
configuration_name = "cutlass_${opcode_class}_${extended_name}_${threadblock}_${layout}_unity_stride_${group_conv_name}align${alignment}"
else:
configuration_name = "cutlass_${opcode_class}_${extended_name}_${threadblock}_${layout}_${group_conv_name}align${alignment}"
return SubstituteTemplate(
configuration_name,
{
'opcode_class': opcode_class_name,
'extended_name': self.extended_name(),
'threadblock': threadblock,
'layout': self.layout_name(),
'alignment': "%d" % self.A.alignment,
'group_conv_name': group_conv_name
}
)
#
def procedural_name(self):
''' The full procedural name indicates architecture, extended name, tile size, and layout. '''
return self.configuration_name()
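  # Example (illustrative): an optimized single-precision tensor-op fprop instance
  # might be named
  #   cutlass_tensorop_s1688fprop_optimized_128x128_16x3_nhwc_align4
  # encoding opcode class, accumulator type / conv kind / iterator algorithm,
  # threadblock tile, layout, and alignment in that order.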
###################################################################################################
#
# Emits single instances of a CUTLASS device-wide operator
#
###################################################################################################
class EmitConv2dInstance:
def __init__(self):
self.template = """
// Conv2d${conv_kind_name} ${iterator_algorithm_name} kernel instance "${operation_name}"
using ${operation_name}_base =
typename cutlass::conv::kernel::DefaultConv2d${conv_kind_name}<
${element_a},
${layout_a},
${element_b},
${layout_b},
${element_c},
${layout_c},
${element_accumulator},
${opcode_class},
${arch},
cutlass::gemm::GemmShape<${threadblock_shape_m}, ${threadblock_shape_n}, ${threadblock_shape_k}>,
cutlass::gemm::GemmShape<${warp_shape_m}, ${warp_shape_n}, ${warp_shape_k} >,
cutlass::gemm::GemmShape<${instruction_shape_m}, ${instruction_shape_n}, ${instruction_shape_k}>,
${epilogue_functor}<
${element_c},
${epilogue_vector_length},
${element_accumulator},
${element_epilogue}
>,
${swizzling_functor}, // cutlass::gemm::threadblock::GemmSplitKIdentityThreadblockSwizzle<>,
${stages},
${math_operator},
${iterator_algorithm},
${stride_support},
${align_a},
${align_b}
>::Kernel;
"""
self.template_group_conv = """
// Conv2d${conv_kind_name} ${iterator_algorithm_name} kernel instance "${operation_name}"
using ${operation_name}_base =
typename cutlass::conv::kernel::DefaultConv2dGroup${conv_kind_name}<
${element_a},
${layout_a},
${element_b},
${layout_b},
${element_c},
${layout_c},
${element_accumulator},
${opcode_class},
${arch},
cutlass::gemm::GemmShape<${threadblock_shape_m}, ${threadblock_shape_n}, ${threadblock_shape_k}>,
cutlass::gemm::GemmShape<${warp_shape_m}, ${warp_shape_n}, ${warp_shape_k} >,
cutlass::gemm::GemmShape<${instruction_shape_m}, ${instruction_shape_n}, ${instruction_shape_k}>,
${epilogue_functor}<
${element_c},
${epilogue_vector_length},
${element_accumulator},
${element_epilogue}
>,
${swizzling_functor}, // cutlass::gemm::threadblock::GemmSplitKIdentityThreadblockSwizzle<>,
${stages},
${math_operator},
${group_mode},
${iterator_algorithm},
${stride_support},
${align_a},
${align_b}
>::Kernel;
"""
self.template_depthwise_direct_conv = """
// Conv2d${conv_kind_name} ${iterator_algorithm_name} kernel instance "${operation_name}"
using ${operation_name}_base =
typename cutlass::conv::kernel::DefaultDepthwiseDirect2dConv${conv_kind_name}<
${element_a},
${layout_a},
${element_b},
${layout_b},
${element_c},
${layout_c},
${element_accumulator},
${opcode_class},
${arch},
cutlass::gemm::GemmShape<${threadblock_shape_m}, ${threadblock_shape_n}, ${threadblock_shape_k}>,
cutlass::conv::TensorNHWCShape<${threadblock_output_shape_n}, ${threadblock_output_shape_p}, ${threadblock_output_shape_q}, ${groups_per_cta}>,
cutlass::MatrixShape<${filter_shape_r}, ${filter_shape_s}>,
cutlass::gemm::GemmShape<${warp_shape_m}, ${warp_shape_n}, ${warp_shape_k}>,
cutlass::gemm::GemmShape<${instruction_shape_m}, ${instruction_shape_n}, ${instruction_shape_k}>,
${epilogue_functor}<
${element_c},
${epilogue_vector_length},
${element_accumulator},
${element_epilogue},
cutlass::epilogue::thread::ScaleType::OnlyAlphaScaling
>,
cutlass::conv::threadblock::DepthwiseDirect2dConvIdentityThreadblockSwizzle<
1,
${threadblock_output_shape_n},
${threadblock_output_shape_p},
${threadblock_output_shape_q}>,
${stages},
${math_operator},
${iterator_algorithm},
${stride_support},
cutlass::MatrixShape<${stride_r}, ${stride_s}>,
cutlass::MatrixShape<${dilation_r}, ${dilation_s}>
>::Kernel;
"""
def emit(self, operation):
    warp_shape = [operation.tile_description.threadblock_shape[idx] // operation.tile_description.warp_count[idx] for idx in range(3)]
epilogue_vector_length = int(min(operation.C.alignment * DataTypeSize[operation.C.element], 128) / DataTypeSize[operation.C.element])
values = {
'operation_name': operation.procedural_name(),
'conv_kind': ConvKindTag[operation.conv_kind],
'conv_kind_name': ConvKindNames[operation.conv_kind].capitalize(),
'element_a': DataTypeTag[operation.A.element],
'layout_a': LayoutTag[operation.A.layout],
'element_b': DataTypeTag[operation.B.element],
'layout_b': LayoutTag[operation.B.layout],
'element_c': DataTypeTag[operation.C.element],
'layout_c': LayoutTag[operation.C.layout],
'element_accumulator': DataTypeTag[operation.accumulator_type()],
'opcode_class': OpcodeClassTag[operation.tile_description.math_instruction.opcode_class],
'arch': "cutlass::arch::Sm%d" % operation.arch,
'threadblock_shape_m': str(operation.tile_description.threadblock_shape[0]),
'threadblock_shape_n': str(operation.tile_description.threadblock_shape[1]),
'threadblock_shape_k': str(operation.tile_description.threadblock_shape[2]),
'warp_shape_m': str(warp_shape[0]),
'warp_shape_n': str(warp_shape[1]),
'warp_shape_k': str(warp_shape[2]),
'instruction_shape_m': str(operation.tile_description.math_instruction.instruction_shape[0]),
'instruction_shape_n': str(operation.tile_description.math_instruction.instruction_shape[1]),
'instruction_shape_k': str(operation.tile_description.math_instruction.instruction_shape[2]),
'epilogue_vector_length': str(epilogue_vector_length),
'epilogue_functor': EpilogueFunctorTag[operation.epilogue_functor],
'element_epilogue': str(DataTypeTag[operation.element_epilogue]),
'swizzling_functor': SwizzlingFunctorTag[operation.swizzling_functor],
'stages': str(operation.tile_description.stages),
'iterator_algorithm': IteratorAlgorithmTag[operation.iterator_algorithm],
'iterator_algorithm_name': IteratorAlgorithmNames[operation.iterator_algorithm].capitalize(),
'stride_support': StrideSupportTag[operation.stride_support],
'math_operator': 'cutlass::arch::OpMultiplyAddComplex' if operation.is_complex() else \
MathOperationTag[operation.tile_description.math_instruction.math_operation],
'align_a': str(operation.A.alignment),
'align_b': str(operation.B.alignment),
}
if operation.group_mode == GroupMode.NoneGroup:
return SubstituteTemplate(self.template, values)
elif operation.group_mode == GroupMode.Depthwise:
values['group_mode'] = GroupModeTag[operation.group_mode]
# Setup other template params
values['threadblock_output_shape_n'] = str(operation.tile_description.threadblock_output_shape[0])
values['threadblock_output_shape_p'] = str(operation.tile_description.threadblock_output_shape[1])
values['threadblock_output_shape_q'] = str(operation.tile_description.threadblock_output_shape[2])
values['groups_per_cta'] = str(operation.tile_description.threadblock_output_shape[3])
values['filter_shape_r'] = str(operation.tile_description.filter_shape[0])
values['filter_shape_s'] = str(operation.tile_description.filter_shape[1])
values['stride_r'] = str(operation.tile_description.stride[0])
values['stride_s'] = str(operation.tile_description.stride[1])
values['dilation_r'] = str(operation.tile_description.dilation[0])
values['dilation_s'] = str(operation.tile_description.dilation[1])
return SubstituteTemplate(self.template_depthwise_direct_conv, values)
else:
values['group_mode'] = GroupModeTag[operation.group_mode]
return SubstituteTemplate(self.template_group_conv, values)
###################################################################################################
#
# Generator functions for all layouts
#
###################################################################################################
#
def GenerateConv2dTensorOp(manifest, tile_descriptions, min_cc, align = 128):
for tile in tile_descriptions:
for conv_kind in [ConvKind.Fprop, ConvKind.Dgrad, ConvKind.Wgrad]:
if conv_kind == ConvKind.Fprop or (tile.math_instruction.element_accumulator in [DataType.f16, DataType.f32]):
#
output_types = [tile.math_instruction.element_a, tile.math_instruction.element_accumulator] \
if DataTypeSize[tile.math_instruction.element_accumulator] == 32 \
else [tile.math_instruction.element_accumulator,]
for output_type in output_types:
A = TensorDescription(tile.math_instruction.element_a, LayoutType.TensorNHWC, int(align / DataTypeSize[tile.math_instruction.element_a]))
B = TensorDescription(tile.math_instruction.element_b, LayoutType.TensorNHWC, int(align / DataTypeSize[tile.math_instruction.element_b]))
C = TensorDescription(output_type, LayoutType.TensorNHWC, max(1, int(align / DataTypeSize[output_type])))
          # note (assumed defaults): Conv2dOperation takes (conv_kind, iterator_algorithm,
          # arch, tile_description, A, B, C, element_epilogue, stride_support, ...), so the
          # original call was missing two required arguments; supply the optimized iterator
          # and strided support explicitly here
          manifest.append(Conv2dOperation(conv_kind, IteratorAlgorithm.Optimized, min_cc, tile, A, B, C,
            tile.math_instruction.element_accumulator, StrideSupport.Strided))
###################################################################################################
#
# Emitter functions for all targets
#
###################################################################################################
class EmitConv2dConfigurationLibrary:
def __init__(self, operation_path, configuration_name):
self.configuration_name = configuration_name
self.configuration_path = os.path.join(operation_path, "%s.cu" % configuration_name)
self.instance_emitter = EmitConv2dInstance()
self.instance_template = """
${operation_instance}
// Derived class
struct ${operation_name} :
public ${operation_name}_base { };
///////////////////////////////////////////////////////////////////////////////////////////////////
"""
self.header_template = """
/*
Generated by conv2d_operation.py - Do not edit.
*/
///////////////////////////////////////////////////////////////////////////////////////////////////
#include "cutlass/cutlass.h"
#include "cutlass/library/library.h"
#include "cutlass/library/manifest.h"
#include "library_internal.h"
#include "conv2d_operation.h"
///////////////////////////////////////////////////////////////////////////////////////////////////
"""
self.configuration_header = """
namespace cutlass {
namespace library {
// Initialize all instances
void initialize_${configuration_name}(Manifest &manifest) {
"""
self.configuration_instance = """
using Operation_${operation_name} = cutlass::conv::device::ImplicitGemmConvolution<
${operation_name}>;
manifest.append(new cutlass::library::Conv2dOperation<
Operation_${operation_name}>(
"${operation_name}"));
"""
self.configuration_direct_conv_instance = """
using Operation_${operation_name} = cutlass::conv::device::DirectConvolution<
${operation_name}>;
manifest.append(new cutlass::library::DirectConv2dOperation<
Operation_${operation_name}>(
"${operation_name}"));
"""
self.configuration_epilogue = """
}
"""
self.epilogue_template = """
///////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace library
} // namespace cutlass
///////////////////////////////////////////////////////////////////////////////////////////////////
"""
#
def __enter__(self):
self.configuration_file = open(self.configuration_path, "w")
self.configuration_file.write(SubstituteTemplate(self.header_template, {
'configuration_name': self.configuration_name
}))
self.operations = []
return self
#
def emit(self, operation):
self.operations.append(operation)
self.configuration_file.write(SubstituteTemplate(self.instance_template, {
'configuration_name': self.configuration_name,
'operation_name': operation.procedural_name(),
'operation_instance': self.instance_emitter.emit(operation)
}))
#
def __exit__(self, exception_type, exception_value, traceback):
self.configuration_file.write(SubstituteTemplate(self.configuration_header, {
'configuration_name': self.configuration_name
}))
for operation in self.operations:
if operation.group_mode == GroupMode.Depthwise:
self.configuration_file.write(SubstituteTemplate(self.configuration_direct_conv_instance, {
'configuration_name': self.configuration_name,
'operation_name': operation.procedural_name()
}))
else:
self.configuration_file.write(SubstituteTemplate(self.configuration_instance, {
'configuration_name': self.configuration_name,
'operation_name': operation.procedural_name()
}))
self.configuration_file.write(self.configuration_epilogue)
self.configuration_file.write(self.epilogue_template)
self.configuration_file.close()
###################################################################################################
###################################################################################################
| warp-main | warp/native/cutlass/tools/library/scripts/conv2d_operation.py |
#
# \file generator.py
#
# \brief Generates the CUTLASS Library's instances
#
import enum
import os.path
import shutil
import argparse
from library import *
from manifest import *
from itertools import product
###################################################################################################
#
def CudaToolkitVersionSatisfies(semantic_ver_string, major, minor, patch = 0):
  # default to CUDA Toolkit 11.0.132 when no version string is provided
  cuda_version = [11, 0, 132]
# Update cuda_version based on parsed string
if semantic_ver_string != '':
for i, x in enumerate([int(x) for x in semantic_ver_string.split('.')]):
if i < len(cuda_version):
cuda_version[i] = x
else:
cuda_version.append(x)
return cuda_version >= [major, minor, patch]
###################################################################################################
###################################################################################################
#
def EpilogueAlignment(max_alignment, tile, epilogue_steps = 8):
''' Helper to compute the maximum alignment of the epilogue '''
def product(X, identity = 1):
result = identity
for item in X:
result *= item
return result
elements_per_thread = product(tile.threadblock_shape[:-1]) // product(tile.warp_count) // 32 // epilogue_steps
return min(max_alignment, elements_per_thread)
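# Worked example (illustrative): for a 128x128x8 threadblock with a 2x2x1 warp
# count and the default 8 epilogue steps, elements_per_thread =
# (128 * 128) // 4 // 32 // 8 == 16, so a requested max_alignment of 8 is kept
# while a max_alignment of 32 would be clamped down to 16.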
#
def CreateGemmOperator(manifest, layouts, tile_descriptions, data_type, \
alignment_constraints, complex_transforms = None, epilogue_functor = EpilogueFunctor.LinearCombination, \
swizzling_functor = SwizzlingFunctor.Identity8):
# Use StreamK decomposition for basic GEMMs
# swizzling_functor = SwizzlingFunctor.StreamK):
if complex_transforms is None:
complex_transforms = [(ComplexTransform.none, ComplexTransform.none),]
element_a, element_b, element_c, element_epilogue = data_type
operations = []
# by default, only generate the largest tile and largest alignment
if manifest.kernel_filter == '':
tile_descriptions = [tile_descriptions[0],]
alignment_constraints = [alignment_constraints[0],]
for layout in layouts:
for tile_description in tile_descriptions:
for alignment in alignment_constraints:
for complex_transform in complex_transforms:
alignment_c = min(8, alignment)
A = TensorDescription(element_a, layout[0], alignment, complex_transform[0])
B = TensorDescription(element_b, layout[1], alignment, complex_transform[1])
C = TensorDescription(element_c, layout[2], alignment_c)
new_operation = GemmOperation(GemmKind.Universal, tile_description.minimum_compute_capability, \
tile_description, A, B, C, element_epilogue, epilogue_functor, swizzling_functor)
manifest.append(new_operation)
operations.append(new_operation)
return operations
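# Usage sketch (illustrative; the tile descriptions and data types come from the
# architecture-specific Generate* functions elsewhere in this script):
#
#   layouts = [(LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor)]
#   data_type = [DataType.f16, DataType.f16, DataType.f16, DataType.f32]
#   CreateGemmOperator(manifest, layouts, tile_descriptions, data_type, [8, 4, 2])
#
# With no kernel filter set, only the first tile description and the largest
# alignment are instantiated (see the defaulting logic above).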
#
def CreateSparseGemmOperator(manifest, layouts, tile_descriptions, data_type, \
alignment_constraints, complex_transforms = None, epilogue_functor = EpilogueFunctor.LinearCombination, \
swizzling_functor = SwizzlingFunctor.Identity8):
if complex_transforms is None:
complex_transforms = [(ComplexTransform.none, ComplexTransform.none),]
element_a, element_b, element_c, element_epilogue = data_type
gemm_kinds = [GemmKind.Sparse]
operations = []
# by default, only generate the largest tile and largest alignment
if manifest.kernel_filter == '':
tile_descriptions = [tile_descriptions[0],]
alignment_constraints = [alignment_constraints[0],]
for layout in layouts:
for tile_description in tile_descriptions:
for alignment in alignment_constraints:
for complex_transform in complex_transforms:
alignment_c = min(8, alignment)
A = TensorDescription(element_a, layout[0], alignment, complex_transform[0])
B = TensorDescription(element_b, layout[1], alignment, complex_transform[1])
C = TensorDescription(element_c, layout[2], alignment_c)
new_operation = GemmOperation(GemmKind.Sparse, tile_description.minimum_compute_capability, \
tile_description, A, B, C, element_epilogue, epilogue_functor, swizzling_functor)
manifest.append(new_operation)
operations.append(new_operation)
return operations
#
def CreateGemmPlanarComplexOperator(manifest, layouts, tile_descriptions, data_type, \
alignment_constraints, complex_transforms):
if complex_transforms is None:
complex_transforms = [(ComplexTransform.none, ComplexTransform.none),]
element_a, element_b, element_c, element_epilogue = data_type
gemm_kinds = [GemmKind.PlanarComplex, GemmKind.PlanarComplexArray]
# by default, only generate the largest tile and largest alignment
if manifest.kernel_filter == '':
tile_descriptions = [tile_descriptions[0],]
alignment_constraints = [alignment_constraints[0],]
for gemm_kind in gemm_kinds:
for layout in layouts:
for tile_description in tile_descriptions:
for alignment in alignment_constraints:
for complex_transform in complex_transforms:
alignment_c = min(8, alignment)
A = TensorDescription(element_a, layout[0], alignment, complex_transform[0])
B = TensorDescription(element_b, layout[1], alignment, complex_transform[1])
C = TensorDescription(element_c, layout[2], alignment_c)
manifest.append(GemmOperation(gemm_kind, \
tile_description.minimum_compute_capability, \
tile_description, A, B, C, element_epilogue))
return
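# Note: unlike CreateGemmOperator, this helper emits both GemmKind.PlanarComplex
# and GemmKind.PlanarComplexArray entries per combination, and it does not
# collect the created operations for the caller.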
#
def CreateGemmGroupedOperator(manifest, layouts, tile_descriptions, data_type, \
alignment_constraints, complex_transforms = None, epilogue_functor = EpilogueFunctor.LinearCombination, \
swizzling_functor = SwizzlingFunctor.Identity8):
if complex_transforms is None:
complex_transforms = [(ComplexTransform.none, ComplexTransform.none),]
element_a, element_b, element_c, element_epilogue = data_type
operations = []
# by default, only generate the largest tile and largest alignment
if manifest.kernel_filter == '':
tile_descriptions = [tile_descriptions[0],]
alignment_constraints = [alignment_constraints[0],]
for layout in layouts:
for tile_description in tile_descriptions:
for alignment in alignment_constraints:
for complex_transform in complex_transforms:
alignment_c = min(8, alignment)
A = TensorDescription(element_a, layout[0], alignment, complex_transform[0])
B = TensorDescription(element_b, layout[1], alignment, complex_transform[1])
C = TensorDescription(element_c, layout[2], alignment_c)
new_operation = GroupedGemmOperation(GemmKind.Grouped, tile_description.minimum_compute_capability, \
tile_description, A, B, C, element_epilogue, epilogue_functor, swizzling_functor)
manifest.append(new_operation)
operations.append(new_operation)
return operations
#
def CreateRankKOperator(manifest, layouts, fill_modes, tile_descriptions, data_type, \
alignment_constraints, blas_mode, epilogue_functor = EpilogueFunctor.LinearCombination, \
swizzling_functor = SwizzlingFunctor.Identity8):
element_a, element_c, element_epilogue = data_type
operations = []
# by default, only generate the largest tile and largest alignment
if manifest.kernel_filter == '':
tile_descriptions = [tile_descriptions[0],]
alignment_constraints = [alignment_constraints[0],]
for layout in layouts:
for fill_mode in fill_modes:
for tile_description in tile_descriptions:
for alignment in alignment_constraints:
          # SYRK supported layouts (RowMajor, ColumnMajor) with no conjugation
complex_transform = ComplexTransform.none
# HERK supported layouts (RowMajor + conj, ColumnMajor)
if blas_mode == BlasMode.hermitian and layout[0] == LayoutType.RowMajor:
complex_transform = ComplexTransform.conj
alignment_c = 1 # Alignment only applies to A in SYRK
A = TensorDescription(element_a, layout[0], alignment, complex_transform)
C = SymmetricTensorDescription(element_c, layout[1], fill_mode, alignment_c)
# Rank-K update
new_operation = RankKOperation(RankKKind.Universal, tile_description.minimum_compute_capability, \
tile_description, A, C, element_epilogue, epilogue_functor, swizzling_functor, blas_mode)
manifest.append(new_operation)
operations.append(new_operation)
# Rank-2K update
new_operation = Rank2KOperation(RankKKind.Universal, tile_description.minimum_compute_capability, \
tile_description, A, C, element_epilogue, epilogue_functor, swizzling_functor, blas_mode)
manifest.append(new_operation)
operations.append(new_operation)
return operations
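# Note: each (layout, fill_mode, tile, alignment) combination above emits two
# manifest entries built from the same A and C descriptions: a rank-k update
# (SYRK/HERK) and a rank-2k update (SYR2K/HER2K).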
#
def CreateTrmmOperator(manifest, layouts, side_modes, fill_modes, diag_types, tile_descriptions, data_type, \
alignment_constraints, complex_transforms = None, epilogue_functor = EpilogueFunctor.LinearCombination, \
swizzling_functor = SwizzlingFunctor.Identity8):
if complex_transforms is None:
complex_transforms = [(ComplexTransform.none),]
element_a, element_b, element_c, element_epilogue = data_type
operations = []
# by default, only generate the largest tile and largest alignment
if manifest.kernel_filter == '':
tile_descriptions = [tile_descriptions[0],]
alignment_constraints = [alignment_constraints[0],]
for layout in layouts:
for side_mode in side_modes:
for fill_mode in fill_modes:
for diag_type in diag_types:
for tile_description in tile_descriptions:
for alignment in alignment_constraints:
for complex_transform in complex_transforms:
alignment_c = min(8, alignment)
A = TriangularTensorDescription(element_a, layout[0], side_mode, fill_mode, diag_type,
alignment, complex_transform)
B = TensorDescription(element_b, layout[1], alignment)
C = TensorDescription(element_c, layout[2], alignment_c)
new_operation = TrmmOperation(TrmmKind.Universal, tile_description.minimum_compute_capability, \
tile_description, A, B, C, element_epilogue, epilogue_functor, swizzling_functor)
manifest.append(new_operation)
operations.append(new_operation)
return operations
#
def CreateSymmOperator(manifest, layouts, side_modes, fill_modes, tile_descriptions, data_type, \
alignment_constraints, blas_mode, epilogue_functor = EpilogueFunctor.LinearCombination, \
swizzling_functor = SwizzlingFunctor.Identity8):
element_a, element_b, element_c, element_epilogue = data_type
operations = []
# by default, only generate the largest tile and largest alignment
if manifest.kernel_filter == '':
tile_descriptions = [tile_descriptions[0],]
alignment_constraints = [alignment_constraints[0],]
for layout in layouts:
for side_mode in side_modes:
for fill_mode in fill_modes:
for tile_description in tile_descriptions:
for alignment in alignment_constraints:
# SYMM supported layouts (RowMajor, ColumnMajor) with no conjugation
complex_transform = ComplexTransform.none
alignment_a = 1 # No vectorized access for the triangular matrix
alignment_c = min(8, alignment)
A = SymmetricTensorDescription(element_a, layout[0], fill_mode, alignment_a, complex_transform, side_mode)
# tensor A and B have same data type and layout
B = TensorDescription(element_b, layout[0], alignment)
C = TensorDescription(element_c, layout[1], alignment_c)
            # SYMM/HEMM update
            new_operation = SymmOperation(SymmKind.Universal, tile_description.minimum_compute_capability, \
              tile_description, A, B, C, element_epilogue, epilogue_functor, swizzling_functor, blas_mode)
            manifest.append(new_operation)
            operations.append(new_operation)
return operations
###########################################################################################################
#   ConvolutionOperator support variations
#        ____________________________________________________________________
#        | ConvolutionOperator |      Analytic         |      Optimized       |
#        |_____________________|_______________________|______________________|
#        |       Fprop         |     (strided)         |     (strided)        |
#        |       Dgrad         |     (strided, unity*) |     (strided, unity) |
#        |       Wgrad         |     (strided)         |     (strided)        |
#        |_____________________|_______________________|______________________|
#
# Note : Operators marked (*) are supported but not generated, to keep the instantiated kernel count low
###########################################################################################################
# Convolution for 2D operations
def CreateConv2dOperator(manifest, layout, tile_descriptions, data_type, alignment_constraints, \
conv_kinds = [ConvKind.Fprop, ConvKind.Dgrad, ConvKind.Wgrad], \
epilogue_functor = EpilogueFunctor.LinearCombination, swizzling_functor = SwizzlingFunctor.Identity4):
element_a, element_b, element_c, element_epilogue = data_type
  # iterator algorithms: Analytic is supported but disabled by default to limit kernel count
  #iterator_algorithms = [IteratorAlgorithm.Analytic, IteratorAlgorithm.Optimized]
  iterator_algorithms = [IteratorAlgorithm.Optimized]
# by default, only generate the largest tile size, largest alignment, and optimized iterator
if manifest.kernel_filter == '':
tile_descriptions = [tile_descriptions[0],]
alignment_constraints = [alignment_constraints[0],]
iterator_algorithms = [IteratorAlgorithm.Optimized]
operations = []
for tile in tile_descriptions:
for alignment in alignment_constraints:
alignment_c = min(8, alignment)
A = TensorDescription(element_a, layout[0], alignment)
B = TensorDescription(element_b, layout[1], alignment)
C = TensorDescription(element_c, layout[2], alignment_c)
swizzling_functor_ = swizzling_functor
#
# Conv2d Fprop
#
if ConvKind.Fprop in conv_kinds:
# Strided support for Analytic and Optimized Fprop
for iterator_algorithm in iterator_algorithms:
new_operations = [
            # Non-grouped kernel
Conv2dOperation(ConvKind.Fprop, iterator_algorithm, tile.minimum_compute_capability, tile,\
A, B, C, element_epilogue, StrideSupport.Strided, epilogue_functor, swizzling_functor_),
]
          # Instantiate grouped conv kernels
if tile.math_instruction.opcode_class == OpcodeClass.TensorOp and A.layout == LayoutType.TensorNHWC:
# SingleGroup kernel
new_operations.append(Conv2dOperation(ConvKind.Fprop, iterator_algorithm, tile.minimum_compute_capability, tile,\
A, B, C, element_epilogue, StrideSupport.Strided, epilogue_functor, swizzling_functor_, group_mode=GroupMode.SingleGroup))
# Analytic iterator supports MultipleGroup mode
if iterator_algorithm == IteratorAlgorithm.Analytic:
new_operations.append(Conv2dOperation(ConvKind.Fprop, iterator_algorithm, tile.minimum_compute_capability, tile,\
A, B, C, element_epilogue, StrideSupport.Strided, epilogue_functor, swizzling_functor_, group_mode=GroupMode.MultipleGroup))
for new_operation in new_operations:
manifest.append(new_operation)
operations.append(new_operation)
#
# Conv2d Dgrad
#
if ConvKind.Dgrad in conv_kinds:
# Unity stride for Analytic and Optimized Dgrad
for iterator_algorithm in iterator_algorithms:
new_operation = Conv2dOperation(ConvKind.Dgrad, iterator_algorithm, tile.minimum_compute_capability, tile,\
A, B, C, element_epilogue, StrideSupport.Unity, epilogue_functor, swizzling_functor_)
manifest.append(new_operation)
operations.append(new_operation)
# Strided support for Analytic Dgrad
# strided dgrad uses a special threadblock swizzle
# note that SwizzlingFunctor.StridedDgradHorizontal might be
# better for problem sizes with large activation channel count
swizzling_functor_strided_dgrad_ = SwizzlingFunctor.StridedDgradIdentity1
if IteratorAlgorithm.Analytic in iterator_algorithms:
new_operation = Conv2dOperation(ConvKind.Dgrad, IteratorAlgorithm.Analytic, tile.minimum_compute_capability, tile,\
A, B, C, element_epilogue, StrideSupport.Strided, epilogue_functor, swizzling_functor_strided_dgrad_)
manifest.append(new_operation)
operations.append(new_operation)
# Strided support for Optimized Dgrad
if IteratorAlgorithm.Optimized in iterator_algorithms:
new_operation = Conv2dOperation(ConvKind.Dgrad, IteratorAlgorithm.Optimized, tile.minimum_compute_capability, tile,\
A, B, C, element_epilogue, StrideSupport.Strided, epilogue_functor, swizzling_functor_strided_dgrad_)
manifest.append(new_operation)
operations.append(new_operation)
#
# Conv2d Wgrad
#
if ConvKind.Wgrad in conv_kinds:
# Strided support for Analytic and Optimized Wgrad
for iterator_algorithm in iterator_algorithms:
new_operation = Conv2dOperation(ConvKind.Wgrad, iterator_algorithm, tile.minimum_compute_capability, tile,\
A, B, C, element_epilogue, StrideSupport.Strided, epilogue_functor, swizzling_functor_)
manifest.append(new_operation)
operations.append(new_operation)
return operations
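# Illustrative call (hypothetical argument values; see the GenerateSM* helpers
# below for real invocations):
#
#   conv_layout = (LayoutType.TensorNHWC, LayoutType.TensorNHWC, LayoutType.TensorNHWC)
#   CreateConv2dOperator(manifest, conv_layout, tile_descriptions,
#                        [DataType.f16, DataType.f16, DataType.f32, DataType.f32],
#                        alignment_constraints=[8], conv_kinds=[ConvKind.Fprop])
#
# Restricting conv_kinds is how callers generate, e.g., Fprop-only int8 kernels.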
# Convolution for 2D operations specialized for a fixed channel count
def CreateConv2dFixedChannelsOperator(manifest, layout, tile_descriptions, data_type, channel_counts, \
conv_kinds = [ConvKind.Fprop, ConvKind.Dgrad, ConvKind.Wgrad], \
epilogue_functor = EpilogueFunctor.LinearCombination, swizzling_functor = SwizzlingFunctor.Identity4):
element_a, element_b, element_c, element_epilogue = data_type
  # iterator algorithm specialized for a fixed channel count
  iterator_algorithms = [IteratorAlgorithm.FixedChannels,]
# by default, only generate the largest tile size, largest alignment, and optimized iterator
if manifest.kernel_filter == '':
tile_descriptions = [tile_descriptions[0],]
channel_counts = [channel_counts[0],]
operations = []
for tile in tile_descriptions:
for channel_count in channel_counts:
alignment_c = EpilogueAlignment(channel_count, tile)
A = TensorDescription(element_a, layout[0], channel_count)
B = TensorDescription(element_b, layout[1], channel_count)
C = TensorDescription(element_c, layout[2], alignment_c)
swizzling_functor_ = swizzling_functor
#
# Conv2d Fprop
#
if ConvKind.Fprop in conv_kinds:
        # Strided support for fixed-channel Fprop
for iterator_algorithm in iterator_algorithms:
new_operation = Conv2dOperation(ConvKind.Fprop, iterator_algorithm, tile.minimum_compute_capability, tile,\
A, B, C, element_epilogue, StrideSupport.Strided, epilogue_functor, swizzling_functor_)
          manifest.append(new_operation)
          operations.append(new_operation)
  return operations
# Convolution for 2D operations specialized for few channels
def CreateConv2dFewChannelsOperator(manifest, layout, tile_descriptions, data_type, channel_counts, \
conv_kinds = [ConvKind.Fprop, ConvKind.Dgrad, ConvKind.Wgrad], \
epilogue_functor = EpilogueFunctor.LinearCombination, swizzling_functor = SwizzlingFunctor.Identity4):
element_a, element_b, element_c, element_epilogue = data_type
  # iterator algorithm specialized for small channel counts
  iterator_algorithms = [IteratorAlgorithm.FewChannels,]
# by default, only generate the largest tile size, largest alignment, and optimized iterator
if manifest.kernel_filter == '':
tile_descriptions = [tile_descriptions[0],]
channel_counts = [channel_counts[0],]
operations = []
for tile in tile_descriptions:
for channel_count in channel_counts:
alignment_c = EpilogueAlignment(channel_count, tile)
A = TensorDescription(element_a, layout[0], channel_count)
B = TensorDescription(element_b, layout[1], channel_count)
C = TensorDescription(element_c, layout[2], alignment_c)
swizzling_functor_ = swizzling_functor
#
# Conv2d Fprop
#
if ConvKind.Fprop in conv_kinds:
        # Strided support for few-channel Fprop
for iterator_algorithm in iterator_algorithms:
new_operation = Conv2dOperation(ConvKind.Fprop, iterator_algorithm, tile.minimum_compute_capability, tile,\
A, B, C, element_epilogue, StrideSupport.Strided, epilogue_functor, swizzling_functor_)
          manifest.append(new_operation)
          operations.append(new_operation)
  return operations
# Convolution for 3D operations
def CreateConv3dOperator(manifest, layout, tile_descriptions, data_type, alignment, \
conv_kinds = [ConvKind.Fprop, ConvKind.Dgrad, ConvKind.Wgrad], epilogue_functor = EpilogueFunctor.LinearCombination):
element_a, element_b, element_c, element_epilogue = data_type
  # C alignment is capped at 8 elements
  alignment_c = min(8, alignment)
  # iterator algorithms: Analytic is supported but disabled by default to limit kernel count
  # iterator_algorithms = [IteratorAlgorithm.Analytic, IteratorAlgorithm.Optimized]
  iterator_algorithms = [IteratorAlgorithm.Optimized]
# by default, only generate the largest tile size and optimized iterators
if manifest.kernel_filter == '':
tile_descriptions = [tile_descriptions[0],]
iterator_algorithms = [IteratorAlgorithm.Optimized]
operations = []
# All tile sizes for Conv3dFprop and Conv3dWgrad
for tile in tile_descriptions:
A = TensorDescription(element_a, layout, alignment)
B = TensorDescription(element_b, layout, alignment)
C = TensorDescription(element_c, layout, alignment_c)
#
# Conv3d Fprop
#
if ConvKind.Fprop in conv_kinds:
# Strided support for Analytic and Optimized Fprop
for iterator_algorithm in iterator_algorithms:
new_operation = Conv3dOperation(ConvKind.Fprop, iterator_algorithm, tile.minimum_compute_capability, tile,\
A, B, C, element_epilogue, StrideSupport.Strided)
manifest.append(new_operation)
operations.append(new_operation)
#
# Conv3d Wgrad
#
if ConvKind.Wgrad in conv_kinds:
# Strided support for Analytic and Optimized Wgrad
for iterator_algorithm in iterator_algorithms:
new_operation = Conv3dOperation(ConvKind.Wgrad, iterator_algorithm, tile.minimum_compute_capability, tile,\
A, B, C, element_epilogue, StrideSupport.Strided, epilogue_functor)
manifest.append(new_operation)
operations.append(new_operation)
# All tile sizes for Conv3dDgrad
for tile in tile_descriptions:
A = TensorDescription(element_a, layout, alignment)
B = TensorDescription(element_b, layout, alignment)
C = TensorDescription(element_c, layout, alignment_c)
#
# Conv3d Dgrad
#
if ConvKind.Dgrad in conv_kinds:
# Unity stride for Optimized Dgrad
new_operation = Conv3dOperation(ConvKind.Dgrad, IteratorAlgorithm.Optimized, tile.minimum_compute_capability, tile,\
A, B, C, element_epilogue, StrideSupport.Unity, epilogue_functor)
manifest.append(new_operation)
operations.append(new_operation)
# Strided support for Analytic Dgrad
# Conv3dDgrad has a naive strided support which does not cut down redundant MMAs
new_operation = Conv3dOperation(ConvKind.Dgrad, IteratorAlgorithm.Analytic, tile.minimum_compute_capability, tile,\
A, B, C, element_epilogue, StrideSupport.Strided, epilogue_functor)
manifest.append(new_operation)
operations.append(new_operation)
return operations
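# Note: Conv3d Dgrad is generated in its own loop above because the optimized
# iterator only supports unit stride; strided dgrad falls back to the analytic
# iterator, which does not prune redundant MMAs.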
# Depthwise 2D convolution
def CreateDepthwiseConv2dOperator(manifest, layout, tile_descriptions, data_type, alignment_constraints, \
conv_kinds = [ConvKind.Fprop, ConvKind.Dgrad, ConvKind.Wgrad], \
epilogue_functor = EpilogueFunctor.LinearCombination, swizzling_functor = SwizzlingFunctor.Identity4):
element_a, element_b, element_c, element_epilogue = data_type
# iterator algorithm (FixedStrideDilation, Optimized)
iterator_algorithms = [IteratorAlgorithm.FixedStrideDilation, IteratorAlgorithm.Optimized]
# by default, only generate the largest tile size, largest alignment, and optimized iterator
if manifest.kernel_filter == '':
tile_descriptions = [tile_descriptions[0],]
alignment_constraints = [alignment_constraints[0],]
operations = []
for tile in tile_descriptions:
for alignment in alignment_constraints:
alignment_c = min(8, alignment)
A = TensorDescription(element_a, layout[0], alignment)
B = TensorDescription(element_b, layout[1], alignment)
C = TensorDescription(element_c, layout[2], alignment_c)
swizzling_functor_ = swizzling_functor
if ConvKind.Fprop in conv_kinds:
        # Strided support for Optimized and FixedStrideDilation depthwise conv
for iterator_algorithm in iterator_algorithms:
stride_support = StrideSupport.Strided
if iterator_algorithm == IteratorAlgorithm.FixedStrideDilation:
if tile.stride == [-1, -1] or tile.dilation == [-1,-1]:
continue
stride_support = StrideSupport.Fixed
if iterator_algorithm == IteratorAlgorithm.Optimized:
if tile.stride != [-1, -1] or tile.dilation != [-1,-1]:
continue
new_operation = Conv2dOperation(ConvKind.Fprop,
iterator_algorithm,
tile.minimum_compute_capability,
tile,
A, B, C,
element_epilogue,
stride_support,
epilogue_functor,
swizzling_functor_,
group_mode=GroupMode.Depthwise)
manifest.append(new_operation)
operations.append(new_operation)
return operations
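# Note on the loop above: a stride or dilation of [-1, -1] acts as a wildcard.
# Wildcard tiles are only emitted through IteratorAlgorithm.Optimized
# (StrideSupport.Strided), while tiles with concrete stride/dilation are only
# emitted through IteratorAlgorithm.FixedStrideDilation (StrideSupport.Fixed);
# the two `continue` branches enforce that split.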
###################################################################################################
###################################################################################################
#
def GenerateSM50_Simt(manifest, cuda_version):
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.ColumnMajor, LayoutType.RowMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.RowMajor, LayoutType.ColumnMajor),
]
math_instructions = [
MathInstruction( \
[1, 1, 1], \
DataType.f32, DataType.f32, DataType.f32, \
OpcodeClass.Simt, \
MathOperation.multiply_add),
MathInstruction( \
[1, 1, 1], \
DataType.f64, DataType.f64, DataType.f64, \
OpcodeClass.Simt, \
MathOperation.multiply_add),
]
min_cc = 50
max_cc = 1024
alignment_constraints = [1,]
for math_inst in math_instructions:
tile_descriptions = [
TileDescription([128, 128, 8], 2, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 64, 8], 2, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 128, 8], 2, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 64, 8], 2, [2, 1, 1], math_inst, min_cc, max_cc),
TileDescription([128, 32, 8], 2, [2, 1, 1], math_inst, min_cc, max_cc),
TileDescription([ 32, 128, 8], 2, [1, 2, 1], math_inst, min_cc, max_cc),
]
data_type = [
math_inst.element_a,
math_inst.element_b,
math_inst.element_accumulator,
math_inst.element_accumulator,
]
CreateGemmOperator(manifest, layouts, tile_descriptions, \
data_type, alignment_constraints)
if math_inst.element_a == DataType.f32:
conv_layout = (LayoutType.TensorNHWC, LayoutType.TensorNHWC, LayoutType.TensorNHWC)
CreateConv2dOperator(manifest, conv_layout, tile_descriptions, data_type, alignment_constraints)
#
#
def GenerateSM50_Simt_complex(manifest, cuda_version):
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.ColumnMajor, LayoutType.RowMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.RowMajor, LayoutType.ColumnMajor),
]
math_instructions = [
MathInstruction( \
[1, 1, 1], \
DataType.f32, DataType.f32, DataType.f32, \
OpcodeClass.Simt, \
MathOperation.multiply_add_complex),
]
min_cc = 50
max_cc = 1024
alignment_constraints = [1,]
for math_inst in math_instructions:
tile_descriptions = [
TileDescription([128, 64, 8], 2, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 128, 8], 2, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 64, 8], 2, [2, 1, 1], math_inst, min_cc, max_cc),
TileDescription([128, 32, 8], 2, [2, 1, 1], math_inst, min_cc, max_cc),
TileDescription([ 32, 128, 8], 2, [1, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 8], 2, [4, 2, 1], math_inst, min_cc, max_cc),
]
data_type = [
DataType.cf32,
DataType.cf32,
DataType.cf32,
DataType.cf32,
]
CreateGemmOperator(manifest, layouts, tile_descriptions, \
data_type, alignment_constraints)
conv_layout = (LayoutType.TensorNHWC, LayoutType.TensorNHWC, LayoutType.TensorNHWC)
CreateConv2dOperator(manifest, conv_layout, tile_descriptions, data_type, alignment_constraints)
#
#
def GenerateSM50(manifest, cuda_version):
GenerateSM50_Simt(manifest, cuda_version)
GenerateSM50_Simt_complex(manifest, cuda_version)
###################################################################################################
###################################################################################################
#
def GenerateSM60_Simt(manifest, cuda_version):
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.ColumnMajor, LayoutType.RowMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.RowMajor, LayoutType.ColumnMajor),
]
math_instructions = [
MathInstruction( \
[1, 1, 1], \
DataType.f16, DataType.f16, DataType.f16, \
OpcodeClass.Simt, \
MathOperation.multiply_add),
]
min_cc = 60
max_cc = 1024
alignment_constraints = [1,]
for math_inst in math_instructions:
tile_descriptions = [
TileDescription([256, 128, 8], 2, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 256, 8], 2, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 8], 2, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 64, 8], 2, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 128, 8], 2, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 64, 8], 2, [2, 1, 1], math_inst, min_cc, max_cc),
TileDescription([128, 32, 8], 2, [2, 1, 1], math_inst, min_cc, max_cc),
TileDescription([ 32, 128, 8], 2, [1, 2, 1], math_inst, min_cc, max_cc),
]
data_type = [
math_inst.element_a,
math_inst.element_b,
math_inst.element_accumulator,
math_inst.element_accumulator,
]
CreateGemmOperator(manifest, layouts, tile_descriptions, \
data_type, alignment_constraints)
#
def GenerateSM60_Simt_DepthwiseConv2d(manifest, cuda_version):
math_instructions = [
MathInstruction( \
[1, 1, 1], \
DataType.f16, DataType.f16, DataType.f16, \
OpcodeClass.Simt, \
MathOperation.multiply_add),
]
min_cc = 60
max_cc = 1024
alignment_constraints = [8,]
filter_3x3 = [3, 3]
filter_5x5 = [5, 5]
  # [stride_h, stride_w]; [-1, -1] is a wildcard matching any stride
  strides = [[-1,-1], [1, 1], [2, 2]]
  # [dilation_h, dilation_w]; [-1, -1] is a wildcard matching any dilation
  dilations = [[-1,-1], [1, 1], [2, 2]]
  # groups per threadblock
g16 = 16
g32 = 32
g64 = 64
  # output shape [N, P, Q] per threadblock
npq_1x4x4 = [1, 4, 4]
npq_1x8x8 = [1, 8, 8]
npq_1x10x10 = [1, 10, 10]
tile_descriptions = []
for math_inst in math_instructions:
for stride, dilation in product(strides, dilations):
tile_descriptions.extend([
        # 3x3 filter: threadblock output [N, P, Q, G], filter size, stages, stride, dilation, warp count
Direct2dConvFixedStrideDilationTileDescription(npq_1x8x8+[g32], filter_3x3, 3, stride, dilation,[4, 1, 1],math_inst, min_cc, max_cc),
Direct2dConvFixedStrideDilationTileDescription(npq_1x8x8+[g64], filter_3x3, 3, stride, dilation,[4, 1, 1],math_inst, min_cc, max_cc),
Direct2dConvFixedStrideDilationTileDescription(npq_1x8x8+[g16], filter_3x3, 3, stride, dilation,[4, 1, 1],math_inst, min_cc, max_cc),
Direct2dConvFixedStrideDilationTileDescription(npq_1x10x10+[g64], filter_3x3, 2, stride, dilation,[4, 1, 1],math_inst, min_cc, max_cc),
Direct2dConvFixedStrideDilationTileDescription(npq_1x4x4+[g32], filter_3x3, 4, stride, dilation, [4, 1, 1], math_inst, min_cc, max_cc),
Direct2dConvFixedStrideDilationTileDescription(npq_1x4x4+[g64], filter_3x3, 4, stride, dilation,[4, 1, 1], math_inst, min_cc, max_cc),
Direct2dConvFixedStrideDilationTileDescription(npq_1x4x4+[g16], filter_3x3, 4, stride, dilation, [4, 1, 1], math_inst, min_cc, max_cc),
        # 5x5 filter: threadblock output [N, P, Q, G], filter size, stages, stride, dilation, warp count
Direct2dConvFixedStrideDilationTileDescription(npq_1x8x8+[g32], filter_5x5, 3, stride, dilation,[4, 1, 1],math_inst, min_cc, max_cc),
Direct2dConvFixedStrideDilationTileDescription(npq_1x8x8+[g64], filter_5x5, 3, stride, dilation,[4, 1, 1],math_inst, min_cc, max_cc),
Direct2dConvFixedStrideDilationTileDescription(npq_1x8x8+[g16], filter_5x5, 3, stride, dilation,[4, 1, 1],math_inst, min_cc, max_cc),
Direct2dConvFixedStrideDilationTileDescription(npq_1x10x10+[g64], filter_5x5, 2, stride, dilation,[4, 1, 1],math_inst, min_cc, max_cc),
Direct2dConvFixedStrideDilationTileDescription(npq_1x4x4+[g32], filter_5x5, 4, stride, dilation,[4, 1, 1],math_inst, min_cc, max_cc),
Direct2dConvFixedStrideDilationTileDescription(npq_1x4x4+[g64], filter_5x5, 4, stride, dilation,[4, 1, 1],math_inst, min_cc, max_cc),
Direct2dConvFixedStrideDilationTileDescription(npq_1x4x4+[g16], filter_5x5, 4, stride, dilation,[4, 1, 1],math_inst, min_cc, max_cc)
])
data_type = [
math_inst.element_a,
math_inst.element_b,
math_inst.element_accumulator,
math_inst.element_accumulator,
]
conv_layout = (LayoutType.TensorNHWC, LayoutType.TensorNHWC, LayoutType.TensorNHWC)
CreateDepthwiseConv2dOperator(manifest, conv_layout, tile_descriptions, data_type, alignment_constraints)
#
#
def GenerateSM60(manifest, cuda_version):
GenerateSM60_Simt(manifest, cuda_version)
GenerateSM60_Simt_DepthwiseConv2d(manifest, cuda_version)
###################################################################################################
###################################################################################################
#
def GenerateSM61_Simt(manifest, cuda_version):
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.ColumnMajor, LayoutType.RowMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.RowMajor, LayoutType.ColumnMajor),
]
math_instructions = [
MathInstruction( \
[1, 1, 4], \
DataType.s8, DataType.s8, DataType.s32, \
OpcodeClass.Simt, \
MathOperation.multiply_add),
]
min_cc = 61
max_cc = 1024
alignment_constraints = [1,]
for math_inst in math_instructions:
tile_descriptions = [
TileDescription([128, 128, 32], 2, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 64, 32], 2, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 128, 32], 2, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 64, 32], 2, [2, 1, 1], math_inst, min_cc, max_cc),
TileDescription([128, 32, 32], 2, [2, 1, 1], math_inst, min_cc, max_cc),
TileDescription([ 32, 128, 32], 2, [1, 2, 1], math_inst, min_cc, max_cc),
]
data_type = [
math_inst.element_a,
math_inst.element_b,
math_inst.element_accumulator,
math_inst.element_accumulator,
]
data_type_mixed = [
math_inst.element_a,
math_inst.element_b,
math_inst.element_a,
math_inst.element_accumulator,
]
CreateGemmOperator(manifest, layouts, tile_descriptions, \
data_type, alignment_constraints)
CreateGemmOperator(manifest, layouts, tile_descriptions, \
data_type_mixed, alignment_constraints)
#
#
def GenerateSM61(manifest, cuda_version):
GenerateSM61_Simt(manifest, cuda_version)
###################################################################################################
###################################################################################################
#
def GenerateSM70_TensorOp_884(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 10, 1):
return
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.ColumnMajor, LayoutType.RowMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.RowMajor, LayoutType.ColumnMajor),
]
math_instructions = [
MathInstruction( \
[8, 8, 4], \
DataType.f16, DataType.f16, DataType.f32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add),
MathInstruction( \
[8, 8, 4], \
DataType.f16, DataType.f16, DataType.f16, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add),
]
min_cc = 70
max_cc = 75
alignment_constraints = [8, 4, 2, 1]
for math_inst in math_instructions:
tile_descriptions = [
TileDescription([256, 128, 32], 2, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 256, 32], 2, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 32], 2, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([256, 64, 32], 2, [4, 1, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 256, 32], 2, [1, 4, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 128, 32], 2, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 64, 32], 2, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 64, 32], 2, [2, 2, 1], math_inst, min_cc, max_cc),
]
data_type = [
math_inst.element_a,
math_inst.element_b,
math_inst.element_accumulator,
math_inst.element_accumulator,
]
CreateGemmOperator(manifest, layouts, tile_descriptions, \
data_type, alignment_constraints)
conv_layout = (LayoutType.TensorNHWC, LayoutType.TensorNHWC, LayoutType.TensorNHWC)
CreateConv2dOperator(manifest, conv_layout, tile_descriptions, data_type, alignment_constraints)
# Avoid emitting two kernels if the accumulator type does not differ from the input type (e.g. F16 accumulation)
if math_inst.element_a != math_inst.element_accumulator:
data_type_mixed = [
math_inst.element_a,
math_inst.element_b,
math_inst.element_a,
math_inst.element_accumulator,
]
CreateGemmOperator(manifest, layouts, tile_descriptions, \
data_type_mixed, alignment_constraints)
CreateConv2dOperator(manifest, conv_layout, tile_descriptions, data_type_mixed, alignment_constraints)
#
def GenerateSM70_PlanarComplexTensorOp_884(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 10, 1):
return
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.ColumnMajor, LayoutType.RowMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.RowMajor, LayoutType.ColumnMajor),
]
complex_transforms = [
(ComplexTransform.none, ComplexTransform.none),
(ComplexTransform.conj, ComplexTransform.none),
(ComplexTransform.none, ComplexTransform.conj),
(ComplexTransform.conj, ComplexTransform.conj)
]
math_instructions = [
MathInstruction( \
[8, 8, 4], \
DataType.f16, DataType.f16, DataType.f32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add),
MathInstruction( \
[8, 8, 4], \
DataType.f16, DataType.f16, DataType.f16, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add),
]
min_cc = 70
max_cc = 75
alignment_constraints = [8, 2, 1]
for math_inst in math_instructions:
tile_descriptions = [
TileDescription([ 64, 64, 32], 2, [2, 2, 1], math_inst, min_cc, max_cc),
]
data_type = [
math_inst.element_a,
math_inst.element_b,
math_inst.element_accumulator,
math_inst.element_accumulator,
]
CreateGemmPlanarComplexOperator(manifest, layouts, tile_descriptions, \
data_type, alignment_constraints, complex_transforms)
# Avoid emitting two kernels if the accumulator type does not differ from the input type (e.g. F16 accumulation)
if math_inst.element_a != math_inst.element_accumulator:
data_type_mixed = [
math_inst.element_a,
math_inst.element_b,
math_inst.element_a,
math_inst.element_accumulator,
]
CreateGemmPlanarComplexOperator(manifest, layouts, tile_descriptions, \
data_type_mixed, alignment_constraints, complex_transforms)
#
def GenerateSM70_WmmaTensorOp_161616(manifest, cuda_version):
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.ColumnMajor, LayoutType.RowMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.RowMajor, LayoutType.ColumnMajor),
]
math_instructions = [
MathInstruction( \
[16, 16, 16], \
DataType.f16, DataType.f16, DataType.f32, \
OpcodeClass.WmmaTensorOp, \
MathOperation.multiply_add),
MathInstruction( \
[16, 16, 16], \
DataType.f16, DataType.f16, DataType.f16, \
OpcodeClass.WmmaTensorOp, \
MathOperation.multiply_add),
]
min_cc = 70
max_cc = 1024
alignment_constraints = [8,]
for math_inst in math_instructions:
tile_descriptions = [
TileDescription([128, 128, 32], 2, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 128, 32], 2, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 64, 32], 2, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 64, 32], 2, [2, 2, 1], math_inst, min_cc, max_cc),
]
data_type = [
math_inst.element_a,
math_inst.element_b,
math_inst.element_accumulator,
math_inst.element_accumulator,
]
CreateGemmOperator(manifest, layouts, tile_descriptions, \
data_type, alignment_constraints)
# Avoid emitting two kernels if the accumulator type does not differ from the input type (e.g. F16 accumulation)
if math_inst.element_a != math_inst.element_accumulator:
data_type_mixed = [
math_inst.element_a,
math_inst.element_b,
math_inst.element_a,
math_inst.element_accumulator,
]
CreateGemmOperator(manifest, layouts, tile_descriptions, \
data_type_mixed, alignment_constraints)
#
##################################################################################################
#
def GenerateSM70(manifest, cuda_version):
GenerateSM70_TensorOp_884(manifest, cuda_version)
GenerateSM70_PlanarComplexTensorOp_884(manifest, cuda_version)
# To limit build size, WMMA GEMMs are disabled for now.
#
#GenerateSM70_WmmaTensorOp_161616(manifest, cuda_version)
###################################################################################################
###################################################################################################
#
def GenerateSM75_TensorOp_1688_FewChannels(manifest, cuda_version, math_inst):
min_cc = 75
max_cc = 1024
tile_descriptions = [
TileDescription([128, 64, 32], 2, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([256, 64, 32], 2, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 32], 2, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 256, 32], 2, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 128, 32], 2, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 64, 32], 2, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 128, 64], 2, [2, 2, 2], math_inst, min_cc, max_cc),
]
data_type = [
math_inst.element_a,
math_inst.element_b,
math_inst.element_accumulator,
math_inst.element_accumulator,
]
conv_layout = (LayoutType.TensorNHWC, LayoutType.TensorNHWC, LayoutType.TensorNHWC)
CreateConv2dFixedChannelsOperator(manifest, conv_layout, tile_descriptions, data_type, [4, 8])
CreateConv2dFewChannelsOperator(manifest, conv_layout, tile_descriptions, data_type, [1, 2, 4])
# Avoid emitting two kernels if the accumulator type does not differ from the input type (e.g. F16 accumulation)
if math_inst.element_a != math_inst.element_accumulator:
data_type_mixed = [
math_inst.element_a,
math_inst.element_b,
math_inst.element_a,
math_inst.element_accumulator,
]
CreateConv2dFixedChannelsOperator(manifest, conv_layout, tile_descriptions, data_type_mixed, [4, 8])
CreateConv2dFewChannelsOperator(manifest, conv_layout, tile_descriptions, data_type_mixed, [1, 2, 4])
#
def GenerateSM75_TensorOp_1688(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 10, 2):
return
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.ColumnMajor, LayoutType.RowMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.RowMajor, LayoutType.ColumnMajor),
]
math_instructions = [
MathInstruction( \
[16, 8, 8], \
DataType.f16, DataType.f16, DataType.f32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add),
MathInstruction( \
[16, 8, 8], \
DataType.f16, DataType.f16, DataType.f16, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add),
]
min_cc = 75
max_cc = 1024
alignment_constraints = [8, 4, 2, 1]
for math_inst in math_instructions:
tile_descriptions = [
TileDescription([256, 128, 32], 2, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 256, 32], 2, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 32], 2, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 256, 32], 2, [1, 4, 1], math_inst, min_cc, max_cc),
TileDescription([256, 64, 32], 2, [4, 1, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 128, 32], 2, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 64, 32], 2, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 64, 32], 2, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 128, 64], 2, [1, 2, 2], math_inst, min_cc, max_cc),
]
data_type = [
math_inst.element_a,
math_inst.element_b,
math_inst.element_accumulator,
math_inst.element_accumulator,
]
CreateGemmOperator(manifest, layouts, tile_descriptions, \
data_type, alignment_constraints)
conv_layout = (LayoutType.TensorNHWC, LayoutType.TensorNHWC, LayoutType.TensorNHWC)
CreateConv2dOperator(manifest, conv_layout, tile_descriptions, data_type, alignment_constraints)
# Avoid emitting two kernels if the accumulator type does not differ from the input type (e.g. F16 accumulation)
if math_inst.element_a != math_inst.element_accumulator:
data_type_mixed = [
math_inst.element_a,
math_inst.element_b,
math_inst.element_a,
math_inst.element_accumulator,
]
CreateGemmOperator(manifest, layouts, tile_descriptions, \
data_type_mixed, alignment_constraints)
CreateConv2dOperator(manifest, conv_layout, tile_descriptions, data_type_mixed, alignment_constraints)
# Separate generator for 'few channels' specializations
GenerateSM75_TensorOp_1688_FewChannels(manifest, cuda_version, math_inst)
#
#
def GenerateSM75_PlanarComplexTensorOp_1688(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 10, 2):
return
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.ColumnMajor, LayoutType.RowMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.RowMajor, LayoutType.ColumnMajor),
]
complex_transforms = [
(ComplexTransform.none, ComplexTransform.none),
(ComplexTransform.conj, ComplexTransform.none),
(ComplexTransform.none, ComplexTransform.conj),
(ComplexTransform.conj, ComplexTransform.conj)
]
math_instructions = [
MathInstruction( \
[16, 8, 8], \
DataType.f16, DataType.f16, DataType.f32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add),
MathInstruction( \
[16, 8, 8], \
DataType.f16, DataType.f16, DataType.f16, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add),
]
min_cc = 75
max_cc = 1024
alignment_constraints = [8, 2, 1]
for math_inst in math_instructions:
tile_descriptions = [
TileDescription([ 64, 128, 32], 2, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([128, 64, 32], 2, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 64, 32], 2, [2, 2, 1], math_inst, min_cc, max_cc),
]
data_type = [
math_inst.element_a,
math_inst.element_b,
math_inst.element_accumulator,
math_inst.element_accumulator,
]
CreateGemmPlanarComplexOperator(manifest, layouts, tile_descriptions, \
data_type, alignment_constraints, complex_transforms)
# Avoid emitting two kernels if the accumulator type does not differ from the input type (e.g. F16 accumulation)
if math_inst.element_a != math_inst.element_accumulator:
data_type_mixed = [
math_inst.element_a,
math_inst.element_b,
math_inst.element_a,
math_inst.element_accumulator,
]
CreateGemmPlanarComplexOperator(manifest, layouts, tile_descriptions, \
data_type_mixed, alignment_constraints, complex_transforms)
#
def GenerateSM75_TensorOp_8816_TN(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 10, 2):
return
layouts = [
(LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
]
math_instructions = [
MathInstruction( \
[8, 8, 16], \
DataType.s8, DataType.s8, DataType.s32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add_saturate),
MathInstruction( \
[8, 8, 16], \
DataType.u8, DataType.u8, DataType.s32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add_saturate),
]
min_cc = 75
max_cc = 1024
alignment_constraints = [16,]
for math_inst in math_instructions:
tile_descriptions = [
TileDescription([256, 128, 64], 2, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 256, 64], 2, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 64], 2, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 256, 64], 2, [1, 4, 1], math_inst, min_cc, max_cc),
TileDescription([256, 64, 64], 2, [4, 1, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 128, 64], 2, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 64, 64], 2, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 64, 64], 2, [2, 2, 1], math_inst, min_cc, max_cc),
]
data_type = [
math_inst.element_a,
math_inst.element_b,
math_inst.element_accumulator,
DataType.s32,
]
CreateGemmOperator(manifest, layouts, tile_descriptions, \
data_type, alignment_constraints, None, EpilogueFunctor.LinearCombination)
conv_layout = (LayoutType.TensorNHWC, LayoutType.TensorNHWC, LayoutType.TensorNHWC)
CreateConv2dOperator(manifest, conv_layout, tile_descriptions,
data_type, alignment_constraints, [ConvKind.Fprop], EpilogueFunctor.LinearCombination)
# Avoid emitting two kernels if the accumulator type does not differ from the input type (e.g. F16 accumulation)
if math_inst.element_a != math_inst.element_accumulator:
data_type_mixed = [
math_inst.element_a,
math_inst.element_b,
math_inst.element_a,
DataType.f32,
]
operations = []
operations += CreateGemmOperator(manifest, layouts, tile_descriptions, \
data_type_mixed, alignment_constraints, None, EpilogueFunctor.LinearCombinationClamp)
operations += CreateConv2dOperator(manifest, conv_layout, tile_descriptions,
data_type_mixed, alignment_constraints, [ConvKind.Fprop], EpilogueFunctor.LinearCombinationClamp)
for op in operations:
if op.tile_description.threadblock_shape[1] >= 128:
op.C.alignment = 16
else:
op.C.alignment = 8
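# Note on the post-pass above: C alignment is widened to 16 int8 elements
# (128 bits) for tiles with threadblock N >= 128, presumably to enable wider
# vectorized epilogue stores; narrower tiles keep 8-element alignment.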
#
#
def GenerateSM75_TensorOp_8816_Interleaved(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 10, 2):
return
layouts = [
(LayoutType.ColumnMajorInterleaved32, LayoutType.RowMajorInterleaved32, LayoutType.ColumnMajorInterleaved32),
]
math_instructions = [
MathInstruction( \
[8, 8, 16], \
DataType.s8, DataType.s8, DataType.s32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add_saturate),
MathInstruction( \
[8, 8, 16], \
DataType.u8, DataType.u8, DataType.s32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add_saturate),
]
min_cc = 75
max_cc = 1024
alignment_constraints = [16,]
for math_inst in math_instructions:
tile_descriptions = [
TileDescription([256, 128, 64], 2, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 256, 64], 2, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 64], 2, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([256, 64, 64], 2, [4, 1, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 256, 64], 2, [1, 4, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 128, 64], 2, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 64, 64], 2, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 64, 64], 2, [2, 2, 1], math_inst, min_cc, max_cc),
]
data_type_mixed = [
math_inst.element_a,
math_inst.element_b,
math_inst.element_a,
DataType.f32,
]
operations = CreateGemmOperator(manifest, layouts, tile_descriptions, \
data_type_mixed, alignment_constraints, None, EpilogueFunctor.LinearCombinationClamp)
conv_layout = (LayoutType.TensorNC32HW32, LayoutType.TensorC32RSK32, LayoutType.TensorNC32HW32)
operations += CreateConv2dOperator(manifest, conv_layout, tile_descriptions,
data_type_mixed, alignment_constraints, [ConvKind.Fprop], EpilogueFunctor.LinearCombinationClamp)
for op in operations:
op.C.alignment = 8
#
#
def GenerateSM75_TensorOp_8832_TN(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 10, 2):
return
layouts = [
(LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
]
math_instructions = [
MathInstruction( \
[8, 8, 32], \
DataType.s4, DataType.s4, DataType.s32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add_saturate),
MathInstruction( \
[8, 8, 32], \
DataType.u4, DataType.u4, DataType.s32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add_saturate),
]
min_cc = 75
max_cc = 1024
alignment_constraints = [32,]
for math_inst in math_instructions:
tile_descriptions = [
TileDescription([256, 128, 128], 2, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 256, 128], 2, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 128], 2, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([256, 64, 128], 2, [4, 1, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 256, 128], 2, [1, 4, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 128, 128], 2, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 64, 128], 2, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 64, 128], 2, [2, 2, 1], math_inst, min_cc, max_cc),
]
data_type = [
math_inst.element_a,
math_inst.element_b,
math_inst.element_accumulator,
DataType.s32,
]
CreateGemmOperator(manifest, layouts, tile_descriptions, \
data_type, alignment_constraints, None, EpilogueFunctor.LinearCombination)
conv_layout = (LayoutType.TensorNHWC, LayoutType.TensorNHWC, LayoutType.TensorNHWC)
CreateConv2dOperator(manifest, conv_layout, tile_descriptions,
data_type, alignment_constraints, [ConvKind.Fprop], EpilogueFunctor.LinearCombination)
# Avoid emitting two kernels if the accumulator type does not differ from the input type (e.g. F16 accumulation)
if math_inst.element_a != math_inst.element_accumulator:
data_type_mixed = [
math_inst.element_a,
math_inst.element_b,
math_inst.element_a,
DataType.f32,
]
operations = []
operations += CreateGemmOperator(manifest, layouts, tile_descriptions, \
data_type_mixed, alignment_constraints, None, EpilogueFunctor.LinearCombinationClamp)
operations += CreateConv2dOperator(manifest, conv_layout, tile_descriptions,
data_type_mixed, alignment_constraints, [ConvKind.Fprop], EpilogueFunctor.LinearCombinationClamp)
for op in operations:
        if op.tile_description.threadblock_shape[1] >= 128:
          op.C.alignment = 16
        else:
          op.C.alignment = 8
#
#
def GenerateSM75_TensorOp_8832_Interleaved(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 10, 2):
return
layouts = [
(LayoutType.ColumnMajorInterleaved64, LayoutType.RowMajorInterleaved64, LayoutType.ColumnMajorInterleaved64),
]
math_instructions = [
MathInstruction( \
[8, 8, 32], \
DataType.s4, DataType.s4, DataType.s32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add_saturate),
MathInstruction( \
[8, 8, 32], \
DataType.u4, DataType.u4, DataType.s32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add_saturate),
]
min_cc = 75
max_cc = 1024
alignment_constraints = [32,]
for math_inst in math_instructions:
tile_descriptions = [
TileDescription([256, 128, 128], 2, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 256, 128], 2, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 128], 2, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([256, 64, 128], 2, [4, 1, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 256, 128], 2, [1, 4, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 128, 128], 2, [2, 2, 1], math_inst, min_cc, max_cc),
]
# Avoid emitting two kernels if the accumulator type does not differ from the input type (e.g. F16 accumulation)
if math_inst.element_a != math_inst.element_accumulator:
data_type_mixed = [
math_inst.element_a,
math_inst.element_b,
math_inst.element_a,
DataType.f32,
]
operations = CreateGemmOperator(manifest, layouts, tile_descriptions, \
data_type_mixed, alignment_constraints, None, EpilogueFunctor.LinearCombinationClamp)
conv_layout = (LayoutType.TensorNC64HW64, LayoutType.TensorC64RSK64, LayoutType.TensorNC64HW64)
operations += CreateConv2dOperator(manifest, conv_layout, tile_descriptions,
data_type_mixed, alignment_constraints, [ConvKind.Fprop], EpilogueFunctor.LinearCombinationClamp)
for op in operations:
op.C.alignment = 16
#
#
def GenerateSM75_TensorOp_88128(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 11, 0):
return
layouts = [
(LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
]
math_instructions = [
MathInstruction( \
[8, 8, 128], \
DataType.b1, DataType.b1, DataType.s32, \
OpcodeClass.TensorOp, \
MathOperation.xor_popc),
]
min_cc = 75
max_cc = 1024
alignment_constraints = [128,]
for math_inst in math_instructions:
tile_descriptions = [
TileDescription([256, 128, 512], 2, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 256, 512], 2, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 512], 2, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 256, 512], 2, [1, 4, 1], math_inst, min_cc, max_cc),
TileDescription([256, 64, 512], 2, [4, 1, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 128, 512], 2, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 64, 512], 2, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 64, 512], 2, [2, 2, 1], math_inst, min_cc, max_cc),
]
data_type = [DataType.b1, DataType.b1, DataType.s32, DataType.s32]
CreateGemmOperator(manifest, layouts, tile_descriptions, \
data_type, alignment_constraints)
#
#
def GenerateSM75_WmmaTensorOp_161616(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 10, 0):
return
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.ColumnMajor, LayoutType.RowMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.RowMajor, LayoutType.ColumnMajor),
]
math_instructions = [
MathInstruction( \
[16, 16, 16], \
DataType.s8, DataType.s8, DataType.s32, \
OpcodeClass.WmmaTensorOp, \
MathOperation.multiply_add),
]
min_cc = 75
max_cc = 1024
alignment_constraints = [16,]
for math_inst in math_instructions:
tile_descriptions = [
TileDescription([128, 128, 32], 2, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 128, 32], 2, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 64, 32], 2, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 64, 32], 2, [2, 2, 1], math_inst, min_cc, max_cc),
]
data_type = [
math_inst.element_a,
math_inst.element_b,
math_inst.element_accumulator,
DataType.f32,
]
CreateGemmOperator(manifest, layouts, tile_descriptions, \
data_type, alignment_constraints)
# Avoid emitting two kernels if the accumulator type does not differ from the input type (e.g. F16 accumulation)
if math_inst.element_a != math_inst.element_accumulator:
data_type_mixed = [
math_inst.element_a,
math_inst.element_b,
math_inst.element_a,
DataType.f32,
]
CreateGemmOperator(manifest, layouts, tile_descriptions, \
data_type_mixed, alignment_constraints)
#
#
def GenerateSM75_Simt_complex(manifest, cuda_version):
math_instructions = [
MathInstruction( \
[1, 1, 1], \
DataType.f32, DataType.f32, DataType.f32, \
OpcodeClass.Simt, \
MathOperation.multiply_add_complex),
]
min_cc = 75
max_cc = 1024
alignment_constraints = [1,]
for math_inst in math_instructions:
tile_descriptions = [
TileDescription([128, 128, 8], 5, [4, 2, 1], math_inst, min_cc, max_cc)
]
data_type = [
DataType.cf32,
DataType.cf32,
DataType.cf32,
DataType.cf32
]
complex_transforms = [
(ComplexTransform.none, ComplexTransform.none),
(ComplexTransform.conj, ComplexTransform.none),
(ComplexTransform.none, ComplexTransform.conj),
(ComplexTransform.conj, ComplexTransform.conj)
]
conv_layout = (LayoutType.TensorNHWC, LayoutType.TensorNHWC, LayoutType.TensorNHWC)
CreateConv2dOperator(manifest, conv_layout, tile_descriptions, data_type, alignment_constraints)
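# Note: the complex_transforms list defined in GenerateSM75_Simt_complex above is
# currently unused; only the SIMT complex conv2d kernels are instantiated there.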
#
def GenerateSM75(manifest, cuda_version):
GenerateSM75_TensorOp_1688(manifest, cuda_version)
GenerateSM75_PlanarComplexTensorOp_1688(manifest, cuda_version)
GenerateSM75_TensorOp_8816_TN(manifest, cuda_version)
GenerateSM75_TensorOp_8816_Interleaved(manifest, cuda_version)
GenerateSM75_TensorOp_8832_TN(manifest, cuda_version)
GenerateSM75_TensorOp_8832_Interleaved(manifest, cuda_version)
GenerateSM75_TensorOp_88128(manifest, cuda_version)
  # To limit build size, WMMA GEMMs are disabled for now.
  #GenerateSM75_WmmaTensorOp_161616(manifest, cuda_version)
GenerateSM75_Simt_complex(manifest, cuda_version)
###################################################################################################
###################################################################################################
#
def GenerateSM80_TensorOp_16816(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 11, 0):
return
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.ColumnMajor, LayoutType.RowMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.RowMajor, LayoutType.ColumnMajor),
]
math_instructions = [
MathInstruction( \
[16, 8, 16], \
DataType.f16, DataType.f16, DataType.f32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add),
MathInstruction( \
[16, 8, 16], \
DataType.f16, DataType.f16, DataType.f16, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add),
MathInstruction( \
[16, 8, 16], \
DataType.bf16, DataType.bf16, DataType.f32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add),
]
min_cc = 80
max_cc = 1024
alignment_constraints = [8, 4, 2]
for math_inst in math_instructions:
tile_descriptions = [
TileDescription([256, 128, 32], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 256, 32], 3, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([256, 64, 32], 3, [4, 1, 1], math_inst, min_cc, max_cc),
TileDescription([256, 64, 32], 4, [4, 1, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 256, 32], 4, [1, 4, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 32], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 32], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 32], 5, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 64, 32], 6, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 128, 32], 6, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 64, 32], 10, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([256, 128, 64], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 256, 64], 3, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([256, 64, 64], 4, [4, 1, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 256, 64], 4, [1, 4, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 64], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([256, 64, 64], 3, [4, 1, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 256, 64], 3, [1, 4, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 64], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 64, 64], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 128, 64], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 64, 64], 5, [2, 2, 1], math_inst, min_cc, max_cc),
]
data_type = [
math_inst.element_a,
math_inst.element_b,
math_inst.element_accumulator,
math_inst.element_accumulator,
]
CreateGemmOperator(manifest, layouts, tile_descriptions, \
data_type, alignment_constraints)
CreateGemmGroupedOperator(manifest, layouts, tile_descriptions, data_type, alignment_constraints)
conv_layout = (LayoutType.TensorNHWC, LayoutType.TensorNHWC, LayoutType.TensorNHWC)
CreateConv2dOperator(manifest, conv_layout, tile_descriptions, data_type, alignment_constraints)
CreateConv2dFixedChannelsOperator(manifest, conv_layout, tile_descriptions, data_type, [4, 8])
CreateConv3dOperator(manifest, LayoutType.TensorNDHWC, tile_descriptions, data_type, 8)
# Avoid emitting two kernels if the accumulator type does not differ from the input type (e.g. F16 accumulation)
if math_inst.element_a != math_inst.element_accumulator:
data_type_mixed = [
math_inst.element_a,
math_inst.element_b,
math_inst.element_a,
math_inst.element_accumulator,
]
CreateGemmOperator(manifest, layouts, tile_descriptions, \
data_type_mixed, alignment_constraints)
CreateConv2dOperator(manifest, conv_layout, tile_descriptions, data_type_mixed, alignment_constraints)
CreateConv2dFixedChannelsOperator(manifest, conv_layout, tile_descriptions, data_type_mixed, [4, 8])
CreateConv3dOperator(manifest, LayoutType.TensorNDHWC, tile_descriptions, data_type_mixed, 8)
#
#
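# GenerateSM80_SparseTensorOp_16832: 2:4 structured-sparse f16/bf16 kernels.
# The sparse mma covers twice the K extent of its dense counterpart
# (m16n8k32 vs. m16n8k16), hence the doubled tile K sizes.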
def GenerateSM80_SparseTensorOp_16832(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 11, 1):
return
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.RowMajor),
(LayoutType.ColumnMajor, LayoutType.RowMajor, LayoutType.RowMajor),
(LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.RowMajor),
(LayoutType.RowMajor, LayoutType.RowMajor, LayoutType.RowMajor),
]
math_instructions = [
MathInstruction( \
[16, 8, 32], \
DataType.f16, DataType.f16, DataType.f32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add),
MathInstruction( \
[16, 8, 32], \
DataType.f16, DataType.f16, DataType.f16, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add),
MathInstruction( \
[16, 8, 32], \
DataType.bf16, DataType.bf16, DataType.f32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add),
]
min_cc = 80
max_cc = 1024
alignment_constraints = [8]
for math_inst in math_instructions:
tile_descriptions = [
TileDescription([ 64, 128, 64], 6, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([256, 128, 64], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 256, 64], 3, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 64], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([256, 64, 64], 3, [4, 1, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 256, 64], 4, [1, 4, 1], math_inst, min_cc, max_cc),
TileDescription([128, 64, 64], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 64, 64], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 128], 3, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([256, 64, 128], 3, [4, 1, 1], math_inst, min_cc, max_cc),
TileDescription([128, 64, 128], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 128, 128], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 64, 128], 3, [2, 2, 1], math_inst, min_cc, max_cc),
]
data_type = [
math_inst.element_a,
math_inst.element_b,
math_inst.element_accumulator,
math_inst.element_accumulator,
]
CreateSparseGemmOperator(manifest, layouts, tile_descriptions, \
data_type, alignment_constraints)
# Avoid emitting two kernels if the accumulator type does not differ from the input type (e.g. F16 accumulation)
if math_inst.element_a != math_inst.element_accumulator:
data_type_mixed = [
math_inst.element_a,
math_inst.element_b,
math_inst.element_a,
math_inst.element_accumulator,
]
CreateSparseGemmOperator(manifest, layouts, tile_descriptions, \
data_type_mixed, alignment_constraints)
#
#
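# GenerateSM80_PlanarComplexTensorOp_16816: complex GEMMs in planar layout
# (separate real and imaginary planes) mapped onto the real-valued
# m16n8k16 instruction, enumerated over all four conjugation combinations.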
def GenerateSM80_PlanarComplexTensorOp_16816(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 11, 0):
return
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.ColumnMajor, LayoutType.RowMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.RowMajor, LayoutType.ColumnMajor),
]
complex_transforms = [
(ComplexTransform.none, ComplexTransform.none),
(ComplexTransform.conj, ComplexTransform.none),
(ComplexTransform.none, ComplexTransform.conj),
(ComplexTransform.conj, ComplexTransform.conj)
]
math_instructions = [
MathInstruction( \
[16, 8, 16], \
DataType.f16, DataType.f16, DataType.f32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add),
MathInstruction( \
[16, 8, 16], \
DataType.bf16, DataType.bf16, DataType.f32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add),
MathInstruction( \
[16, 8, 16], \
DataType.f16, DataType.f16, DataType.f16, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add),
]
min_cc = 80
max_cc = 1024
alignment_constraints = [8, ]
for math_inst in math_instructions:
tile_descriptions = [
TileDescription([ 64, 128, 32], 3, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([128, 64, 32], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 64, 32], 4, [2, 2, 1], math_inst, min_cc, max_cc),
]
data_type = [
math_inst.element_a,
math_inst.element_b,
math_inst.element_accumulator,
math_inst.element_accumulator,
]
CreateGemmPlanarComplexOperator(manifest, layouts, tile_descriptions, \
data_type, alignment_constraints, complex_transforms)
# Avoid emitting two kernels if the accumulator type does not differ from the input type (e.g. F16 accumulation)
if math_inst.element_a != math_inst.element_accumulator:
data_type_mixed = [
math_inst.element_a,
math_inst.element_b,
math_inst.element_a,
math_inst.element_accumulator,
]
CreateGemmPlanarComplexOperator(manifest, layouts, tile_descriptions, \
data_type_mixed, alignment_constraints, complex_transforms)
#
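# GenerateSM80_TensorOp_16832_TN: s8/u8 GEMM and fprop conv kernels on the
# m16n8k32 integer instruction. Only the TN layout (A row-major, B
# column-major) is enumerated, matching the operand order the integer mma
# consumes; the clamping epilogue saturates results into the narrow output.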
def GenerateSM80_TensorOp_16832_TN(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 11, 0):
return
layouts = [
(LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
]
math_instructions = [
MathInstruction( \
[16, 8, 32], \
DataType.s8, DataType.s8, DataType.s32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add_saturate),
MathInstruction( \
[16, 8, 32], \
DataType.u8, DataType.u8, DataType.s32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add_saturate),
]
min_cc = 80
max_cc = 1024
  smem_usage = 164  # note: currently unused by this generator
alignment_constraints = [16,]
for math_inst in math_instructions:
tile_descriptions = [
TileDescription([256, 128, 64], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 256, 64], 3, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([256, 64, 64], 4, [4, 1, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 256, 64], 4, [1, 4, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 64], 5, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 64, 64], 6, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 128, 64], 6, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 64, 64], 10, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([256, 128, 128], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 256, 128], 3, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([256, 64, 128], 4, [4, 1, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 256, 128], 4, [1, 4, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 128], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 64, 128], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 128, 128], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 64, 128], 5, [2, 2, 1], math_inst, min_cc, max_cc),
]
data_type = [math_inst.element_a, math_inst.element_b, math_inst.element_accumulator, DataType.s32]
data_type_mixed = [math_inst.element_a, math_inst.element_b, math_inst.element_a, DataType.f32]
CreateGemmOperator(manifest, layouts, tile_descriptions, \
data_type, alignment_constraints, None, EpilogueFunctor.LinearCombination)
operations = []
operations += CreateGemmOperator(manifest, layouts, tile_descriptions, \
data_type_mixed, alignment_constraints, None, EpilogueFunctor.LinearCombinationClamp)
conv_layout = (LayoutType.TensorNHWC, LayoutType.TensorNHWC, LayoutType.TensorNHWC)
CreateConv2dOperator(manifest, conv_layout, tile_descriptions,
data_type, alignment_constraints, [ConvKind.Fprop], EpilogueFunctor.LinearCombination)
operations += CreateConv2dOperator(manifest, conv_layout, tile_descriptions,
data_type_mixed, alignment_constraints, [ConvKind.Fprop], EpilogueFunctor.LinearCombinationClamp)
for op in operations:
if op.tile_description.threadblock_shape[1] >= 128:
op.C.alignment = 16
else:
op.C.alignment = 8
#
#
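# GenerateSM80_SparseTensorOp_16864_TN: 2:4 structured-sparse s8 kernels;
# as with the f16 sparse path, the instruction K (64) doubles the dense
# int8 K (32).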
def GenerateSM80_SparseTensorOp_16864_TN(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 11, 1):
return
layouts = [
(LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.RowMajor),
]
math_inst = \
MathInstruction( \
[16, 8, 64], \
DataType.s8, DataType.s8, DataType.s32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add_saturate)
min_cc = 80
max_cc = 1024
alignment_constraints = [16,]
tile_descriptions = [
TileDescription([128, 64, 128], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([256, 128, 128], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 256, 128], 3, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 128], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([256, 64, 128], 3, [4, 1, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 256, 128], 4, [1, 4, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 128, 128], 6, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 64, 128], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 256], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 64, 256], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 128, 256], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 64, 256], 3, [2, 2, 1], math_inst, min_cc, max_cc),
]
data_type = [DataType.s8, DataType.s8, DataType.s32, DataType.s32]
data_type_mixed = [DataType.s8, DataType.s8, DataType.s8, DataType.f32]
CreateSparseGemmOperator(manifest, layouts, tile_descriptions, \
data_type, alignment_constraints, None, EpilogueFunctor.LinearCombination)
operations = []
operations += CreateSparseGemmOperator(manifest, layouts, tile_descriptions, \
data_type_mixed, alignment_constraints, None, EpilogueFunctor.LinearCombinationClamp)
for op in operations:
if op.tile_description.threadblock_shape[1] >= 128:
op.C.alignment = 16
else:
op.C.alignment = 8
#
#
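# GenerateSM80_TensorOp_16832_Interleaved: int8 kernels over 32-element
# interleaved layouts (NC32HW32 activations, C32RSK32 filters), which keep
# channel groups contiguous for int8 conv fprop.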
def GenerateSM80_TensorOp_16832_Interleaved(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 11, 0):
return
layouts = [
(LayoutType.ColumnMajorInterleaved32, LayoutType.RowMajorInterleaved32, LayoutType.ColumnMajorInterleaved32),
]
math_instructions = [
MathInstruction( \
[16, 8, 32], \
DataType.s8, DataType.s8, DataType.s32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add_saturate),
MathInstruction( \
[16, 8, 32], \
DataType.u8, DataType.u8, DataType.s32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add_saturate),
]
min_cc = 80
max_cc = 1024
alignment_constraints = [16,]
for math_inst in math_instructions:
tile_descriptions = [
TileDescription([256, 128, 64], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 256, 64], 3, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([256, 64, 64], 4, [4, 1, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 256, 64], 4, [1, 4, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 64], 5, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 64, 64], 6, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 128, 64], 6, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 64, 64], 10, [2, 2, 1], math_inst, min_cc, max_cc),
]
data_type_mixed = [math_inst.element_a, math_inst.element_b, math_inst.element_a, DataType.f32]
operations = CreateGemmOperator(manifest, layouts, tile_descriptions, \
data_type_mixed, alignment_constraints, None, EpilogueFunctor.LinearCombinationClamp)
conv_layout = (LayoutType.TensorNC32HW32, LayoutType.TensorC32RSK32, LayoutType.TensorNC32HW32)
operations += CreateConv2dOperator(manifest, conv_layout, tile_descriptions,
data_type_mixed, alignment_constraints, [ConvKind.Fprop], EpilogueFunctor.LinearCombinationClamp)
for op in operations:
op.C.alignment = 8
#
#
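# GenerateSM80_TensorOp_16864_TN: 4-bit integer (s4/u4) kernels on the
# m16n8k64 instruction; alignment rises to 32 elements because thirty-two
# 4-bit values fill one 128-bit access.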
def GenerateSM80_TensorOp_16864_TN(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 11, 0):
return
layouts = [
(LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
]
math_instructions = [
MathInstruction( \
[16, 8, 64], \
DataType.s4, DataType.s4, DataType.s32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add_saturate),
MathInstruction( \
[16, 8, 64], \
DataType.u4, DataType.u4, DataType.s32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add_saturate),
]
min_cc = 80
max_cc = 1024
alignment_constraints = [32,]
for math_inst in math_instructions:
tile_descriptions = [
TileDescription([256, 128, 128], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 256, 128], 3, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([256, 64, 128], 4, [4, 1, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 256, 128], 4, [1, 4, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 128], 5, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 64, 128], 6, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 128, 128], 6, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 64, 128], 10, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([256, 128, 256], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 256, 256], 3, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([256, 64, 256], 4, [4, 1, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 256, 256], 4, [1, 4, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 256], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 256], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 64, 256], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 128, 256], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 64, 256], 5, [2, 2, 1], math_inst, min_cc, max_cc),
]
data_type = [math_inst.element_a, math_inst.element_b, math_inst.element_accumulator, DataType.s32]
data_type_mixed = [math_inst.element_a, math_inst.element_b, math_inst.element_a, DataType.f32]
CreateGemmOperator(manifest, layouts, tile_descriptions, \
data_type, alignment_constraints, None, EpilogueFunctor.LinearCombination)
operations = []
operations += CreateGemmOperator(manifest, layouts, tile_descriptions, \
data_type_mixed, alignment_constraints, None, EpilogueFunctor.LinearCombinationClamp)
conv_layout = (LayoutType.TensorNHWC, LayoutType.TensorNHWC, LayoutType.TensorNHWC)
CreateConv2dOperator(manifest, conv_layout, tile_descriptions,
data_type, alignment_constraints, [ConvKind.Fprop], EpilogueFunctor.LinearCombination)
operations += CreateConv2dOperator(manifest, conv_layout, tile_descriptions,
data_type_mixed, alignment_constraints, [ConvKind.Fprop], EpilogueFunctor.LinearCombinationClamp)
for op in operations:
      if op.tile_description.threadblock_shape[1] >= 128:
        op.C.alignment = 16
      else:
        op.C.alignment = 8
#
#
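# GenerateSM80_SparseTensorOp_168128_TN: 2:4 structured-sparse s4 kernels
# (instruction K = 128, double the dense 4-bit K of 64).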
def GenerateSM80_SparseTensorOp_168128_TN(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 11, 1):
return
layouts = [
(LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.RowMajor),
]
math_inst = \
MathInstruction( \
[16, 8, 128], \
DataType.s4, DataType.s4, DataType.s32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add_saturate)
min_cc = 80
max_cc = 1024
alignment_constraints = [32,]
tile_descriptions = [
TileDescription([ 64, 64, 256], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([256, 64, 256], 3, [4, 1, 1], math_inst, min_cc, max_cc),
TileDescription([256, 128, 256], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 256, 256], 3, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 256], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 256, 256], 4, [1, 4, 1], math_inst, min_cc, max_cc),
TileDescription([128, 64, 256], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 128, 256], 6, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 512], 3, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([128, 64, 512], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 128, 512], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 64, 512], 3, [2, 2, 1], math_inst, min_cc, max_cc),
]
data_type = [DataType.s4, DataType.s4, DataType.s32, DataType.s32]
data_type_mixed = [DataType.s4, DataType.s4, DataType.s4, DataType.f32]
CreateSparseGemmOperator(manifest, layouts, tile_descriptions, \
data_type, alignment_constraints, None, EpilogueFunctor.LinearCombination)
operations = []
operations += CreateSparseGemmOperator(manifest, layouts, tile_descriptions, \
data_type_mixed, alignment_constraints, None, EpilogueFunctor.LinearCombinationClamp)
for op in operations:
if op.tile_description.threadblock_shape[1] > 128:
op.C.alignment = 16
else:
op.C.alignment = 8
#
#
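# GenerateSM80_TensorOp_16864_Interleaved: s4/u4 kernels over 64-element
# interleaved layouts (NC64HW64 / C64RSK64), the 4-bit analogue of the
# int8 interleaved path above.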
def GenerateSM80_TensorOp_16864_Interleaved(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 11, 0):
return
layouts = [
(LayoutType.ColumnMajorInterleaved64, LayoutType.RowMajorInterleaved64, LayoutType.ColumnMajorInterleaved64),
]
math_instructions = [
MathInstruction( \
[16, 8, 64], \
DataType.s4, DataType.s4, DataType.s32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add_saturate),
MathInstruction( \
[16, 8, 64], \
DataType.u4, DataType.u4, DataType.s32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add_saturate),
]
min_cc = 80
max_cc = 1024
alignment_constraints = [32,]
for math_inst in math_instructions:
tile_descriptions = [
TileDescription([256, 128, 128], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 256, 128], 3, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([256, 64, 128], 4, [4, 1, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 256, 128], 4, [1, 4, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 128], 5, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 128, 128], 6, [2, 2, 1], math_inst, min_cc, max_cc),
]
data_type_mixed = [math_inst.element_a, math_inst.element_b, math_inst.element_a, DataType.f32]
operations = []
operations += CreateGemmOperator(manifest, layouts, tile_descriptions, \
data_type_mixed, alignment_constraints, None, EpilogueFunctor.LinearCombinationClamp)
conv_layout = (LayoutType.TensorNC64HW64, LayoutType.TensorC64RSK64, LayoutType.TensorNC64HW64)
operations += CreateConv2dOperator(manifest, conv_layout, tile_descriptions,
data_type_mixed, alignment_constraints, [ConvKind.Fprop], EpilogueFunctor.LinearCombinationClamp)
for op in operations:
op.C.alignment = 16
#
#
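# GenerateSM80_TensorOp_168256: binary (1-bit) GEMM using the XOR-popcount
# m16n8k256 instruction; max_cc is keyed by math operation so a future
# binary op could cap at a different architecture.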
def GenerateSM80_TensorOp_168256(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 11, 0):
return
layouts = [
(LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
]
math_instructions = [
MathInstruction( \
[16, 8, 256], \
DataType.b1, DataType.b1, DataType.s32, \
OpcodeClass.TensorOp, \
MathOperation.xor_popc),
]
min_cc = 80
max_cc = {
MathOperation.xor_popc: 1024
}
alignment_constraints = [128,]
for math_inst in math_instructions:
tile_descriptions = [
TileDescription([256, 128, 512], 3, [4, 2, 1], math_inst, min_cc, max_cc[math_inst.math_operation]),
TileDescription([128, 256, 512], 3, [2, 4, 1], math_inst, min_cc, max_cc[math_inst.math_operation]),
TileDescription([256, 64, 512], 4, [4, 1, 1], math_inst, min_cc, max_cc[math_inst.math_operation]),
TileDescription([ 64, 256, 512], 4, [1, 4, 1], math_inst, min_cc, max_cc[math_inst.math_operation]),
TileDescription([128, 128, 512], 5, [2, 2, 1], math_inst, min_cc, max_cc[math_inst.math_operation]),
TileDescription([128, 64, 512], 6, [2, 2, 1], math_inst, min_cc, max_cc[math_inst.math_operation]),
TileDescription([ 64, 128, 512], 6, [2, 2, 1], math_inst, min_cc, max_cc[math_inst.math_operation]),
TileDescription([ 64, 64, 512], 10, [2, 2, 1], math_inst, min_cc, max_cc[math_inst.math_operation]),
TileDescription([256, 128, 1024], 3, [4, 2, 1], math_inst, min_cc, max_cc[math_inst.math_operation]),
TileDescription([128, 256, 1024], 3, [2, 4, 1], math_inst, min_cc, max_cc[math_inst.math_operation]),
TileDescription([256, 64, 1024], 4, [4, 1, 1], math_inst, min_cc, max_cc[math_inst.math_operation]),
TileDescription([ 64, 256, 1024], 4, [1, 4, 1], math_inst, min_cc, max_cc[math_inst.math_operation]),
TileDescription([128, 128, 1024], 4, [2, 2, 1], math_inst, min_cc, max_cc[math_inst.math_operation]),
TileDescription([128, 64, 1024], 3, [2, 2, 1], math_inst, min_cc, max_cc[math_inst.math_operation]),
TileDescription([ 64, 128, 1024], 3, [2, 2, 1], math_inst, min_cc, max_cc[math_inst.math_operation]),
TileDescription([ 64, 64, 1024], 5, [2, 2, 1], math_inst, min_cc, max_cc[math_inst.math_operation]),
]
data_type = [DataType.b1, DataType.b1, DataType.s32, DataType.s32]
CreateGemmOperator(manifest, layouts, tile_descriptions, \
data_type, alignment_constraints)
#
#
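# GenerateSM80_TensorOp_1688: TF32 Tensor Core kernels (m16n8k8). TF32
# keeps f32 range with a 10-bit mantissa, so these accept f32 data with
# reduced-precision multiplies and full f32 accumulation.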
def GenerateSM80_TensorOp_1688(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 11, 0):
return
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.ColumnMajor, LayoutType.RowMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.RowMajor, LayoutType.ColumnMajor),
]
math_instructions = [
MathInstruction( \
[16, 8, 8], \
DataType.tf32, DataType.tf32, DataType.f32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add)
]
min_cc = 80
max_cc = 1024
alignment_constraints = [4, 2, 1]
for math_inst in math_instructions:
tile_descriptions = [
TileDescription([256, 128, 16], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 256, 16], 3, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([256, 64, 16], 4, [4, 1, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 256, 16], 4, [1, 4, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 16], 5, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 16], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 64, 16], 6, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 128, 16], 6, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 64, 16], 10, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([256, 128, 32], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 256, 32], 3, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([256, 64, 32], 4, [4, 1, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 256, 32], 4, [1, 4, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 32], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 32], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 64, 32], 3, [2, 2, 1], math_inst, min_cc, max_cc),
      TileDescription([ 64, 128, 32], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 64, 32], 5, [2, 2, 1], math_inst, min_cc, max_cc),
]
data_type = [
math_inst.element_a,
math_inst.element_b,
math_inst.element_accumulator,
math_inst.element_accumulator,
]
data_type_mixed = [
math_inst.element_a,
math_inst.element_b,
math_inst.element_a,
math_inst.element_accumulator,
]
CreateGemmOperator(manifest, layouts, tile_descriptions, \
data_type, alignment_constraints)
CreateGemmOperator(manifest, layouts, tile_descriptions, \
data_type_mixed, alignment_constraints)
conv_layout = (LayoutType.TensorNHWC, LayoutType.TensorNHWC, LayoutType.TensorNHWC)
CreateConv2dOperator(manifest, conv_layout, tile_descriptions, data_type, alignment_constraints)
CreateConv2dOperator(manifest, conv_layout, tile_descriptions, data_type_mixed, alignment_constraints)
#
#
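# GenerateSM80_TensorOp_1688_fast_math: f32-in/f32-out kernels whose inner
# product runs on Tensor Cores after rounding operands to tf32, f16, or
# bf16, trading a little precision for throughput.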
def GenerateSM80_TensorOp_1688_fast_math(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 11, 0):
return
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.ColumnMajor, LayoutType.RowMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.RowMajor, LayoutType.ColumnMajor),
]
math_instructions = [
MathInstruction( \
[16, 8, 8], \
DataType.tf32, DataType.tf32, DataType.f32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add),
MathInstruction( \
[16, 8, 8], \
DataType.f16, DataType.f16, DataType.f32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add_fast_f16),
MathInstruction( \
[16, 8, 8], \
DataType.bf16, DataType.bf16, DataType.f32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add_fast_bf16),
]
min_cc = 80
max_cc = 1024
alignment_constraints = [4, 2, 1]
for math_inst in math_instructions:
tile_descriptions = [
TileDescription([256, 128, 16], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 256, 16], 3, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([256, 64, 16], 4, [4, 1, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 256, 16], 4, [1, 4, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 16], 5, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 16], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 64, 16], 6, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 128, 16], 6, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 64, 16], 10, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([256, 128, 32], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 256, 32], 3, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([256, 64, 32], 4, [4, 1, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 256, 32], 4, [1, 4, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 32], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 32], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 64, 32], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 128, 32], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 64, 32], 5, [2, 2, 1], math_inst, min_cc, max_cc),
]
data_type = [DataType.f32, DataType.f32, DataType.f32, DataType.f32]
CreateGemmOperator(manifest, layouts, tile_descriptions, \
data_type, alignment_constraints)
conv_layout = (LayoutType.TensorNHWC, LayoutType.TensorNHWC, LayoutType.TensorNHWC)
CreateConv2dOperator(manifest, conv_layout, tile_descriptions, data_type, alignment_constraints)
#
#
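# GenerateSM80_TensorOp_1688_fast_fp32_math: emulated-f32 kernels
# (multiply_add_fast_f32) that recover near-f32 accuracy on tf32 Tensor
# Cores by splitting each f32 operand into tf32 pieces.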
def GenerateSM80_TensorOp_1688_fast_fp32_math(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 11, 0):
return
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.ColumnMajor, LayoutType.RowMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.RowMajor, LayoutType.ColumnMajor),
]
math_instructions = [
MathInstruction( \
[16, 8, 8], \
DataType.f32, DataType.f32, DataType.f32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add_fast_f32),
]
min_cc = 80
max_cc = 1024
alignment_constraints = [4, 2, 1]
for math_inst in math_instructions:
tile_descriptions = [
TileDescription([128, 128, 16], 4, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 16], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([256, 64, 16], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 256, 16], 3, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([128, 64, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 128, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 64, 16], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 32], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([256, 64, 32], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 256, 32], 3, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([128, 64, 32], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 128, 32], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 64, 32], 3, [2, 2, 1], math_inst, min_cc, max_cc),
]
data_type = [DataType.f32, DataType.f32, DataType.f32, DataType.f32]
CreateGemmOperator(manifest, layouts, tile_descriptions, \
data_type, alignment_constraints)
conv_layout = (LayoutType.TensorNHWC, LayoutType.TensorNHWC, LayoutType.TensorNHWC)
CreateConv2dOperator(manifest, conv_layout, tile_descriptions, data_type, alignment_constraints)
#
def GenerateSM80_TensorOp_1688_fast_fp32_math_complex(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 11, 0):
return
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.ColumnMajor, LayoutType.RowMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.RowMajor, LayoutType.ColumnMajor),
]
math_inst = MathInstruction( \
[16, 8, 8], \
DataType.f32, DataType.f32, DataType.f32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add_complex_fast_f32)
min_cc = 80
max_cc = 1024
tile_descriptions = [
TileDescription([128, 64, 16], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 128, 16], 3, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([64, 64, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 32, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([32, 64, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([32, 32, 16], 3, [2, 2, 1], math_inst, min_cc, max_cc),
]
data_type = [
DataType.cf32, DataType.cf32, DataType.cf32, DataType.cf32
]
alignment_constraints = [1,]
complex_transforms = [
(ComplexTransform.none, ComplexTransform.none),
(ComplexTransform.conj, ComplexTransform.none),
(ComplexTransform.none, ComplexTransform.conj),
(ComplexTransform.conj, ComplexTransform.conj)
]
CreateGemmOperator(manifest, layouts, tile_descriptions, \
data_type, alignment_constraints, complex_transforms)
#
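# GenerateSM80_SparseTensorOp_16816_fast_math: 2:4 structured-sparse tf32
# kernels over f32 data; the sparse instruction K (16) doubles the dense
# tf32 K of 8.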
def GenerateSM80_SparseTensorOp_16816_fast_math(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 11, 1):
return
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.RowMajor),
(LayoutType.ColumnMajor, LayoutType.RowMajor, LayoutType.RowMajor),
(LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.RowMajor),
(LayoutType.RowMajor, LayoutType.RowMajor, LayoutType.RowMajor),
]
math_instructions = [
MathInstruction( \
[16, 8, 16], \
DataType.tf32, DataType.tf32, DataType.f32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add),
]
min_cc = 80
max_cc = 1024
alignment_constraints = [4]
for math_inst in math_instructions:
tile_descriptions = [
TileDescription([128, 64, 32], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 32], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([256, 128, 32], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 256, 32], 3, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([256, 64, 32], 3, [4, 1, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 256, 32], 4, [1, 4, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 128, 32], 6, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 64, 32], 6, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 64], 3, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([256, 64, 64], 3, [4, 1, 1], math_inst, min_cc, max_cc),
TileDescription([128, 64, 64], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 128, 64], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 64, 64], 3, [2, 2, 1], math_inst, min_cc, max_cc),
]
data_type = [DataType.f32, DataType.f32, DataType.f32, DataType.f32]
CreateSparseGemmOperator(manifest, layouts, tile_descriptions, \
data_type, alignment_constraints)
#
#
def GenerateSM80_TensorOp_1688_complex(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 11, 0):
return
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.ColumnMajor, LayoutType.RowMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.RowMajor, LayoutType.ColumnMajor),
]
math_inst = MathInstruction( \
[16, 8, 8], \
DataType.tf32, DataType.tf32, DataType.f32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add_complex)
min_cc = 80
max_cc = 1024
tile_descriptions = [
TileDescription([128, 128, 16], 4, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([128, 64, 16], 4, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 128, 16], 4, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([64, 64, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 32, 16], 4, [2, 1, 1], math_inst, min_cc, max_cc),
TileDescription([32, 64, 16], 4, [1, 2, 1], math_inst, min_cc, max_cc),
TileDescription([32, 32, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc),
]
data_type = [
DataType.cf32, DataType.cf32, DataType.cf32, DataType.cf32
]
alignment_constraints = [1,]
complex_transforms = [
(ComplexTransform.none, ComplexTransform.none),
(ComplexTransform.conj, ComplexTransform.none),
(ComplexTransform.none, ComplexTransform.conj),
(ComplexTransform.conj, ComplexTransform.conj)
]
CreateGemmOperator(manifest, layouts, tile_descriptions, \
data_type, alignment_constraints, complex_transforms)
#
#
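# The rank-k, trmm, and symm generators below cover the BLAS-3 surface
# beyond plain GEMM: SYRK/HERK updates, triangular multiplies, and
# symmetric/Hermitian multiplies, enumerated over fill, side, and diagonal
# modes.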
def GenerateSM80_TensorOp_1688_rank_k(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 11, 0):
return
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.ColumnMajor),
]
fill_modes = [
FillMode.Lower, FillMode.Upper,
]
math_instructions = [
MathInstruction( \
[16, 8, 8], \
DataType.tf32, DataType.tf32, DataType.f32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add),
MathInstruction( \
[16, 8, 8], \
DataType.f32, DataType.f32, DataType.f32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add_fast_f32),
]
min_cc = 80
max_cc = 1024
alignment_constraints = [1, 2, 4] # Alignment only applies to A in SYRK
for math_inst in math_instructions:
tile_descriptions = [
TileDescription([256, 128, 16], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 256, 16], 3, [2, 4, 1], math_inst, min_cc, max_cc),
#TileDescription([256, 64, 16], 4, [4, 1, 1], math_inst, min_cc, max_cc),
#TileDescription([ 64, 256, 16], 4, [1, 4, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 16], 5, [2, 2, 1], math_inst, min_cc, max_cc),
#TileDescription([128, 64, 16], 6, [2, 2, 1], math_inst, min_cc, max_cc),
#TileDescription([ 64, 128, 16], 6, [2, 2, 1], math_inst, min_cc, max_cc),
#TileDescription([ 64, 64, 16], 10, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([256, 128, 32], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 256, 32], 3, [2, 4, 1], math_inst, min_cc, max_cc),
#TileDescription([256, 64, 32], 4, [4, 1, 1], math_inst, min_cc, max_cc),
#TileDescription([ 64, 256, 32], 4, [1, 4, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 32], 4, [2, 2, 1], math_inst, min_cc, max_cc),
#TileDescription([128, 64, 32], 3, [2, 2, 1], math_inst, min_cc, max_cc),
#TileDescription([ 64, 128, 32], 3, [2, 2, 1], math_inst, min_cc, max_cc),
#TileDescription([ 64, 64, 32], 5, [2, 2, 1], math_inst, min_cc, max_cc),
]
data_type = [DataType.f32, DataType.f32, DataType.f32]
CreateRankKOperator(manifest, layouts, fill_modes, tile_descriptions, \
data_type, alignment_constraints, BlasMode.symmetric)
#
#
def GenerateSM80_TensorOp_1688_rank_k_complex(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 11, 0):
return
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.ColumnMajor),
]
fill_modes = [
FillMode.Lower, FillMode.Upper,
]
math_instructions = [
MathInstruction( \
[16, 8, 8], \
DataType.tf32, DataType.tf32, DataType.f32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add_complex),
MathInstruction( \
[16, 8, 8], \
DataType.f32, DataType.f32, DataType.f32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add_complex_fast_f32),
]
min_cc = 80
max_cc = 1024
for math_inst in math_instructions:
tile_descriptions = [
TileDescription([128, 64, 16], 4, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 128, 16], 4, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([64, 64, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc),
#TileDescription([64, 32, 16], 4, [2, 1, 1], math_inst, min_cc, max_cc),
#TileDescription([32, 32, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc),
]
data_type = [
DataType.cf32, DataType.cf32, DataType.cf32
]
alignment_constraints = [1,]
# SYRK
CreateRankKOperator(manifest, layouts, fill_modes, tile_descriptions, \
data_type, alignment_constraints, BlasMode.symmetric)
# HERK
CreateRankKOperator(manifest, layouts, fill_modes, tile_descriptions, \
data_type, alignment_constraints, BlasMode.hermitian)
#
#
def GenerateSM80_TensorOp_1688_trmm(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 11, 0):
return
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
]
side_modes = [
SideMode.Left, SideMode.Right,
]
fill_modes = [
FillMode.Lower, FillMode.Upper,
]
diag_types = [
DiagType.NonUnit, DiagType.Unit,
]
math_instructions = [
MathInstruction( \
[16, 8, 8], \
DataType.tf32, DataType.tf32, DataType.f32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add),
MathInstruction( \
[16, 8, 8], \
DataType.f32, DataType.f32, DataType.f32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add_fast_f32),
]
min_cc = 80
max_cc = 1024
alignment_constraints = [1, 2, 4]
for math_inst in math_instructions:
tile_descriptions = [
TileDescription([256, 128, 16], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 256, 16], 3, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([256, 64, 16], 4, [4, 1, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 256, 16], 4, [1, 4, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 16], 5, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 64, 16], 6, [2, 2, 1], math_inst, min_cc, max_cc),
#TileDescription([ 64, 128, 16], 6, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 64, 16], 10, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([256, 128, 32], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 256, 32], 3, [2, 4, 1], math_inst, min_cc, max_cc),
#TileDescription([256, 64, 32], 4, [4, 1, 1], math_inst, min_cc, max_cc),
#TileDescription([ 64, 256, 32], 4, [1, 4, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 32], 4, [2, 2, 1], math_inst, min_cc, max_cc),
#TileDescription([128, 64, 32], 3, [2, 2, 1], math_inst, min_cc, max_cc),
#TileDescription([ 64, 128, 32], 3, [2, 2, 1], math_inst, min_cc, max_cc),
#TileDescription([ 64, 64, 32], 5, [2, 2, 1], math_inst, min_cc, max_cc),
]
data_type = [DataType.f32, DataType.f32, DataType.f32, DataType.f32]
CreateTrmmOperator(manifest, layouts, side_modes, fill_modes, diag_types, tile_descriptions, \
data_type, alignment_constraints)
#
#
def GenerateSM80_TensorOp_1688_trmm_complex(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 11, 0):
return
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
]
side_modes = [
SideMode.Left, SideMode.Right,
]
fill_modes = [
FillMode.Lower, FillMode.Upper,
]
diag_types = [
DiagType.NonUnit, DiagType.Unit,
]
math_instructions = [
MathInstruction( \
[16, 8, 8], \
DataType.tf32, DataType.tf32, DataType.f32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add_complex),
MathInstruction( \
[16, 8, 8], \
DataType.f32, DataType.f32, DataType.f32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add_complex_fast_f32),
]
min_cc = 80
max_cc = 1024
for math_inst in math_instructions:
tile_descriptions = [
TileDescription([128, 64, 16], 4, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 128, 16], 4, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([64, 64, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 32, 16], 4, [2, 1, 1], math_inst, min_cc, max_cc),
TileDescription([32, 32, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc),
]
data_type = [
DataType.cf32, DataType.cf32, DataType.cf32, DataType.cf32
]
alignment_constraints = [1,]
complex_transforms = [
ComplexTransform.none, ComplexTransform.conj,
]
CreateTrmmOperator(manifest, layouts, side_modes, fill_modes, diag_types, tile_descriptions, \
data_type, alignment_constraints, complex_transforms)
#
#
def GenerateSM80_TensorOp_1688_symm(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 11, 0):
return
  # A and B share the same layout
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor),
]
side_modes = [
SideMode.Left, SideMode.Right,
]
fill_modes = [
FillMode.Lower, FillMode.Upper,
]
math_instructions = [
MathInstruction( \
[16, 8, 8], \
DataType.tf32, DataType.tf32, DataType.f32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add),
MathInstruction( \
[16, 8, 8], \
DataType.f32, DataType.f32, DataType.f32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add_fast_f32),
]
min_cc = 80
max_cc = 1024
alignment_constraints = [
1, 2, 4
]
for math_inst in math_instructions:
tile_descriptions = [
TileDescription([256, 128, 16], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 256, 16], 3, [2, 4, 1], math_inst, min_cc, max_cc),
#TileDescription([256, 64, 16], 4, [4, 1, 1], math_inst, min_cc, max_cc),
#TileDescription([ 64, 256, 16], 4, [1, 4, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 16], 5, [2, 2, 1], math_inst, min_cc, max_cc),
#TileDescription([128, 64, 16], 6, [2, 2, 1], math_inst, min_cc, max_cc),
#TileDescription([ 64, 128, 16], 6, [2, 2, 1], math_inst, min_cc, max_cc),
#TileDescription([ 64, 64, 16], 10, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([256, 128, 32], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 256, 32], 3, [2, 4, 1], math_inst, min_cc, max_cc),
#TileDescription([256, 64, 32], 4, [4, 1, 1], math_inst, min_cc, max_cc),
#TileDescription([ 64, 256, 32], 4, [1, 4, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 32], 4, [2, 2, 1], math_inst, min_cc, max_cc),
#TileDescription([128, 64, 32], 3, [2, 2, 1], math_inst, min_cc, max_cc),
#TileDescription([ 64, 128, 32], 3, [2, 2, 1], math_inst, min_cc, max_cc),
#TileDescription([ 64, 64, 32], 5, [2, 2, 1], math_inst, min_cc, max_cc),
]
data_type = [DataType.f32, DataType.f32, DataType.f32, DataType.f32]
CreateSymmOperator(manifest, layouts, side_modes, fill_modes, tile_descriptions, \
data_type, alignment_constraints, BlasMode.symmetric)
#
#
def GenerateSM80_TensorOp_1688_symm_complex(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 11, 0):
return
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor),
]
side_modes = [
SideMode.Left, SideMode.Right,
]
fill_modes = [
FillMode.Lower, FillMode.Upper,
]
math_instructions = [
MathInstruction( \
[16, 8, 8], \
DataType.tf32, DataType.tf32, DataType.f32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add_complex),
MathInstruction( \
[16, 8, 8], \
DataType.f32, DataType.f32, DataType.f32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add_complex_fast_f32),
]
min_cc = 80
max_cc = 1024
for math_inst in math_instructions:
tile_descriptions = [
TileDescription([128, 64, 16], 4, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 128, 16], 4, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([64, 64, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc),
#TileDescription([64, 32, 16], 4, [2, 1, 1], math_inst, min_cc, max_cc),
#TileDescription([32, 32, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc),
]
data_type = [
DataType.cf32, DataType.cf32, DataType.cf32, DataType.cf32
]
alignment_constraints = [1,]
# SYMM
CreateSymmOperator(manifest, layouts, side_modes, fill_modes, tile_descriptions, \
data_type, alignment_constraints, BlasMode.symmetric)
# HEMM
CreateSymmOperator(manifest, layouts, side_modes, fill_modes, tile_descriptions, \
data_type, alignment_constraints, BlasMode.hermitian)
#
#
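# GenerateSM80_TensorOp_884: double-precision kernels on the f64 m8n8k4
# Tensor Core instruction introduced with SM80.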
def GenerateSM80_TensorOp_884(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 11, 0):
return
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.ColumnMajor, LayoutType.RowMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.RowMajor, LayoutType.ColumnMajor),
]
math_inst = \
MathInstruction( \
[8, 8, 4], \
DataType.f64, DataType.f64, DataType.f64, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add)
min_cc = 80
max_cc = 1024
alignment_constraints = [1,]
tile_descriptions = [
TileDescription([128, 128, 16], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([256, 64, 16], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 256, 16], 3, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([256, 32, 16], 3, [4, 1, 1], math_inst, min_cc, max_cc),
TileDescription([32, 256, 16], 3, [1, 4, 1], math_inst, min_cc, max_cc),
TileDescription([128, 64, 16], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 128, 16], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 64, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 32, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([32, 64, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([32, 32, 16], 5, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([16, 32, 16], 5, [1, 2, 1], math_inst, min_cc, max_cc),
TileDescription([32, 16, 16], 5, [2, 1, 1], math_inst, min_cc, max_cc),
]
data_type = [DataType.f64, DataType.f64, DataType.f64, DataType.f64]
CreateGemmOperator(manifest, layouts, tile_descriptions, \
data_type, alignment_constraints)
#
#
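# GenerateSM80_TensorOp_884_complex: complex<double> kernels built from the
# real f64 instruction with the standard four-multiply complex product.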
def GenerateSM80_TensorOp_884_complex(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 11, 0):
return
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.ColumnMajor, LayoutType.RowMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.RowMajor, LayoutType.ColumnMajor),
]
math_inst = \
MathInstruction( \
[8, 8, 4], \
DataType.f64, DataType.f64, DataType.f64, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add_complex)
min_cc = 80
max_cc = 1024
alignment_constraints = [1,]
tile_descriptions = [
TileDescription([128, 64, 8 ], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 128, 8 ], 3, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([64, 64, 8 ], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 32, 8 ], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([32, 64, 8 ], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([32, 32, 8 ], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([16, 32, 8 ], 4, [1, 2, 1], math_inst, min_cc, max_cc),
TileDescription([32, 16, 8 ], 4, [2, 1, 1], math_inst, min_cc, max_cc),
TileDescription([128, 64, 16], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 128, 16], 3, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([64, 64, 16], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 32, 16], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([32, 64, 16], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([32, 32, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([16, 32, 16], 4, [1, 2, 1], math_inst, min_cc, max_cc),
TileDescription([32, 16, 16], 3, [2, 1, 1], math_inst, min_cc, max_cc),
]
data_type = [DataType.cf64, DataType.cf64, DataType.cf64, DataType.cf64]
complex_transforms = [
(ComplexTransform.none, ComplexTransform.none),
(ComplexTransform.conj, ComplexTransform.none),
(ComplexTransform.none, ComplexTransform.conj),
(ComplexTransform.conj, ComplexTransform.conj)
]
CreateGemmOperator(manifest, layouts, tile_descriptions, \
data_type, alignment_constraints, complex_transforms)
#
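# The "gaussian" complex variants use Gauss's three-multiplication trick,
# trading extra additions for one fewer f64 multiply per complex product.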
def GenerateSM80_TensorOp_884_complex_gaussian(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 11, 0):
return
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.ColumnMajor, LayoutType.RowMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.RowMajor, LayoutType.ColumnMajor),
]
math_inst = \
MathInstruction( \
[8, 8, 4], \
DataType.f64, DataType.f64, DataType.f64, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add_complex_gaussian)
min_cc = 80
max_cc = 1024
alignment_constraints = [1,]
tile_descriptions = [
TileDescription([64, 64, 8], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 32, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([32, 64, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([32, 32, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([16, 32, 8], 4, [1, 2, 1], math_inst, min_cc, max_cc),
TileDescription([32, 16, 8], 4, [2, 1, 1], math_inst, min_cc, max_cc),
]
data_type = [DataType.cf64, DataType.cf64, DataType.cf64, DataType.cf64]
complex_transforms = [
(ComplexTransform.none, ComplexTransform.none),
(ComplexTransform.conj, ComplexTransform.none),
(ComplexTransform.none, ComplexTransform.conj),
(ComplexTransform.conj, ComplexTransform.conj)
]
CreateGemmOperator(manifest, layouts, tile_descriptions, \
data_type, alignment_constraints, complex_transforms)
#
#
def GenerateSM80_TensorOp_884_rank_k(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 11, 0):
return
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.ColumnMajor),
]
fill_modes = [
FillMode.Lower, FillMode.Upper,
]
math_inst = \
MathInstruction( \
[8, 8, 4], \
DataType.f64, DataType.f64, DataType.f64, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add)
min_cc = 80
max_cc = 1024
alignment_constraints = [1,]
tile_descriptions = [
TileDescription([128, 128, 16], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 128, 16], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 64, 16], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 64, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 32, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([32, 64, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([32, 32, 16], 5, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([16, 32, 16], 5, [1, 2, 1], math_inst, min_cc, max_cc),
TileDescription([32, 16, 16], 5, [2, 1, 1], math_inst, min_cc, max_cc),
]
data_type = [DataType.f64, DataType.f64, DataType.f64]
CreateRankKOperator(manifest, layouts, fill_modes, tile_descriptions, \
data_type, alignment_constraints, BlasMode.symmetric)
#
#
def GenerateSM80_TensorOp_884_rank_k_complex(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 11, 0):
return
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.ColumnMajor),
]
fill_modes = [
FillMode.Lower, FillMode.Upper,
]
math_inst = \
MathInstruction( \
[8, 8, 4], \
DataType.f64, DataType.f64, DataType.f64, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add_complex)
min_cc = 80
max_cc = 1024
alignment_constraints = [1,]
tile_descriptions = [
TileDescription([128, 64, 8], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 128, 8], 3, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([64, 64, 8], 3, [2, 2, 1], math_inst, min_cc, max_cc),
#TileDescription([64, 32, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc),
#TileDescription([32, 64, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc),
#TileDescription([32, 32, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc),
#TileDescription([16, 32, 8], 4, [1, 2, 1], math_inst, min_cc, max_cc),
#TileDescription([32, 16, 8], 4, [2, 1, 1], math_inst, min_cc, max_cc),
]
data_type = [DataType.cf64, DataType.cf64, DataType.cf64]
# SYRK computation
CreateRankKOperator(manifest, layouts, fill_modes, tile_descriptions, \
data_type, alignment_constraints, BlasMode.symmetric)
# HERK computation
CreateRankKOperator(manifest, layouts, fill_modes, tile_descriptions, \
data_type, alignment_constraints, BlasMode.hermitian)
#
#
def GenerateSM80_TensorOp_884_rank_k_complex_gaussian(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 11, 0):
return
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.ColumnMajor),
]
fill_modes = [
FillMode.Lower, FillMode.Upper,
]
math_inst = \
MathInstruction( \
[8, 8, 4], \
DataType.f64, DataType.f64, DataType.f64, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add_complex_gaussian)
min_cc = 80
max_cc = 1024
alignment_constraints = [1,]
tile_descriptions = [
TileDescription([64, 64, 8], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 32, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([32, 64, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc),
#TileDescription([32, 32, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc),
#TileDescription([16, 32, 8], 4, [1, 2, 1], math_inst, min_cc, max_cc),
#TileDescription([32, 16, 8], 4, [2, 1, 1], math_inst, min_cc, max_cc),
]
data_type = [DataType.cf64, DataType.cf64, DataType.cf64]
complex_transforms = [ComplexTransform.none,]
# SYRK computation
CreateRankKOperator(manifest, layouts, fill_modes, tile_descriptions, \
data_type, alignment_constraints, BlasMode.symmetric)
# HERK computation
CreateRankKOperator(manifest, layouts, fill_modes, tile_descriptions, \
data_type, alignment_constraints, BlasMode.hermitian)
#
#
def GenerateSM80_TensorOp_884_trmm(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 11, 0):
return
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
]
side_modes = [
SideMode.Left, SideMode.Right,
]
fill_modes = [
FillMode.Lower, FillMode.Upper,
]
diag_types = [
DiagType.NonUnit, DiagType.Unit,
]
math_inst = \
MathInstruction( \
[8, 8, 4], \
DataType.f64, DataType.f64, DataType.f64, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add)
min_cc = 80
max_cc = 1024
alignment_constraints = [1,]
tile_descriptions = [
TileDescription([128, 128, 16], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 128, 16], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 64, 16], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 64, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc),
]
data_type = [DataType.f64, DataType.f64, DataType.f64, DataType.f64]
CreateTrmmOperator(manifest, layouts, side_modes, fill_modes, diag_types, tile_descriptions, \
data_type, alignment_constraints)
#
#
def GenerateSM80_TensorOp_884_trmm_complex(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 11, 0):
return
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
]
side_modes = [
SideMode.Left, SideMode.Right,
]
fill_modes = [
FillMode.Lower, FillMode.Upper,
]
diag_types = [
DiagType.NonUnit, DiagType.Unit,
]
math_inst = \
MathInstruction( \
[8, 8, 4], \
DataType.f64, DataType.f64, DataType.f64, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add_complex)
min_cc = 80
max_cc = 1024
alignment_constraints = [1,]
tile_descriptions = [
TileDescription([128, 64, 8], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 128, 8], 3, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([64, 64, 8], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 32, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([32, 64, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc),
]
data_type = [DataType.cf64, DataType.cf64, DataType.cf64, DataType.cf64]
complex_transforms = [
ComplexTransform.none, ComplexTransform.conj,
]
CreateTrmmOperator(manifest, layouts, side_modes, fill_modes, diag_types, tile_descriptions, \
data_type, alignment_constraints, complex_transforms)
#
#
def GenerateSM80_TensorOp_884_trmm_complex_gaussian(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 11, 0):
return
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
]
side_modes = [
SideMode.Left, SideMode.Right,
]
fill_modes = [
FillMode.Lower, FillMode.Upper,
]
diag_types = [
DiagType.NonUnit, DiagType.Unit,
]
math_inst = \
MathInstruction( \
[8, 8, 4], \
DataType.f64, DataType.f64, DataType.f64, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add_complex_gaussian)
min_cc = 80
max_cc = 1024
alignment_constraints = [1,]
tile_descriptions = [
TileDescription([64, 64, 8], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 32, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([32, 64, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc),
]
data_type = [DataType.cf64, DataType.cf64, DataType.cf64, DataType.cf64]
complex_transforms = [
ComplexTransform.none, ComplexTransform.conj,
]
CreateTrmmOperator(manifest, layouts, side_modes, fill_modes, diag_types, tile_descriptions, \
data_type, alignment_constraints, complex_transforms)
#
#
def GenerateSM80_TensorOp_884_symm(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 11, 0):
return
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor),
]
side_modes = [
SideMode.Left, SideMode.Right,
]
fill_modes = [
FillMode.Lower, FillMode.Upper,
]
math_inst = \
MathInstruction( \
[8, 8, 4], \
DataType.f64, DataType.f64, DataType.f64, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add)
min_cc = 80
max_cc = 1024
alignment_constraints = [1,]
tile_descriptions = [
TileDescription([128, 128, 16], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 128, 16], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 64, 16], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 64, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 32, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([32, 64, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([32, 32, 16], 5, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([16, 32, 16], 5, [1, 2, 1], math_inst, min_cc, max_cc),
TileDescription([32, 16, 16], 5, [2, 1, 1], math_inst, min_cc, max_cc),
]
data_type = [DataType.f64, DataType.f64, DataType.f64, DataType.f64]
CreateSymmOperator(manifest, layouts, side_modes, fill_modes, tile_descriptions, \
data_type, alignment_constraints, BlasMode.symmetric)
#
#
def GenerateSM80_TensorOp_884_symm_complex(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 11, 0):
return
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor),
]
side_modes = [
SideMode.Left, SideMode.Right,
]
fill_modes = [
FillMode.Lower, FillMode.Upper,
]
math_inst = \
MathInstruction( \
[8, 8, 4], \
DataType.f64, DataType.f64, DataType.f64, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add_complex)
min_cc = 80
max_cc = 1024
alignment_constraints = [1,]
tile_descriptions = [
TileDescription([128, 64, 8], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 128, 8], 3, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([64, 64, 8], 3, [2, 2, 1], math_inst, min_cc, max_cc),
#TileDescription([64, 32, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc),
#TileDescription([32, 64, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc),
#TileDescription([32, 32, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc),
#TileDescription([16, 32, 8], 4, [1, 2, 1], math_inst, min_cc, max_cc),
#TileDescription([32, 16, 8], 4, [2, 1, 1], math_inst, min_cc, max_cc),
]
data_type = [DataType.cf64, DataType.cf64, DataType.cf64, DataType.cf64]
# SYMM computation
CreateSymmOperator(manifest, layouts, side_modes, fill_modes, tile_descriptions, \
data_type, alignment_constraints, BlasMode.symmetric)
# HEMM computation
CreateSymmOperator(manifest, layouts, side_modes, fill_modes, tile_descriptions, \
data_type, alignment_constraints, BlasMode.hermitian)
#
#
def GenerateSM80_TensorOp_884_symm_complex_gaussian(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 11, 0):
return
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor),
]
side_modes = [
SideMode.Left, SideMode.Right,
]
fill_modes = [
FillMode.Lower, FillMode.Upper,
]
math_inst = \
MathInstruction( \
[8, 8, 4], \
DataType.f64, DataType.f64, DataType.f64, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add_complex_gaussian)
min_cc = 80
max_cc = 1024
alignment_constraints = [1,]
tile_descriptions = [
TileDescription([64, 64, 8], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 32, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([32, 64, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc),
#TileDescription([32, 32, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc),
#TileDescription([16, 32, 8], 4, [1, 2, 1], math_inst, min_cc, max_cc),
#TileDescription([32, 16, 8], 4, [2, 1, 1], math_inst, min_cc, max_cc),
]
data_type = [DataType.cf64, DataType.cf64, DataType.cf64, DataType.cf64]
complex_transforms = [ComplexTransform.none,]
# SYMM computation
CreateSymmOperator(manifest, layouts, side_modes, fill_modes, tile_descriptions, \
data_type, alignment_constraints, BlasMode.symmetric)
# HEMM computation
CreateSymmOperator(manifest, layouts, side_modes, fill_modes, tile_descriptions, \
data_type, alignment_constraints, BlasMode.hermitian)
#
###################################################################################################
#
def GenerateSM80_Simt_f32(manifest, cuda_version):
  if not CudaToolkitVersionSatisfies(cuda_version, 11, 0):
    return
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.ColumnMajor, LayoutType.RowMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.RowMajor, LayoutType.ColumnMajor),
]
math_instructions = [
MathInstruction( \
[1, 1, 1], \
DataType.f32, DataType.f32, DataType.f32, \
OpcodeClass.Simt, \
MathOperation.multiply_add),
]
min_cc = 80
max_cc = 1024
alignment_constraints = [1,]
for math_inst in math_instructions:
tile_descriptions = [
TileDescription([256, 128, 8], 5, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 256, 8], 5, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 8], 5, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([256, 128, 8], 4, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 256, 8], 4, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 8], 4, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 64, 8], 5, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 128, 8], 5, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 64, 8], 5, [2, 1, 1], math_inst, min_cc, max_cc),
TileDescription([128, 32, 8], 5, [2, 1, 1], math_inst, min_cc, max_cc),
TileDescription([ 32, 128, 8], 5, [1, 2, 1], math_inst, min_cc, max_cc),
]
data_type = [
math_inst.element_a,
math_inst.element_b,
math_inst.element_accumulator,
math_inst.element_accumulator,
]
CreateGemmOperator(manifest, layouts, tile_descriptions, \
data_type, alignment_constraints)
conv_layout = (LayoutType.TensorNHWC, LayoutType.TensorNHWC, LayoutType.TensorNHWC)
CreateConv2dOperator(manifest, conv_layout, tile_descriptions, data_type, alignment_constraints)
#
#
def GenerateSM80_Simt_f64(manifest, cuda_version):
  if not CudaToolkitVersionSatisfies(cuda_version, 11, 0):
    return
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.ColumnMajor, LayoutType.RowMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.RowMajor, LayoutType.ColumnMajor),
]
math_instructions = [
MathInstruction( \
[1, 1, 1], \
DataType.f64, DataType.f64, DataType.f64, \
OpcodeClass.Simt, \
MathOperation.multiply_add),
]
min_cc = 80
max_cc = 1024
alignment_constraints = [1,]
for math_inst in math_instructions:
tile_descriptions = [
TileDescription([128, 128, 8], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 64, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 128, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 64, 8], 5, [2, 1, 1], math_inst, min_cc, max_cc),
TileDescription([128, 32, 8], 5, [2, 1, 1], math_inst, min_cc, max_cc),
TileDescription([ 32, 128, 8], 5, [1, 2, 1], math_inst, min_cc, max_cc),
]
data_type = [
math_inst.element_a,
math_inst.element_b,
math_inst.element_accumulator,
math_inst.element_accumulator,
]
CreateGemmOperator(manifest, layouts, tile_descriptions, \
data_type, alignment_constraints)
#
##################################################################################################
#
def GenerateSM80_Simt_complex(manifest, cuda_version):
  if not CudaToolkitVersionSatisfies(cuda_version, 11, 0):
    return
math_instructions = [
MathInstruction( \
[1, 1, 1], \
DataType.f32, DataType.f32, DataType.f32, \
OpcodeClass.Simt, \
MathOperation.multiply_add_complex),
]
min_cc = 80
max_cc = 1024
alignment_constraints = [1,]
data_type = [
DataType.cf32,
DataType.cf32,
DataType.cf32,
DataType.cf32
]
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.ColumnMajor, LayoutType.RowMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.RowMajor, LayoutType.ColumnMajor),
]
complex_transforms = [
(ComplexTransform.none, ComplexTransform.none),
(ComplexTransform.conj, ComplexTransform.none),
(ComplexTransform.none, ComplexTransform.conj),
(ComplexTransform.conj, ComplexTransform.conj)
]
for math_inst in math_instructions:
tile_descriptions = [
TileDescription([128, 128, 8], 5, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 8], 4, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 64, 8], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 128, 16], 6, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 64, 16], 6, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 32, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([32, 64, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([32, 32, 16], 5, [2, 2, 1], math_inst, min_cc, max_cc),
]
CreateGemmOperator(manifest, layouts, tile_descriptions, data_type, alignment_constraints, complex_transforms)
conv_layout = (LayoutType.TensorNHWC, LayoutType.TensorNHWC, LayoutType.TensorNHWC)
CreateConv2dOperator(manifest, conv_layout, tile_descriptions, data_type, alignment_constraints)
#
###################################################################################################
#
def GenerateSM80(manifest, cuda_version):
GenerateSM80_TensorOp_16816(manifest, cuda_version)
GenerateSM80_SparseTensorOp_16832(manifest, cuda_version)
GenerateSM80_PlanarComplexTensorOp_16816(manifest, cuda_version)
GenerateSM80_TensorOp_1688(manifest, cuda_version)
GenerateSM80_TensorOp_1688_fast_math(manifest, cuda_version)
GenerateSM80_SparseTensorOp_16816_fast_math(manifest, cuda_version)
GenerateSM80_TensorOp_1688_complex(manifest, cuda_version)
# 3xTF32
GenerateSM80_TensorOp_1688_fast_fp32_math(manifest, cuda_version)
GenerateSM80_TensorOp_1688_fast_fp32_math_complex(manifest, cuda_version)
GenerateSM80_TensorOp_1688_rank_k(manifest, cuda_version)
GenerateSM80_TensorOp_1688_rank_k_complex(manifest, cuda_version)
GenerateSM80_TensorOp_1688_trmm(manifest, cuda_version)
GenerateSM80_TensorOp_1688_trmm_complex(manifest, cuda_version)
GenerateSM80_TensorOp_1688_symm(manifest, cuda_version)
GenerateSM80_TensorOp_1688_symm_complex(manifest, cuda_version)
GenerateSM80_TensorOp_884(manifest, cuda_version)
GenerateSM80_TensorOp_884_complex(manifest, cuda_version)
GenerateSM80_TensorOp_884_complex_gaussian(manifest, cuda_version)
GenerateSM80_TensorOp_884_rank_k(manifest, cuda_version)
GenerateSM80_TensorOp_884_rank_k_complex(manifest, cuda_version)
GenerateSM80_TensorOp_884_rank_k_complex_gaussian(manifest, cuda_version)
GenerateSM80_TensorOp_884_trmm(manifest, cuda_version)
GenerateSM80_TensorOp_884_trmm_complex(manifest, cuda_version)
GenerateSM80_TensorOp_884_trmm_complex_gaussian(manifest, cuda_version)
GenerateSM80_TensorOp_884_symm(manifest, cuda_version)
GenerateSM80_TensorOp_884_symm_complex(manifest, cuda_version)
GenerateSM80_TensorOp_884_symm_complex_gaussian(manifest, cuda_version)
GenerateSM80_TensorOp_16832_TN(manifest, cuda_version)
GenerateSM80_SparseTensorOp_16864_TN(manifest, cuda_version)
GenerateSM80_TensorOp_16832_Interleaved(manifest, cuda_version)
GenerateSM80_TensorOp_16864_TN(manifest, cuda_version)
GenerateSM80_SparseTensorOp_168128_TN(manifest, cuda_version)
GenerateSM80_TensorOp_16864_Interleaved(manifest, cuda_version)
GenerateSM80_TensorOp_168256(manifest, cuda_version)
GenerateSM80_Simt_f32(manifest, cuda_version)
GenerateSM80_Simt_f64(manifest, cuda_version)
GenerateSM80_Simt_complex(manifest, cuda_version)
###################################################################################################
#
def GenerateSM90_TensorOp_1684(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 11, 8):
return
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.ColumnMajor, LayoutType.RowMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.RowMajor, LayoutType.ColumnMajor),
]
math_inst = \
MathInstruction( \
[16, 8, 4], \
DataType.f64, DataType.f64, DataType.f64, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add)
min_cc = 90
max_cc = 1024
alignment_constraints = [1,]
tile_descriptions = [
TileDescription([128, 128, 16], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([256, 64, 16], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 256, 16], 3, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([256, 32, 16], 3, [4, 1, 1], math_inst, min_cc, max_cc),
TileDescription([32, 256, 16], 3, [1, 4, 1], math_inst, min_cc, max_cc),
TileDescription([128, 64, 16], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 128, 16], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 64, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 32, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([32, 64, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([32, 32, 16], 5, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([16, 32, 16], 5, [1, 2, 1], math_inst, min_cc, max_cc),
TileDescription([32, 16, 16], 5, [2, 1, 1], math_inst, min_cc, max_cc),
]
data_type = [DataType.f64, DataType.f64, DataType.f64, DataType.f64]
CreateGemmOperator(manifest, layouts, tile_descriptions, \
data_type, alignment_constraints)
#
#
def GenerateSM90_TensorOp_1684_complex(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 11, 8):
return
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.ColumnMajor, LayoutType.RowMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.RowMajor, LayoutType.ColumnMajor),
]
math_inst = \
MathInstruction( \
[16, 8, 4], \
DataType.f64, DataType.f64, DataType.f64, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add_complex)
min_cc = 90
max_cc = 1024
alignment_constraints = [1,]
tile_descriptions = [
TileDescription([128, 64, 8 ], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 128, 8 ], 3, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([64, 64, 8 ], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 32, 8 ], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([32, 64, 8 ], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([32, 32, 8 ], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([16, 32, 8 ], 4, [1, 2, 1], math_inst, min_cc, max_cc),
TileDescription([32, 16, 8 ], 4, [2, 1, 1], math_inst, min_cc, max_cc),
TileDescription([128, 64, 16], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 128, 16], 3, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([64, 64, 16], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 32, 16], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([32, 64, 16], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([32, 32, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([16, 32, 16], 4, [1, 2, 1], math_inst, min_cc, max_cc),
TileDescription([32, 16, 16], 3, [2, 1, 1], math_inst, min_cc, max_cc),
]
data_type = [DataType.cf64, DataType.cf64, DataType.cf64, DataType.cf64]
complex_transforms = [
(ComplexTransform.none, ComplexTransform.none),
(ComplexTransform.conj, ComplexTransform.none),
(ComplexTransform.none, ComplexTransform.conj),
(ComplexTransform.conj, ComplexTransform.conj)
]
CreateGemmOperator(manifest, layouts, tile_descriptions, \
data_type, alignment_constraints, complex_transforms)
#
#
def GenerateSM90_TensorOp_1684_complex_gaussian(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 11, 8):
return
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.ColumnMajor, LayoutType.RowMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.RowMajor, LayoutType.ColumnMajor),
]
math_inst = \
MathInstruction( \
[16, 8, 4], \
DataType.f64, DataType.f64, DataType.f64, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add_complex_gaussian)
min_cc = 90
max_cc = 1024
alignment_constraints = [1,]
tile_descriptions = [
TileDescription([64, 64, 8], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 32, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([32, 64, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([32, 32, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([16, 32, 8], 4, [1, 2, 1], math_inst, min_cc, max_cc),
TileDescription([32, 16, 8], 4, [2, 1, 1], math_inst, min_cc, max_cc),
]
data_type = [DataType.cf64, DataType.cf64, DataType.cf64, DataType.cf64]
complex_transforms = [
(ComplexTransform.none, ComplexTransform.none),
(ComplexTransform.conj, ComplexTransform.none),
(ComplexTransform.none, ComplexTransform.conj),
(ComplexTransform.conj, ComplexTransform.conj)
]
CreateGemmOperator(manifest, layouts, tile_descriptions, \
data_type, alignment_constraints, complex_transforms)
#
#
def GenerateSM90_TensorOp_1684_rank_k(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 11, 8):
return
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.ColumnMajor),
]
fill_modes = [
FillMode.Lower, FillMode.Upper,
]
math_inst = \
MathInstruction( \
[16, 8, 4], \
DataType.f64, DataType.f64, DataType.f64, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add)
min_cc = 90
max_cc = 1024
alignment_constraints = [1,]
tile_descriptions = [
TileDescription([128, 128, 16], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 128, 16], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 64, 16], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 64, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 32, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([32, 64, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([32, 32, 16], 5, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([16, 32, 16], 5, [1, 2, 1], math_inst, min_cc, max_cc),
TileDescription([32, 16, 16], 5, [2, 1, 1], math_inst, min_cc, max_cc),
]
data_type = [DataType.f64, DataType.f64, DataType.f64]
CreateRankKOperator(manifest, layouts, fill_modes, tile_descriptions, \
data_type, alignment_constraints, BlasMode.symmetric)
#
#
def GenerateSM90_TensorOp_1684_rank_k_complex(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 11, 8):
return
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.ColumnMajor),
]
fill_modes = [
FillMode.Lower, FillMode.Upper,
]
math_inst = \
MathInstruction( \
[16, 8, 4], \
DataType.f64, DataType.f64, DataType.f64, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add_complex)
min_cc = 90
max_cc = 1024
alignment_constraints = [1,]
tile_descriptions = [
TileDescription([128, 64, 8], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 128, 8], 3, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([64, 64, 8], 3, [2, 2, 1], math_inst, min_cc, max_cc),
#TileDescription([64, 32, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc),
#TileDescription([32, 64, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc),
#TileDescription([32, 32, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc),
#TileDescription([16, 32, 8], 4, [1, 2, 1], math_inst, min_cc, max_cc),
#TileDescription([32, 16, 8], 4, [2, 1, 1], math_inst, min_cc, max_cc),
]
data_type = [DataType.cf64, DataType.cf64, DataType.cf64]
# SYRK computation
CreateRankKOperator(manifest, layouts, fill_modes, tile_descriptions, \
data_type, alignment_constraints, BlasMode.symmetric)
# HERK computation
CreateRankKOperator(manifest, layouts, fill_modes, tile_descriptions, \
data_type, alignment_constraints, BlasMode.hermitian)
#
#
def GenerateSM90_TensorOp_1684_rank_k_complex_gaussian(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 11, 8):
return
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.ColumnMajor),
]
fill_modes = [
FillMode.Lower, FillMode.Upper,
]
math_inst = \
MathInstruction( \
[16, 8, 4], \
DataType.f64, DataType.f64, DataType.f64, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add_complex_gaussian)
min_cc = 90
max_cc = 1024
alignment_constraints = [1,]
tile_descriptions = [
TileDescription([64, 64, 8], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 32, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([32, 64, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc),
#TileDescription([32, 32, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc),
#TileDescription([16, 32, 8], 4, [1, 2, 1], math_inst, min_cc, max_cc),
#TileDescription([32, 16, 8], 4, [2, 1, 1], math_inst, min_cc, max_cc),
]
data_type = [DataType.cf64, DataType.cf64, DataType.cf64]
complex_transforms = [ComplexTransform.none,]
# SYRK computation
CreateRankKOperator(manifest, layouts, fill_modes, tile_descriptions, \
data_type, alignment_constraints, BlasMode.symmetric)
# HERK computation
CreateRankKOperator(manifest, layouts, fill_modes, tile_descriptions, \
data_type, alignment_constraints, BlasMode.hermitian)
#
#
def GenerateSM90_TensorOp_1684_trmm(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 11, 8):
return
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
]
side_modes = [
SideMode.Left, SideMode.Right,
]
fill_modes = [
FillMode.Lower, FillMode.Upper,
]
diag_types = [
DiagType.NonUnit, DiagType.Unit,
]
math_inst = \
MathInstruction( \
[16, 8, 4], \
DataType.f64, DataType.f64, DataType.f64, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add)
min_cc = 90
max_cc = 1024
alignment_constraints = [1,]
tile_descriptions = [
TileDescription([128, 128, 16], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 128, 16], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 64, 16], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 64, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc),
]
data_type = [DataType.f64, DataType.f64, DataType.f64, DataType.f64]
CreateTrmmOperator(manifest, layouts, side_modes, fill_modes, diag_types, tile_descriptions, \
data_type, alignment_constraints)
#
#
def GenerateSM90_TensorOp_1684_trmm_complex(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 11, 8):
return
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
]
side_modes = [
SideMode.Left, SideMode.Right,
]
fill_modes = [
FillMode.Lower, FillMode.Upper,
]
diag_types = [
DiagType.NonUnit, DiagType.Unit,
]
math_inst = \
MathInstruction( \
[16, 8, 4], \
DataType.f64, DataType.f64, DataType.f64, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add_complex)
min_cc = 90
max_cc = 1024
alignment_constraints = [1,]
tile_descriptions = [
TileDescription([128, 64, 8], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 128, 8], 3, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([64, 64, 8], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 32, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([32, 64, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc),
]
data_type = [DataType.cf64, DataType.cf64, DataType.cf64, DataType.cf64]
complex_transforms = [
ComplexTransform.none, ComplexTransform.conj,
]
CreateTrmmOperator(manifest, layouts, side_modes, fill_modes, diag_types, tile_descriptions, \
data_type, alignment_constraints, complex_transforms)
#
#
def GenerateSM90_TensorOp_1684_trmm_complex_gaussian(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 11, 8):
return
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
]
side_modes = [
SideMode.Left, SideMode.Right,
]
fill_modes = [
FillMode.Lower, FillMode.Upper,
]
diag_types = [
DiagType.NonUnit, DiagType.Unit,
]
math_inst = \
MathInstruction( \
[16, 8, 4], \
DataType.f64, DataType.f64, DataType.f64, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add_complex_gaussian)
min_cc = 90
max_cc = 1024
alignment_constraints = [1,]
tile_descriptions = [
TileDescription([64, 64, 8], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 32, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([32, 64, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc),
]
data_type = [DataType.cf64, DataType.cf64, DataType.cf64, DataType.cf64]
complex_transforms = [
ComplexTransform.none, ComplexTransform.conj,
]
CreateTrmmOperator(manifest, layouts, side_modes, fill_modes, diag_types, tile_descriptions, \
data_type, alignment_constraints, complex_transforms)
#
#
def GenerateSM90_TensorOp_1684_symm(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 11, 8):
return
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor),
]
side_modes = [
SideMode.Left, SideMode.Right,
]
fill_modes = [
FillMode.Lower, FillMode.Upper,
]
math_inst = \
MathInstruction( \
[16, 8, 4], \
DataType.f64, DataType.f64, DataType.f64, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add)
min_cc = 90
max_cc = 1024
alignment_constraints = [1,]
tile_descriptions = [
TileDescription([128, 128, 16], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 128, 16], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 64, 16], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 64, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 32, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([32, 64, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([32, 32, 16], 5, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([16, 32, 16], 5, [1, 2, 1], math_inst, min_cc, max_cc),
TileDescription([32, 16, 16], 5, [2, 1, 1], math_inst, min_cc, max_cc),
]
data_type = [DataType.f64, DataType.f64, DataType.f64, DataType.f64]
CreateSymmOperator(manifest, layouts, side_modes, fill_modes, tile_descriptions, \
data_type, alignment_constraints, BlasMode.symmetric)
#
#
def GenerateSM90_TensorOp_1684_symm_complex(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 11, 8):
return
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor),
]
side_modes = [
SideMode.Left, SideMode.Right,
]
fill_modes = [
FillMode.Lower, FillMode.Upper,
]
math_inst = \
MathInstruction( \
[16, 8, 4], \
DataType.f64, DataType.f64, DataType.f64, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add_complex)
min_cc = 90
max_cc = 1024
alignment_constraints = [1,]
tile_descriptions = [
TileDescription([128, 64, 8], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 128, 8], 3, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([64, 64, 8], 3, [2, 2, 1], math_inst, min_cc, max_cc),
#TileDescription([64, 32, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc),
#TileDescription([32, 64, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc),
#TileDescription([32, 32, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc),
#TileDescription([16, 32, 8], 4, [1, 2, 1], math_inst, min_cc, max_cc),
#TileDescription([32, 16, 8], 4, [2, 1, 1], math_inst, min_cc, max_cc),
]
data_type = [DataType.cf64, DataType.cf64, DataType.cf64, DataType.cf64]
# SYMM computation
CreateSymmOperator(manifest, layouts, side_modes, fill_modes, tile_descriptions, \
data_type, alignment_constraints, BlasMode.symmetric)
# HEMM computation
CreateSymmOperator(manifest, layouts, side_modes, fill_modes, tile_descriptions, \
data_type, alignment_constraints, BlasMode.hermitian)
#
#
def GenerateSM90_TensorOp_1684_symm_complex_gaussian(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 11, 8):
return
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor),
]
side_modes = [
SideMode.Left, SideMode.Right,
]
fill_modes = [
FillMode.Lower, FillMode.Upper,
]
math_inst = \
MathInstruction( \
[16, 8, 4], \
DataType.f64, DataType.f64, DataType.f64, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add_complex_gaussian)
min_cc = 90
max_cc = 1024
alignment_constraints = [1,]
tile_descriptions = [
TileDescription([64, 64, 8], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 32, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([32, 64, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc),
#TileDescription([32, 32, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc),
#TileDescription([16, 32, 8], 4, [1, 2, 1], math_inst, min_cc, max_cc),
#TileDescription([32, 16, 8], 4, [2, 1, 1], math_inst, min_cc, max_cc),
]
data_type = [DataType.cf64, DataType.cf64, DataType.cf64, DataType.cf64]
complex_transforms = [ComplexTransform.none,]
# SYMM computation
CreateSymmOperator(manifest, layouts, side_modes, fill_modes, tile_descriptions, \
data_type, alignment_constraints, BlasMode.symmetric)
# HEMM computation
CreateSymmOperator(manifest, layouts, side_modes, fill_modes, tile_descriptions, \
data_type, alignment_constraints, BlasMode.hermitian)
#
###################################################################################################
#
def GenerateSM90(manifest, cuda_version):
GenerateSM90_TensorOp_1684(manifest, cuda_version)
GenerateSM90_TensorOp_1684_complex(manifest, cuda_version)
GenerateSM90_TensorOp_1684_complex_gaussian(manifest, cuda_version)
GenerateSM90_TensorOp_1684_rank_k(manifest, cuda_version)
GenerateSM90_TensorOp_1684_rank_k_complex(manifest, cuda_version)
GenerateSM90_TensorOp_1684_rank_k_complex_gaussian(manifest, cuda_version)
GenerateSM90_TensorOp_1684_trmm(manifest, cuda_version)
GenerateSM90_TensorOp_1684_trmm_complex(manifest, cuda_version)
GenerateSM90_TensorOp_1684_trmm_complex_gaussian(manifest, cuda_version)
GenerateSM90_TensorOp_1684_symm(manifest, cuda_version)
GenerateSM90_TensorOp_1684_symm_complex(manifest, cuda_version)
GenerateSM90_TensorOp_1684_symm_complex_gaussian(manifest, cuda_version)
###################################################################################################
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Generates device kernel registration code for CUTLASS Kernels")
parser.add_argument("--operations", default="all", help="Specifies the operation to generate (gemm, all)")
parser.add_argument("--build-dir", default=".", required=False, help="CUTLASS top-level build directory")
parser.add_argument("--curr-build-dir", default=".", help="CUTLASS current build directory. cmake files will be emitted in this directory")
parser.add_argument("--generator-target", default='library', help="Target of CUTLASS Library Generator.")
parser.add_argument("--architectures", default='53;60;61;70;75;80', help="Target compute architectures")
parser.add_argument("--kernels", default='', help='Comma delimited list to filter kernels by name.')
parser.add_argument("--ignore-kernels", default='', help='Comma delimited list of kernels to exclude from build.')
parser.add_argument("--filter-by-cc", default='True', type=str, help='If enabled, kernels whose comupte capability range is not satisfied by the build target are excluded.')
parser.add_argument("--cuda-version", default="11.0.0", help="Semantic version string of CUDA Toolkit")
parser.add_argument('--kernel-filter-file', type=str, default=None, required=False, help='Full path of filter file')
parser.add_argument('--selected-kernel-list', type=str, default=None, required=False,
help='Specify the output log file containing all enabled kernels in this build')
parser.add_argument("--interface-dir", default=None, required=False, help="Interface header to kernels")
args = parser.parse_args()
manifest = Manifest(args)
GenerateSM50(manifest, args.cuda_version)
GenerateSM60(manifest, args.cuda_version)
GenerateSM61(manifest, args.cuda_version)
GenerateSM70(manifest, args.cuda_version)
GenerateSM75(manifest, args.cuda_version)
GenerateSM80(manifest, args.cuda_version)
GenerateSM90(manifest, args.cuda_version)
if 'library' in args.generator_target.split(','):
manifest.emit(GeneratorTarget.Library)
if args.selected_kernel_list is not None:
if len(manifest.selected_kernels) > 0:
with open(args.selected_kernel_list, 'w') as file_writer:
for line in manifest.selected_kernels:
file_writer.write("%s\n" % line)
#
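#
# Example invocation (illustrative only; the paths and the kernel filter below
# are hypothetical, and CMake normally drives this script with its own
# directories):
#
#   python generator.py --operations=all --architectures="80;90" \
#     --build-dir=/path/to/build --curr-build-dir=/path/to/build \
#     --cuda-version=11.8.0 --kernels=cutlass_tensorop_d884gemm
#
# With --generator-target=library (the default) the manifest emits the kernel
# registration sources; --selected-kernel-list=<file> additionally logs every
# enabled kernel name, as implemented above.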
###################################################################################################
| warp-main | warp/native/cutlass/tools/library/scripts/generator.py |
#
# \file conv3d_operation.py
#
# \brief Generates the CUTLASS Library's instances
#
#
import enum
import os.path
import shutil
from library import *
###################################################################################################
#
class Conv3dOperation:
#
def __init__(self, conv_kind, iterator_algorithm, arch, tile_description, A, B, C, element_epilogue, \
stride_support, epilogue_functor = EpilogueFunctor.LinearCombination, swizzling_functor = SwizzlingFunctor.Identity4):
self.operation_kind = OperationKind.Conv3d
self.arch = arch
self.tile_description = tile_description
self.conv_kind = conv_kind
self.A = A
self.B = B
self.C = C
self.element_epilogue = element_epilogue
self.epilogue_functor = epilogue_functor
self.iterator_algorithm = iterator_algorithm
self.stride_support = stride_support
self.swizzling_functor = swizzling_functor
#
def core_name(self):
''' The basic operation kind is prefixed with a letter indicating the accumulation type. '''
intermediate_type = ''
if self.tile_description.math_instruction.opcode_class == OpcodeClass.TensorOp:
inst_shape = "%d%d%d" % tuple(self.tile_description.math_instruction.instruction_shape)
if self.tile_description.math_instruction.element_a != self.A.element and \
self.tile_description.math_instruction.element_a != self.tile_description.math_instruction.element_accumulator:
intermediate_type = DataTypeNames[self.tile_description.math_instruction.element_a]
else:
inst_shape = ''
return "%s%s%s%s3d_%s" % (ShortDataTypeNames[self.tile_description.math_instruction.element_accumulator], \
inst_shape, intermediate_type, ConvKindNames[self.conv_kind], IteratorAlgorithmNames[self.iterator_algorithm])
#
def extended_name(self):
''' Append data types if they differ from compute type. '''
if self.C.element != self.tile_description.math_instruction.element_accumulator and \
self.A.element != self.tile_description.math_instruction.element_accumulator:
extended_name = "${element_c}_${core_name}_${element_a}"
elif self.C.element == self.tile_description.math_instruction.element_accumulator and \
self.A.element != self.tile_description.math_instruction.element_accumulator:
extended_name = "${core_name}_${element_a}"
else:
extended_name = "${core_name}"
extended_name = SubstituteTemplate(extended_name, {
'element_a': DataTypeNames[self.A.element],
'element_c': DataTypeNames[self.C.element],
'core_name': self.core_name()
})
return extended_name
#
def configuration_name(self):
    ''' The configuration name encodes the opcode class, extended name, threadblock shape, stage count, and stride support. '''
opcode_class_name = OpcodeClassNames[self.tile_description.math_instruction.opcode_class]
threadblock = "%dx%d_%dx%d" % (
self.tile_description.threadblock_shape[0],
self.tile_description.threadblock_shape[1],
self.tile_description.threadblock_shape[2],
self.tile_description.stages
)
if self.stride_support == StrideSupport.Unity:
configuration_name = "cutlass_${opcode_class}_${extended_name}_${threadblock}_unity_stride"
else:
configuration_name = "cutlass_${opcode_class}_${extended_name}_${threadblock}"
return SubstituteTemplate(
configuration_name,
{
'opcode_class': opcode_class_name,
'extended_name': self.extended_name(),
'threadblock': threadblock,
}
)
#
def procedural_name(self):
    ''' The procedural name is identical to the configuration name. '''
return self.configuration_name()
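#
# Worked example of the naming scheme above (illustrative values): an Fprop
# kernel with f16 data, f16 accumulation, 16x8x16 TensorOp instructions, an
# optimized iterator, a 128x128x32 threadblock, and 3 stages yields
#   core_name():          "h16816fprop3d_optimized"
#   configuration_name(): "cutlass_tensorop_h16816fprop3d_optimized_128x128_32x3"
# with "_unity_stride" appended when stride_support is StrideSupport.Unity.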
###################################################################################################
#
# Emits single instances of a CUTLASS device-wide operator
#
###################################################################################################
class EmitConv3dInstance:
def __init__(self):
self.template = """
// Conv3d${conv_kind_name} ${iterator_algorithm_name} kernel instance "${operation_name}"
using ${operation_name}_base =
typename cutlass::conv::kernel::DefaultConv3d${conv_kind_name}<
${element_a},
cutlass::layout::TensorNDHWC,
${element_b},
cutlass::layout::TensorNDHWC,
${element_c},
cutlass::layout::TensorNDHWC,
${element_accumulator},
${opcode_class},
${arch},
cutlass::gemm::GemmShape<${threadblock_shape_m}, ${threadblock_shape_n}, ${threadblock_shape_k}>,
cutlass::gemm::GemmShape<${warp_shape_m}, ${warp_shape_n}, ${warp_shape_k} >,
cutlass::gemm::GemmShape<${instruction_shape_m}, ${instruction_shape_n}, ${instruction_shape_k}>,
${epilogue_functor}<
${element_c},
${epilogue_vector_length},
${element_accumulator},
${element_epilogue}
>,
${swizzling_functor}, // cutlass::gemm::threadblock::GemmSplitKIdentityThreadblockSwizzle<>,
${stages},
cutlass::arch::OpMultiplyAdd,
${iterator_algorithm},
${stride_support}
>::Kernel;
"""
def emit(self, operation):
warp_shape = [int(operation.tile_description.threadblock_shape[idx] / operation.tile_description.warp_count[idx]) for idx in range(3)]
epilogue_vector_length = int(min(operation.C.alignment * DataTypeSize[operation.C.element], 128) / DataTypeSize[operation.C.element])
values = {
'operation_name': operation.procedural_name(),
'conv_kind': ConvKindTag[operation.conv_kind],
'conv_kind_name': ConvKindNames[operation.conv_kind].capitalize(),
'element_a': DataTypeTag[operation.A.element],
'layout_a': LayoutTag[operation.A.layout],
'element_b': DataTypeTag[operation.B.element],
'layout_b': LayoutTag[operation.B.layout],
'element_c': DataTypeTag[operation.C.element],
'layout_c': LayoutTag[operation.C.layout],
'element_accumulator': DataTypeTag[operation.tile_description.math_instruction.element_accumulator],
'opcode_class': OpcodeClassTag[operation.tile_description.math_instruction.opcode_class],
'arch': "cutlass::arch::Sm%d" % operation.arch,
'threadblock_shape_m': str(operation.tile_description.threadblock_shape[0]),
'threadblock_shape_n': str(operation.tile_description.threadblock_shape[1]),
'threadblock_shape_k': str(operation.tile_description.threadblock_shape[2]),
'warp_shape_m': str(warp_shape[0]),
'warp_shape_n': str(warp_shape[1]),
'warp_shape_k': str(warp_shape[2]),
'instruction_shape_m': str(operation.tile_description.math_instruction.instruction_shape[0]),
'instruction_shape_n': str(operation.tile_description.math_instruction.instruction_shape[1]),
'instruction_shape_k': str(operation.tile_description.math_instruction.instruction_shape[2]),
'epilogue_vector_length': str(epilogue_vector_length),
'epilogue_functor': EpilogueFunctorTag[operation.epilogue_functor],
'element_epilogue': str(DataTypeTag[operation.element_epilogue]),
'swizzling_functor': SwizzlingFunctorTag[operation.swizzling_functor],
'stages': str(operation.tile_description.stages),
'iterator_algorithm': IteratorAlgorithmTag[operation.iterator_algorithm],
'iterator_algorithm_name': IteratorAlgorithmNames[operation.iterator_algorithm].capitalize(),
'stride_support': StrideSupportTag[operation.stride_support]
}
return SubstituteTemplate(self.template, values)
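#
# Minimal emission sketch (not invoked anywhere in the build; it assumes
# ConvKind.Fprop, IteratorAlgorithm.Optimized, and StrideSupport.Strided are
# the enum members provided by library.py):
def _demo_emit_conv3d_instance():
  math_inst = MathInstruction( \
    [16, 8, 16], \
    DataType.f16, DataType.f16, DataType.f16, \
    OpcodeClass.TensorOp, \
    MathOperation.multiply_add)
  tile = TileDescription([128, 128, 32], 3, [2, 2, 1], math_inst, 80, 1024)
  A = TensorDescription(DataType.f16, LayoutType.TensorNDHWC, 8)
  B = TensorDescription(DataType.f16, LayoutType.TensorNDHWC, 8)
  C = TensorDescription(DataType.f16, LayoutType.TensorNDHWC, 8)
  operation = Conv3dOperation(ConvKind.Fprop, IteratorAlgorithm.Optimized, 80, tile, \
    A, B, C, DataType.f16, StrideSupport.Strided)
  # Returns the C++ kernel instance definition rendered from the template above.
  return EmitConv3dInstance().emit(operation)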
###################################################################################################
#
# Generator functions for all layouts
#
###################################################################################################
#
def GenerateConv3dTensorOp(manifest, tile_descriptions, min_cc, align = 128):
for tile in tile_descriptions:
for conv_kind in [ConvKind.Fprop, ConvKind.Dgrad, ConvKind.Wgrad]:
if conv_kind == ConvKind.Fprop or (tile.math_instruction.element_accumulator in [DataType.f16, DataType.f32]):
#
output_types = [tile.math_instruction.element_a, tile.math_instruction.element_accumulator] \
if DataTypeSize[tile.math_instruction.element_accumulator] == 32 \
else [tile.math_instruction.element_accumulator,]
for output_type in output_types:
A = TensorDescription(tile.math_instruction.element_a, LayoutType.TensorNDHWC, int(align / DataTypeSize[tile.math_instruction.element_a]))
B = TensorDescription(tile.math_instruction.element_b, LayoutType.TensorNDHWC, int(align / DataTypeSize[tile.math_instruction.element_b]))
C = TensorDescription(output_type, LayoutType.TensorNDHWC, max(1, int(align / DataTypeSize[output_type])))
          # NOTE: the original call here omitted the iterator_algorithm and
          # stride_support arguments required by Conv3dOperation.__init__;
          # IteratorAlgorithm.Optimized and StrideSupport.Strided are assumed
          # defaults supplied to make the call well-formed.
          manifest.append(Conv3dOperation(conv_kind, IteratorAlgorithm.Optimized, min_cc, tile, \
            A, B, C, tile.math_instruction.element_accumulator, StrideSupport.Strided))
###################################################################################################
#
# Emitters functions for all targets
#
###################################################################################################
class EmitConv3dConfigurationLibrary:
def __init__(self, operation_path, configuration_name):
self.configuration_name = configuration_name
self.configuration_path = os.path.join(operation_path, "%s.cu" % configuration_name)
self.instance_emitter = EmitConv3dInstance()
self.instance_template = """
${operation_instance}
// Derived class
struct ${operation_name} :
public ${operation_name}_base { };
///////////////////////////////////////////////////////////////////////////////////////////////////
"""
self.header_template = """
/*
Generated by conv3d_operation.py - Do not edit.
*/
///////////////////////////////////////////////////////////////////////////////////////////////////
#include "cutlass/cutlass.h"
#include "cutlass/library/library.h"
#include "cutlass/library/manifest.h"
#include "library_internal.h"
#include "conv3d_operation.h"
///////////////////////////////////////////////////////////////////////////////////////////////////
"""
self.configuration_header = """
namespace cutlass {
namespace library {
// Initialize all instances
void initialize_${configuration_name}(Manifest &manifest) {
"""
self.configuration_instance = """
using Operation_${operation_name} = cutlass::conv::device::ImplicitGemmConvolution<
${operation_name}>;
manifest.append(new cutlass::library::Conv3dOperation<
Operation_${operation_name}>(
"${operation_name}"));
"""
self.configuration_epilogue = """
}
"""
self.epilogue_template = """
///////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace library
} // namespace cutlass
///////////////////////////////////////////////////////////////////////////////////////////////////
"""
#
def __enter__(self):
self.configuration_file = open(self.configuration_path, "w")
self.configuration_file.write(SubstituteTemplate(self.header_template, {
'configuration_name': self.configuration_name
}))
self.operations = []
return self
#
def emit(self, operation):
self.operations.append(operation)
self.configuration_file.write(SubstituteTemplate(self.instance_template, {
'configuration_name': self.configuration_name,
'operation_name': operation.procedural_name(),
'operation_instance': self.instance_emitter.emit(operation)
}))
#
def __exit__(self, exception_type, exception_value, traceback):
self.configuration_file.write(SubstituteTemplate(self.configuration_header, {
'configuration_name': self.configuration_name
}))
for operation in self.operations:
self.configuration_file.write(SubstituteTemplate(self.configuration_instance, {
'configuration_name': self.configuration_name,
'operation_name': operation.procedural_name()
}))
self.configuration_file.write(self.configuration_epilogue)
self.configuration_file.write(self.epilogue_template)
self.configuration_file.close()
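#
# Usage sketch (the output directory is hypothetical; manifest.py normally
# drives this emitter): writes "<operation_path>/<configuration_name>.cu"
# containing each emitted instance plus an initialize_<configuration_name>()
# function that registers the instances with the manifest.
#
#   with EmitConv3dConfigurationLibrary("/tmp/ops", "demo_conv3d_configuration") as emitter:
#     emitter.emit(operation)   # operation: a Conv3dOperation, e.g. from the sketch above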
###################################################################################################
###################################################################################################
| warp-main | warp/native/cutlass/tools/library/scripts/conv3d_operation.py |
#
# \file rank_2k_operation.py
#
# \brief Generates the CUTLASS Library's instances
#
#
import enum
import os.path
import shutil
import functools
import operator
from library import *
###################################################################################################
#
# Data structure modeling a Rank K update operation
#
###################################################################################################
#
class Rank2KOperation:
#
def __init__(self, rank_k_kind, arch, tile_description, A, C, element_epilogue, \
epilogue_functor = EpilogueFunctor.LinearCombination, swizzling_functor = SwizzlingFunctor.Identity8, \
blas_mode = BlasMode.symmetric):
self.blas_mode = blas_mode
self.operation_kind = OperationKind.Rank2K
self.arch = arch
self.tile_description = tile_description
self.rank_k_kind = rank_k_kind
# tensor A and B have same data type and layout
self.A = A
self.B = A
self.C = C
self.element_epilogue = element_epilogue
self.epilogue_functor = epilogue_functor
self.swizzling_functor = swizzling_functor
#
def is_complex(self):
complex_operators = [
MathOperation.multiply_add_complex,
MathOperation.multiply_add_complex_gaussian,
MathOperation.multiply_add_complex_fast_f32
]
    return self.tile_description.math_instruction.math_operation in complex_operators
#
def is_planar_complex(self):
return False
#
def accumulator_type(self):
accum = self.tile_description.math_instruction.element_accumulator
if self.is_complex():
return get_complex_from_real(accum)
return accum
#
def short_math_name(self):
if self.tile_description.math_instruction.math_operation == MathOperation.multiply_add_complex_gaussian:
return "g%s" % ShortDataTypeNames[self.accumulator_type()]
return ShortDataTypeNames[self.accumulator_type()]
#
def core_name(self):
''' The basic operation kind is prefixed with a letter indicating the accumulation type. '''
inst_shape = ''
inst_operation = ''
intermediate_type = ''
math_operations_map = {
MathOperation.xor_popc: 'xor',
}
if self.tile_description.math_instruction.opcode_class == OpcodeClass.TensorOp or \
self.tile_description.math_instruction.opcode_class == OpcodeClass.WmmaTensorOp:
math_op = self.tile_description.math_instruction.math_operation
math_op_string = math_operations_map[math_op] if math_op in math_operations_map.keys() else ''
inst_shape = "%d%d%d" % tuple(self.tile_description.math_instruction.instruction_shape)
inst_shape += math_op_string
if self.tile_description.math_instruction.element_a != self.A.element and \
self.tile_description.math_instruction.element_a != self.tile_description.math_instruction.element_accumulator:
intermediate_type = DataTypeNames[self.tile_description.math_instruction.element_a]
operation_name = 'syr2k' if self.blas_mode == BlasMode.symmetric else 'her2k'
return "%s%s%s%s" % (self.short_math_name(), inst_shape, intermediate_type, operation_name)
#
def extended_name(self):
''' Append data types if they differ from compute type. '''
if self.is_complex():
extended_name = "${core_name}"
else:
if self.C.element != self.tile_description.math_instruction.element_accumulator and \
self.A.element != self.tile_description.math_instruction.element_accumulator:
extended_name = "${element_c}_${core_name}_${element_a}"
elif self.C.element == self.tile_description.math_instruction.element_accumulator and \
self.A.element != self.tile_description.math_instruction.element_accumulator:
extended_name = "${core_name}_${element_a}"
else:
extended_name = "${core_name}"
extended_name = SubstituteTemplate(extended_name, {
'element_a': DataTypeNames[self.A.element],
'element_c': DataTypeNames[self.C.element],
'core_name': self.core_name()
})
return extended_name
#
def layout_name(self):
if self.is_complex() or self.is_planar_complex():
return "%s" % (
ShortComplexLayoutNames[(self.A.layout, self.A.complex_transform)]
)
return "%s" % (ShortLayoutTypeNames[self.A.layout])
#
def fill_mode_name(self):
return "%s" % (ShortFillModeNames[self.C.fill_mode])
#
def procedural_name(self):
    ''' The full procedural name encodes the opcode class, data types, threadblock shape, layout, fill mode, and alignment. '''
threadblock = self.tile_description.procedural_name()
opcode_class_name = OpcodeClassNames[self.tile_description.math_instruction.opcode_class]
return SubstituteTemplate(
"cutlass_${opcode_class}_${extended_name}_${threadblock}_${layout}_${fill_mode}_align${alignment}",
{
'opcode_class': opcode_class_name,
'extended_name': self.extended_name(),
'threadblock': threadblock,
'layout': self.layout_name(),
'fill_mode': self.fill_mode_name(),
'alignment': "%d" % self.A.alignment,
}
)
#
def configuration_name(self):
    ''' The configuration name is identical to the procedural name. '''
return self.procedural_name()
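#
# Worked example of the naming scheme above (illustrative values): a TensorOp
# f64 SYR2K with column-major A, lower-fill C, alignment 1, a 128x128x16
# threadblock, and 3 stages might be named
#   "cutlass_tensorop_d884syr2k_128x128_16x3_n_l_align1"
# where "d884" combines the accumulator short name with the 8x8x4 instruction
# shape, and "n"/"l" abbreviate the layout and fill mode.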
###################################################################################################
#
# Emits single instances of a CUTLASS device-wide operator
#
###################################################################################################
#
class EmitRank2KUniversalInstance:
  ''' Responsible for emitting a CUTLASS template definition. '''
def __init__(self):
self.rank_k_template = """
// Rank K operator ${operation_name}
using Operation_${operation_name} =
typename cutlass::gemm::device::Rank2K<
${element_a}, ${layout_a},
${element_b}, ${layout_b},
${element_c}, ${layout_c}, ${fill_mode},
${element_accumulator},
${opcode_class},
${arch},
cutlass::gemm::GemmShape<${threadblock_shape_m}, ${threadblock_shape_n}, ${threadblock_shape_k}>,
cutlass::gemm::GemmShape<${warp_shape_m}, ${warp_shape_n}, ${warp_shape_k}>,
cutlass::gemm::GemmShape<${instruction_shape_m}, ${instruction_shape_n}, ${instruction_shape_k}>,
${epilogue_functor}<
${element_c},
${epilogue_vector_length},
${element_accumulator},
${element_epilogue}
>,
${swizzling_functor},
${stages},
${align_a},
${align_b},
${split_k_serial},
${math_operation}
>;
"""
self.rank_k_complex_template = """
// Rank K operator ${operation_name}
using Operation_${operation_name} =
typename cutlass::gemm::device::Rank2K<
${element_a}, ${layout_a},
${element_b}, ${layout_b},
${element_c}, ${layout_c}, ${fill_mode},
${element_accumulator},
${opcode_class},
${arch},
cutlass::gemm::GemmShape<${threadblock_shape_m}, ${threadblock_shape_n}, ${threadblock_shape_k}>,
cutlass::gemm::GemmShape<${warp_shape_m}, ${warp_shape_n}, ${warp_shape_k}>,
cutlass::gemm::GemmShape<${instruction_shape_m}, ${instruction_shape_n}, ${instruction_shape_k}>,
${epilogue_functor}<
${element_c},
${epilogue_vector_length},
${element_accumulator},
${element_epilogue}
>,
${swizzling_functor},
${stages},
${align_a},
${align_b},
${split_k_serial},
${math_operation},
${transform_a},
${transform_b},
${blas_mode}
>;
"""
def emit(self, operation):
threadblock_shape = operation.tile_description.threadblock_shape
warp_count = operation.tile_description.warp_count
warp_shape = [threadblock_shape[idx] // warp_count[idx] for idx in range(3)]
epilogue_vector_length = int(min(operation.C.alignment * DataTypeSize[operation.C.element], 128) / DataTypeSize[operation.C.element])
values = {
'operation_name': operation.procedural_name(),
'element_a': DataTypeTag[operation.A.element],
'layout_a': LayoutTag[operation.A.layout],
'element_b': DataTypeTag[operation.B.element],
'layout_b': LayoutTag[operation.B.layout],
'element_c': DataTypeTag[operation.C.element],
'layout_c': LayoutTag[operation.C.layout],
'fill_mode': FillModeTag[operation.C.fill_mode],
'element_accumulator': DataTypeTag[operation.accumulator_type()],
'opcode_class': OpcodeClassTag[operation.tile_description.math_instruction.opcode_class],
'arch': "cutlass::arch::Sm%d" % operation.arch,
'threadblock_shape_m': str(operation.tile_description.threadblock_shape[0]),
'threadblock_shape_n': str(operation.tile_description.threadblock_shape[1]),
'threadblock_shape_k': str(operation.tile_description.threadblock_shape[2]),
'warp_shape_m': str(warp_shape[0]),
'warp_shape_n': str(warp_shape[1]),
'warp_shape_k': str(warp_shape[2]),
'instruction_shape_m': str(operation.tile_description.math_instruction.instruction_shape[0]),
'instruction_shape_n': str(operation.tile_description.math_instruction.instruction_shape[1]),
'instruction_shape_k': str(operation.tile_description.math_instruction.instruction_shape[2]),
'epilogue_vector_length': str(epilogue_vector_length),
'element_epilogue': str(DataTypeTag[operation.element_epilogue]),
'epilogue_functor': EpilogueFunctorTag[operation.epilogue_functor],
'swizzling_functor': SwizzlingFunctorTag[operation.swizzling_functor],
'stages': str(operation.tile_description.stages),
'align_a': str(operation.A.alignment),
'align_b': str(operation.B.alignment),
'split_k_serial': 'false',
'math_operation': MathOperationTag[operation.tile_description.math_instruction.math_operation],
'transform_a': ComplexTransformTag[operation.A.complex_transform],
'transform_b': ComplexTransformTag[operation.B.complex_transform],
'blas_mode': BlasModeTag[operation.blas_mode]
}
rank_k_template = self.rank_k_complex_template if operation.is_complex() else self.rank_k_template
return SubstituteTemplate(rank_k_template, values)
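#
# emit() above renders a cutlass::gemm::device::Rank2K<...> type alias named
# Operation_<procedural_name>, switching to rank_k_complex_template (which
# appends the transform and blas_mode arguments) whenever the math operation
# is one of the complex multiply-add variants. A driving sketch:
#
#   cpp_source = EmitRank2KUniversalInstance().emit(operation)
#   # operation: a Rank2KOperation built from library.py descriptions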
###################################################################################################
###################################################################################################
#
# Emitters functions for all targets
#
###################################################################################################
class EmitRank2KConfigurationLibrary:
def __init__(self, operation_path, configuration_name):
self.configuration_name = configuration_name
self.configuration_path = os.path.join(operation_path, "%s.cu" % configuration_name).replace('\\', '/')
self.instance_emitter = {
RankKKind.Universal: EmitRank2KUniversalInstance,
}
self.rank_k_kind_wrappers = {
RankKKind.Universal: 'Rank2KOperation',
}
self.instance_template = {
RankKKind.Universal: """
${compile_guard_start}
manifest.append(new ${rank_k_kind}<
Operation_${operation_name}
>("${operation_name}"));
${compile_guard_end}
"""
    }
    # Referenced by emit() when wrapping WMMA-class kernels; defined here so
    # the SubstituteTemplate call in emit() does not hit a missing attribute.
    # The guard string mirrors the one used by CUTLASS's other emitters.
    self.wmma_guard_start = "#if defined(CUTLASS_ARCH_WMMA_SM${sm_number}_ENABLED)"
self.header_template = """
/*
Generated by rank_2k_operation.py - Do not edit.
*/
///////////////////////////////////////////////////////////////////////////////////////////////////
#include "cutlass/cutlass.h"
#include "cutlass/library/library.h"
#include "cutlass/library/manifest.h"
#include "library_internal.h"
#include "rank_2k_operation.h"
///////////////////////////////////////////////////////////////////////////////////////////////////
"""
self.initialize_function_template = """
///////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace library {
///////////////////////////////////////////////////////////////////////////////////////////////////
void initialize_${configuration_name}(Manifest &manifest) {
"""
self.epilogue_template = """
}
///////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace library
} // namespace cutlass
///////////////////////////////////////////////////////////////////////////////////////////////////
"""
def __enter__(self):
self.configuration_file = open(self.configuration_path, "w")
self.configuration_file.write(self.header_template)
self.instance_definitions = []
self.instance_wrappers = []
self.operations = []
return self
def emit(self, operation):
emitter = self.instance_emitter[operation.rank_k_kind]()
self.operations.append(operation)
self.instance_definitions.append(emitter.emit(operation))
self.instance_wrappers.append(SubstituteTemplate(self.instance_template[operation.rank_k_kind], {
'configuration_name': self.configuration_name,
'operation_name': operation.procedural_name(),
'rank_k_kind': self.rank_k_kind_wrappers[operation.rank_k_kind],
'compile_guard_start': SubstituteTemplate(self.wmma_guard_start, {'sm_number': str(operation.arch)}) \
if operation.tile_description.math_instruction.opcode_class == OpcodeClass.WmmaTensorOp else "",
'compile_guard_end': "#endif" \
if operation.tile_description.math_instruction.opcode_class == OpcodeClass.WmmaTensorOp else ""
}))
def __exit__(self, exception_type, exception_value, traceback):
# Write instance definitions in top-level namespace
for instance_definition in self.instance_definitions:
self.configuration_file.write(instance_definition)
# Add wrapper objects within initialize() function
self.configuration_file.write(SubstituteTemplate(self.initialize_function_template, {
'configuration_name': self.configuration_name
}))
for instance_wrapper in self.instance_wrappers:
self.configuration_file.write(instance_wrapper)
self.configuration_file.write(self.epilogue_template)
self.configuration_file.close()
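# Illustrative usage sketch (not invoked here; the output path and the
# `operations` list below are assumptions). The class is a context manager:
#
#   with EmitRank2KConfigurationLibrary("generated/rank_2k", "cutlass_rank_2k_cfg") as emitter:
#     for operation in operations:
#       emitter.emit(operation)
#
# On exit, the generated .cu file contains every instance definition followed
# by an initialize_<configuration_name>() function that appends one wrapper
# per operation to the library Manifest.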
###################################################################################################
| warp-main | warp/native/cutlass/tools/library/scripts/rank_2k_operation.py |
#
# \file symm_operation.py
#
# \brief Generates the CUTLASS Library's instances
#
#
import enum
import os.path
import shutil
import functools
import operator
from library import *
###################################################################################################
#
# Data structure modeling a Symm update operation
#
###################################################################################################
#
class SymmOperation:
#
def __init__(self, symm_kind, arch, tile_description, A, B, C, element_epilogue, \
epilogue_functor = EpilogueFunctor.LinearCombination, swizzling_functor = SwizzlingFunctor.Identity8, \
blas_mode = BlasMode.symmetric):
self.blas_mode = blas_mode
self.operation_kind = OperationKind.Symm
self.arch = arch
self.tile_description = tile_description
self.symm_kind = symm_kind
    # tensors A and B share the same data type and layout
self.A = A
self.B = B
self.C = C
self.element_epilogue = element_epilogue
self.epilogue_functor = epilogue_functor
self.swizzling_functor = swizzling_functor
#
def is_complex(self):
complex_operators = [
MathOperation.multiply_add_complex,
MathOperation.multiply_add_complex_gaussian,
MathOperation.multiply_add_complex_fast_f32
]
    return self.tile_description.math_instruction.math_operation in complex_operators
#
def is_planar_complex(self):
return False
#
def accumulator_type(self):
accum = self.tile_description.math_instruction.element_accumulator
if self.is_complex():
return get_complex_from_real(accum)
return accum
#
def short_math_name(self):
if self.tile_description.math_instruction.math_operation == MathOperation.multiply_add_complex_gaussian:
return "g%s" % ShortDataTypeNames[self.accumulator_type()]
return ShortDataTypeNames[self.accumulator_type()]
#
def core_name(self):
''' The basic operation kind is prefixed with a letter indicating the accumulation type. '''
inst_shape = ''
inst_operation = ''
intermediate_type = ''
math_operations_map = {
MathOperation.xor_popc: 'xor',
}
if self.tile_description.math_instruction.opcode_class == OpcodeClass.TensorOp or \
self.tile_description.math_instruction.opcode_class == OpcodeClass.WmmaTensorOp:
math_op = self.tile_description.math_instruction.math_operation
math_op_string = math_operations_map[math_op] if math_op in math_operations_map.keys() else ''
inst_shape = "%d%d%d" % tuple(self.tile_description.math_instruction.instruction_shape)
inst_shape += math_op_string
if self.tile_description.math_instruction.element_a != self.A.element and \
self.tile_description.math_instruction.element_a != self.tile_description.math_instruction.element_accumulator:
intermediate_type = DataTypeNames[self.tile_description.math_instruction.element_a]
operation_name = 'symm' if self.blas_mode == BlasMode.symmetric else 'hemm'
return "%s%s%s%s" % (self.short_math_name(), inst_shape, intermediate_type, operation_name)
#
def extended_name(self):
''' Append data types if they differ from compute type. '''
if self.is_complex():
extended_name = "${core_name}"
else:
if self.C.element != self.tile_description.math_instruction.element_accumulator and \
self.A.element != self.tile_description.math_instruction.element_accumulator:
extended_name = "${element_c}_${core_name}_${element_a}"
elif self.C.element == self.tile_description.math_instruction.element_accumulator and \
self.A.element != self.tile_description.math_instruction.element_accumulator:
extended_name = "${core_name}_${element_a}"
else:
extended_name = "${core_name}"
extended_name = SubstituteTemplate(extended_name, {
'element_a': DataTypeNames[self.A.element],
'element_c': DataTypeNames[self.C.element],
'core_name': self.core_name()
})
return extended_name
#
def layout_name(self):
if self.is_complex() or self.is_planar_complex():
return "%s" % (
ShortComplexLayoutNames[(self.A.layout, self.A.complex_transform)]
)
return "%s" % (ShortLayoutTypeNames[self.A.layout])
#
def side_mode_name(self):
return "%s" % (ShortSideModeNames[self.A.side_mode])
#
def fill_mode_name(self):
return "%s" % (ShortFillModeNames[self.A.fill_mode])
#
def procedural_name(self):
''' The full procedural name indicates architecture, extended name, tile size, and layout. '''
threadblock = self.tile_description.procedural_name()
opcode_class_name = OpcodeClassNames[self.tile_description.math_instruction.opcode_class]
alignment = self.C.alignment
return SubstituteTemplate(
"cutlass_${opcode_class}_${extended_name}_${threadblock}_${layout}_${side_mode}_${fill_mode}_align${alignment}",
{
'opcode_class': opcode_class_name,
'extended_name': self.extended_name(),
'threadblock': threadblock,
'layout': self.layout_name(),
'side_mode': self.side_mode_name(),
'fill_mode': self.fill_mode_name(),
'alignment': "%d" % alignment,
}
)
#
def configuration_name(self):
''' The full procedural name indicates architecture, extended name, tile size, and layout. '''
return self.procedural_name()
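# Construction sketch (hedged): the helper classes below are assumed to come
# from library.py and their signatures may differ slightly.
#
#   math_inst = MathInstruction([8, 8, 4], DataType.f64, DataType.f64,
#                               DataType.f64, OpcodeClass.TensorOp,
#                               MathOperation.multiply_add)
#   tile = TileDescription([64, 64, 16], 3, [2, 2, 1], math_inst, 80, 80)
#   A = SymmetricTensorDescription(DataType.f64, LayoutType.ColumnMajor,
#                                  FillMode.Lower, alignment=1)
#   B = TensorDescription(DataType.f64, LayoutType.ColumnMajor, 1)
#   C = TensorDescription(DataType.f64, LayoutType.ColumnMajor, 1)
#   op = SymmOperation(SymmKind.Universal, 80, tile, A, B, C, DataType.f64)
#   op.procedural_name()  # -> a "cutlass_tensorop_d884symm_..._align1" style name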
###################################################################################################
#
# Emits single instances of a CUTLASS device-wide operator
#
###################################################################################################
#
class EmitSymmUniversalInstance:
''' Responsible for emitting a CUTLASS template definition'''
def __init__(self):
self.symm_template = """
// Symm operator ${operation_name}
using Operation_${operation_name} =
typename cutlass::gemm::device::Symm<
${element_a}, ${layout_a}, ${side_mode}, ${fill_mode},
${element_b}, ${layout_b},
${element_c}, ${layout_c},
${element_accumulator},
${opcode_class},
${arch},
cutlass::gemm::GemmShape<${threadblock_shape_m}, ${threadblock_shape_n}, ${threadblock_shape_k}>,
cutlass::gemm::GemmShape<${warp_shape_m}, ${warp_shape_n}, ${warp_shape_k}>,
cutlass::gemm::GemmShape<${instruction_shape_m}, ${instruction_shape_n}, ${instruction_shape_k}>,
${epilogue_functor}<
${element_c},
${epilogue_vector_length},
${element_accumulator},
${element_epilogue}
>,
${swizzling_functor},
${stages},
${align_a},
${align_b},
${split_k_serial},
${math_operation}
>;
"""
self.symm_complex_template = """
// Symm operator ${operation_name}
using Operation_${operation_name} =
typename cutlass::gemm::device::Symm<
${element_a}, ${layout_a}, ${side_mode}, ${fill_mode},
${element_b}, ${layout_b},
${element_c}, ${layout_c},
${element_accumulator},
${opcode_class},
${arch},
cutlass::gemm::GemmShape<${threadblock_shape_m}, ${threadblock_shape_n}, ${threadblock_shape_k}>,
cutlass::gemm::GemmShape<${warp_shape_m}, ${warp_shape_n}, ${warp_shape_k}>,
cutlass::gemm::GemmShape<${instruction_shape_m}, ${instruction_shape_n}, ${instruction_shape_k}>,
${epilogue_functor}<
${element_c},
${epilogue_vector_length},
${element_accumulator},
${element_epilogue}
>,
${swizzling_functor},
${stages},
${align_a},
${align_b},
${split_k_serial},
${math_operation},
${blas_mode}
>;
"""
def emit(self, operation):
threadblock_shape = operation.tile_description.threadblock_shape
warp_count = operation.tile_description.warp_count
warp_shape = [threadblock_shape[idx] // warp_count[idx] for idx in range(3)]
    epilogue_vector_length = min(operation.C.alignment * DataTypeSize[operation.C.element], 128) // DataTypeSize[operation.C.element]
values = {
'operation_name': operation.procedural_name(),
'element_a': DataTypeTag[operation.A.element],
'layout_a': LayoutTag[operation.A.layout],
'side_mode': SideModeTag[operation.A.side_mode],
'fill_mode': FillModeTag[operation.A.fill_mode],
'element_b': DataTypeTag[operation.B.element],
'layout_b': LayoutTag[operation.B.layout],
'element_c': DataTypeTag[operation.C.element],
'layout_c': LayoutTag[operation.C.layout],
'element_accumulator': DataTypeTag[operation.accumulator_type()],
'opcode_class': OpcodeClassTag[operation.tile_description.math_instruction.opcode_class],
'arch': "cutlass::arch::Sm%d" % operation.arch,
'threadblock_shape_m': str(operation.tile_description.threadblock_shape[0]),
'threadblock_shape_n': str(operation.tile_description.threadblock_shape[1]),
'threadblock_shape_k': str(operation.tile_description.threadblock_shape[2]),
'warp_shape_m': str(warp_shape[0]),
'warp_shape_n': str(warp_shape[1]),
'warp_shape_k': str(warp_shape[2]),
'instruction_shape_m': str(operation.tile_description.math_instruction.instruction_shape[0]),
'instruction_shape_n': str(operation.tile_description.math_instruction.instruction_shape[1]),
'instruction_shape_k': str(operation.tile_description.math_instruction.instruction_shape[2]),
'epilogue_vector_length': str(epilogue_vector_length),
'element_epilogue': str(DataTypeTag[operation.element_epilogue]),
'epilogue_functor': EpilogueFunctorTag[operation.epilogue_functor],
'swizzling_functor': SwizzlingFunctorTag[operation.swizzling_functor],
'stages': str(operation.tile_description.stages),
'align_a': str(operation.A.alignment),
'align_b': str(operation.B.alignment),
'split_k_serial': 'false',
'math_operation': MathOperationTag[operation.tile_description.math_instruction.math_operation],
'blas_mode': BlasModeTag[operation.blas_mode]
}
symm_template = self.symm_complex_template if operation.is_complex() else self.symm_template
return SubstituteTemplate(symm_template, values)
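# Emit-flow sketch: given a SymmOperation `op`,
#   source = EmitSymmUniversalInstance().emit(op)
# returns the C++ type alias `using Operation_<name> = cutlass::gemm::device::Symm<...>;`
# as a string, selecting symm_complex_template (which additionally threads
# ${blas_mode}) when op.is_complex() is true.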
###################################################################################################
###################################################################################################
#
# Emitter functions for all targets
#
###################################################################################################
class EmitSymmConfigurationLibrary:
def __init__(self, operation_path, configuration_name):
self.configuration_name = configuration_name
self.configuration_path = os.path.join(operation_path, "%s.cu" % configuration_name).replace('\\', '/')
self.instance_emitter = {
SymmKind.Universal: EmitSymmUniversalInstance,
}
self.symm_kind_wrappers = {
SymmKind.Universal: 'SymmOperation',
}
self.instance_template = {
SymmKind.Universal: """
${compile_guard_start}
manifest.append(new ${symm_kind}<
Operation_${operation_name}
>("${operation_name}"));
${compile_guard_end}
"""
}
self.header_template = """
/*
Generated by symm_operation.py - Do not edit.
*/
///////////////////////////////////////////////////////////////////////////////////////////////////
#include "cutlass/cutlass.h"
#include "cutlass/library/library.h"
#include "cutlass/library/manifest.h"
#include "library_internal.h"
#include "symm_operation.h"
///////////////////////////////////////////////////////////////////////////////////////////////////
"""
self.initialize_function_template = """
///////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace library {
///////////////////////////////////////////////////////////////////////////////////////////////////
void initialize_${configuration_name}(Manifest &manifest) {
"""
self.epilogue_template = """
}
///////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace library
} // namespace cutlass
///////////////////////////////////////////////////////////////////////////////////////////////////
"""
def __enter__(self):
self.configuration_file = open(self.configuration_path, "w")
self.configuration_file.write(self.header_template)
self.instance_definitions = []
self.instance_wrappers = []
self.operations = []
return self
def emit(self, operation):
emitter = self.instance_emitter[operation.symm_kind]()
self.operations.append(operation)
self.instance_definitions.append(emitter.emit(operation))
self.instance_wrappers.append(SubstituteTemplate(self.instance_template[operation.symm_kind], {
'configuration_name': self.configuration_name,
'operation_name': operation.procedural_name(),
'symm_kind': self.symm_kind_wrappers[operation.symm_kind],
'compile_guard_start': SubstituteTemplate(self.wmma_guard_start, {'sm_number': str(operation.arch)}) \
if operation.tile_description.math_instruction.opcode_class == OpcodeClass.WmmaTensorOp else "",
'compile_guard_end': "#endif" \
if operation.tile_description.math_instruction.opcode_class == OpcodeClass.WmmaTensorOp else ""
}))
def __exit__(self, exception_type, exception_value, traceback):
# Write instance definitions in top-level namespace
for instance_definition in self.instance_definitions:
self.configuration_file.write(instance_definition)
# Add wrapper objects within initialize() function
self.configuration_file.write(SubstituteTemplate(self.initialize_function_template, {
'configuration_name': self.configuration_name
}))
for instance_wrapper in self.instance_wrappers:
self.configuration_file.write(instance_wrapper)
self.configuration_file.write(self.epilogue_template)
self.configuration_file.close()
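# Generated-file layout note: header_template (includes) comes first, then one
# instance definition per emitted operation in the top-level namespace, then
# initialize_<configuration_name>() wrapping the manifest.append() calls, and
# finally epilogue_template closing the cutlass::library namespaces.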
###################################################################################################
| warp-main | warp/native/cutlass/tools/library/scripts/symm_operation.py |
#
# \file trmm_operation.py
#
# \brief Generates the CUTLASS Library's instances
#
#
import enum
import os.path
import shutil
import functools
import operator
from library import *
###################################################################################################
#
# Data structure modeling a TRMM operation
#
###################################################################################################
#
class TrmmOperation:
#
def __init__(self, trmm_kind, arch, tile_description, A, B, C, element_epilogue, \
epilogue_functor = EpilogueFunctor.LinearCombination, swizzling_functor = SwizzlingFunctor.Identity8):
self.operation_kind = OperationKind.Trmm
self.arch = arch
self.tile_description = tile_description
self.trmm_kind = trmm_kind
self.A = A
self.B = B
self.C = C
self.element_epilogue = element_epilogue
self.epilogue_functor = epilogue_functor
self.swizzling_functor = swizzling_functor
#
def is_complex(self):
complex_operators = [
MathOperation.multiply_add_complex,
MathOperation.multiply_add_complex_gaussian,
MathOperation.multiply_add_complex_fast_f32
]
    return self.tile_description.math_instruction.math_operation in complex_operators
#
def is_planar_complex(self):
# return self.trmm_kind in (TrmmKind.PlanarComplex, TrmmKind.PlanarComplexArray)
return False
#
def accumulator_type(self):
accum = self.tile_description.math_instruction.element_accumulator
if self.is_complex():
return get_complex_from_real(accum)
return accum
#
def short_math_name(self):
if self.tile_description.math_instruction.math_operation == MathOperation.multiply_add_complex_gaussian:
return "g%s" % ShortDataTypeNames[self.accumulator_type()]
return ShortDataTypeNames[self.accumulator_type()]
#
def core_name(self):
''' The basic operation kind is prefixed with a letter indicating the accumulation type. '''
inst_shape = ''
inst_operation = ''
intermediate_type = ''
math_operations_map = {
MathOperation.xor_popc: 'xor',
}
if self.tile_description.math_instruction.opcode_class == OpcodeClass.TensorOp or \
self.tile_description.math_instruction.opcode_class == OpcodeClass.WmmaTensorOp:
math_op = self.tile_description.math_instruction.math_operation
math_op_string = math_operations_map[math_op] if math_op in math_operations_map.keys() else ''
inst_shape = "%d%d%d" % tuple(self.tile_description.math_instruction.instruction_shape)
inst_shape += math_op_string
if self.tile_description.math_instruction.element_a != self.A.element and \
self.tile_description.math_instruction.element_a != self.tile_description.math_instruction.element_accumulator:
intermediate_type = DataTypeNames[self.tile_description.math_instruction.element_a]
return "%s%s%s%s" % (self.short_math_name(), inst_shape, intermediate_type, TrmmKindNames[self.trmm_kind])
#
def extended_name(self):
''' Append data types if they differ from compute type. '''
if self.is_complex():
extended_name = "${core_name}"
else:
if self.C.element != self.tile_description.math_instruction.element_accumulator and \
self.A.element != self.tile_description.math_instruction.element_accumulator:
extended_name = "${element_c}_${core_name}_${element_a}"
elif self.C.element == self.tile_description.math_instruction.element_accumulator and \
self.A.element != self.tile_description.math_instruction.element_accumulator:
extended_name = "${core_name}_${element_a}"
else:
extended_name = "${core_name}"
extended_name = SubstituteTemplate(extended_name, {
'element_a': DataTypeNames[self.A.element],
'element_c': DataTypeNames[self.C.element],
'core_name': self.core_name()
})
return extended_name
#
def layout_name(self):
if self.is_complex() or self.is_planar_complex():
return "%s%s" % (
ShortComplexLayoutNames[(self.A.layout, self.A.complex_transform)],
ShortComplexLayoutNames[(self.B.layout, self.B.complex_transform)]
)
return "%s%s" % (ShortLayoutTypeNames[self.A.layout], ShortLayoutTypeNames[self.B.layout])
#
def side_mode_name(self):
return "%s" % (ShortSideModeNames[self.A.side_mode])
#
def fill_mode_name(self):
return "%s" % (ShortFillModeNames[self.A.fill_mode])
#
def diag_type_name(self):
return "%s" % (ShortDiagTypeNames[self.A.diag_type])
#
def procedural_name(self):
''' The full procedural name indicates architecture, extended name, tile size, and layout. '''
threadblock = self.tile_description.procedural_name()
opcode_class_name = OpcodeClassNames[self.tile_description.math_instruction.opcode_class]
    alignment = self.C.alignment
return SubstituteTemplate(
"cutlass_${opcode_class}_${extended_name}_${threadblock}_${layout}_${side_mode}_${fill_mode}_${diag_type}_align${alignment}",
{
'opcode_class': opcode_class_name,
'extended_name': self.extended_name(),
'threadblock': threadblock,
'layout': self.layout_name(),
'side_mode': self.side_mode_name(),
'fill_mode': self.fill_mode_name(),
'diag_type': self.diag_type_name(),
'alignment': "%d" % self.C.alignment,
}
)
#
def configuration_name(self):
''' The full procedural name indicates architecture, extended name, tile size, and layout. '''
return self.procedural_name()
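# SubstituteTemplate sketch (behavior assumed from library.py): ${key}
# placeholders are replaced from the values dict, e.g.
#   SubstituteTemplate("cutlass_${op}_align${alignment}",
#                      {'op': 'trmm', 'alignment': '1'})
#   # -> "cutlass_trmm_align1"
# procedural_name() above composes the kernel name this way, folding in the
# side mode, fill mode, and diagonal type that distinguish TRMM variants.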
###################################################################################################
#
# Emits single instances of a CUTLASS device-wide operator
#
###################################################################################################
#
class EmitTrmmUniversalInstance:
''' Responsible for emitting a CUTLASS template definition'''
def __init__(self):
self.trmm_template = """
// Trmm operator ${operation_name}
using Operation_${operation_name} =
typename cutlass::gemm::device::Trmm<
${element_a}, ${layout_a},
${side_mode}, ${fill_mode}, ${diag_type},
${element_b}, ${layout_b},
${element_c}, ${layout_c},
${element_accumulator},
${opcode_class},
${arch},
cutlass::gemm::GemmShape<${threadblock_shape_m}, ${threadblock_shape_n}, ${threadblock_shape_k}>,
cutlass::gemm::GemmShape<${warp_shape_m}, ${warp_shape_n}, ${warp_shape_k}>,
cutlass::gemm::GemmShape<${instruction_shape_m}, ${instruction_shape_n}, ${instruction_shape_k}>,
${epilogue_functor}<
${element_c},
${epilogue_vector_length},
${element_accumulator},
${element_epilogue},
cutlass::epilogue::thread::ScaleType::OnlyAlphaScaling
>,
${swizzling_functor},
${stages},
${align_a},
${align_b},
${split_k_serial},
${math_operation}
>;
"""
self.trmm_complex_template = """
// Trmm operator ${operation_name}
using Operation_${operation_name} =
typename cutlass::gemm::device::Trmm<
${element_a}, ${layout_a},
${side_mode}, ${fill_mode}, ${diag_type},
${element_b}, ${layout_b},
${element_c}, ${layout_c},
${element_accumulator},
${opcode_class},
${arch},
cutlass::gemm::GemmShape<${threadblock_shape_m}, ${threadblock_shape_n}, ${threadblock_shape_k}>,
cutlass::gemm::GemmShape<${warp_shape_m}, ${warp_shape_n}, ${warp_shape_k}>,
cutlass::gemm::GemmShape<${instruction_shape_m}, ${instruction_shape_n}, ${instruction_shape_k}>,
${epilogue_functor}<
${element_c},
${epilogue_vector_length},
${element_accumulator},
${element_epilogue},
cutlass::epilogue::thread::ScaleType::OnlyAlphaScaling
>,
${swizzling_functor},
${stages},
${align_a},
${align_b},
${split_k_serial},
${math_operation},
${transform_a}
>;
"""
def emit(self, operation):
threadblock_shape = operation.tile_description.threadblock_shape
warp_count = operation.tile_description.warp_count
warp_shape = [threadblock_shape[idx] // warp_count[idx] for idx in range(3)]
    epilogue_vector_length = min(operation.C.alignment * DataTypeSize[operation.C.element], 128) // DataTypeSize[operation.C.element]
values = {
'operation_name': operation.procedural_name(),
'element_a': DataTypeTag[operation.A.element],
'layout_a': LayoutTag[operation.A.layout],
'side_mode' : SideModeTag[operation.A.side_mode],
'fill_mode': FillModeTag[operation.A.fill_mode],
'diag_type' : DiagTypeTag[operation.A.diag_type],
'element_b': DataTypeTag[operation.B.element],
'layout_b': LayoutTag[operation.B.layout],
'element_c': DataTypeTag[operation.C.element],
'layout_c': LayoutTag[operation.C.layout],
'element_accumulator': DataTypeTag[operation.accumulator_type()],
'opcode_class': OpcodeClassTag[operation.tile_description.math_instruction.opcode_class],
'arch': "cutlass::arch::Sm%d" % operation.arch,
'threadblock_shape_m': str(operation.tile_description.threadblock_shape[0]),
'threadblock_shape_n': str(operation.tile_description.threadblock_shape[1]),
'threadblock_shape_k': str(operation.tile_description.threadblock_shape[2]),
'warp_shape_m': str(warp_shape[0]),
'warp_shape_n': str(warp_shape[1]),
'warp_shape_k': str(warp_shape[2]),
'instruction_shape_m': str(operation.tile_description.math_instruction.instruction_shape[0]),
'instruction_shape_n': str(operation.tile_description.math_instruction.instruction_shape[1]),
'instruction_shape_k': str(operation.tile_description.math_instruction.instruction_shape[2]),
'epilogue_vector_length': str(epilogue_vector_length),
'element_epilogue': str(DataTypeTag[operation.element_epilogue]),
'epilogue_functor': EpilogueFunctorTag[operation.epilogue_functor],
'swizzling_functor': SwizzlingFunctorTag[operation.swizzling_functor],
'stages': str(operation.tile_description.stages),
        'align_a': str(1),  # A is emitted with alignment 1 so no padding is required (until zfill supports variable byte counts)
'align_b': str(operation.B.alignment),
'split_k_serial': 'false',
'math_operation': MathOperationTag[operation.tile_description.math_instruction.math_operation],
'transform_a': ComplexTransformTag[operation.A.complex_transform]
}
trmm_template = self.trmm_complex_template if operation.is_complex() else self.trmm_template
return SubstituteTemplate(trmm_template, values)
###################################################################################################
###################################################################################################
#
# Emitter functions for all targets
#
###################################################################################################
class EmitTrmmConfigurationLibrary:
def __init__(self, operation_path, configuration_name):
self.configuration_name = configuration_name
self.configuration_path = os.path.join(operation_path, "%s.cu" % configuration_name).replace('\\', '/')
self.instance_emitter = {
TrmmKind.Universal: EmitTrmmUniversalInstance,
}
self.trmm_kind_wrappers = {
TrmmKind.Universal: 'TrmmOperation',
}
self.instance_template = {
TrmmKind.Universal: """
${compile_guard_start}
manifest.append(new ${trmm_kind}<
Operation_${operation_name}
>("${operation_name}"));
${compile_guard_end}
"""
}
self.header_template = """
/*
Generated by trmm_operation.py - Do not edit.
*/
///////////////////////////////////////////////////////////////////////////////////////////////////
#include "cutlass/cutlass.h"
#include "cutlass/library/library.h"
#include "cutlass/library/manifest.h"
#include "library_internal.h"
#include "trmm_operation.h"
///////////////////////////////////////////////////////////////////////////////////////////////////
"""
self.initialize_function_template = """
///////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace library {
///////////////////////////////////////////////////////////////////////////////////////////////////
void initialize_${configuration_name}(Manifest &manifest) {
"""
self.epilogue_template = """
}
///////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace library
} // namespace cutlass
///////////////////////////////////////////////////////////////////////////////////////////////////
"""
def __enter__(self):
self.configuration_file = open(self.configuration_path, "w")
self.configuration_file.write(self.header_template)
self.instance_definitions = []
self.instance_wrappers = []
self.operations = []
return self
def emit(self, operation):
emitter = self.instance_emitter[operation.trmm_kind]()
self.operations.append(operation)
self.instance_definitions.append(emitter.emit(operation))
self.instance_wrappers.append(SubstituteTemplate(self.instance_template[operation.trmm_kind], {
'configuration_name': self.configuration_name,
'operation_name': operation.procedural_name(),
'trmm_kind': self.trmm_kind_wrappers[operation.trmm_kind],
'compile_guard_start': SubstituteTemplate(self.wmma_guard_start, {'sm_number': str(operation.arch)}) \
if operation.tile_description.math_instruction.opcode_class == OpcodeClass.WmmaTensorOp else "",
'compile_guard_end': "#endif" \
if operation.tile_description.math_instruction.opcode_class == OpcodeClass.WmmaTensorOp else ""
}))
def __exit__(self, exception_type, exception_value, traceback):
# Write instance definitions in top-level namespace
for instance_definition in self.instance_definitions:
self.configuration_file.write(instance_definition)
# Add wrapper objects within initialize() function
self.configuration_file.write(SubstituteTemplate(self.initialize_function_template, {
'configuration_name': self.configuration_name
}))
for instance_wrapper in self.instance_wrappers:
self.configuration_file.write(instance_wrapper)
self.configuration_file.write(self.epilogue_template)
self.configuration_file.close()
###################################################################################################
| warp-main | warp/native/cutlass/tools/library/scripts/trmm_operation.py |
import distutils.cmd
from setuptools import setup
import setuptools.command.build_py
import os
# build rmm dependency
class BuildRMM(distutils.cmd.Command):
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
try:
import rmm
except ImportError:
print("installing rmm")
os.system("git clone -b branch-22.08 --recurse-submodules https://github.com/rapidsai/rmm.git")
os.chdir("./rmm")
os.system("./build.sh librmm rmm")
os.chdir("./python")
os.system("python setup.py build_ext --inplace")
os.system("python setup.py install")
cutlass_path = os.getenv('CUTLASS_PATH')
assert cutlass_path is not None, "Environment variable 'CUTLASS_PATH' is not defined."
cuda_install_path = os.getenv('CUDA_INSTALL_PATH')
assert cuda_install_path is not None, "Environment variable 'CUDA_INSTALL_PATH' is not defined."
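# Example environment setup (illustrative paths; adjust to your installation):
#   export CUTLASS_PATH=/path/to/cutlass
#   export CUDA_INSTALL_PATH=/usr/local/cuda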
ext_modules = []
try:
from pybind11.setup_helpers import Pybind11Extension, build_ext
include_dirs = [
cutlass_path + "/include",
cuda_install_path + "/include",
cutlass_path + "/tools/util/include",
cutlass_path + "/test",
cutlass_path + "/tools/library/scripts/pycutlass/googletest/googletest/include"
]
ext_modules = [
Pybind11Extension("cutlass",
["src/cpp/cutlass.cpp"],
include_dirs=include_dirs,
extra_compile_args=["-fpermissive", "-w"])
]
except ImportError:
pass
setup(
name="PyCutlass",
version="0.0.1",
author="Zhaodong Chen; Andrew Kerr; Haicheng Wu; Szymon Migacz; Graham Markall",
author_email="[email protected]",
description="Python interface for CUTLASS",
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
package_dir={"": "src"},
packages=['pycutlass', 'pycutlass.utils', 'pycutlass.test'],
setup_requires=["pybind11", "numpy<1.23"],
install_requires=[
"numpy<1.23",
'pybind11',
'cuda-python<11.7.0',
'typeguard',
'bfloat16',
'typing',
'scikit-build',
'treelib'
],
cmdclass={
'rmm': BuildRMM
},
ext_modules=ext_modules,
python_requires=">=3.6",
)
| warp-main | warp/native/cutlass/tools/library/scripts/pycutlass/setup.py |
#################################################################################################
#
# Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
## Test case generator for SM80
import pycutlass
from pycutlass import *
from pycutlass.test import *
from pycutlass.utils.device import device_cc
import unittest
#
# Create GEMM operation
#
@unittest.skipIf(device_cc() < 80, "Device compute capability is insufficient for SM80 tests.")
def TestGemmOperator(gemm_kind, math_inst, layout, alignment, tiling, arch, mixed=False,
epilogue_functor=None, swizzling_functor=cutlass.IdentitySwizzle1, **kwargs):
"""
Test GEMM Operation based on configuration
"""
if "data_type" in kwargs.keys():
data_type = kwargs["data_type"]
else:
if mixed or math_inst.element_a == cutlass.bfloat16:
data_type = [
math_inst.element_a,
math_inst.element_b,
math_inst.element_accumulator,
math_inst.element_accumulator
]
else:
data_type = [
math_inst.element_a,
math_inst.element_b,
math_inst.element_a,
math_inst.element_accumulator
]
tile_description = TileDescription(
tiling[0], tiling[1], tiling[2],
math_inst
)
A = TensorDescription(
data_type[0], layout[0], alignment[0]
)
B = TensorDescription(
data_type[1], layout[1], alignment[1]
)
C = TensorDescription(
data_type[2], layout[2], alignment[2]
)
element_epilogue = data_type[3]
if epilogue_functor is None:
epilogue_functor = LinearCombination(
C.element, C.alignment,
math_inst.element_accumulator, element_epilogue)
if gemm_kind == GemmKind.Universal:
operation = GemmOperationUniversal(
arch=arch, tile_description=tile_description,
A=A, B=B, C=C,
epilogue_functor=epilogue_functor, swizzling_functor=swizzling_functor
)
if A.layout in [cutlass.ColumnMajorInterleaved32, cutlass.RowMajorInterleaved32]:
return test_all_gemm(operation, "interleaved")
else:
return test_all_gemm(operation, "universal")
elif gemm_kind == GemmKind.Grouped:
operation = GemmOperationGrouped(
arch, tile_description, A, B, C,
epilogue_functor, swizzling_functor,
precompute_mode=kwargs["precompute_mode"]
)
testbed = TestbedGrouped(operation=operation)
return testbed.run(24)
else:
raise NotImplementedError("the gemm kind is not implemented")
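# Illustrative direct call (sketch; assumes an SM80 device and mirrors the
# first configuration exercised in Test_SM80.test_SM80_TensorOp_16816 below):
#
#   math_inst = MathInstruction(
#       [16, 8, 16], cutlass.float16, cutlass.float16, cutlass.float32,
#       cutlass.OpClass.TensorOp, MathOperation.multiply_add)
#   ok = TestGemmOperator(
#       GemmKind.Universal, math_inst,
#       (cutlass.RowMajor, cutlass.RowMajor, cutlass.RowMajor),
#       (8, 8, 8), ([256, 128, 32], 3, [4, 2, 1]), 80)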
def TestConv2dOperator(math_inst, alignment, tiling, arch,
stride_supports=[StrideSupport.Strided, StrideSupport.Strided, StrideSupport.Strided],
epilogue_functor=None,
swizzling_functor=cutlass.IdentitySwizzle1, interleaved=False, **kwargs):
"""
Test Conv2d Operation based on configurations
"""
mixeds = [False, True, False]
conv_kinds = [cutlass.conv.Operator.fprop, cutlass.conv.Operator.dgrad, cutlass.conv.Operator.wgrad]
results = []
default_swizzling_functor = swizzling_functor
if "layout" in kwargs.keys():
layout = kwargs["layout"]
else:
layout = (cutlass.TensorNHWC, cutlass.TensorNHWC, cutlass.TensorNHWC)
for mixed, conv_kind, stride_support in zip(mixeds, conv_kinds, stride_supports):
if "data_type" in kwargs.keys():
data_type = kwargs["data_type"]
else:
if mixed or math_inst.element_a == cutlass.bfloat16:
data_type = [
math_inst.element_a,
math_inst.element_b,
math_inst.element_accumulator,
math_inst.element_accumulator
]
else:
data_type = [
math_inst.element_a,
math_inst.element_b,
math_inst.element_a,
math_inst.element_accumulator
]
# skip Int8 Conv Backward
if data_type[0] == cutlass.int8 and conv_kind in [cutlass.conv.Operator.dgrad, cutlass.conv.Operator.wgrad]:
continue
A = TensorDescription(
element=data_type[0],
layout=layout[0],
alignment=alignment[0])
B = TensorDescription(
element=data_type[1],
layout=layout[1],
alignment=alignment[1])
C = TensorDescription(
element=data_type[2],
layout=layout[2],
alignment=alignment[2])
tile_description = TileDescription(
threadblock_shape=tiling[0], stages=tiling[1],
warp_count=tiling[2],
math_instruction=math_inst
)
if conv_kind == cutlass.conv.Operator.dgrad and stride_support == StrideSupport.Strided:
swizzling_functor = cutlass.StridedDgradIdentitySwizzle1
else:
swizzling_functor = default_swizzling_functor
        if epilogue_functor is None:
            epilogue_functor_ = LinearCombination(
                C.element, C.alignment,
                math_inst.element_accumulator, data_type[3])
        else:
            epilogue_functor_ = epilogue_functor
operation = Conv2dOperation(
conv_kind=conv_kind, iterator_algorithm=cutlass.conv.IteratorAlgorithm.optimized,
arch=arch, tile_description=tile_description, A=A, B=B, C=C,
stride_support=stride_support,
epilogue_functor=epilogue_functor_,
swizzling_functor=swizzling_functor
)
results.append(test_all_conv2d(operation, interleaved=interleaved))
return results
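# Illustrative direct call (sketch): reusing a math instruction such as the
# f16 16x8x16 one used below,
#   results = TestConv2dOperator(math_inst, (8, 8, 8), ([128, 128, 32], 3, [2, 2, 1]), 80)
# runs fprop, dgrad, and wgrad verification in turn and returns one pass/fail
# flag per convolution kind (int8 backward kinds are skipped).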
class Test_SM80(unittest.TestCase):
def test_SM80_TensorOp_16816(self):
math_instructions = [
MathInstruction(
[16, 8, 16], cutlass.float16, cutlass.float16, cutlass.float32,
cutlass.OpClass.TensorOp, MathOperation.multiply_add
),
MathInstruction(
[16, 8, 16], cutlass.float16, cutlass.float16, cutlass.float16,
cutlass.OpClass.TensorOp, MathOperation.multiply_add
),
MathInstruction(
[16, 8, 16], cutlass.bfloat16, cutlass.bfloat16, cutlass.float32,
cutlass.OpClass.TensorOp, MathOperation.multiply_add
)
]
layouts = [
(cutlass.RowMajor, cutlass.RowMajor, cutlass.RowMajor),
(cutlass.ColumnMajor, cutlass.RowMajor, cutlass.RowMajor),
(cutlass.RowMajor, cutlass.ColumnMajor, cutlass.RowMajor)
]
alignments = [
(8, 8, 8), (4, 8, 8), (8, 4, 8)
]
tilings = [
([256, 128, 32], 3, [4, 2, 1]),
([64, 256, 32], 4, [1, 4, 1]),
([128, 64, 64], 3, [2, 2, 1])
]
for math_inst, layout, alignment, tiling in zip(math_instructions, layouts, alignments, tilings):
self.assertTrue(TestGemmOperator(GemmKind.Universal, math_inst, layout, alignment, tiling, 80, False))
self.assertTrue(TestGemmOperator(GemmKind.Grouped, math_inst, layout, alignment, tiling, 80, True, precompute_mode=SchedulerMode.Host))
stride_supports = [StrideSupport.Strided, StrideSupport.Strided, StrideSupport.Strided]
results = TestConv2dOperator(math_inst, alignment, tiling, 80, stride_supports=stride_supports)
for res in results:
self.assertTrue(res)
def test_SM80_TensorOp_1688(self):
        # tf32 is not supported in most Python environments, so skip this test
self.assertTrue(True)
def test_SM80_TensorOp_1688_fast_math(self):
math_instructions = [
MathInstruction(
[16, 8, 8], cutlass.tfloat32, cutlass.tfloat32, cutlass.float32,
cutlass.OpClass.TensorOp, MathOperation.multiply_add
),
MathInstruction(
[16, 8, 8], cutlass.float16, cutlass.float16, cutlass.float32,
cutlass.OpClass.TensorOp, MathOperation.multiply_add_fast_f16
),
MathInstruction(
[16, 8, 8], cutlass.bfloat16, cutlass.bfloat16, cutlass.float32,
cutlass.OpClass.TensorOp, MathOperation.multiply_add_fast_bf16
),
MathInstruction(
[16, 8, 8], cutlass.float32, cutlass.float32, cutlass.float32,
cutlass.OpClass.TensorOp, MathOperation.multiply_add_fast_f32
)
]
layouts = [
(cutlass.RowMajor, cutlass.RowMajor, cutlass.ColumnMajor),
(cutlass.RowMajor, cutlass.ColumnMajor, cutlass.ColumnMajor),
(cutlass.ColumnMajor, cutlass.RowMajor, cutlass.ColumnMajor),
(cutlass.ColumnMajor, cutlass.ColumnMajor, cutlass.RowMajor)
]
alignments = [
(4, 4, 4), (4, 2, 4), (2, 4, 4), (2, 2, 4)
]
tilings = [
([128, 256, 16], 3, [4, 2, 1]),
([64, 256, 16], 4, [1, 4, 1]),
([128, 64, 32], 3, [2, 2, 1]),
([256, 64, 32], 3, [4, 2, 1])
]
data_type = [
cutlass.float32, cutlass.float32, cutlass.float32, cutlass.float32
]
for math_inst, layout, alignment, tiling in zip(math_instructions, layouts, alignments, tilings):
self.assertTrue(
TestGemmOperator(
GemmKind.Universal, math_inst, layout,
alignment, tiling, 80, False, data_type=data_type))
self.assertTrue(
TestGemmOperator(
GemmKind.Grouped, math_inst, layout, alignment, tiling, 80,
True, precompute_mode=SchedulerMode.Device, data_type=data_type))
stride_supports = [StrideSupport.Unity, StrideSupport.Strided, StrideSupport.Unity]
results = TestConv2dOperator(math_inst, alignment, tiling, 80, stride_supports=stride_supports, data_type=data_type)
for res in results:
self.assertTrue(res)
def test_SM80_TensorOp_884(self):
math_inst = MathInstruction(
[8, 8, 4], cutlass.float64, cutlass.float64, cutlass.float64,
cutlass.OpClass.TensorOp, MathOperation.multiply_add
)
layout = (cutlass.ColumnMajor, cutlass.ColumnMajor, cutlass.ColumnMajor)
alignment = (1, 1, 1)
tiling = ([64, 256, 16], 3, [2, 4, 1])
data_type = [cutlass.float64, cutlass.float64, cutlass.float64, cutlass.float64]
self.assertTrue(TestGemmOperator(GemmKind.Universal, math_inst, layout, alignment, tiling, 80, False, data_type=data_type))
self.assertTrue(TestGemmOperator(GemmKind.Grouped, math_inst, layout, alignment, tiling, 80, True, precompute_mode=SchedulerMode.Device, data_type=data_type))
stride_supports = [StrideSupport.Unity, StrideSupport.Strided, StrideSupport.Unity]
results = TestConv2dOperator(math_inst, alignment, tiling, 80, stride_supports=stride_supports, data_type=data_type)
for res in results:
self.assertTrue(res)
def test_SM80_TensorOp_16832_TN(self):
math_inst = MathInstruction(
[16, 8, 32], cutlass.int8, cutlass.int8, cutlass.int32,
cutlass.OpClass.TensorOp, MathOperation.multiply_add_saturate
)
layout = (cutlass.RowMajor, cutlass.ColumnMajor, cutlass.ColumnMajor)
alignment = (16, 16, 4)
alignment_mixed = (16, 16, 16)
tiling = ([128, 256, 64], 3, [2, 4, 1])
data_type = [cutlass.int8, cutlass.int8, cutlass.int32, cutlass.int32]
data_type_mixed = [cutlass.int8, cutlass.int8, cutlass.int8, cutlass.float32]
self.assertTrue(TestGemmOperator(GemmKind.Universal, math_inst, layout, alignment, tiling, 80, False, data_type=data_type))
self.assertTrue(TestGemmOperator(GemmKind.Grouped, math_inst, layout, alignment_mixed, tiling, 80, True, precompute_mode=SchedulerMode.Device, data_type=data_type_mixed))
stride_supports = [StrideSupport.Strided, StrideSupport.Strided, StrideSupport.Strided]
results = TestConv2dOperator(math_inst, alignment, tiling, 80, stride_supports=stride_supports, data_type=data_type)
for res in results:
self.assertTrue(res)
def test_SM80_Simt_f32(self):
math_inst = MathInstruction(
[1, 1, 1], cutlass.float32, cutlass.float32, cutlass.float32,
cutlass.OpClass.Simt, MathOperation.multiply_add
)
layout = (cutlass.RowMajor, cutlass.RowMajor, cutlass.RowMajor)
alignment = (1, 1, 1)
tiling = ([128, 256, 8], 4, [2, 4, 1])
data_type = [cutlass.float32, cutlass.float32, cutlass.float32, cutlass.float32]
self.assertTrue(TestGemmOperator(GemmKind.Universal, math_inst, layout, alignment, tiling, 80, False, data_type=data_type))
self.assertTrue(TestGemmOperator(GemmKind.Grouped, math_inst, layout, alignment, tiling, 80, True, precompute_mode=SchedulerMode.Host, data_type=data_type))
stride_supports = [StrideSupport.Strided, StrideSupport.Strided, StrideSupport.Strided]
results = TestConv2dOperator(math_inst, alignment, tiling, 80, stride_supports=stride_supports, data_type=data_type)
for res in results:
self.assertTrue(res)
def test_SM80_Simt_f64(self):
math_inst = MathInstruction(
[1, 1, 1], cutlass.float64, cutlass.float64, cutlass.float64,
cutlass.OpClass.Simt, MathOperation.multiply_add
)
layout = (cutlass.RowMajor, cutlass.RowMajor, cutlass.ColumnMajor)
alignment = (1, 1, 1)
tiling = ([64, 128, 8], 5, [2, 2, 1])
data_type = [cutlass.float64, cutlass.float64, cutlass.float64, cutlass.float64]
self.assertTrue(TestGemmOperator(GemmKind.Universal, math_inst, layout, alignment, tiling, 80, False, data_type=data_type))
self.assertTrue(TestGemmOperator(GemmKind.Grouped, math_inst, layout, alignment, tiling, 80, True, precompute_mode=SchedulerMode.Device, data_type=data_type))
stride_supports = [StrideSupport.Strided, StrideSupport.Strided, StrideSupport.Strided]
results = TestConv2dOperator(math_inst, alignment, tiling, 80, stride_supports=stride_supports, data_type=data_type)
for res in results:
self.assertTrue(res)
def test_SM80_TensorOp_16832_Interleaved(self):
math_inst = MathInstruction(
[16, 8, 32], cutlass.int8, cutlass.int8, cutlass.int32,
cutlass.OpClass.TensorOp, MathOperation.multiply_add_saturate
)
layout = (cutlass.ColumnMajorInterleaved32, cutlass.RowMajorInterleaved32, cutlass.ColumnMajorInterleaved32)
alignment_mixed = (16, 16, 8)
tiling = ([256, 64, 64], 4, [4, 1, 1])
data_type_mixed = [cutlass.int8, cutlass.int8, cutlass.int8, cutlass.float32]
epilogue_functor = FastLinearCombinationClamp(
data_type_mixed[2], alignment_mixed[2]
)
self.assertTrue(TestGemmOperator(GemmKind.Universal, math_inst, layout, alignment_mixed, tiling, 80, False, data_type=data_type_mixed, epilogue_functor=epilogue_functor))
stride_supports = [StrideSupport.Strided, StrideSupport.Strided, StrideSupport.Strided]
layout = [cutlass.TensorNC32HW32, cutlass.TensorC32RSK32, cutlass.TensorNC32HW32]
results = TestConv2dOperator(math_inst, alignment_mixed, tiling, 80, stride_supports=stride_supports, data_type=data_type_mixed, layout=layout, interleaved=True)
for res in results:
self.assertTrue(res)
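    # The remaining SM80_* methods below are placeholders (they lack the
    # "test_" prefix, so unittest does not collect them); presumably
    # configurations not yet ported to pycutlass.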
def SM80_SparseTensorOp_16832(self):
pass
def SM80_PlanarComplexTensorOp_16816(self):
pass
def SM80_SparseTensorOp_16816_fast_math(self):
pass
def SM80_TensorOp_1688_complex(self):
pass
def SM80_TensorOp_1688_fast_fp32_math_complex(self):
pass
def SM80_TensorOp_1688_rank_k(self):
pass
def SM80_TensorOp_1688_rank_k_complex(self):
pass
def SM80_TensorOp_1688_trmm(self):
pass
def SM80_TensorOp_1688_trmm_complex(self):
pass
def SM80_TensorOp_1688_symm(self):
pass
def SM80_TensorOp_1688_symm_complex(self):
pass
def SM80_TensorOp_884_complex(self):
pass
def SM80_TensorOp_884_complex_gaussian(self):
pass
def SM80_TensorOp_884_rank_k(self):
pass
def SM80_TensorOp_884_rank_k_complex(self):
pass
def SM80_TensorOp_884_rank_k_complex_gaussian(self):
pass
def SM80_TensorOp_884_trmm(self):
pass
def SM80_TensorOp_884_trmm_complex(self):
pass
def SM80_TensorOp_884_trmm_complex_gaussian(self):
pass
def SM80_TensorOp_884_symm(self):
pass
def SM80_TensorOp_884_symm_complex(self):
pass
def SM80_TensorOp_884_symm_complex_gaussian(self):
pass
def SM80_SparseTensorOp_16864_TN(self):
pass
def SM80_TensorOp_16864_TN(self):
pass
def SM80_SparseTensorOp_168128_TN(self):
pass
def SM80_TensorOp_16864_Interleaved(self):
pass
def SM80_TensorOp_168256(self):
pass
def SM80_Simt_complex(self):
pass
if __name__ == '__main__':
pycutlass.get_memory_pool(2**20, 2**34)
pycutlass.compiler.nvcc()
unittest.main()
| warp-main | warp/native/cutlass/tools/library/scripts/pycutlass/test/unit/test_sm80.py |
#################################################################################################
#
# Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
## Test case for Pytorch
import pycutlass
import unittest
from pycutlass import *
from pycutlass.utils.device import device_cc
import torch
import cupy as cp
import rmm  # provides rmm_cupy_allocator used in the cupy test below
class Test_Frontend(unittest.TestCase):
def setUp(self) -> None:
#
# define the cutlass operator
#
cc = device_cc()
math_inst = MathInstruction(
[1, 1, 1], cutlass.float32, cutlass.float32, cutlass.float32,
cutlass.OpClass.Simt, MathOperation.multiply_add
)
# Stages > 2 is supported only for compute capability 80 and beyond
stages = 4 if cc >= 80 else 2
tile_description = TileDescription(
[128, 128, 8], stages, [2, 4, 1],
math_inst
)
A = TensorDescription(
cutlass.float32, cutlass.RowMajor, 1
)
B = TensorDescription(
cutlass.float32, cutlass.RowMajor, 1
)
C = TensorDescription(
cutlass.float32, cutlass.RowMajor, 1
)
epilogue_functor = LinearCombination(
C.element, C.alignment,
math_inst.element_accumulator, cutlass.float32)
self.operation = GemmOperationUniversal(
arch=cc, tile_description=tile_description,
A=A, B=B, C=C,
epilogue_functor=epilogue_functor,
swizzling_functor=cutlass.IdentitySwizzle1
)
pycutlass.compiler.add_module([self.operation,])
def test_torch_frontend(self):
problem_size = cutlass.gemm.GemmCoord(512, 256, 128)
tensor_A = torch.ceil(torch.empty(size=(problem_size.m(), problem_size.k()), dtype=torch.float32, device="cuda").uniform_(-8.5, 7.5))
tensor_B = torch.ceil(torch.empty(size=(problem_size.k(), problem_size.n()), dtype=torch.float32, device="cuda").uniform_(-8.5, 7.5))
tensor_C = torch.ceil(torch.empty(size=(problem_size.m(), problem_size.n()), dtype=torch.float32, device="cuda").uniform_(-8.5, 7.5))
tensor_D = torch.empty_like(tensor_C)
alpha = 1.0
beta = 0.0
arguments = GemmArguments(
operation=self.operation, problem_size=problem_size,
A=tensor_A, B=tensor_B, C=tensor_C, D=tensor_D,
output_op=self.operation.epilogue_type(alpha, beta),
gemm_mode=cutlass.gemm.Mode.Gemm, split_k_splices=1
)
self.operation.run(arguments)
arguments.sync()
tensor_D_ref = alpha * tensor_A @ tensor_B + beta * tensor_C
self.assertTrue(torch.equal(tensor_D, tensor_D_ref))
def test_cupy_frontend(self):
cp.cuda.set_allocator(rmm.rmm_cupy_allocator)
problem_size = cutlass.gemm.GemmCoord(512, 256, 128)
tensor_A = cp.ceil(cp.random.uniform(low=-8.5, high=7.5, size=(problem_size.m(), problem_size.k()), dtype=cp.float32))
tensor_B = cp.ceil(cp.random.uniform(low=-8.5, high=7.5, size=(problem_size.k(), problem_size.n()), dtype=cp.float32))
tensor_C = cp.ceil(cp.random.uniform(low=-8.5, high=7.5, size=(problem_size.m(), problem_size.n()), dtype=cp.float32))
tensor_D = cp.ones_like(tensor_C)
alpha = 1.0
beta = 1.0
tensor_D_ref = alpha * tensor_A @ tensor_B + beta * tensor_C
arguments = GemmArguments(
operation=self.operation, problem_size=problem_size,
A=tensor_A, B=tensor_B, C=tensor_C, D=tensor_D,
output_op=self.operation.epilogue_type(alpha, beta),
gemm_mode=cutlass.gemm.Mode.Gemm, split_k_splices=1
)
self.operation.run(arguments)
arguments.sync()
self.assertTrue(cp.array_equal(tensor_D, tensor_D_ref))
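# Note (sketch): GemmArguments accepts framework tensors (torch and cupy here)
# directly, and arguments.sync() blocks until the kernel completes so the
# results can be compared against the framework reference.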
if __name__ == '__main__':
pycutlass.get_memory_pool(2**32, 2**32)
unittest.main()
| warp-main | warp/native/cutlass/tools/library/scripts/pycutlass/test/frontend/test_frontend.py |
# test/unit/conv/device/conv2d_dgrad_implicit_gemm_tf32nhwc_tf32nhwc_f32nhwc_tensor_op_f32_sm80.cu
import pycutlass
from pycutlass import *
from pycutlass.test import *
from pycutlass.utils.device import device_cc
import unittest
@unittest.skipIf(device_cc() < 80, "Device compute capability is insufficient for SM80 tests.")
class Conv2dDgradImplicitGemmTF32nhwcTF32nhwcTF32nhwcTensorOpF32SM80(unittest.TestCase):
def test_SM80_Device_Conv2d_Dgrad_Analytic_ImplicitGemm_tf32nhwc_tf32nhwc_f32nhwc_tensor_op_f32(self):
math_inst = MathInstruction(
instruction_shape=[16, 8, 8],
element_a=cutlass.float32, element_b=cutlass.float32,
element_accumulator=cutlass.float32, opcode_class=cutlass.OpClass.TensorOp,
math_operation=MathOperation.multiply_add
)
A = TensorDescription(
element=math_inst.element_a,
layout=cutlass.TensorNHWC,
alignment=4)
B = TensorDescription(
element=math_inst.element_b,
layout=cutlass.TensorNHWC,
alignment=4)
C = TensorDescription(
element=cutlass.float32,
layout=cutlass.TensorNHWC,
alignment=8)
tile_description = TileDescription(
threadblock_shape=[128, 128, 16], stages=3,
warp_count=[2, 2, 1],
math_instruction=math_inst
)
epilogue_functor = LinearCombination(
C.element, C.alignment,
math_inst.element_accumulator, cutlass.float32)
operation = Conv2dOperation(
conv_kind=cutlass.conv.Operator.dgrad, iterator_algorithm=cutlass.conv.IteratorAlgorithm.analytic,
arch=80, tile_description=tile_description, A=A, B=B, C=C,
stride_support=StrideSupport.Unity,
epilogue_functor=epilogue_functor,
swizzling_functor=cutlass.IdentitySwizzle1
)
self.assertTrue(test_all_conv2d(operation))
def test_SM80_Device_Conv2d_Dgrad_Optimized_ImplicitGemm_tf32nhwc_tf32nhwc_f32nhwc_tensor_op_f32(self):
math_inst = MathInstruction(
instruction_shape=[16, 8, 8],
element_a=cutlass.float32, element_b=cutlass.float32,
element_accumulator=cutlass.float32, opcode_class=cutlass.OpClass.TensorOp,
math_operation=MathOperation.multiply_add
)
A = TensorDescription(
element=math_inst.element_a,
layout=cutlass.TensorNHWC,
alignment=4)
B = TensorDescription(
element=math_inst.element_b,
layout=cutlass.TensorNHWC,
alignment=4)
C = TensorDescription(
element=cutlass.float32,
layout=cutlass.TensorNHWC,
alignment=8)
tile_description = TileDescription(
threadblock_shape=[128, 128, 16], stages=3,
warp_count=[2, 2, 1],
math_instruction=math_inst
)
epilogue_functor = LinearCombination(
C.element, C.alignment,
math_inst.element_accumulator, cutlass.float32)
operation = Conv2dOperation(
conv_kind=cutlass.conv.Operator.dgrad, iterator_algorithm=cutlass.conv.IteratorAlgorithm.optimized,
arch=80, tile_description=tile_description, A=A, B=B, C=C,
stride_support=StrideSupport.Unity,
epilogue_functor=epilogue_functor,
swizzling_functor=cutlass.IdentitySwizzle1
)
self.assertTrue(test_all_conv2d(operation))
if __name__ == '__main__':
pycutlass.get_memory_pool(2**26, 2**26)
unittest.main()
| warp-main | warp/native/cutlass/tools/library/scripts/pycutlass/test/conv/conv2d_dgrad_implicit_gemm_tf32nhwc_tf32nhwc_f32nhwc_tensor_op_f32_sm80.py |
# test/unit/conv/device/conv2d_dgrad_implicit_gemm_f16nhwc_f16nhwc_f32nhwc_tensor_op_f32_sm80.cu
import pycutlass
from pycutlass import *
from pycutlass.test import *
from pycutlass.utils.device import device_cc
import unittest
@unittest.skipIf(device_cc() < 80, "Device compute capability is insufficient for SM80 tests.")
class Conv2dDgradImplicitGemmF16nhwcF16nhwcF32nhwcTensorOpF32SM80(unittest.TestCase):
def test_SM80_Device_Conv2d_Dgrad_Optimized_ImplicitGemm_f16nhwc_f16nhwc_f32nhwc_tensor_op_f32_unity_stride_stage3(self):
math_inst = MathInstruction(
instruction_shape=[16, 8, 16],
element_a=cutlass.float16, element_b=cutlass.float16,
element_accumulator=cutlass.float32, opcode_class=cutlass.OpClass.TensorOp,
math_operation=MathOperation.multiply_add
)
A = TensorDescription(
element=math_inst.element_a,
layout=cutlass.TensorNHWC,
alignment=8)
B = TensorDescription(
element=math_inst.element_b,
layout=cutlass.TensorNHWC,
alignment=8)
C = TensorDescription(
element=cutlass.float32,
layout=cutlass.TensorNHWC,
alignment=4)
tile_description = TileDescription(
threadblock_shape=[128, 128, 32], stages=3,
warp_count=[2, 2, 1],
math_instruction=math_inst
)
epilogue_functor = LinearCombination(
C.element, C.alignment,
math_inst.element_accumulator, cutlass.float32)
operation = Conv2dOperation(
conv_kind=cutlass.conv.Operator.dgrad, iterator_algorithm=cutlass.conv.IteratorAlgorithm.optimized,
arch=80, tile_description=tile_description, A=A, B=B, C=C,
stride_support=StrideSupport.Unity,
epilogue_functor=epilogue_functor,
swizzling_functor=cutlass.IdentitySwizzle1
)
self.assertTrue(test_all_conv2d(operation))
def test_SM80_Device_Conv2d_Dgrad_Optimized_ImplicitGemm_f16nhwc_f16nhwc_f32nhwc_tensor_op_f32_unity_stride_stage4(self):
math_inst = MathInstruction(
instruction_shape=[16, 8, 16],
element_a=cutlass.float16, element_b=cutlass.float16,
element_accumulator=cutlass.float32, opcode_class=cutlass.OpClass.TensorOp,
math_operation=MathOperation.multiply_add
)
A = TensorDescription(
element=math_inst.element_a,
layout=cutlass.TensorNHWC,
alignment=8)
B = TensorDescription(
element=math_inst.element_b,
layout=cutlass.TensorNHWC,
alignment=8)
C = TensorDescription(
element=cutlass.float32,
layout=cutlass.TensorNHWC,
alignment=4)
tile_description = TileDescription(
threadblock_shape=[128, 128, 32], stages=4,
warp_count=[2, 2, 1],
math_instruction=math_inst
)
epilogue_functor = LinearCombination(
C.element, C.alignment,
math_inst.element_accumulator, cutlass.float32)
operation = Conv2dOperation(
conv_kind=cutlass.conv.Operator.dgrad, iterator_algorithm=cutlass.conv.IteratorAlgorithm.optimized,
arch=80, tile_description=tile_description, A=A, B=B, C=C,
stride_support=StrideSupport.Unity,
epilogue_functor=epilogue_functor,
swizzling_functor=cutlass.IdentitySwizzle1
)
self.assertTrue(test_all_conv2d(operation))
def test_SM80_Device_Conv2d_Dgrad_Optimized_ImplicitGemm_f16nhwc_f16nhwc_f32nhwc_tensor_op_f32_unity_stride_stage3_64(self):
math_inst = MathInstruction(
instruction_shape=[16, 8, 16],
element_a=cutlass.float16, element_b=cutlass.float16,
element_accumulator=cutlass.float32, opcode_class=cutlass.OpClass.TensorOp,
math_operation=MathOperation.multiply_add
)
A = TensorDescription(
element=math_inst.element_a,
layout=cutlass.TensorNHWC,
alignment=8)
B = TensorDescription(
element=math_inst.element_b,
layout=cutlass.TensorNHWC,
alignment=8)
C = TensorDescription(
element=cutlass.float32,
layout=cutlass.TensorNHWC,
alignment=4)
tile_description = TileDescription(
threadblock_shape=[128, 128, 64], stages=3,
warp_count=[2, 2, 1],
math_instruction=math_inst
)
epilogue_functor = LinearCombination(
C.element, C.alignment,
math_inst.element_accumulator, cutlass.float32)
operation = Conv2dOperation(
conv_kind=cutlass.conv.Operator.dgrad, iterator_algorithm=cutlass.conv.IteratorAlgorithm.optimized,
arch=80, tile_description=tile_description, A=A, B=B, C=C,
stride_support=StrideSupport.Unity,
epilogue_functor=epilogue_functor,
swizzling_functor=cutlass.IdentitySwizzle1
)
self.assertTrue(test_all_conv2d(operation))
def test_SM80_Device_Conv2d_Dgrad_Optimized_ImplicitGemm_f16nhwc_f16nhwc_f32nhwc_tensor_op_f32_unity_stride_stage4_64(self):
math_inst = MathInstruction(
instruction_shape=[16, 8, 16],
element_a=cutlass.float16, element_b=cutlass.float16,
element_accumulator=cutlass.float32, opcode_class=cutlass.OpClass.TensorOp,
math_operation=MathOperation.multiply_add
)
A = TensorDescription(
element=math_inst.element_a,
layout=cutlass.TensorNHWC,
alignment=8)
B = TensorDescription(
element=math_inst.element_b,
layout=cutlass.TensorNHWC,
alignment=8)
C = TensorDescription(
element=cutlass.float32,
layout=cutlass.TensorNHWC,
alignment=4)
tile_description = TileDescription(
threadblock_shape=[128, 128, 64], stages=4,
warp_count=[2, 2, 1],
math_instruction=math_inst
)
epilogue_functor = LinearCombination(
C.element, C.alignment,
math_inst.element_accumulator, cutlass.float32)
operation = Conv2dOperation(
conv_kind=cutlass.conv.Operator.dgrad, iterator_algorithm=cutlass.conv.IteratorAlgorithm.optimized,
arch=80, tile_description=tile_description, A=A, B=B, C=C,
stride_support=StrideSupport.Unity,
epilogue_functor=epilogue_functor,
swizzling_functor=cutlass.IdentitySwizzle1
)
self.assertTrue(test_all_conv2d(operation))
if __name__ == '__main__':
pycutlass.get_memory_pool(2**26, 2**26)
unittest.main()
| warp-main | warp/native/cutlass/tools/library/scripts/pycutlass/test/conv/conv2d_dgrad_implicit_gemm_f16nhwc_f16nhwc_f32nhwc_tensor_op_f32_sm80.py |
# test/unit/conv/device/conv2d_dgrad_implicit_gemm_f16nhwc_f16nhwc_f16nhwc_tensor_op_f16_sm80.cu
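# Exercises both the analytic and optimized dgrad iterators in pure f16
# (f16 accumulation), including alignment-4 variants driven by a small
# 4x4x12 problem size.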
import pycutlass
from pycutlass.conv2d_operation import *
from pycutlass import *
from pycutlass.test import *
from pycutlass.utils.device import device_cc
import unittest
@unittest.skipIf(device_cc() < 80, "Device compute capability is insufficient for SM80 tests.")
class Conv2dDgradImplicitGemmF16nhwcF16nhwcF16nhwcTensorOpF16SM80(unittest.TestCase):
def test_SM80_Device_Conv2d_Dgrad_Analytic_ImplicitGemm_f16nhwc_f16nhwc_f16nhwc_tensor_op_f16(self):
math_inst = MathInstruction(
instruction_shape=[16, 8, 16],
element_a=cutlass.float16, element_b=cutlass.float16,
element_accumulator=cutlass.float16, opcode_class=cutlass.OpClass.TensorOp,
math_operation=MathOperation.multiply_add
)
A = TensorDescription(
element=math_inst.element_a,
layout=cutlass.TensorNHWC,
alignment=8)
B = TensorDescription(
element=math_inst.element_b,
layout=cutlass.TensorNHWC,
alignment=8)
C = TensorDescription(
element=cutlass.float16,
layout=cutlass.TensorNHWC,
alignment=8)
tile_description = TileDescription(
threadblock_shape=[128, 128, 64], stages=3,
warp_count=[2, 2, 1],
math_instruction=math_inst
)
epilogue_functor = LinearCombination(
C.element, C.alignment,
math_inst.element_accumulator, cutlass.float16)
operation = Conv2dOperation(
conv_kind=cutlass.conv.Operator.dgrad, iterator_algorithm=cutlass.conv.IteratorAlgorithm.analytic,
arch=80, tile_description=tile_description, A=A, B=B, C=C,
stride_support=StrideSupport.Unity,
epilogue_functor=epilogue_functor,
swizzling_functor=cutlass.IdentitySwizzle1
)
self.assertTrue(test_all_conv2d(operation))
def test_SM80_Device_Conv2d_Dgrad_Optimized_ImplicitGemm_f16nhwc_f16nhwc_f16nhwc_tensor_op_f16(self):
math_inst = MathInstruction(
instruction_shape=[16, 8, 16],
element_a=cutlass.float16, element_b=cutlass.float16,
element_accumulator=cutlass.float16, opcode_class=cutlass.OpClass.TensorOp,
math_operation=MathOperation.multiply_add
)
A = TensorDescription(
element=math_inst.element_a,
layout=cutlass.TensorNHWC,
alignment=8)
B = TensorDescription(
element=math_inst.element_b,
layout=cutlass.TensorNHWC,
alignment=8)
C = TensorDescription(
element=cutlass.float16,
layout=cutlass.TensorNHWC,
alignment=8)
tile_description = TileDescription(
threadblock_shape=[128, 128, 64], stages=3,
warp_count=[2, 2, 1],
math_instruction=math_inst
)
epilogue_functor = LinearCombination(
C.element, C.alignment,
math_inst.element_accumulator, cutlass.float16)
operation = Conv2dOperation(
conv_kind=cutlass.conv.Operator.dgrad, iterator_algorithm=cutlass.conv.IteratorAlgorithm.optimized,
arch=80, tile_description=tile_description, A=A, B=B, C=C,
stride_support=StrideSupport.Unity,
epilogue_functor=epilogue_functor,
swizzling_functor=cutlass.IdentitySwizzle1
)
self.assertTrue(test_all_conv2d(operation))
def test_SM80_Device_Conv2d_Dgrad_Analytic_ImplicitGemm_f16nhwc_f16nhwc_f16nhwc_tensor_op_f16_align4(self):
math_inst = MathInstruction(
instruction_shape=[16, 8, 16],
element_a=cutlass.float16, element_b=cutlass.float16,
element_accumulator=cutlass.float16, opcode_class=cutlass.OpClass.TensorOp,
math_operation=MathOperation.multiply_add
)
A = TensorDescription(
element=math_inst.element_a,
layout=cutlass.TensorNHWC,
alignment=4)
B = TensorDescription(
element=math_inst.element_b,
layout=cutlass.TensorNHWC,
alignment=4)
C = TensorDescription(
element=cutlass.float16,
layout=cutlass.TensorNHWC,
alignment=4)
tile_description = TileDescription(
threadblock_shape=[128, 128, 64], stages=3,
warp_count=[2, 2, 1],
math_instruction=math_inst
)
epilogue_functor = LinearCombination(
C.element, C.alignment,
math_inst.element_accumulator, cutlass.float16)
operation = Conv2dOperation(
conv_kind=cutlass.conv.Operator.dgrad, iterator_algorithm=cutlass.conv.IteratorAlgorithm.analytic,
arch=80, tile_description=tile_description, A=A, B=B, C=C,
stride_support=StrideSupport.Unity,
epilogue_functor=epilogue_functor,
swizzling_functor=cutlass.IdentitySwizzle1
)
problem_sizes = [
cutlass.conv.Conv2dProblemSize(
cutlass.Tensor4DCoord(1, 4, 4, 12),
cutlass.Tensor4DCoord(8, 3, 3, 12),
cutlass.Tensor4DCoord(0, 0, 0, 0),
cutlass.MatrixCoord(3, 3),
cutlass.MatrixCoord(1, 1),
cutlass.conv.Mode.cross_correlation,
1, 1
),
]
self.assertTrue(test_all_conv2d(operation, problem_sizes))
def test_SM80_Device_Conv2d_Dgrad_Optimized_ImplicitGemm_f16nhwc_f16nhwc_f16nhwc_tensor_op_f16_align4(self):
math_inst = MathInstruction(
instruction_shape=[16, 8, 16],
element_a=cutlass.float16, element_b=cutlass.float16,
element_accumulator=cutlass.float16, opcode_class=cutlass.OpClass.TensorOp,
math_operation=MathOperation.multiply_add
)
A = TensorDescription(
element=math_inst.element_a,
layout=cutlass.TensorNHWC,
alignment=4)
B = TensorDescription(
element=math_inst.element_b,
layout=cutlass.TensorNHWC,
alignment=4)
C = TensorDescription(
element=cutlass.float16,
layout=cutlass.TensorNHWC,
alignment=4)
tile_description = TileDescription(
threadblock_shape=[128, 128, 64], stages=3,
warp_count=[2, 2, 1],
math_instruction=math_inst
)
epilogue_functor = LinearCombination(
C.element, C.alignment,
math_inst.element_accumulator, cutlass.float16)
operation = Conv2dOperation(
conv_kind=cutlass.conv.Operator.dgrad, iterator_algorithm=cutlass.conv.IteratorAlgorithm.optimized,
arch=80, tile_description=tile_description, A=A, B=B, C=C,
stride_support=StrideSupport.Unity,
epilogue_functor=epilogue_functor,
swizzling_functor=cutlass.IdentitySwizzle1
)
problem_sizes = [
cutlass.conv.Conv2dProblemSize(
cutlass.Tensor4DCoord(1, 4, 4, 12),
cutlass.Tensor4DCoord(8, 3, 3, 12),
cutlass.Tensor4DCoord(0, 0, 0, 0),
cutlass.MatrixCoord(3, 3),
cutlass.MatrixCoord(1, 1),
cutlass.conv.Mode.cross_correlation,
1, 1
),
]
self.assertTrue(test_all_conv2d(operation, problem_sizes))
if __name__ == '__main__':
pycutlass.get_memory_pool(2**26, 2**26)
unittest.main()
| warp-main | warp/native/cutlass/tools/library/scripts/pycutlass/test/conv/conv2d_dgrad_implicit_gemm_f16nhwc_f16nhwc_f16nhwc_tensor_op_f16_sm80.py |
# test/unit/conv/device/conv2d_strided_dgrad_implicit_gemm_f16nhwc_f16nhwc_f32nhwc_tensor_op_f32_sm80.cu
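# Covers strided dgrad (StrideSupport.Strided paired with the StridedDgrad
# swizzle) for f16 inputs and f32 accumulation: analytic and optimized
# iterators, two threadblock shapes, and alignment-4 corner cases.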
import pycutlass
from pycutlass import *
from pycutlass.test import *
from pycutlass.utils.device import device_cc
import unittest
@unittest.skipIf(device_cc() < 80, "Device compute capability is insufficient for SM80 tests.")
class Conv2dStridedDgradImplicitGemmF16NHWCF16NHWCF32NHWCTensorOpF32SM80(unittest.TestCase):
def test_SM80_Device_Conv2d_Strided_Dgrad_Analytic_ImplicitGemm_f16nhwc_f16nhwc_f32nhwc_tensor_op_f32_128x128_32x3_64x64x32(self):
math_inst = MathInstruction(
instruction_shape=[16, 8, 16],
element_a=cutlass.float16, element_b=cutlass.float16,
element_accumulator=cutlass.float32, opcode_class=cutlass.OpClass.TensorOp,
math_operation=MathOperation.multiply_add
)
A = TensorDescription(
element=math_inst.element_a,
layout=cutlass.TensorNHWC,
alignment=8)
B = TensorDescription(
element=math_inst.element_b,
layout=cutlass.TensorNHWC,
alignment=8)
C = TensorDescription(
element=cutlass.float32,
layout=cutlass.TensorNHWC,
alignment=4)
tile_description = TileDescription(
threadblock_shape=[128, 128, 32], stages=3,
warp_count=[2, 2, 1],
math_instruction=math_inst
)
epilogue_functor = LinearCombination(
C.element, C.alignment,
math_inst.element_accumulator, cutlass.float32)
operation = Conv2dOperation(
conv_kind=cutlass.conv.Operator.dgrad, iterator_algorithm=cutlass.conv.IteratorAlgorithm.analytic,
arch=80, tile_description=tile_description, A=A, B=B, C=C,
stride_support=StrideSupport.Strided,
epilogue_functor=epilogue_functor,
swizzling_functor=cutlass.StridedDgradIdentitySwizzle1
)
self.assertTrue(test_all_conv2d(operation))
def test_SM80_Device_Conv2d_Strided_Dgrad_Analytic_ImplicitGemm_f16nhwc_f16nhwc_f32nhwc_tensor_op_f32_128x256_64x3_64x64x64(self):
math_inst = MathInstruction(
instruction_shape=[16, 8, 16],
element_a=cutlass.float16, element_b=cutlass.float16,
element_accumulator=cutlass.float32, opcode_class=cutlass.OpClass.TensorOp,
math_operation=MathOperation.multiply_add
)
A = TensorDescription(
element=math_inst.element_a,
layout=cutlass.TensorNHWC,
alignment=8)
B = TensorDescription(
element=math_inst.element_b,
layout=cutlass.TensorNHWC,
alignment=8)
C = TensorDescription(
element=cutlass.float32,
layout=cutlass.TensorNHWC,
alignment=4)
tile_description = TileDescription(
threadblock_shape=[128, 256, 64], stages=3,
warp_count=[2, 4, 1],
math_instruction=math_inst
)
epilogue_functor = LinearCombination(
C.element, C.alignment,
math_inst.element_accumulator, cutlass.float32)
operation = Conv2dOperation(
conv_kind=cutlass.conv.Operator.dgrad, iterator_algorithm=cutlass.conv.IteratorAlgorithm.analytic,
arch=80, tile_description=tile_description, A=A, B=B, C=C,
stride_support=StrideSupport.Strided,
epilogue_functor=epilogue_functor,
swizzling_functor=cutlass.StridedDgradIdentitySwizzle1
)
self.assertTrue(test_all_conv2d(operation))
def test_SM80_Device_Conv2d_Strided_Dgrad_Analytic_ImplicitGemm_f16nhwc_f16nhwc_f32nhwc_tensor_op_f32_align4_128x128_32x3_64x64x32(self):
math_inst = MathInstruction(
instruction_shape=[16, 8, 16],
element_a=cutlass.float16, element_b=cutlass.float16,
element_accumulator=cutlass.float32, opcode_class=cutlass.OpClass.TensorOp,
math_operation=MathOperation.multiply_add
)
A = TensorDescription(
element=math_inst.element_a,
layout=cutlass.TensorNHWC,
alignment=4)
B = TensorDescription(
element=math_inst.element_b,
layout=cutlass.TensorNHWC,
alignment=4)
C = TensorDescription(
element=cutlass.float32,
layout=cutlass.TensorNHWC,
alignment=4)
tile_description = TileDescription(
threadblock_shape=[128, 128, 32], stages=3,
warp_count=[2, 2, 1],
math_instruction=math_inst
)
epilogue_functor = LinearCombination(
C.element, C.alignment,
math_inst.element_accumulator, cutlass.float32)
operation = Conv2dOperation(
conv_kind=cutlass.conv.Operator.dgrad, iterator_algorithm=cutlass.conv.IteratorAlgorithm.analytic,
arch=80, tile_description=tile_description, A=A, B=B, C=C,
stride_support=StrideSupport.Strided,
epilogue_functor=epilogue_functor,
swizzling_functor=cutlass.StridedDgradIdentitySwizzle1
)
problem_sizes = [
cutlass.conv.Conv2dProblemSize(
cutlass.Tensor4DCoord(1, 4, 4, 12),
cutlass.Tensor4DCoord(8, 3, 3, 12),
cutlass.Tensor4DCoord(0, 0, 0, 0),
cutlass.MatrixCoord(3, 3),
cutlass.MatrixCoord(1, 1),
cutlass.conv.Mode.cross_correlation,
1, 1
),
]
self.assertTrue(test_all_conv2d(operation, problem_sizes))
def test_SM80_Device_Conv2d_Strided_Dgrad_Optimized_ImplicitGemm_f16nhwc_f16nhwc_f32nhwc_tensor_op_f32_128x128_32x3_64x64x32(self):
math_inst = MathInstruction(
instruction_shape=[16, 8, 16],
element_a=cutlass.float16, element_b=cutlass.float16,
element_accumulator=cutlass.float32, opcode_class=cutlass.OpClass.TensorOp,
math_operation=MathOperation.multiply_add
)
A = TensorDescription(
element=math_inst.element_a,
layout=cutlass.TensorNHWC,
alignment=8)
B = TensorDescription(
element=math_inst.element_b,
layout=cutlass.TensorNHWC,
alignment=8)
C = TensorDescription(
element=cutlass.float32,
layout=cutlass.TensorNHWC,
alignment=4)
tile_description = TileDescription(
threadblock_shape=[128, 128, 32], stages=3,
warp_count=[2, 2, 1],
math_instruction=math_inst
)
epilogue_functor = LinearCombination(
C.element, C.alignment,
math_inst.element_accumulator, cutlass.float32)
operation = Conv2dOperation(
conv_kind=cutlass.conv.Operator.dgrad, iterator_algorithm=cutlass.conv.IteratorAlgorithm.optimized,
arch=80, tile_description=tile_description, A=A, B=B, C=C,
stride_support=StrideSupport.Strided,
epilogue_functor=epilogue_functor,
swizzling_functor=cutlass.StridedDgradIdentitySwizzle1
)
self.assertTrue(test_all_conv2d(operation))
def test_SM80_Device_Conv2d_Strided_Dgrad_Optimized_ImplicitGemm_f16nhwc_f16nhwc_f32nhwc_tensor_op_f32_128x128_32x3_64x64x32_align4(self):
math_inst = MathInstruction(
instruction_shape=[16, 8, 16],
element_a=cutlass.float16, element_b=cutlass.float16,
element_accumulator=cutlass.float32, opcode_class=cutlass.OpClass.TensorOp,
math_operation=MathOperation.multiply_add
)
A = TensorDescription(
element=math_inst.element_a,
layout=cutlass.TensorNHWC,
alignment=4)
B = TensorDescription(
element=math_inst.element_b,
layout=cutlass.TensorNHWC,
alignment=4)
C = TensorDescription(
element=cutlass.float32,
layout=cutlass.TensorNHWC,
alignment=4)
tile_description = TileDescription(
threadblock_shape=[128, 128, 32], stages=3,
warp_count=[2, 2, 1],
math_instruction=math_inst
)
epilogue_functor = LinearCombination(
C.element, C.alignment,
math_inst.element_accumulator, cutlass.float32)
operation = Conv2dOperation(
conv_kind=cutlass.conv.Operator.dgrad, iterator_algorithm=cutlass.conv.IteratorAlgorithm.optimized,
arch=80, tile_description=tile_description, A=A, B=B, C=C,
stride_support=StrideSupport.Strided,
epilogue_functor=epilogue_functor,
swizzling_functor=cutlass.StridedDgradIdentitySwizzle1
)
problem_sizes = [
cutlass.conv.Conv2dProblemSize(
cutlass.Tensor4DCoord(1, 56, 56, 12),
cutlass.Tensor4DCoord(8, 1, 1, 12),
cutlass.Tensor4DCoord(0, 0, 0, 0),
cutlass.MatrixCoord(2, 2),
cutlass.MatrixCoord(1, 1),
cutlass.conv.Mode.cross_correlation,
1, 1
),
cutlass.conv.Conv2dProblemSize(
cutlass.Tensor4DCoord(1, 55, 55, 12),
cutlass.Tensor4DCoord(8, 1, 1, 12),
cutlass.Tensor4DCoord(0, 0, 0, 0),
cutlass.MatrixCoord(2, 2),
cutlass.MatrixCoord(1, 1),
cutlass.conv.Mode.cross_correlation,
1, 1
),
]
self.assertTrue(test_all_conv2d(operation, problem_sizes))
if __name__ == '__main__':
pycutlass.get_memory_pool(2**26, 2**26)
unittest.main()
| warp-main | warp/native/cutlass/tools/library/scripts/pycutlass/test/conv/conv2d_strided_dgrad_implicit_gemm_f16nhwc_f16nhwc_f32nhwc_tensor_op_f32_sm80.py |
# test/unit/conv/device/conv2d_fprop_few_channels_f16nhwc_f16nhwc_f16nhwc_tensor_op_f32_sm80.cu
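# Exercises the few-channels fprop iterator, which supports input channel
# counts below the usual alignment requirement (here C=2 and C=1).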
import pycutlass
from pycutlass import *
from pycutlass.test import *
from pycutlass.utils.device import device_cc
import unittest
def conv2d_few_channel_problemsizes(channels):
problem_sizes = [
cutlass.conv.Conv2dProblemSize(
cutlass.Tensor4DCoord(1, 8, 8, channels),
cutlass.Tensor4DCoord(16, 3, 3, channels),
cutlass.Tensor4DCoord(1, 1, 1, 1),
cutlass.MatrixCoord(2, 2),
cutlass.MatrixCoord(1, 1),
cutlass.conv.Mode.cross_correlation,
1, 1
),
cutlass.conv.Conv2dProblemSize(
cutlass.Tensor4DCoord(1, 16, 16, channels),
cutlass.Tensor4DCoord(16, 3, 3, channels),
cutlass.Tensor4DCoord(1, 1, 1, 1),
cutlass.MatrixCoord(2, 2),
cutlass.MatrixCoord(1, 1),
cutlass.conv.Mode.cross_correlation,
1, 1
),
cutlass.conv.Conv2dProblemSize(
cutlass.Tensor4DCoord(1, 16, 16, channels),
cutlass.Tensor4DCoord(16, 7, 7, channels),
cutlass.Tensor4DCoord(1, 1, 1, 1),
cutlass.MatrixCoord(1, 1),
cutlass.MatrixCoord(1, 1),
cutlass.conv.Mode.cross_correlation,
1, 1
),
cutlass.conv.Conv2dProblemSize(
cutlass.Tensor4DCoord(1, 224, 224, channels),
cutlass.Tensor4DCoord(32, 7, 7, channels),
cutlass.Tensor4DCoord(1, 1, 1, 1),
cutlass.MatrixCoord(1, 1),
cutlass.MatrixCoord(1, 1),
cutlass.conv.Mode.cross_correlation,
1, 1
),
cutlass.conv.Conv2dProblemSize(
cutlass.Tensor4DCoord(1, 224, 224, channels),
cutlass.Tensor4DCoord(64, 7, 7, channels),
cutlass.Tensor4DCoord(1, 1, 1, 1),
cutlass.MatrixCoord(2, 2),
cutlass.MatrixCoord(1, 1),
cutlass.conv.Mode.cross_correlation,
1, 1
),
cutlass.conv.Conv2dProblemSize(
cutlass.Tensor4DCoord(1, 224, 224, channels),
cutlass.Tensor4DCoord(64, 5, 5, channels),
cutlass.Tensor4DCoord(1, 1, 1, 1),
cutlass.MatrixCoord(1, 1),
cutlass.MatrixCoord(1, 1),
cutlass.conv.Mode.cross_correlation,
1, 1
),
cutlass.conv.Conv2dProblemSize(
cutlass.Tensor4DCoord(1, 224, 224, channels),
cutlass.Tensor4DCoord(64, 5, 5, channels),
cutlass.Tensor4DCoord(1, 1, 1, 1),
cutlass.MatrixCoord(2, 2),
cutlass.MatrixCoord(1, 1),
cutlass.conv.Mode.cross_correlation,
1, 1
),
]
return problem_sizes
@unittest.skipIf(device_cc() < 80, "Device compute capability is insufficient for SM80 tests.")
class Conv2dFpropFewChannelsF16NHWCF16NHWCF16NHWCTensorOpF32SM80(unittest.TestCase):
def test_SM80_Device_Conv2d_Fprop_Few_Channels_ImplicitGemm_f16nhwc_f16nhwc_f16nhwc_tensor_op_f32_channels_2(self):
math_inst = MathInstruction(
instruction_shape=[16, 8, 16],
element_a=cutlass.float16, element_b=cutlass.float16,
element_accumulator=cutlass.float32, opcode_class=cutlass.OpClass.TensorOp,
math_operation=MathOperation.multiply_add
)
A = TensorDescription(
element=math_inst.element_a,
layout=cutlass.TensorNHWC,
alignment=2)
B = TensorDescription(
element=math_inst.element_b,
layout=cutlass.TensorNHWC,
alignment=2)
C = TensorDescription(
element=cutlass.float16,
layout=cutlass.TensorNHWC,
alignment=8)
tile_description = TileDescription(
threadblock_shape=[128, 128, 64], stages=3,
warp_count=[2, 2, 1],
math_instruction=math_inst
)
epilogue_functor = LinearCombination(
C.element, C.alignment,
math_inst.element_accumulator, cutlass.float32)
operation = Conv2dOperation(
conv_kind=cutlass.conv.Operator.fprop, iterator_algorithm=cutlass.conv.IteratorAlgorithm.few_channels,
arch=80, tile_description=tile_description, A=A, B=B, C=C,
stride_support=StrideSupport.Strided,
epilogue_functor=epilogue_functor,
swizzling_functor=cutlass.IdentitySwizzle1
)
self.assertTrue(test_all_conv2d(operation, conv2d_few_channel_problemsizes(2)))
def test_SM80_Device_Conv2d_Fprop_Few_Channels_ImplicitGemm_f16nhwc_f16nhwc_f16nhwc_tensor_op_f32_channels_1(self):
math_inst = MathInstruction(
instruction_shape=[16, 8, 8],
element_a=cutlass.float16, element_b=cutlass.float16,
element_accumulator=cutlass.float32, opcode_class=cutlass.OpClass.TensorOp,
math_operation=MathOperation.multiply_add
)
A = TensorDescription(
element=math_inst.element_a,
layout=cutlass.TensorNHWC,
alignment=1)
B = TensorDescription(
element=math_inst.element_b,
layout=cutlass.TensorNHWC,
alignment=1)
C = TensorDescription(
element=cutlass.float16,
layout=cutlass.TensorNHWC,
alignment=8)
tile_description = TileDescription(
threadblock_shape=[128, 128, 32], stages=2,
warp_count=[2, 2, 1],
math_instruction=math_inst
)
epilogue_functor = LinearCombination(
C.element, C.alignment,
math_inst.element_accumulator, cutlass.float32)
operation = Conv2dOperation(
conv_kind=cutlass.conv.Operator.fprop, iterator_algorithm=cutlass.conv.IteratorAlgorithm.few_channels,
arch=80, tile_description=tile_description, A=A, B=B, C=C,
stride_support=StrideSupport.Strided,
epilogue_functor=epilogue_functor,
swizzling_functor=cutlass.IdentitySwizzle1
)
self.assertTrue(test_all_conv2d(operation, conv2d_few_channel_problemsizes(1)))
if __name__ == '__main__':
pycutlass.get_memory_pool(2**26, 2**26)
unittest.main()
| warp-main | warp/native/cutlass/tools/library/scripts/pycutlass/test/conv/conv2d_fprop_few_channels_f16nhwc_f16nhwc_f16nhwc_tensor_op_f32_sm80.py |
# test/unit/conv/device/conv2d_wgrad_implicit_gemm_f16nhwc_f16nhwc_f32nhwc_tensor_op_f32_sm80.cu
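# Covers wgrad with f16 inputs and f32 accumulation/output: analytic and
# optimized iterators, a 64x256 threadblock variant, and alignment-4 cases
# driven by a small 4x4x12 problem size.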
import pycutlass
from pycutlass import *
from pycutlass.test import *
from pycutlass.utils.device import device_cc
import unittest
@unittest.skipIf(device_cc() < 80, "Device compute capability is insufficient for SM80 tests.")
class Conv2dWgradImplicitGemmF16nhwcF16nhwcF32nhwcTensorOpF32SM80(unittest.TestCase):
    def test_SM80_Device_Conv2d_Wgrad_Analytic_ImplicitGemm_f16nhwc_f16nhwc_f32nhwc_tensor_op_f32(self):
math_inst = MathInstruction(
instruction_shape=[16, 8, 8],
element_a=cutlass.float16, element_b=cutlass.float16,
element_accumulator=cutlass.float32, opcode_class=cutlass.OpClass.TensorOp,
math_operation=MathOperation.multiply_add
)
A = TensorDescription(
element=math_inst.element_a,
layout=cutlass.TensorNHWC,
alignment=8)
B = TensorDescription(
element=math_inst.element_b,
layout=cutlass.TensorNHWC,
alignment=8)
C = TensorDescription(
element=cutlass.float32,
layout=cutlass.TensorNHWC,
alignment=4)
tile_description = TileDescription(
threadblock_shape=[128, 128, 16], stages=3,
warp_count=[2, 2, 1],
math_instruction=math_inst
)
epilogue_functor = LinearCombination(
C.element, C.alignment,
math_inst.element_accumulator, cutlass.float32)
operation = Conv2dOperation(
conv_kind=cutlass.conv.Operator.wgrad, iterator_algorithm=cutlass.conv.IteratorAlgorithm.analytic,
arch=80, tile_description=tile_description, A=A, B=B, C=C,
stride_support=StrideSupport.Strided,
epilogue_functor=epilogue_functor,
swizzling_functor=cutlass.IdentitySwizzle1
)
self.assertTrue(test_all_conv2d(operation))
def test_SM80_Device_Conv2d_Wgrad_Optimized_ImplicitGemm_f16nhwc_f16nhwc_f32nhwc_tensor_op_f32(self):
math_inst = MathInstruction(
instruction_shape=[16, 8, 8],
element_a=cutlass.float16, element_b=cutlass.float16,
element_accumulator=cutlass.float32, opcode_class=cutlass.OpClass.TensorOp,
math_operation=MathOperation.multiply_add
)
A = TensorDescription(
element=math_inst.element_a,
layout=cutlass.TensorNHWC,
alignment=8)
B = TensorDescription(
element=math_inst.element_b,
layout=cutlass.TensorNHWC,
alignment=8)
C = TensorDescription(
element=cutlass.float32,
layout=cutlass.TensorNHWC,
alignment=4)
tile_description = TileDescription(
threadblock_shape=[128, 128, 16], stages=3,
warp_count=[2, 2, 1],
math_instruction=math_inst
)
epilogue_functor = LinearCombination(
C.element, C.alignment,
math_inst.element_accumulator, cutlass.float32)
operation = Conv2dOperation(
conv_kind=cutlass.conv.Operator.wgrad, iterator_algorithm=cutlass.conv.IteratorAlgorithm.optimized,
arch=80, tile_description=tile_description, A=A, B=B, C=C,
stride_support=StrideSupport.Strided,
epilogue_functor=epilogue_functor,
swizzling_functor=cutlass.IdentitySwizzle1
)
self.assertTrue(test_all_conv2d(operation))
def test_SM80_Device_Conv2d_Wgrad_Optimized_ImplicitGemm_f16nhwc_f16nhwc_f32nhwc_tensor_op_f32_64x256_32x4_64x64x32(self):
math_inst = MathInstruction(
instruction_shape=[16, 8, 16],
element_a=cutlass.float16, element_b=cutlass.float16,
element_accumulator=cutlass.float32, opcode_class=cutlass.OpClass.TensorOp,
math_operation=MathOperation.multiply_add
)
A = TensorDescription(
element=math_inst.element_a,
layout=cutlass.TensorNHWC,
alignment=8)
B = TensorDescription(
element=math_inst.element_b,
layout=cutlass.TensorNHWC,
alignment=8)
C = TensorDescription(
element=cutlass.float32,
layout=cutlass.TensorNHWC,
alignment=4)
tile_description = TileDescription(
threadblock_shape=[64, 256, 32], stages=3,
warp_count=[1, 4, 1],
math_instruction=math_inst
)
epilogue_functor = LinearCombination(
C.element, C.alignment,
math_inst.element_accumulator, cutlass.float32)
operation = Conv2dOperation(
conv_kind=cutlass.conv.Operator.wgrad, iterator_algorithm=cutlass.conv.IteratorAlgorithm.optimized,
arch=80, tile_description=tile_description, A=A, B=B, C=C,
stride_support=StrideSupport.Strided,
epilogue_functor=epilogue_functor,
swizzling_functor=cutlass.IdentitySwizzle1
)
self.assertTrue(test_all_conv2d(operation))
    def test_SM80_Device_Conv2d_Wgrad_Analytic_ImplicitGemm_f16nhwc_f16nhwc_f32nhwc_tensor_op_f32_align4(self):
math_inst = MathInstruction(
instruction_shape=[16, 8, 8],
element_a=cutlass.float16, element_b=cutlass.float16,
element_accumulator=cutlass.float32, opcode_class=cutlass.OpClass.TensorOp,
math_operation=MathOperation.multiply_add
)
A = TensorDescription(
element=math_inst.element_a,
layout=cutlass.TensorNHWC,
alignment=4)
B = TensorDescription(
element=math_inst.element_b,
layout=cutlass.TensorNHWC,
alignment=4)
C = TensorDescription(
element=cutlass.float32,
layout=cutlass.TensorNHWC,
alignment=4)
tile_description = TileDescription(
threadblock_shape=[128, 128, 16], stages=3,
warp_count=[2, 2, 1],
math_instruction=math_inst
)
epilogue_functor = LinearCombination(
C.element, C.alignment,
math_inst.element_accumulator, cutlass.float32)
operation = Conv2dOperation(
conv_kind=cutlass.conv.Operator.wgrad, iterator_algorithm=cutlass.conv.IteratorAlgorithm.analytic,
arch=80, tile_description=tile_description, A=A, B=B, C=C,
stride_support=StrideSupport.Strided,
epilogue_functor=epilogue_functor,
swizzling_functor=cutlass.IdentitySwizzle1
)
problem_sizes = [
cutlass.conv.Conv2dProblemSize(
cutlass.Tensor4DCoord(1, 4, 4, 12),
cutlass.Tensor4DCoord(8, 3, 3, 12),
cutlass.Tensor4DCoord(0, 0, 0, 0),
cutlass.MatrixCoord(3, 3),
cutlass.MatrixCoord(1, 1),
cutlass.conv.Mode.cross_correlation,
1, 1
),
]
self.assertTrue(test_all_conv2d(operation, problem_sizes))
    def test_SM80_Device_Conv2d_Wgrad_Optimized_ImplicitGemm_f16nhwc_f16nhwc_f32nhwc_tensor_op_f32_align4(self):
math_inst = MathInstruction(
instruction_shape=[16, 8, 8],
element_a=cutlass.float16, element_b=cutlass.float16,
element_accumulator=cutlass.float32, opcode_class=cutlass.OpClass.TensorOp,
math_operation=MathOperation.multiply_add
)
A = TensorDescription(
element=math_inst.element_a,
layout=cutlass.TensorNHWC,
alignment=4)
B = TensorDescription(
element=math_inst.element_b,
layout=cutlass.TensorNHWC,
alignment=4)
C = TensorDescription(
element=cutlass.float32,
layout=cutlass.TensorNHWC,
alignment=4)
tile_description = TileDescription(
threadblock_shape=[128, 128, 16], stages=3,
warp_count=[2, 2, 1],
math_instruction=math_inst
)
epilogue_functor = LinearCombination(
C.element, C.alignment,
math_inst.element_accumulator, cutlass.float32)
operation = Conv2dOperation(
conv_kind=cutlass.conv.Operator.wgrad, iterator_algorithm=cutlass.conv.IteratorAlgorithm.optimized,
arch=80, tile_description=tile_description, A=A, B=B, C=C,
stride_support=StrideSupport.Strided,
epilogue_functor=epilogue_functor,
swizzling_functor=cutlass.IdentitySwizzle1
)
problem_sizes = [
cutlass.conv.Conv2dProblemSize(
cutlass.Tensor4DCoord(1, 4, 4, 12),
cutlass.Tensor4DCoord(8, 3, 3, 12),
cutlass.Tensor4DCoord(0, 0, 0, 0),
cutlass.MatrixCoord(3, 3),
cutlass.MatrixCoord(1, 1),
cutlass.conv.Mode.cross_correlation,
1, 1
),
]
self.assertTrue(test_all_conv2d(operation, problem_sizes))
if __name__ == '__main__':
pycutlass.get_memory_pool(2**26, 2**26)
unittest.main()
| warp-main | warp/native/cutlass/tools/library/scripts/pycutlass/test/conv/conv2d_wgrad_implicit_gemm_f16nhwc_f16nhwc_f32nhwc_tensor_op_f32_sm80.py |
# test/unit/conv/device/conv2d_dgrad_implicit_gemm_f32nhwc_f32nhwc_f32nhwc_simt_f32_sm80.cu
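# SIMT (CUDA-core) f32 dgrad with unity stride: analytic and optimized
# iterators on 128x128x8 threadblock tiles with a 4-stage pipeline.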
import pycutlass
from pycutlass.conv2d_operation import *
from pycutlass import *
from pycutlass.test import *
from pycutlass.utils.device import device_cc
import unittest
@unittest.skipIf(device_cc() < 80, "Device compute capability is insufficient for SM80 tests.")
class Conv2dDgradImplicitGemmF32nhwcF32nhwcF32nhwcSimtF32SM80(unittest.TestCase):
    def test_SM80_Device_Conv2d_Dgrad_Analytic_ImplicitGemm_f32nhwc_f32nhwc_f32nhwc_simt_f32(self):
math_inst = MathInstruction(
instruction_shape=[1, 1, 1],
element_a=cutlass.float32, element_b=cutlass.float32,
element_accumulator=cutlass.float32, opcode_class=cutlass.OpClass.Simt,
math_operation=MathOperation.multiply_add
)
A = TensorDescription(
element=math_inst.element_a,
layout=cutlass.TensorNHWC,
alignment=4)
B = TensorDescription(
element=math_inst.element_b,
layout=cutlass.TensorNHWC,
alignment=4)
C = TensorDescription(
element=cutlass.float32,
layout=cutlass.TensorNHWC,
alignment=1)
tile_description = TileDescription(
threadblock_shape=[128, 128, 8], stages=4,
warp_count=[4, 2, 1],
math_instruction=math_inst
)
epilogue_functor = LinearCombination(
C.element, C.alignment,
math_inst.element_accumulator, cutlass.float32)
operation = Conv2dOperation(
conv_kind=cutlass.conv.Operator.dgrad, iterator_algorithm=cutlass.conv.IteratorAlgorithm.analytic,
arch=80, tile_description=tile_description, A=A, B=B, C=C,
stride_support=StrideSupport.Unity,
epilogue_functor=epilogue_functor,
swizzling_functor=cutlass.IdentitySwizzle1
)
self.assertTrue(test_all_conv2d(operation))
def test_SM80_Device_Conv2d_Dgrad_Optimized_ImplicitGemm_f32nhwc_f32nhwc_f32nhwc_simt_f32(self):
math_inst = MathInstruction(
instruction_shape=[1, 1, 1],
element_a=cutlass.float32, element_b=cutlass.float32,
element_accumulator=cutlass.float32, opcode_class=cutlass.OpClass.Simt,
math_operation=MathOperation.multiply_add
)
A = TensorDescription(
element=math_inst.element_a,
layout=cutlass.TensorNHWC,
alignment=4)
B = TensorDescription(
element=math_inst.element_b,
layout=cutlass.TensorNHWC,
alignment=4)
C = TensorDescription(
element=cutlass.float32,
layout=cutlass.TensorNHWC,
alignment=1)
tile_description = TileDescription(
threadblock_shape=[128, 128, 8], stages=4,
warp_count=[2, 4, 1],
math_instruction=math_inst
)
epilogue_functor = LinearCombination(
C.element, C.alignment,
math_inst.element_accumulator, cutlass.float32)
operation = Conv2dOperation(
conv_kind=cutlass.conv.Operator.dgrad, iterator_algorithm=cutlass.conv.IteratorAlgorithm.optimized,
arch=80, tile_description=tile_description, A=A, B=B, C=C,
stride_support=StrideSupport.Unity,
epilogue_functor=epilogue_functor,
swizzling_functor=cutlass.IdentitySwizzle1
)
self.assertTrue(test_all_conv2d(operation))
if __name__ == '__main__':
pycutlass.get_memory_pool(2**26, 2**26)
unittest.main()
| warp-main | warp/native/cutlass/tools/library/scripts/pycutlass/test/conv/conv2d_dgrad_implicit_gemm_f32nhwc_f32nhwc_f32nhwc_simt_f32_sm80.py |
# test/unit/conv/device/conv2d_fprop_implicit_gemm_f32nhwc_f32nhwc_f32nhwc_simt_f32_sm80.cu
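# SIMT (CUDA-core) f32 fprop: the analytic path additionally exercises
# IdentitySwizzle2, while the optimized path uses the default IdentitySwizzle1.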
import pycutlass
from pycutlass.conv2d_operation import *
from pycutlass import *
from pycutlass.test import *
from pycutlass.utils.device import device_cc
import unittest
@unittest.skipIf(device_cc() < 80, "Device compute capability is insufficient for SM80 tests.")
class Conv2dFpropImplicitGemmF32nhwcF32nhwcF32nhwcSimtF32SM80(unittest.TestCase):
def test_SM80_Device_Conv2d_Fprop_Analytic_ImplicitGemm_f32nhwc_f32nhwc_f32nhwc_simt_f32(self):
math_inst = MathInstruction(
instruction_shape=[1, 1, 1],
element_a=cutlass.float32, element_b=cutlass.float32,
element_accumulator=cutlass.float32, opcode_class=cutlass.OpClass.Simt,
math_operation=MathOperation.multiply_add
)
A = TensorDescription(
element=math_inst.element_a,
layout=cutlass.TensorNHWC,
alignment=4)
B = TensorDescription(
element=math_inst.element_b,
layout=cutlass.TensorNHWC,
alignment=4)
C = TensorDescription(
element=cutlass.float32,
layout=cutlass.TensorNHWC,
alignment=1)
tile_description = TileDescription(
threadblock_shape=[128, 128, 8], stages=4,
warp_count=[4, 2, 1],
math_instruction=math_inst
)
epilogue_functor = LinearCombination(
C.element, C.alignment,
math_inst.element_accumulator, cutlass.float32)
operation = Conv2dOperation(
conv_kind=cutlass.conv.Operator.fprop, iterator_algorithm=cutlass.conv.IteratorAlgorithm.analytic,
arch=80, tile_description=tile_description, A=A, B=B, C=C,
stride_support=StrideSupport.Strided,
epilogue_functor=epilogue_functor,
swizzling_functor=cutlass.IdentitySwizzle2
)
self.assertTrue(test_all_conv2d(operation))
def test_SM80_Device_Conv2d_Fprop_Optimized_ImplicitGemm_f32nhwc_f32nhwc_f32nhwc_simt_f32(self):
math_inst = MathInstruction(
instruction_shape=[1, 1, 1],
element_a=cutlass.float32, element_b=cutlass.float32,
element_accumulator=cutlass.float32, opcode_class=cutlass.OpClass.Simt,
math_operation=MathOperation.multiply_add
)
A = TensorDescription(
element=math_inst.element_a,
layout=cutlass.TensorNHWC,
alignment=4)
B = TensorDescription(
element=math_inst.element_b,
layout=cutlass.TensorNHWC,
alignment=4)
C = TensorDescription(
element=cutlass.float32,
layout=cutlass.TensorNHWC,
alignment=1)
tile_description = TileDescription(
threadblock_shape=[128, 128, 8], stages=4,
warp_count=[2, 4, 1],
math_instruction=math_inst
)
epilogue_functor = LinearCombination(
C.element, C.alignment,
math_inst.element_accumulator, cutlass.float32)
operation = Conv2dOperation(
conv_kind=cutlass.conv.Operator.fprop, iterator_algorithm=cutlass.conv.IteratorAlgorithm.optimized,
arch=80, tile_description=tile_description, A=A, B=B, C=C,
stride_support=StrideSupport.Strided,
epilogue_functor=epilogue_functor,
swizzling_functor=cutlass.IdentitySwizzle1
)
self.assertTrue(test_all_conv2d(operation))
if __name__ == '__main__':
pycutlass.get_memory_pool(2**26, 2**26)
unittest.main()
| warp-main | warp/native/cutlass/tools/library/scripts/pycutlass/test/conv/conv2d_fprop_implicit_gemm_f32nhwc_f32nhwc_f32nhwc_simt_f32_sm80.py |
| warp-main | warp/native/cutlass/tools/library/scripts/pycutlass/test/conv/__init__.py |
# test/unit/conv/device/conv2d_wgrad_implicit_gemm_f16nhwc_f16nhwc_f16nhwc_tensor_op_f16_sm80.cu
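# Wgrad in pure f16 (f16 accumulation): analytic and optimized iterators on
# 128x128x64 threadblock tiles.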
import pycutlass
from pycutlass import *
from pycutlass.test import *
from pycutlass.utils.device import device_cc
import unittest
@unittest.skipIf(device_cc() < 80, "Device compute capability is insufficient for SM80 tests.")
class Conv2dWgradImplicitGemmF16nhwcF16nhwcF16nhwcTensorOpF16SM80(unittest.TestCase):
    def test_SM80_Device_Conv2d_Wgrad_Analytic_ImplicitGemm_f16nhwc_f16nhwc_f16nhwc_tensor_op_f16(self):
math_inst = MathInstruction(
instruction_shape=[16, 8, 16],
element_a=cutlass.float16, element_b=cutlass.float16,
element_accumulator=cutlass.float16, opcode_class=cutlass.OpClass.TensorOp,
math_operation=MathOperation.multiply_add
)
A = TensorDescription(
element=math_inst.element_a,
layout=cutlass.TensorNHWC,
alignment=8)
B = TensorDescription(
element=math_inst.element_b,
layout=cutlass.TensorNHWC,
alignment=8)
C = TensorDescription(
element=cutlass.float16,
layout=cutlass.TensorNHWC,
alignment=8)
tile_description = TileDescription(
threadblock_shape=[128, 128, 64], stages=3,
warp_count=[2, 2, 1],
math_instruction=math_inst
)
epilogue_functor = LinearCombination(
C.element, C.alignment, math_inst.element_accumulator,
cutlass.float16
)
operation = Conv2dOperation(
conv_kind=cutlass.conv.Operator.wgrad, iterator_algorithm=cutlass.conv.IteratorAlgorithm.analytic,
arch=80, tile_description=tile_description, A=A, B=B, C=C,
stride_support=StrideSupport.Strided,
epilogue_functor=epilogue_functor,
swizzling_functor=cutlass.IdentitySwizzle1
)
self.assertTrue(test_all_conv2d(operation))
    def test_SM80_Device_Conv2d_Wgrad_Optimized_ImplicitGemm_f16nhwc_f16nhwc_f16nhwc_tensor_op_f16(self):
math_inst = MathInstruction(
instruction_shape=[16, 8, 16],
element_a=cutlass.float16, element_b=cutlass.float16,
element_accumulator=cutlass.float16, opcode_class=cutlass.OpClass.TensorOp,
math_operation=MathOperation.multiply_add
)
A = TensorDescription(
element=math_inst.element_a,
layout=cutlass.TensorNHWC,
alignment=8)
B = TensorDescription(
element=math_inst.element_b,
layout=cutlass.TensorNHWC,
alignment=8)
C = TensorDescription(
element=cutlass.float16,
layout=cutlass.TensorNHWC,
alignment=8)
tile_description = TileDescription(
threadblock_shape=[128, 128, 64], stages=3,
warp_count=[2, 2, 1],
math_instruction=math_inst
)
epilogue_functor = LinearCombination(
C.element, C.alignment, math_inst.element_accumulator,
cutlass.float16
)
operation = Conv2dOperation(
conv_kind=cutlass.conv.Operator.wgrad, iterator_algorithm=cutlass.conv.IteratorAlgorithm.optimized,
arch=80, tile_description=tile_description, A=A, B=B, C=C,
stride_support=StrideSupport.Strided,
epilogue_functor=epilogue_functor,
swizzling_functor=cutlass.IdentitySwizzle1
)
self.assertTrue(test_all_conv2d(operation))
if __name__ == '__main__':
pycutlass.get_memory_pool(2**26, 2**26)
unittest.main()
| warp-main | warp/native/cutlass/tools/library/scripts/pycutlass/test/conv/conv2d_wgrad_implicit_gemm_f16nhwc_f16nhwc_f16nhwc_tensor_op_f16_sm80.py |
# test/unit/conv/device/conv2d_fprop_fixed_channels_f16nhwc_f16nhwc_f16nhwc_tensor_op_f32_sm80.cu
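# Exercises the fixed-channels fprop iterator, which specializes the mainloop
# for a small compile-time channel count (here C=8, C=4 and C=2).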
import pycutlass
from pycutlass import *
from pycutlass.test import *
from pycutlass.utils.device import device_cc
import unittest
def conv2d_fixed_channel_problemsizes(channels):
problem_sizes = [
cutlass.conv.Conv2dProblemSize(
cutlass.Tensor4DCoord(1, 8, 8, channels),
cutlass.Tensor4DCoord(16, 3, 3, channels),
cutlass.Tensor4DCoord(1, 1, 1, 1),
cutlass.MatrixCoord(2, 2),
cutlass.MatrixCoord(1, 1),
cutlass.conv.Mode.cross_correlation,
1, 1
),
cutlass.conv.Conv2dProblemSize(
cutlass.Tensor4DCoord(1, 224, 224, channels),
cutlass.Tensor4DCoord(32, 7, 7, channels),
cutlass.Tensor4DCoord(1, 1, 1, 1),
cutlass.MatrixCoord(1, 1),
cutlass.MatrixCoord(1, 1),
cutlass.conv.Mode.cross_correlation,
1, 1
),
cutlass.conv.Conv2dProblemSize(
cutlass.Tensor4DCoord(1, 224, 224, channels),
cutlass.Tensor4DCoord(64, 7, 7, channels),
cutlass.Tensor4DCoord(1, 1, 1, 1),
cutlass.MatrixCoord(2, 2),
cutlass.MatrixCoord(1, 1),
cutlass.conv.Mode.cross_correlation,
1, 1
),
cutlass.conv.Conv2dProblemSize(
cutlass.Tensor4DCoord(1, 224, 224, channels),
cutlass.Tensor4DCoord(64, 5, 5, channels),
cutlass.Tensor4DCoord(1, 1, 1, 1),
cutlass.MatrixCoord(1, 1),
cutlass.MatrixCoord(1, 1),
cutlass.conv.Mode.cross_correlation,
1, 1
),
cutlass.conv.Conv2dProblemSize(
cutlass.Tensor4DCoord(1, 224, 224, channels),
cutlass.Tensor4DCoord(64, 5, 5, channels),
cutlass.Tensor4DCoord(1, 1, 1, 1),
cutlass.MatrixCoord(2, 2),
cutlass.MatrixCoord(1, 1),
cutlass.conv.Mode.cross_correlation,
1, 1
),
]
return problem_sizes
@unittest.skipIf(device_cc() < 80, "Device compute capability is insufficient for SM80 tests.")
class Conv2dFpropFixedChannelsF16NHWCF16NHWCF16NHWCTensorOpF32SM80(unittest.TestCase):
def test_SM80_Device_Conv2d_Fprop_Fixed_Channels_ImplicitGemm_f16nhwc_f16nhwc_f16nhwc_tensor_op_f32_channels_8(self):
math_inst = MathInstruction(
instruction_shape=[16, 8, 16],
element_a=cutlass.float16, element_b=cutlass.float16,
element_accumulator=cutlass.float32, opcode_class=cutlass.OpClass.TensorOp,
math_operation=MathOperation.multiply_add
)
A = TensorDescription(
element=math_inst.element_a,
layout=cutlass.TensorNHWC,
alignment=8)
B = TensorDescription(
element=math_inst.element_b,
layout=cutlass.TensorNHWC,
alignment=8)
C = TensorDescription(
element=cutlass.float16,
layout=cutlass.TensorNHWC,
alignment=8)
tile_description = TileDescription(
threadblock_shape=[128, 128, 64], stages=3,
warp_count=[2, 2, 1],
math_instruction=math_inst
)
epilogue_functor = LinearCombination(
C.element, C.alignment,
math_inst.element_accumulator, cutlass.float32)
operation = Conv2dOperation(
conv_kind=cutlass.conv.Operator.fprop, iterator_algorithm=cutlass.conv.IteratorAlgorithm.fixed_channels,
arch=80, tile_description=tile_description, A=A, B=B, C=C,
stride_support=StrideSupport.Strided,
epilogue_functor=epilogue_functor,
swizzling_functor=cutlass.IdentitySwizzle1
)
self.assertTrue(test_all_conv2d(operation, conv2d_fixed_channel_problemsizes(8)))
def test_SM80_Device_Conv2d_Fprop_Fixed_Channels_ImplicitGemm_f16nhwc_f16nhwc_f16nhwc_tensor_op_f32_channels_4(self):
math_inst = MathInstruction(
instruction_shape=[16, 8, 16],
element_a=cutlass.float16, element_b=cutlass.float16,
element_accumulator=cutlass.float32, opcode_class=cutlass.OpClass.TensorOp,
math_operation=MathOperation.multiply_add
)
A = TensorDescription(
element=math_inst.element_a,
layout=cutlass.TensorNHWC,
alignment=4)
B = TensorDescription(
element=math_inst.element_b,
layout=cutlass.TensorNHWC,
alignment=4)
C = TensorDescription(
element=cutlass.float16,
layout=cutlass.TensorNHWC,
alignment=8)
tile_description = TileDescription(
threadblock_shape=[128, 128, 64], stages=3,
warp_count=[2, 2, 1],
math_instruction=math_inst
)
epilogue_functor = LinearCombination(
C.element, C.alignment,
math_inst.element_accumulator, cutlass.float32)
operation = Conv2dOperation(
conv_kind=cutlass.conv.Operator.fprop, iterator_algorithm=cutlass.conv.IteratorAlgorithm.fixed_channels,
arch=80, tile_description=tile_description, A=A, B=B, C=C,
stride_support=StrideSupport.Strided,
epilogue_functor=epilogue_functor,
swizzling_functor=cutlass.IdentitySwizzle1
)
self.assertTrue(test_all_conv2d(operation, conv2d_fixed_channel_problemsizes(4)))
def test_SM80_Device_Conv2d_Fprop_Fixed_Channels_ImplicitGemm_f16nhwc_f16nhwc_f16nhwc_tensor_op_f32_channels_2(self):
math_inst = MathInstruction(
instruction_shape=[16, 8, 16],
element_a=cutlass.float16, element_b=cutlass.float16,
element_accumulator=cutlass.float32, opcode_class=cutlass.OpClass.TensorOp,
math_operation=MathOperation.multiply_add
)
A = TensorDescription(
element=math_inst.element_a,
layout=cutlass.TensorNHWC,
alignment=2)
B = TensorDescription(
element=math_inst.element_b,
layout=cutlass.TensorNHWC,
alignment=2)
C = TensorDescription(
element=cutlass.float16,
layout=cutlass.TensorNHWC,
alignment=8)
tile_description = TileDescription(
threadblock_shape=[128, 128, 64], stages=3,
warp_count=[2, 2, 1],
math_instruction=math_inst
)
epilogue_functor = LinearCombination(
C.element, C.alignment,
math_inst.element_accumulator, cutlass.float32)
operation = Conv2dOperation(
conv_kind=cutlass.conv.Operator.fprop, iterator_algorithm=cutlass.conv.IteratorAlgorithm.fixed_channels,
arch=80, tile_description=tile_description, A=A, B=B, C=C,
stride_support=StrideSupport.Strided,
epilogue_functor=epilogue_functor,
swizzling_functor=cutlass.IdentitySwizzle1
)
self.assertTrue(test_all_conv2d(operation, conv2d_fixed_channel_problemsizes(2)))
if __name__ == '__main__':
pycutlass.get_memory_pool(2**26, 2**26)
unittest.main()
| warp-main | warp/native/cutlass/tools/library/scripts/pycutlass/test/conv/conv2d_fprop_fixed_channels_f16nhwc_f16nhwc_f16nhwc_tensor_op_f32_sm80.py |
# test/unit/conv/device/conv2d_fprop_implicit_gemm_tf32nhwc_tf32nhwc_f32nhwc_tensor_op_f32_sm80.cu
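# Fprop through tf32 tensor cores (f32 data on a 16x8x8 TensorOp MMA): one
# analytic run plus an alignment-2 optimized case on a small problem size.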
import pycutlass
from pycutlass import *
from pycutlass.test import *
from pycutlass.utils.device import device_cc
import unittest
@unittest.skipIf(device_cc() < 80, "Device compute capability is insufficient for SM80 tests.")
class Conv2dFpropImplicitGemmTF32nhwcTF32nhwcF32nhwcTensorOpF32SM80(unittest.TestCase):
def test_SM80_Device_Conv2d_Fprop_Analytic_ImplicitGemm_tf32nhwc_tf32nhwc_f32nhwc_tensor_op_f32(self):
math_inst = MathInstruction(
instruction_shape=[16, 8, 8],
element_a=cutlass.float32, element_b=cutlass.float32,
element_accumulator=cutlass.float32, opcode_class=cutlass.OpClass.TensorOp,
math_operation=MathOperation.multiply_add
)
A = TensorDescription(
element=math_inst.element_a,
layout=cutlass.TensorNHWC,
alignment=4)
B = TensorDescription(
element=math_inst.element_b,
layout=cutlass.TensorNHWC,
alignment=4)
C = TensorDescription(
element=cutlass.float32,
layout=cutlass.TensorNHWC,
alignment=8)
tile_description = TileDescription(
threadblock_shape=[128, 128, 16], stages=3,
warp_count=[2, 2, 1],
math_instruction=math_inst
)
epilogue_functor = LinearCombination(
C.element, C.alignment,
math_inst.element_accumulator, cutlass.float32)
operation = Conv2dOperation(
conv_kind=cutlass.conv.Operator.fprop, iterator_algorithm=cutlass.conv.IteratorAlgorithm.analytic,
arch=80, tile_description=tile_description, A=A, B=B, C=C,
stride_support=StrideSupport.Strided,
epilogue_functor=epilogue_functor,
swizzling_functor=cutlass.IdentitySwizzle1
)
self.assertTrue(test_all_conv2d(operation))
def test_SM80_Device_Conv2d_Fprop_Optimized_ImplicitGemm_tf32nhwc_tf32nhwc_f32nhwc_tensor_op_f32_align2(self):
math_inst = MathInstruction(
instruction_shape=[16, 8, 8],
element_a=cutlass.float32, element_b=cutlass.float32,
element_accumulator=cutlass.float32, opcode_class=cutlass.OpClass.TensorOp,
math_operation=MathOperation.multiply_add
)
A = TensorDescription(
element=math_inst.element_a,
layout=cutlass.TensorNHWC,
alignment=2)
B = TensorDescription(
element=math_inst.element_b,
layout=cutlass.TensorNHWC,
alignment=2)
C = TensorDescription(
element=cutlass.float32,
layout=cutlass.TensorNHWC,
alignment=8)
tile_description = TileDescription(
threadblock_shape=[128, 128, 16], stages=3,
warp_count=[2, 2, 1],
math_instruction=math_inst
)
epilogue_functor = LinearCombination(
C.element, C.alignment,
math_inst.element_accumulator, cutlass.float32)
operation = Conv2dOperation(
conv_kind=cutlass.conv.Operator.fprop, iterator_algorithm=cutlass.conv.IteratorAlgorithm.optimized,
arch=80, tile_description=tile_description, A=A, B=B, C=C,
stride_support=StrideSupport.Strided,
epilogue_functor=epilogue_functor,
swizzling_functor=cutlass.IdentitySwizzle1
)
problem_sizes = [
cutlass.conv.Conv2dProblemSize(
cutlass.Tensor4DCoord(1, 4, 4, 12),
cutlass.Tensor4DCoord(8, 3, 3, 12),
cutlass.Tensor4DCoord(0, 0, 0, 0),
cutlass.MatrixCoord(3, 3),
cutlass.MatrixCoord(1, 1),
cutlass.conv.Mode.cross_correlation,
1, 1
)
]
self.assertTrue(test_all_conv2d(operation, problem_sizes))
if __name__ == '__main__':
pycutlass.get_memory_pool(2**26, 2**26)
unittest.main()
| warp-main | warp/native/cutlass/tools/library/scripts/pycutlass/test/conv/conv2d_fprop_implicit_gemm_tf32nhwc_tf32nhwc_f32nhwc_tensor_op_f32_sm80.py |
# test/unit/conv/device/conv2d_fprop_implicit_gemm_f16nhwc_f16nhwc_f32nhwc_tensor_op_f32_sm80.cu
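# Single smoke test: analytic fprop with f16 inputs and f32
# accumulation/output on a 128x128x64 threadblock tile.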
import pycutlass
from pycutlass import *
from pycutlass.test import *
from pycutlass.utils.device import device_cc
import unittest
@unittest.skipIf(device_cc() < 80, "Device compute capability is insufficient for SM80 tests.")
class Conv2dFpropImplicitGemmF16nhwcF16nhwcF32nhwcTensorOpF32SM80(unittest.TestCase):
def test_SM80_Device_Conv2d_Fprop_Analytic_ImplicitGemm_f16nhwc_f16nhwc_f32nhwc_tensor_op_f32(self):
math_inst = MathInstruction(
instruction_shape=[16, 8, 16],
element_a=cutlass.float16, element_b=cutlass.float16,
element_accumulator=cutlass.float32, opcode_class=cutlass.OpClass.TensorOp,
math_operation=MathOperation.multiply_add
)
A = TensorDescription(
element=math_inst.element_a,
layout=cutlass.TensorNHWC,
alignment=8)
B = TensorDescription(
element=math_inst.element_b,
layout=cutlass.TensorNHWC,
alignment=8)
C = TensorDescription(
element=cutlass.float32,
layout=cutlass.TensorNHWC,
alignment=4)
tile_description = TileDescription(
threadblock_shape=[128, 128, 64], stages=3,
warp_count=[2, 2, 1],
math_instruction=math_inst
)
epilogue_functor = LinearCombination(
C.element, C.alignment,
math_inst.element_accumulator, cutlass.float32)
operation = Conv2dOperation(
conv_kind=cutlass.conv.Operator.fprop, iterator_algorithm=cutlass.conv.IteratorAlgorithm.analytic,
arch=80, tile_description=tile_description, A=A, B=B, C=C,
stride_support=StrideSupport.Strided,
epilogue_functor=epilogue_functor,
swizzling_functor=cutlass.IdentitySwizzle1
)
self.assertTrue(test_all_conv2d(operation))
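# A minimal sketch (not invoked by the test runner) of how an operation like
# the one above could be compiled and launched by hand, instead of going
# through test_all_conv2d. It follows the calling convention of the pycutlass
# examples (compiler.add_module, Conv2dArguments, operation.run); the exact
# argument names and the torch interop are assumptions based on those
# examples, not a definitive API reference.
def _example_manual_launch(operation):
    import torch
    # JIT-compile the kernel described by `operation`.
    pycutlass.compiler.add_module([operation])
    # 1x16x16x64 input, 32 3x3 filters, pad 1, stride 1 -> 1x16x16x32 output.
    problem_size = cutlass.conv.Conv2dProblemSize(
        cutlass.Tensor4DCoord(1, 16, 16, 64),
        cutlass.Tensor4DCoord(32, 3, 3, 64),
        cutlass.Tensor4DCoord(1, 1, 1, 1),
        cutlass.MatrixCoord(1, 1),
        cutlass.MatrixCoord(1, 1),
        cutlass.conv.Mode.cross_correlation,
        1, 1)
    tensor_A = torch.ones(1 * 16 * 16 * 64, dtype=torch.float16, device="cuda")
    tensor_B = torch.ones(32 * 3 * 3 * 64, dtype=torch.float16, device="cuda")
    tensor_C = torch.zeros(1 * 16 * 16 * 32, dtype=torch.float32, device="cuda")
    tensor_D = torch.zeros_like(tensor_C)
    arguments = Conv2dArguments(
        operation=operation, problem_size=problem_size,
        A=tensor_A, B=tensor_B, C=tensor_C, D=tensor_D,
        output_op=operation.epilogue_type(1.0, 0.0),  # alpha = 1, beta = 0
        split_k_mode=cutlass.conv.SplitKMode.Serial,
        split_k_slices=1)
    operation.run(arguments)
    arguments.sync()  # block until the kernel finishes before reading tensor_D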
if __name__ == '__main__':
pycutlass.get_memory_pool(2**26, 2**26)
unittest.main()
| warp-main | warp/native/cutlass/tools/library/scripts/pycutlass/test/conv/conv2d_fprop_implicit_gemm_f16nhwc_f16nhwc_f32nhwc_tensor_op_f32_sm80.py |
# test/unit/conv/device/conv2d_wgrad_implicit_gemm_f32nhwc_f32nhwc_f32nhwc_simt_f32_sm80.cu
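# SIMT (CUDA-core) f32 wgrad: analytic and optimized iterators on 128x128x8
# threadblock tiles with a 4-stage pipeline.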
import pycutlass
from pycutlass.conv2d_operation import *
from pycutlass import *
from pycutlass.test import *
from pycutlass.utils.device import device_cc
import unittest
@unittest.skipIf(device_cc() < 80, "Device compute capability is insufficient for SM80 tests.")
class Conv2dWgradImplicitGemmF32nhwcF32nhwcF32nhwcSimtF32SM80(unittest.TestCase):
def test_SM80_Device_Conv2d_Wgrad_Analytic_ImplicitGemm_f32nhwc_f32nhwc_f32nhwc_simt_f32(self):
math_inst = MathInstruction(
instruction_shape=[1, 1, 1],
element_a=cutlass.float32, element_b=cutlass.float32,
element_accumulator=cutlass.float32, opcode_class=cutlass.OpClass.Simt,
math_operation=MathOperation.multiply_add
)
A = TensorDescription(
element=math_inst.element_a,
layout=cutlass.TensorNHWC,
alignment=4)
B = TensorDescription(
element=math_inst.element_b,
layout=cutlass.TensorNHWC,
alignment=4)
C = TensorDescription(
element=cutlass.float32,
layout=cutlass.TensorNHWC,
alignment=1)
tile_description = TileDescription(
threadblock_shape=[128, 128, 8], stages=4,
warp_count=[2, 4, 1],
math_instruction=math_inst
)
epilogue_functor = LinearCombination(
C.element, C.alignment,
math_inst.element_accumulator, cutlass.float32)
operation = Conv2dOperation(
conv_kind=cutlass.conv.Operator.wgrad, iterator_algorithm=cutlass.conv.IteratorAlgorithm.analytic,
arch=80, tile_description=tile_description, A=A, B=B, C=C,
stride_support=StrideSupport.Strided,
epilogue_functor=epilogue_functor,
swizzling_functor=cutlass.IdentitySwizzle1
)
self.assertTrue(test_all_conv2d(operation))
def test_SM80_Device_Conv2d_Wgrad_Optimized_ImplicitGemm_f32nhwc_f32nhwc_f32nhwc_simt_f32(self):
math_inst = MathInstruction(
instruction_shape=[1, 1, 1],
element_a=cutlass.float32, element_b=cutlass.float32,
element_accumulator=cutlass.float32, opcode_class=cutlass.OpClass.Simt,
math_operation=MathOperation.multiply_add
)
A = TensorDescription(
element=math_inst.element_a,
layout=cutlass.TensorNHWC,
alignment=4)
B = TensorDescription(
element=math_inst.element_b,
layout=cutlass.TensorNHWC,
alignment=4)
C = TensorDescription(
element=cutlass.float32,
layout=cutlass.TensorNHWC,
alignment=1)
tile_description = TileDescription(
threadblock_shape=[128, 128, 8], stages=4,
warp_count=[2, 4, 1],
math_instruction=math_inst
)
epilogue_functor = LinearCombination(
C.element, C.alignment,
math_inst.element_accumulator, cutlass.float32)
operation = Conv2dOperation(
conv_kind=cutlass.conv.Operator.wgrad, iterator_algorithm=cutlass.conv.IteratorAlgorithm.optimized,
arch=80, tile_description=tile_description, A=A, B=B, C=C,
stride_support=StrideSupport.Strided,
epilogue_functor=epilogue_functor,
swizzling_functor=cutlass.IdentitySwizzle1
)
self.assertTrue(test_all_conv2d(operation))
if __name__ == '__main__':
pycutlass.get_memory_pool(2**26, 2**26)
unittest.main()
| warp-main | warp/native/cutlass/tools/library/scripts/pycutlass/test/conv/conv2d_wgrad_implicit_gemm_f32nhwc_f32nhwc_f32nhwc_simt_f32_sm80.py |
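# Discovers and runs every conv2d_*.py test module in this directory under a
# single shared device memory pool.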
import pycutlass
import unittest
from pycutlass.memory_manager import *
if __name__ == '__main__':
pycutlass.get_memory_pool(2**32, 2**32)
loader = unittest.TestLoader()
tests = loader.discover('./', 'conv2d_*.py')
testRunner = unittest.runner.TextTestRunner()
testRunner.run(tests)
| warp-main | warp/native/cutlass/tools/library/scripts/pycutlass/test/conv/run_all_tests.py |
# test/unit/conv/device/conv2d_wgrad_implicit_gemm_tf32nhwc_tf32nhwc_f32nhwc_tensor_op_f32_sm80.cu
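# Wgrad through tf32 tensor cores: the optimized iterator at the default
# alignment plus an alignment-1 case driven by a single-channel problem.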
import pycutlass
from pycutlass import *
from pycutlass.test import *
from pycutlass.utils.device import device_cc
import unittest
@unittest.skipIf(device_cc() < 80, "Device compute capability is insufficient for SM80 tests.")
class Conv2dWgradImplicitGemmTF32nhwcTF32nhwcF32nhwcTensorOpF32SM80(unittest.TestCase):
def test_SM80_Device_Conv2d_Wgrad_Optimized_ImplicitGemm_tf32nhwc_tf32nhwc_f32nhwc_tensor_op_f32(self):
math_inst = MathInstruction(
instruction_shape=[16, 8, 8],
element_a=cutlass.float32, element_b=cutlass.float32,
element_accumulator=cutlass.float32, opcode_class=cutlass.OpClass.TensorOp,
math_operation=MathOperation.multiply_add
)
A = TensorDescription(
element=math_inst.element_a,
layout=cutlass.TensorNHWC,
alignment=4)
B = TensorDescription(
element=math_inst.element_b,
layout=cutlass.TensorNHWC,
alignment=4)
C = TensorDescription(
element=cutlass.float32,
layout=cutlass.TensorNHWC,
alignment=8)
tile_description = TileDescription(
threadblock_shape=[128, 128, 16], stages=3,
warp_count=[2, 2, 1],
math_instruction=math_inst
)
epilogue_functor = LinearCombination(
C.element, C.alignment,
math_inst.element_accumulator, cutlass.float32)
operation = Conv2dOperation(
conv_kind=cutlass.conv.Operator.wgrad, iterator_algorithm=cutlass.conv.IteratorAlgorithm.optimized,
arch=80, tile_description=tile_description, A=A, B=B, C=C,
stride_support=StrideSupport.Strided,
epilogue_functor=epilogue_functor,
swizzling_functor=cutlass.IdentitySwizzle1
)
self.assertTrue(test_all_conv2d(operation))
def test_SM80_Device_Conv2d_Wgrad_Optimized_ImplicitGemm_tf32nhwc_tf32nhwc_f32nhwc_tensor_op_f32_align1(self):
math_inst = MathInstruction(
instruction_shape=[16, 8, 8],
element_a=cutlass.float32, element_b=cutlass.float32,
element_accumulator=cutlass.float32, opcode_class=cutlass.OpClass.TensorOp,
math_operation=MathOperation.multiply_add
)
A = TensorDescription(
element=math_inst.element_a,
layout=cutlass.TensorNHWC,
alignment=1)
B = TensorDescription(
element=math_inst.element_b,
layout=cutlass.TensorNHWC,
alignment=1)
C = TensorDescription(
element=cutlass.float32,
layout=cutlass.TensorNHWC,
alignment=4)
tile_description = TileDescription(
threadblock_shape=[128, 128, 32], stages=3,
warp_count=[2, 2, 1],
math_instruction=math_inst
)
epilogue_functor = LinearCombination(
C.element, C.alignment,
math_inst.element_accumulator, cutlass.float32)
operation = Conv2dOperation(
conv_kind=cutlass.conv.Operator.wgrad, iterator_algorithm=cutlass.conv.IteratorAlgorithm.optimized,
arch=80, tile_description=tile_description, A=A, B=B, C=C,
stride_support=StrideSupport.Strided,
epilogue_functor=epilogue_functor,
swizzling_functor=cutlass.IdentitySwizzle1
)
problem_sizes = [
cutlass.conv.Conv2dProblemSize(
cutlass.Tensor4DCoord(1, 8, 8, 1),
cutlass.Tensor4DCoord(1, 3, 3, 1),
cutlass.Tensor4DCoord(1, 1, 1, 1),
cutlass.MatrixCoord(1, 1),
cutlass.MatrixCoord(1, 1),
cutlass.conv.Mode.cross_correlation,
1, 1
),
]
self.assertTrue(test_all_conv2d(operation, problem_sizes))
if __name__ == '__main__':
pycutlass.get_memory_pool(2**26, 2**26)
unittest.main()
| warp-main | warp/native/cutlass/tools/library/scripts/pycutlass/test/conv/conv2d_wgrad_implicit_gemm_tf32nhwc_tf32nhwc_f32nhwc_tensor_op_f32_sm80.py |
# test/unit/conv/device/conv2d_fprop_implicit_gemm_f16nhwc_f16nhwc_f16nhwc_tensor_op_f16_sm80.cu
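# Fprop in pure f16 (f16 accumulation): analytic and optimized iterators,
# plus alignment-2 analytic cases on small problem sizes.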
import pycutlass
from pycutlass import *
from pycutlass.test import *
from pycutlass.utils.device import device_cc
import unittest
@unittest.skipIf(device_cc() < 80, "Device compute capability is insufficient for SM80 tests.")
class Conv2dFpropImplicitGemmF16nhwcF16nhwcF16nhwcTensorOpF16SM80(unittest.TestCase):
def test_SM80_Device_Conv2d_Fprop_Analytic_ImplicitGemm_f16nhwc_f16nhwc_f16nhwc_tensor_op_f16(self):
math_inst = MathInstruction(
instruction_shape=[16, 8, 16],
element_a=cutlass.float16, element_b=cutlass.float16,
element_accumulator=cutlass.float16, opcode_class=cutlass.OpClass.TensorOp,
math_operation=MathOperation.multiply_add
)
A = TensorDescription(
element=math_inst.element_a,
layout=cutlass.TensorNHWC,
alignment=8)
B = TensorDescription(
element=math_inst.element_b,
layout=cutlass.TensorNHWC,
alignment=8)
C = TensorDescription(
element=cutlass.float16,
layout=cutlass.TensorNHWC,
alignment=8)
tile_description = TileDescription(
threadblock_shape=[128, 128, 64], stages=3,
warp_count=[2, 2, 1],
math_instruction=math_inst
)
epilogue_functor = LinearCombination(
C.element, C.alignment,
math_inst.element_accumulator, cutlass.float16)
operation = Conv2dOperation(
conv_kind=cutlass.conv.Operator.fprop, iterator_algorithm=cutlass.conv.IteratorAlgorithm.analytic,
arch=80, tile_description=tile_description, A=A, B=B, C=C,
stride_support=StrideSupport.Strided,
epilogue_functor=epilogue_functor,
swizzling_functor=cutlass.IdentitySwizzle1
)
self.assertTrue(test_all_conv2d(operation))
def test_SM80_Device_Conv2d_Fprop_Optimized_ImplicitGemm_f16nhwc_f16nhwc_f16nhwc_tensor_op_f16(self):
math_inst = MathInstruction(
instruction_shape=[16, 8, 16],
element_a=cutlass.float16, element_b=cutlass.float16,
element_accumulator=cutlass.float16, opcode_class=cutlass.OpClass.TensorOp,
math_operation=MathOperation.multiply_add
)
A = TensorDescription(
element=math_inst.element_a,
layout=cutlass.TensorNHWC,
alignment=8)
B = TensorDescription(
element=math_inst.element_b,
layout=cutlass.TensorNHWC,
alignment=8)
C = TensorDescription(
element=cutlass.float16,
layout=cutlass.TensorNHWC,
alignment=8)
tile_description = TileDescription(
threadblock_shape=[128, 128, 64], stages=3,
warp_count=[2, 2, 1],
math_instruction=math_inst
)
epilogue_functor = LinearCombination(
C.element, C.alignment,
math_inst.element_accumulator, cutlass.float16)
operation = Conv2dOperation(
conv_kind=cutlass.conv.Operator.fprop, iterator_algorithm=cutlass.conv.IteratorAlgorithm.optimized,
arch=80, tile_description=tile_description, A=A, B=B, C=C,
stride_support=StrideSupport.Strided,
epilogue_functor=epilogue_functor,
swizzling_functor=cutlass.IdentitySwizzle1
)
self.assertTrue(test_all_conv2d(operation))
def test_SM80_Device_Conv2d_Fprop_Analytic_ImplicitGemm_f16nhwc_f16nhwc_f16nhwc_tensor_op_f16_align2(self):
math_inst = MathInstruction(
instruction_shape=[16, 8, 16],
element_a=cutlass.float16, element_b=cutlass.float16,
element_accumulator=cutlass.float16, opcode_class=cutlass.OpClass.TensorOp,
math_operation=MathOperation.multiply_add
)
A = TensorDescription(
element=math_inst.element_a,
layout=cutlass.TensorNHWC,
alignment=2)
B = TensorDescription(
element=math_inst.element_b,
layout=cutlass.TensorNHWC,
alignment=2)
C = TensorDescription(
element=cutlass.float16,
layout=cutlass.TensorNHWC,
alignment=8)
tile_description = TileDescription(
threadblock_shape=[128, 128, 64], stages=3,
warp_count=[2, 2, 1],
math_instruction=math_inst
)
epilogue_functor = LinearCombination(
C.element, C.alignment,
math_inst.element_accumulator, cutlass.float16)
operation = Conv2dOperation(
conv_kind=cutlass.conv.Operator.fprop, iterator_algorithm=cutlass.conv.IteratorAlgorithm.analytic,
arch=80, tile_description=tile_description, A=A, B=B, C=C,
stride_support=StrideSupport.Strided,
epilogue_functor=epilogue_functor,
swizzling_functor=cutlass.IdentitySwizzle1
)
problem_sizes = [
cutlass.conv.Conv2dProblemSize(
cutlass.Tensor4DCoord(1, 4, 4, 12),
cutlass.Tensor4DCoord(8, 3, 3, 12),
cutlass.Tensor4DCoord(0, 0, 0, 0),
cutlass.MatrixCoord(3, 3),
cutlass.MatrixCoord(1, 1),
cutlass.conv.Mode.cross_correlation,
1, 1
),
cutlass.conv.Conv2dProblemSize(
cutlass.Tensor4DCoord(1, 4, 4, 14),
cutlass.Tensor4DCoord(8, 3, 3, 14),
cutlass.Tensor4DCoord(0, 0, 0, 0),
cutlass.MatrixCoord(3, 3),
cutlass.MatrixCoord(1, 1),
cutlass.conv.Mode.cross_correlation,
1, 1
),
cutlass.conv.Conv2dProblemSize(
cutlass.Tensor4DCoord(1, 23, 56, 98),
cutlass.Tensor4DCoord(128, 3, 3, 98),
cutlass.Tensor4DCoord(4, 0, 5, 0),
cutlass.MatrixCoord(3, 3),
cutlass.MatrixCoord(1, 1),
cutlass.conv.Mode.cross_correlation,
1, 1
),
]
self.assertTrue(test_all_conv2d(operation, problem_sizes))
def test_SM80_Device_Conv2d_Fprop_Optimized_ImplicitGemm_f16nhwc_f16nhwc_f16nhwc_tensor_op_f16_align2(self):
math_inst = MathInstruction(
instruction_shape=[16, 8, 16],
element_a=cutlass.float16, element_b=cutlass.float16,
element_accumulator=cutlass.float16, opcode_class=cutlass.OpClass.TensorOp,
math_operation=MathOperation.multiply_add
)
A = TensorDescription(
element=math_inst.element_a,
layout=cutlass.TensorNHWC,
alignment=2)
B = TensorDescription(
element=math_inst.element_b,
layout=cutlass.TensorNHWC,
alignment=2)
C = TensorDescription(
element=cutlass.float16,
layout=cutlass.TensorNHWC,
alignment=8)
tile_description = TileDescription(
threadblock_shape=[128, 128, 64], stages=3,
warp_count=[2, 2, 1],
math_instruction=math_inst
)
epilogue_functor = LinearCombination(
C.element, C.alignment,
math_inst.element_accumulator, cutlass.float16)
operation = Conv2dOperation(
conv_kind=cutlass.conv.Operator.fprop, iterator_algorithm=cutlass.conv.IteratorAlgorithm.optimized,
arch=80, tile_description=tile_description, A=A, B=B, C=C,
stride_support=StrideSupport.Strided,
epilogue_functor=epilogue_functor,
swizzling_functor=cutlass.IdentitySwizzle1
)
problem_sizes = [
cutlass.conv.Conv2dProblemSize(
cutlass.Tensor4DCoord(1, 4, 4, 12),
cutlass.Tensor4DCoord(8, 3, 3, 12),
cutlass.Tensor4DCoord(0, 0, 0, 0),
cutlass.MatrixCoord(3, 3),
cutlass.MatrixCoord(1, 1),
cutlass.conv.Mode.cross_correlation,
1, 1
),
cutlass.conv.Conv2dProblemSize(
cutlass.Tensor4DCoord(1, 4, 4, 14),
cutlass.Tensor4DCoord(8, 3, 3, 14),
cutlass.Tensor4DCoord(0, 0, 0, 0),
cutlass.MatrixCoord(3, 3),
cutlass.MatrixCoord(1, 1),
cutlass.conv.Mode.cross_correlation,
1, 1
),
cutlass.conv.Conv2dProblemSize(
cutlass.Tensor4DCoord(1, 23, 56, 98),
cutlass.Tensor4DCoord(128, 3, 3, 98),
cutlass.Tensor4DCoord(4, 0, 5, 0),
cutlass.MatrixCoord(3, 3),
cutlass.MatrixCoord(1, 1),
cutlass.conv.Mode.cross_correlation,
1, 1
),
]
self.assertTrue(test_all_conv2d(operation, problem_sizes))
def test_SM80_Device_Conv2d_Fprop_Analytic_ImplicitGemm_f16nhwc_f16nhwc_f16nhwc_tensor_op_f16_align4(self):
math_inst = MathInstruction(
instruction_shape=[16, 8, 16],
element_a=cutlass.float16, element_b=cutlass.float16,
element_accumulator=cutlass.float16, opcode_class=cutlass.OpClass.TensorOp,
math_operation=MathOperation.multiply_add
)
A = TensorDescription(
element=math_inst.element_a,
layout=cutlass.TensorNHWC,
alignment=4)
B = TensorDescription(
element=math_inst.element_b,
layout=cutlass.TensorNHWC,
alignment=4)
C = TensorDescription(
element=cutlass.float16,
layout=cutlass.TensorNHWC,
alignment=8)
tile_description = TileDescription(
threadblock_shape=[128, 128, 64], stages=3,
warp_count=[2, 2, 1],
math_instruction=math_inst
)
epilogue_functor = LinearCombination(
C.element, C.alignment,
math_inst.element_accumulator, cutlass.float16)
operation = Conv2dOperation(
conv_kind=cutlass.conv.Operator.fprop, iterator_algorithm=cutlass.conv.IteratorAlgorithm.optimized,
arch=80, tile_description=tile_description, A=A, B=B, C=C,
stride_support=StrideSupport.Strided,
epilogue_functor=epilogue_functor,
swizzling_functor=cutlass.IdentitySwizzle1
)
problem_sizes = [
cutlass.conv.Conv2dProblemSize(
cutlass.Tensor4DCoord(1, 4, 4, 12),
cutlass.Tensor4DCoord(8, 3, 3, 12),
cutlass.Tensor4DCoord(0, 0, 0, 0),
cutlass.MatrixCoord(3, 3),
cutlass.MatrixCoord(1, 1),
cutlass.conv.Mode.cross_correlation,
1, 1
),
cutlass.conv.Conv2dProblemSize(
cutlass.Tensor4DCoord(1, 4, 4, 28),
cutlass.Tensor4DCoord(8, 3, 3, 28),
cutlass.Tensor4DCoord(0, 0, 0, 0),
cutlass.MatrixCoord(3, 3),
cutlass.MatrixCoord(1, 1),
cutlass.conv.Mode.cross_correlation,
1, 1
),
cutlass.conv.Conv2dProblemSize(
cutlass.Tensor4DCoord(1, 23, 56, 100),
cutlass.Tensor4DCoord(128, 3, 3, 100),
cutlass.Tensor4DCoord(4, 0, 5, 0),
cutlass.MatrixCoord(3, 3),
cutlass.MatrixCoord(1, 1),
cutlass.conv.Mode.cross_correlation,
1, 1
),
]
self.assertTrue(test_all_conv2d(operation, problem_sizes))
if __name__ == '__main__':
pycutlass.get_memory_pool(2**26, 2**26)
unittest.main()
| warp-main | warp/native/cutlass/tools/library/scripts/pycutlass/test/conv/conv2d_fprop_implicit_gemm_f16nhwc_f16nhwc_f16nhwc_tensor_op_f16_sm80.py |
| warp-main | warp/native/cutlass/tools/library/scripts/pycutlass/test/gemm/__init__.py |
import pycutlass
from pycutlass import *
from pycutlass.test import *
import unittest
from pycutlass.test.gemm_testbed import test_all_gemm
from pycutlass.utils.device import device_cc
@unittest.skipIf(device_cc() < 80, "Device compute capability is insufficient for SM80 tests.")
class GemmF64TensorOpSm80(unittest.TestCase):
def test_SM80_Device_Gemm_f64n_f64t_f64t_tensor_op_f64_32x32x16_16x16x16(self):
math_inst = MathInstruction(
instruction_shape=[8, 8, 4],
element_a=cutlass.float64, element_b=cutlass.float64,
element_accumulator=cutlass.float64, opcode_class=cutlass.OpClass.TensorOp,
math_operation=MathOperation.multiply_add
)
tile_description = TileDescription(
threadblock_shape=[32, 32, 16],
stages=4, warp_count=[2, 2, 1],
math_instruction=math_inst
)
# alignment 1 restricted for double
A = TensorDescription(
element=cutlass.float64, layout=cutlass.ColumnMajor,
alignment=1
)
B = TensorDescription(
element=cutlass.float64, layout=cutlass.RowMajor,
alignment=1
)
C = TensorDescription(
element=cutlass.float64, layout=cutlass.RowMajor,
alignment=1
)
element_epilogue = cutlass.float64
epilogue_functor = LinearCombination(
C.element, C.alignment,
math_inst.element_accumulator, element_epilogue)
swizzling_functor = cutlass.IdentitySwizzle1
operation = GemmOperationUniversal(
arch=80, tile_description=tile_description,
A=A, B=B, C=C,
epilogue_functor=epilogue_functor, swizzling_functor=swizzling_functor
)
self.assertTrue(test_all_gemm(operation, "universal"))
def test_SM80_Device_Gemm_f64t_f64n_f64t_tensor_op_f64_64x64x16_32x32x16(self):
math_inst = MathInstruction(
instruction_shape=[8, 8, 4],
element_a=cutlass.float64, element_b=cutlass.float64,
element_accumulator=cutlass.float64, opcode_class=cutlass.OpClass.TensorOp,
math_operation=MathOperation.multiply_add
)
tile_description = TileDescription(
threadblock_shape=[64, 64, 16],
stages=4, warp_count=[2, 2, 1],
math_instruction=math_inst
)
# alignment 1 restricted for double
A = TensorDescription(
element=cutlass.float64, layout=cutlass.RowMajor,
alignment=1
)
B = TensorDescription(
element=cutlass.float64, layout=cutlass.ColumnMajor,
alignment=1
)
C = TensorDescription(
element=cutlass.float64, layout=cutlass.RowMajor,
alignment=1
)
element_epilogue = cutlass.float64
epilogue_functor = LinearCombination(
C.element, C.alignment,
math_inst.element_accumulator, element_epilogue)
swizzling_functor = cutlass.IdentitySwizzle1
operation = GemmOperationUniversal(
arch=80, tile_description=tile_description,
A=A, B=B, C=C,
epilogue_functor=epilogue_functor, swizzling_functor=swizzling_functor
)
self.assertTrue(test_all_gemm(operation, "universal"))
if __name__ == '__main__':
pycutlass.get_memory_pool(2**24, 2**24)
unittest.main()
| warp-main | warp/native/cutlass/tools/library/scripts/pycutlass/test/gemm/gemm_f64_sm80.py |
import pycutlass
from pycutlass import *
from pycutlass.test import *
import unittest
from pycutlass.test.gemm_testbed import test_all_gemm
from pycutlass.utils.device import device_cc
@unittest.skipIf(device_cc() < 80, "Device compute capability is insufficient for SM80 tests.")
class GemmBF16TensorOpSm80(unittest.TestCase):
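# Note: the following method is not prefixed with "test_", so unittest's
# default collector will not run it as part of the suite.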
def SM80_Device_Gemm_bf16n_bf16n_f32t_tensor_op_f32_64x128x64_32x64x64(self):
math_inst = MathInstruction(
instruction_shape=[16, 8, 16],
element_a=cutlass.bfloat16, element_b=cutlass.bfloat16,
element_accumulator=cutlass.float32, opcode_class=cutlass.OpClass.TensorOp,
math_operation=MathOperation.multiply_add
)
tile_description = TileDescription(
threadblock_shape=[64, 128, 64],
stages=4, warp_count=[2, 2, 1],
math_instruction=math_inst
)
A = TensorDescription(
element=cutlass.bfloat16, layout=cutlass.ColumnMajor,
alignment=8
)
B = TensorDescription(
element=cutlass.bfloat16, layout=cutlass.ColumnMajor,
alignment=8
)
C = TensorDescription(
element=cutlass.float32, layout=cutlass.RowMajor,
alignment=4
)
epilogue_functor = LinearCombination(
C.element, C.alignment,
math_inst.element_accumulator, cutlass.float32)
swizzling_functor = cutlass.IdentitySwizzle1
operation = GemmOperationUniversal(
arch=80, tile_description=tile_description,
A=A, B=B, C=C,
epilogue_functor=epilogue_functor, swizzling_functor=swizzling_functor
)
self.assertTrue(test_all_gemm(operation, "universal"))
def test_SM80_Device_Gemm_bf16t_bf16t_bf16t_tensor_op_f32_128x256x64_64x64x64(self):
math_inst = MathInstruction(
instruction_shape=[16, 8, 16],
element_a=cutlass.bfloat16, element_b=cutlass.bfloat16,
element_accumulator=cutlass.float32, opcode_class=cutlass.OpClass.TensorOp,
math_operation=MathOperation.multiply_add
)
tile_description = TileDescription(
threadblock_shape=[64, 128, 32],
stages=6, warp_count=[2, 2, 1],
math_instruction=math_inst
)
A = TensorDescription(
element=cutlass.bfloat16, layout=cutlass.RowMajor,
alignment=8
)
B = TensorDescription(
element=cutlass.bfloat16, layout=cutlass.RowMajor,
alignment=8
)
C = TensorDescription(
element=cutlass.bfloat16, layout=cutlass.RowMajor,
alignment=8
)
epilogue_functor = LinearCombination(
C.element, C.alignment,
math_inst.element_accumulator, cutlass.float32)
swizzling_functor = cutlass.IdentitySwizzle1
operation = GemmOperationUniversal(
arch=80, tile_description=tile_description,
A=A, B=B, C=C,
epilogue_functor=epilogue_functor, swizzling_functor=swizzling_functor
)
self.assertTrue(test_all_gemm(operation, "multistage"))
if __name__ == '__main__':
pycutlass.get_memory_pool(2**24, 2**24)
unittest.main()
| warp-main | warp/native/cutlass/tools/library/scripts/pycutlass/test/gemm/gemm_bf16_sm80.py |
import pycutlass
from pycutlass import *
from pycutlass.test import *
import unittest
from pycutlass.test.gemm_testbed import test_all_gemm
from pycutlass.utils.device import device_cc
@unittest.skipIf(device_cc() < 80, "Device compute capability is insufficient for SM80 tests.")
class GemmF16Sm80(unittest.TestCase):
def test_SM80_Device_Gemm_f32t_f32n_f32t_tensor_op_bf16_f32_128x128x32_64x64x32(self):
math_inst = MathInstruction(
instruction_shape=[16, 8, 16],
element_a=cutlass.float16, element_b=cutlass.float16,
element_accumulator=cutlass.float32, opcode_class=cutlass.OpClass.TensorOp,
math_operation=MathOperation.multiply_add
)
tile_description = TileDescription(
threadblock_shape=[128, 128, 32],
stages=3, warp_count=[2, 2, 1],
math_instruction=math_inst
)
A = TensorDescription(
element=cutlass.float16, layout=cutlass.ColumnMajor,
alignment=8
)
B = TensorDescription(
element=cutlass.float16, layout=cutlass.RowMajor,
alignment=8
)
C = TensorDescription(
element=cutlass.float32, layout=cutlass.ColumnMajor,
alignment=4
)
element_epilogue = cutlass.float32
epilogue_functor = LinearCombination(
C.element, C.alignment,
math_inst.element_accumulator, element_epilogue)
swizzling_functor = cutlass.BatchedIdentitySwizzle
operation = GemmOperationUniversal(
arch=80, tile_description=tile_description,
A=A, B=B, C=C,
epilogue_functor=epilogue_functor, swizzling_functor=swizzling_functor,
direct_store=True
)
self.assertTrue(test_all_gemm(operation, "universal"))
def test_SM80_Device_Gemm_f16n_f16n_f16t_tensor_op_f32_128x128x64_64x64x64(self):
math_inst = MathInstruction(
instruction_shape=[16, 8, 16],
element_a=cutlass.float16, element_b=cutlass.float16,
element_accumulator=cutlass.float32, opcode_class=cutlass.OpClass.TensorOp,
math_operation=MathOperation.multiply_add
)
tile_description = TileDescription(
threadblock_shape=[128, 128, 64],
stages=3, warp_count=[2, 2, 1],
math_instruction=math_inst
)
A = TensorDescription(
element=cutlass.float16, layout=cutlass.ColumnMajor,
alignment=8
)
B = TensorDescription(
element=cutlass.float16, layout=cutlass.ColumnMajor,
alignment=8
)
C = TensorDescription(
element=cutlass.float16, layout=cutlass.RowMajor,
alignment=8
)
element_epilogue = cutlass.float32
epilogue_functor = LinearCombination(
C.element, C.alignment,
math_inst.element_accumulator, element_epilogue)
swizzling_functor = cutlass.IdentitySwizzle1
operation = GemmOperationUniversal(
arch=80, tile_description=tile_description,
A=A, B=B, C=C,
epilogue_functor=epilogue_functor, swizzling_functor=swizzling_functor
)
self.assertTrue(test_all_gemm(operation, "universal"))
def test_SM80_Device_Gemm_f16n_f16n_f32n_tensor_op_f32_128x256x64_64x64x64(self):
math_inst = MathInstruction(
instruction_shape=[16, 8, 16],
element_a=cutlass.float16, element_b=cutlass.float16,
element_accumulator=cutlass.float32, opcode_class=cutlass.OpClass.TensorOp,
math_operation=MathOperation.multiply_add
)
tile_description = TileDescription(
threadblock_shape=[128, 256, 64],
stages=3, warp_count=[2, 4, 1],
math_instruction=math_inst
)
A = TensorDescription(
element=cutlass.float16, layout=cutlass.ColumnMajor,
alignment=8
)
B = TensorDescription(
element=cutlass.float16, layout=cutlass.ColumnMajor,
alignment=8
)
C = TensorDescription(
element=cutlass.float32, layout=cutlass.ColumnMajor,
alignment=4
)
element_epilogue = cutlass.float32
epilogue_functor = LinearCombination(
C.element, C.alignment,
math_inst.element_accumulator, element_epilogue)
swizzling_functor = cutlass.IdentitySwizzle1
operation = GemmOperationUniversal(
arch=80, tile_description=tile_description,
A=A, B=B, C=C,
epilogue_functor=epilogue_functor, swizzling_functor=swizzling_functor
)
self.assertTrue(test_all_gemm(operation, "universal"))
def test_SM80_Device_Gemm_f16n_f16n_f32t_tensor_op_f32_256x128x64_64x64x64(self):
math_inst = MathInstruction(
instruction_shape=[16, 8, 16],
element_a=cutlass.float16, element_b=cutlass.float16,
element_accumulator=cutlass.float32, opcode_class=cutlass.OpClass.TensorOp,
math_operation=MathOperation.multiply_add
)
tile_description = TileDescription(
threadblock_shape=[256, 128, 64],
stages=3, warp_count=[4, 2, 1],
math_instruction=math_inst
)
A = TensorDescription(
element=cutlass.float16, layout=cutlass.ColumnMajor,
alignment=8
)
B = TensorDescription(
element=cutlass.float16, layout=cutlass.ColumnMajor,
alignment=8
)
C = TensorDescription(
element=cutlass.float32, layout=cutlass.RowMajor,
alignment=4
)
element_epilogue = cutlass.float32
epilogue_functor = LinearCombination(
C.element, C.alignment,
math_inst.element_accumulator, element_epilogue)
swizzling_functor = cutlass.IdentitySwizzle1
operation = GemmOperationUniversal(
arch=80, tile_description=tile_description,
A=A, B=B, C=C,
epilogue_functor=epilogue_functor, swizzling_functor=swizzling_functor
)
self.assertTrue(test_all_gemm(operation, "universal"))
def test_SM80_Device_Gemm_f16n_f16t_f16t_tensor_op_f16_sliced_k_128x64x64_64x64x32(self):
math_inst = MathInstruction(
instruction_shape=[16, 8, 16],
element_a=cutlass.float16, element_b=cutlass.float16,
element_accumulator=cutlass.float16, opcode_class=cutlass.OpClass.TensorOp,
math_operation=MathOperation.multiply_add
)
tile_description = TileDescription(
threadblock_shape=[128, 64, 64],
stages=3, warp_count=[2, 1, 1],
math_instruction=math_inst
)
A = TensorDescription(
element=cutlass.float16, layout=cutlass.ColumnMajor,
alignment=8
)
B = TensorDescription(
element=cutlass.float16, layout=cutlass.RowMajor,
alignment=8
)
C = TensorDescription(
element=cutlass.float16, layout=cutlass.RowMajor,
alignment=4
)
element_epilogue = cutlass.float16
epilogue_functor = LinearCombination(
C.element, C.alignment,
math_inst.element_accumulator, element_epilogue)
swizzling_functor = cutlass.IdentitySwizzle1
operation = GemmOperationUniversal(
arch=80, tile_description=tile_description,
A=A, B=B, C=C,
epilogue_functor=epilogue_functor, swizzling_functor=swizzling_functor
)
self.assertTrue(test_all_gemm(operation, "universal"))
def test_SM80_Device_GemmUniversal_f16n_f16t_f32t_tensor_op_f32_64x64x32_32x32x32(self):
math_inst = MathInstruction(
instruction_shape=[16, 8, 16],
element_a=cutlass.float16, element_b=cutlass.float16,
element_accumulator=cutlass.float16, opcode_class=cutlass.OpClass.TensorOp,
math_operation=MathOperation.multiply_add
)
tile_description = TileDescription(
threadblock_shape=[64, 64, 32],
stages=10, warp_count=[2, 2, 1],
math_instruction=math_inst
)
A = TensorDescription(
element=cutlass.float16, layout=cutlass.ColumnMajor,
alignment=8
)
B = TensorDescription(
element=cutlass.float16, layout=cutlass.RowMajor,
alignment=8
)
C = TensorDescription(
element=cutlass.float16, layout=cutlass.RowMajor,
alignment=4
)
element_epilogue = cutlass.float16
epilogue_functor = LinearCombination(
C.element, C.alignment,
math_inst.element_accumulator, element_epilogue)
swizzling_functor = cutlass.IdentitySwizzle1
operation = GemmOperationUniversal(
arch=80, tile_description=tile_description,
A=A, B=B, C=C,
epilogue_functor=epilogue_functor, swizzling_functor=swizzling_functor
)
self.assertTrue(test_all_gemm(operation, "universal"))
def test_SM80_Device_Gemm_f16n_f16t_f32t_tensor_op_f32_256x128x64_64x64x64(self):
math_inst = MathInstruction(
instruction_shape=[16, 8, 16],
element_a=cutlass.float16, element_b=cutlass.float16,
element_accumulator=cutlass.float32, opcode_class=cutlass.OpClass.TensorOp,
math_operation=MathOperation.multiply_add
)
tile_description = TileDescription(
threadblock_shape=[256, 128, 64],
stages=3, warp_count=[4, 2, 1],
math_instruction=math_inst
)
A = TensorDescription(
element=cutlass.float16, layout=cutlass.ColumnMajor,
alignment=8
)
B = TensorDescription(
element=cutlass.float16, layout=cutlass.RowMajor,
alignment=8
)
C = TensorDescription(
element=cutlass.float16, layout=cutlass.RowMajor,
alignment=8
)
element_epilogue = cutlass.float32
epilogue_functor = LinearCombination(
C.element, C.alignment,
math_inst.element_accumulator, element_epilogue)
swizzling_functor = cutlass.IdentitySwizzle1
operation = GemmOperationUniversal(
arch=80, tile_description=tile_description,
A=A, B=B, C=C,
epilogue_functor=epilogue_functor, swizzling_functor=swizzling_functor
)
self.assertTrue(test_all_gemm(operation, "universal"))
def test_SM80_Device_Gemm_f16t_f16n_f16t_tensor_op_f16_sliced_k_128x64x64_64x64x32(self):
math_inst = MathInstruction(
instruction_shape=[16, 8, 16],
element_a=cutlass.float16, element_b=cutlass.float16,
element_accumulator=cutlass.float32, opcode_class=cutlass.OpClass.TensorOp,
math_operation=MathOperation.multiply_add
)
tile_description = TileDescription(
threadblock_shape=[128, 64, 64],
stages=3, warp_count=[2, 1, 1],
math_instruction=math_inst
)
A = TensorDescription(
element=cutlass.float16, layout=cutlass.RowMajor,
alignment=8
)
B = TensorDescription(
element=cutlass.float16, layout=cutlass.ColumnMajor,
alignment=8
)
C = TensorDescription(
element=cutlass.float16, layout=cutlass.RowMajor,
alignment=4
)
element_epilogue = cutlass.float32
epilogue_functor = LinearCombination(
C.element, C.alignment,
math_inst.element_accumulator, element_epilogue)
swizzling_functor = cutlass.IdentitySwizzle1
operation = GemmOperationUniversal(
arch=80, tile_description=tile_description,
A=A, B=B, C=C,
epilogue_functor=epilogue_functor, swizzling_functor=swizzling_functor
)
self.assertTrue(test_all_gemm(operation, "universal"))
def test_SM80_Device_Gemm_f16t_f16t_f32n_tensor_op_f32_128x256x64_64x64x64(self):
math_inst = MathInstruction(
instruction_shape=[16, 8, 16],
element_a=cutlass.float16, element_b=cutlass.float16,
element_accumulator=cutlass.float32, opcode_class=cutlass.OpClass.TensorOp,
math_operation=MathOperation.multiply_add
)
tile_description = TileDescription(
threadblock_shape=[128, 256, 64],
stages=3, warp_count=[2, 4, 1],
math_instruction=math_inst
)
A = TensorDescription(
element=cutlass.float16, layout=cutlass.RowMajor,
alignment=8
)
B = TensorDescription(
element=cutlass.float16, layout=cutlass.RowMajor,
alignment=8
)
C = TensorDescription(
element=cutlass.float16, layout=cutlass.ColumnMajor,
alignment=8
)
element_epilogue = cutlass.float32
epilogue_functor = LinearCombination(
C.element, C.alignment,
math_inst.element_accumulator, element_epilogue)
swizzling_functor = cutlass.IdentitySwizzle1
operation = GemmOperationUniversal(
arch=80, tile_description=tile_description,
A=A, B=B, C=C,
epilogue_functor=epilogue_functor, swizzling_functor=swizzling_functor
)
self.assertTrue(test_all_gemm(operation, "universal"))
def test_SM80_Device_Gemm_f16t_f16t_f32t_tensor_op_f32_128x256x64_64x64x64(self):
math_inst = MathInstruction(
instruction_shape=[16, 8, 16],
element_a=cutlass.float16, element_b=cutlass.float16,
element_accumulator=cutlass.float32, opcode_class=cutlass.OpClass.TensorOp,
math_operation=MathOperation.multiply_add
)
tile_description = TileDescription(
threadblock_shape=[128, 256, 64],
stages=3, warp_count=[2, 4, 1],
math_instruction=math_inst
)
A = TensorDescription(
element=cutlass.float16, layout=cutlass.ColumnMajor,
alignment=8
)
B = TensorDescription(
element=cutlass.float16, layout=cutlass.ColumnMajor,
alignment=8
)
C = TensorDescription(
element=cutlass.float32, layout=cutlass.ColumnMajor,
alignment=4
)
element_epilogue = cutlass.float32
epilogue_functor = LinearCombination(
C.element, C.alignment,
math_inst.element_accumulator, element_epilogue)
swizzling_functor = cutlass.IdentitySwizzle1
operation = GemmOperationUniversal(
arch=80, tile_description=tile_description,
A=A, B=B, C=C,
epilogue_functor=epilogue_functor, swizzling_functor=swizzling_functor
)
self.assertTrue(test_all_gemm(operation, "universal"))
if __name__ == '__main__':
pycutlass.get_memory_pool(2**24, 2**24)
unittest.main()
| warp-main | warp/native/cutlass/tools/library/scripts/pycutlass/test/gemm/gemm_f16_sm80.py |
import pycutlass
from pycutlass import *
from pycutlass.memory_manager import get_allocated_size
from pycutlass.test import *
import unittest
from pycutlass.test.gemm_testbed import test_all_gemm
from pycutlass.utils.device import device_cc
@unittest.skipIf(device_cc() < 80, "Device compute capability is insufficient for SM80 tests.")
class GemmF32nF32nF32nTensorOpF32Sm80(unittest.TestCase):
def test_SM80_Device_Gemm_f32t_f32n_f32t_tensor_op_bf16_f32_128x128x32_64x64x32(self):
math_inst = MathInstruction(
instruction_shape=[16, 8, 8],
element_a=cutlass.float32, element_b=cutlass.float32,
element_accumulator=cutlass.float32, opcode_class=cutlass.OpClass.TensorOp,
math_operation=MathOperation.multiply_add_fast_bf16
)
tile_description = TileDescription(
threadblock_shape=[128, 128, 32],
stages=3, warp_count=[2, 2, 1],
math_instruction=math_inst
)
A = TensorDescription(
element=cutlass.float32, layout=cutlass.RowMajor,
alignment=4
)
B = TensorDescription(
element=cutlass.float32, layout=cutlass.ColumnMajor,
alignment=4
)
C = TensorDescription(
element=cutlass.float32, layout=cutlass.RowMajor,
alignment=4
)
element_epilogue = cutlass.float32
epilogue_functor = LinearCombination(
C.element, C.alignment,
math_inst.element_accumulator, element_epilogue)
swizzling_functor = cutlass.IdentitySwizzle1
operation = GemmOperationUniversal(
arch=80, tile_description=tile_description,
A=A, B=B, C=C,
epilogue_functor=epilogue_functor, swizzling_functor=swizzling_functor
)
self.assertTrue(test_all_gemm(operation, "universal"))
def test_SM80_Device_Gemm_f32n_f32n_f32t_tensor_op_f32_128x128x32_64x64x32(self):
math_inst = MathInstruction(
instruction_shape=[16, 8, 8],
element_a=cutlass.float32, element_b=cutlass.float32,
element_accumulator=cutlass.float32, opcode_class=cutlass.OpClass.TensorOp,
math_operation=MathOperation.multiply_add
)
tile_description = TileDescription(
threadblock_shape=[128, 128, 32],
stages=3, warp_count=[2, 2, 1],
math_instruction=math_inst
)
A = TensorDescription(
element=cutlass.float32, layout=cutlass.ColumnMajor,
alignment=4
)
B = TensorDescription(
element=cutlass.float32, layout=cutlass.ColumnMajor,
alignment=4
)
C = TensorDescription(
element=cutlass.float32, layout=cutlass.RowMajor,
alignment=4
)
element_epilogue = cutlass.float32
epilogue_functor = LinearCombination(
C.element, C.alignment,
math_inst.element_accumulator, element_epilogue)
swizzling_functor = cutlass.IdentitySwizzle1
operation = GemmOperationUniversal(
arch=80, tile_description=tile_description,
A=A, B=B, C=C,
epilogue_functor=epilogue_functor, swizzling_functor=swizzling_functor
)
self.assertTrue(test_all_gemm(operation, "universal"))
def test_SM80_Device_Gemm_f32n_f32n_f32t_tensor_op_fast_accurate_f32_64x64x32_32x32x32(self):
math_inst = MathInstruction(
instruction_shape=[16, 8, 8],
element_a=cutlass.float32, element_b=cutlass.float32,
element_accumulator=cutlass.float32, opcode_class=cutlass.OpClass.TensorOp,
math_operation=MathOperation.multiply_add_fast_f32
)
tile_description = TileDescription(
threadblock_shape=[64, 64, 32],
stages=3, warp_count=[2, 2, 1],
math_instruction=math_inst
)
A = TensorDescription(
element=cutlass.float32, layout=cutlass.ColumnMajor,
alignment=4
)
B = TensorDescription(
element=cutlass.float32, layout=cutlass.ColumnMajor,
alignment=4
)
C = TensorDescription(
element=cutlass.float32, layout=cutlass.RowMajor,
alignment=4
)
element_epilogue = cutlass.float32
epilogue_functor = LinearCombination(
C.element, C.alignment,
math_inst.element_accumulator, element_epilogue)
swizzling_functor = cutlass.IdentitySwizzle1
operation = GemmOperationUniversal(
arch=80, tile_description=tile_description,
A=A, B=B, C=C,
epilogue_functor=epilogue_functor, swizzling_functor=swizzling_functor
)
self.assertTrue(test_all_gemm(operation, "universal"))
if __name__ == '__main__':
pycutlass.get_memory_pool(2**24, 2**24)
pycutlass.compiler.load_from_cache()
unittest.main()
| warp-main | warp/native/cutlass/tools/library/scripts/pycutlass/test/gemm/gemm_f32_sm80.py |
import pycutlass
from pycutlass import *
from pycutlass.epilogue import LinearCombinationClamp
from pycutlass.test import *
import unittest
from pycutlass.test.gemm_testbed import test_all_gemm
from pycutlass.utils.device import device_cc
@unittest.skipIf(device_cc() < 80, "Device compute capability is insufficient for SM80 tests.")
class GemmS8TensorOpF32Sm80(unittest.TestCase):
def test_SM80_Device_Gemm_s8t_s8n_s8t_tensor_op_s32_64x64x64_32x32x64(self):
math_inst = MathInstruction(
instruction_shape=[16, 8, 32],
element_a=cutlass.int8, element_b=cutlass.int8,
element_accumulator=cutlass.int32, opcode_class=cutlass.OpClass.TensorOp,
math_operation=MathOperation.multiply_add_saturate
)
tile_description = TileDescription(
threadblock_shape=[64, 64, 64],
stages=6, warp_count=[2, 2, 1],
math_instruction=math_inst
)
A = TensorDescription(
element=cutlass.int8, layout=cutlass.ColumnMajorInterleaved32,
alignment=16
)
B = TensorDescription(
element=cutlass.int8, layout=cutlass.RowMajorInterleaved32,
alignment=16
)
C = TensorDescription(
element=cutlass.int8, layout=cutlass.ColumnMajorInterleaved32,
alignment=8
)
epilogue_functor = FastLinearCombinationClamp(
C.element, C.alignment
)
swizzling_functor = cutlass.IdentitySwizzle1
operation = GemmOperationUniversal(
arch=80, tile_description=tile_description,
A=A, B=B, C=C,
epilogue_functor=epilogue_functor, swizzling_functor=swizzling_functor
)
self.assertTrue(test_all_gemm(operation, "interleaved"))
def test_SM80_Device_Gemm_s8t_s8n_s8t_tensor_op_s32_256x128x128_64x64x128(self):
math_inst = MathInstruction(
instruction_shape=[16, 8, 32],
element_a=cutlass.int8, element_b=cutlass.int8,
element_accumulator=cutlass.int32, opcode_class=cutlass.OpClass.TensorOp,
math_operation=MathOperation.multiply_add
)
tile_description = TileDescription(
threadblock_shape=[128, 128, 128],
stages=3, warp_count=[2, 2, 1],
math_instruction=math_inst
)
A = TensorDescription(
element=cutlass.int8, layout=cutlass.RowMajor,
alignment=16
)
B = TensorDescription(
element=cutlass.int8, layout=cutlass.ColumnMajor,
alignment=16
)
C = TensorDescription(
element=cutlass.int8, layout=cutlass.RowMajor,
alignment=16
)
epilogue_functor = FastLinearCombinationClamp(
C.element, C.alignment
)
swizzling_functor = cutlass.IdentitySwizzle1
operation = GemmOperationUniversal(
arch=80, tile_description=tile_description,
A=A, B=B, C=C,
epilogue_functor=epilogue_functor, swizzling_functor=swizzling_functor
)
self.assertTrue(test_all_gemm(operation, "multistage"))
def test_SM80_Device_Gemm_s8t_s8n_s8n_tensor_op_s32_128x128x128_64x64x128(self):
math_inst = MathInstruction(
instruction_shape=[16, 8, 32],
element_a=cutlass.int8, element_b=cutlass.int8,
element_accumulator=cutlass.int32, opcode_class=cutlass.OpClass.TensorOp,
math_operation=MathOperation.multiply_add
)
tile_description = TileDescription(
threadblock_shape=[128, 128, 128],
stages=3, warp_count=[2, 2, 1],
math_instruction=math_inst
)
A = TensorDescription(
element=cutlass.int8, layout=cutlass.RowMajor,
alignment=16
)
B = TensorDescription(
element=cutlass.int8, layout=cutlass.ColumnMajor,
alignment=16
)
C = TensorDescription(
element=cutlass.int8, layout=cutlass.ColumnMajor,
alignment=16
)
epilogue_functor = FastLinearCombinationClamp(
C.element, C.alignment
)
swizzling_functor = cutlass.IdentitySwizzle1
operation = GemmOperationUniversal(
arch=80, tile_description=tile_description,
A=A, B=B, C=C,
epilogue_functor=epilogue_functor, swizzling_functor=swizzling_functor
)
self.assertTrue(test_all_gemm(operation, "multistage"))
def test_SM80_Device_Gemm_s8t_s8n_s32n_tensor_op_s32_128x128x128_64x64x128(self):
math_inst = MathInstruction(
instruction_shape=[16, 8, 32],
element_a=cutlass.int8, element_b=cutlass.int8,
element_accumulator=cutlass.int32, opcode_class=cutlass.OpClass.TensorOp,
math_operation=MathOperation.multiply_add
)
tile_description = TileDescription(
threadblock_shape=[128, 128, 128],
stages=3, warp_count=[2, 2, 1],
math_instruction=math_inst
)
A = TensorDescription(
element=cutlass.int8, layout=cutlass.RowMajor,
alignment=16
)
B = TensorDescription(
element=cutlass.int8, layout=cutlass.ColumnMajor,
alignment=16
)
C = TensorDescription(
element=cutlass.int32, layout=cutlass.ColumnMajor,
alignment=4
)
element_epilogue = cutlass.int32
epilogue_functor = LinearCombinationClamp(
C.element, C.alignment, math_inst.element_accumulator,
element_epilogue
)
swizzling_functor = cutlass.IdentitySwizzle1
operation = GemmOperationUniversal(
arch=80, tile_description=tile_description,
A=A, B=B, C=C,
epilogue_functor=epilogue_functor, swizzling_functor=swizzling_functor
)
self.assertTrue(test_all_gemm(operation, "multistage"))
def test_SM80_Device_Gemm_s8t_s8n_s32t_tensor_op_s32_128x128x128_64x64x128(self):
math_inst = MathInstruction(
instruction_shape=[16, 8, 32],
element_a=cutlass.int8, element_b=cutlass.int8,
element_accumulator=cutlass.int32, opcode_class=cutlass.OpClass.TensorOp,
math_operation=MathOperation.multiply_add
)
tile_description = TileDescription(
threadblock_shape=[128, 128, 128],
stages=3, warp_count=[2, 2, 1],
math_instruction=math_inst
)
A = TensorDescription(
element=cutlass.int8, layout=cutlass.RowMajor,
alignment=16
)
B = TensorDescription(
element=cutlass.int8, layout=cutlass.ColumnMajor,
alignment=16
)
C = TensorDescription(
element=cutlass.int32, layout=cutlass.RowMajor,
alignment=4
)
element_epilogue = cutlass.int32
epilogue_functor = LinearCombinationClamp(
C.element, C.alignment, math_inst.element_accumulator,
element_epilogue
)
swizzling_functor = cutlass.IdentitySwizzle1
operation = GemmOperationUniversal(
arch=80, tile_description=tile_description,
A=A, B=B, C=C,
epilogue_functor=epilogue_functor, swizzling_functor=swizzling_functor
)
self.assertTrue(test_all_gemm(operation, "multistage"))
if __name__ == '__main__':
pycutlass.get_memory_pool(2**24, 2**24)
unittest.main()
| warp-main | warp/native/cutlass/tools/library/scripts/pycutlass/test/gemm/gemm_s8_sm80.py |
import pycutlass
from pycutlass import *
from pycutlass.test import *
import unittest
from pycutlass.test.gemm_grouped_testbed import TestbedGrouped
from pycutlass.utils.device import device_cc
@unittest.skipIf(device_cc() < 80, "Device compute capability is insufficient for SM80 tests.")
class GemmGroupedSm80(unittest.TestCase):
def test_SM80_Device_GemmGrouped_f16n_f16t_f32n_tensor_op_f32_128x128x32_64x64x32(self):
math_inst = MathInstruction(
instruction_shape=[16, 8, 16], element_a=cutlass.float16,
element_b=cutlass.float16, element_accumulator=cutlass.float32,
opcode_class=cutlass.OpClass.TensorOp,
math_operation=MathOperation.multiply_add
)
tile_description = TileDescription(
threadblock_shape=[128, 128, 32],
stages=3, warp_count=[2, 2, 1],
math_instruction=math_inst
)
A = TensorDescription(
element=cutlass.float16, layout=cutlass.ColumnMajor,
alignment=8
)
B = TensorDescription(
element=cutlass.float16, layout=cutlass.ColumnMajor,
alignment=8
)
C = TensorDescription(
element=cutlass.float32, layout=cutlass.ColumnMajor,
alignment=4
)
element_epilogue = cutlass.float32
epilogue_functor = LinearCombination(
C.element, C.alignment,
math_inst.element_accumulator, element_epilogue)
swizzling_functor = cutlass.BatchedIdentitySwizzle
for precompute_mode in [SchedulerMode.Device, SchedulerMode.Host]:
operation = GemmOperationGrouped(
80,
tile_description, A, B, C,
epilogue_functor, swizzling_functor,
precompute_mode=precompute_mode
)
testbed = TestbedGrouped(operation=operation)
self.assertTrue(testbed.run(24))
def test_SM80_Device_GemmGrouped_f64t_f64t_f64n_tensor_op_f64_64x64x16_32x32x16(self):
math_inst = MathInstruction(
instruction_shape=[8, 8, 4], element_a=cutlass.float64,
element_b=cutlass.float64, element_accumulator=cutlass.float64,
opcode_class=cutlass.OpClass.TensorOp,
math_operation=MathOperation.multiply_add
)
tile_description = TileDescription(
threadblock_shape=[64, 64, 16],
stages=4, warp_count=[2, 2, 1],
math_instruction=math_inst
)
A = TensorDescription(
element=cutlass.float64, layout=cutlass.RowMajor,
alignment=1
)
B = TensorDescription(
element=cutlass.float64, layout=cutlass.RowMajor,
alignment=1
)
C = TensorDescription(
element=cutlass.float64, layout=cutlass.ColumnMajor,
alignment=1
)
element_epilogue = cutlass.float64
epilogue_functor = LinearCombination(
C.element, C.alignment,
math_inst.element_accumulator, element_epilogue)
swizzling_functor = cutlass.BatchedIdentitySwizzle
for precompute_mode in [SchedulerMode.Device, SchedulerMode.Host]:
operation = GemmOperationGrouped(
80,
tile_description, A, B, C,
epilogue_functor, swizzling_functor,
precompute_mode=precompute_mode
)
testbed = TestbedGrouped(operation=operation)
self.assertTrue(testbed.run(24))
def test_SM80_Device_GemmGrouped_f32t_f32t_f32t_simt_f32_128x64x8_64x32x1(self):
math_inst = MathInstruction(
instruction_shape=[1, 1, 1], element_a=cutlass.float32,
element_b=cutlass.float32, element_accumulator=cutlass.float32,
opcode_class=cutlass.OpClass.Simt,
math_operation=MathOperation.multiply_add
)
tile_description = TileDescription(
threadblock_shape=[128, 64, 8],
stages=4, warp_count=[2, 2, 1],
math_instruction=math_inst
)
A = TensorDescription(
element=cutlass.float32, layout=cutlass.RowMajor,
alignment=1
)
B = TensorDescription(
element=cutlass.float32, layout=cutlass.RowMajor,
alignment=1
)
C = TensorDescription(
element=cutlass.float32, layout=cutlass.RowMajor,
alignment=1
)
element_epilogue = cutlass.float32
epilogue_functor = LinearCombination(
C.element, C.alignment,
math_inst.element_accumulator, element_epilogue)
swizzling_functor = cutlass.BatchedIdentitySwizzle
for precompute_mode in [SchedulerMode.Device, SchedulerMode.Host]:
operation = GemmOperationGrouped(
80,
tile_description, A, B, C,
epilogue_functor, swizzling_functor,
precompute_mode=precompute_mode
)
testbed = TestbedGrouped(operation=operation)
self.assertTrue(testbed.run(27))
def test_SM80_Device_GemmGrouped_f16n_f16t_f32n_tensor_op_f32_128x128x32_64x64x32_cache(self):
math_inst = MathInstruction(
instruction_shape=[16, 8, 16], element_a=cutlass.float16,
element_b=cutlass.float16, element_accumulator=cutlass.float32,
opcode_class=cutlass.OpClass.TensorOp,
math_operation=MathOperation.multiply_add
)
tile_description = TileDescription(
threadblock_shape=[128, 128, 32],
stages=3, warp_count=[2, 2, 1],
math_instruction=math_inst
)
A = TensorDescription(
element=cutlass.float16, layout=cutlass.ColumnMajor,
alignment=8
)
B = TensorDescription(
element=cutlass.float16, layout=cutlass.ColumnMajor,
alignment=8
)
C = TensorDescription(
element=cutlass.float32, layout=cutlass.ColumnMajor,
alignment=4
)
element_epilogue = cutlass.float32
epilogue_functor = LinearCombination(
C.element, C.alignment,
math_inst.element_accumulator, element_epilogue)
swizzling_functor = cutlass.BatchedIdentitySwizzle
for precompute_mode in [SchedulerMode.Device, SchedulerMode.Host]:
operation = GemmOperationGrouped(
80,
tile_description, A, B, C,
epilogue_functor, swizzling_functor,
precompute_mode=precompute_mode
)
testbed = TestbedGrouped(operation=operation)
self.assertTrue(testbed.run(5))
if __name__ == '__main__':
pycutlass.get_memory_pool(2**26, 2**26)
unittest.main()
| warp-main | warp/native/cutlass/tools/library/scripts/pycutlass/test/gemm/gemm_grouped_sm80.py |
import pycutlass
import unittest
if __name__ == '__main__':
pycutlass.get_memory_pool(2**26, 2**26)
loader = unittest.TestLoader()
tests = loader.discover('./', 'gemm_*.py')
testRunner = unittest.runner.TextTestRunner()
testRunner.run(tests)
| warp-main | warp/native/cutlass/tools/library/scripts/pycutlass/test/gemm/run_all_tests.py |
#################################################################################################
#
# Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, as shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = 'PyCutlass'
copyright = '2022, Zhaodong Chen; Andrew Kerr; Haicheng Wu; Szymon Migacz; Graham Markall'
author = 'Zhaodong Chen; Andrew Kerr; Haicheng Wu; Szymon Migacz; Graham Markall'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.duration',
'sphinx.ext.doctest',
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'enum_tools.autoenum',
'sphinx.ext.autosummary',
'm2r2'
]
source_suffix = [".rst", ".md"]
autosummary_generate = True
autosummary_imported_members = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'bizstyle'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static']
| warp-main | warp/native/cutlass/tools/library/scripts/pycutlass/docs/source/conf.py |
#################################################################################################
#
# Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
from pycutlass import *
import pycutlass
from pycutlass.epilogue import LinearCombination
from pycutlass.test.conv2d_testbed import Conv2dLauncher
if __name__ == "__main__":
pycutlass.get_memory_pool(2**33, 2**33)
pycutlass.compiler.nvcc()
math_inst = MathInstruction(
instruction_shape=[16, 8, 16],
element_a=cutlass.float16, element_b=cutlass.float16,
element_accumulator=cutlass.float32, opcode_class=cutlass.OpClass.TensorOp,
math_operation=MathOperation.multiply_add
)
A = TensorDescription(
element=math_inst.element_a,
layout=cutlass.TensorNHWC,
alignment=8)
B = TensorDescription(
element=math_inst.element_b,
layout=cutlass.TensorNHWC,
alignment=8)
C = TensorDescription(
element=cutlass.float32,
layout=cutlass.TensorNHWC,
alignment=8)
tile_description = TileDescription(
threadblock_shape=[128, 128, 64], stages=4,
warp_count=[2, 2, 1],
math_instruction=math_inst
)
epilogue_functor = LinearCombination(cutlass.float32, 4, cutlass.float32, cutlass.float32)
operation = Conv2dOperation(
conv_kind=cutlass.conv.Operator.fprop, iterator_algorithm=cutlass.conv.IteratorAlgorithm.optimized,
arch=80, tile_description=tile_description, A=A, B=B, C=C,
element_epilogue=cutlass.float32, stride_support=StrideSupport.Strided,
epilogue_functor=epilogue_functor,
swizzling_functor=cutlass.IdentitySwizzle1
)
profiler = Conv2dLauncher(operation, verification=False, profiling=True)
python_runtime = profiler.run(
problem_size = cutlass.conv.Conv2dProblemSize(
cutlass.Tensor4DCoord(32, 224, 224, 128),
cutlass.Tensor4DCoord(128, 3, 3, 128),
cutlass.Tensor4DCoord(1, 1, 1, 1),
cutlass.MatrixCoord(1, 1),
cutlass.MatrixCoord(1, 1),
cutlass.conv.Mode.cross_correlation,
1, 1
), split_k_mode=cutlass.conv.SplitKMode.Serial
)
cpp_runtime = profiler.run_cutlass_profiler(
problem_size = cutlass.conv.Conv2dProblemSize(
cutlass.Tensor4DCoord(32, 224, 224, 128),
cutlass.Tensor4DCoord(128, 3, 3, 128),
cutlass.Tensor4DCoord(1, 1, 1, 1),
cutlass.MatrixCoord(1, 1),
cutlass.MatrixCoord(1, 1),
cutlass.conv.Mode.cross_correlation,
1, 1
), split_k_mode=cutlass.conv.SplitKMode.Serial
)
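# Relative performance: a ratio greater than 1.0 means the Python-launched
# kernel finished in less time than the cutlass_profiler baseline.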
print(cpp_runtime / python_runtime)
| warp-main | warp/native/cutlass/tools/library/scripts/pycutlass/profile/conv/conv2d_f16_sm80.py |
#################################################################################################
#
# Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
import pycutlass
from pycutlass import *
from pycutlass.test import *
from pycutlass.test.gemm_testbed import GemmUniversalLauncher
if __name__ == '__main__':
pycutlass.get_memory_pool(2**32, 2**32)
pycutlass.compiler.nvcc()
math_inst = MathInstruction(
instruction_shape=[16, 8, 16],
element_a=cutlass.float16, element_b=cutlass.float16,
element_accumulator=cutlass.float32, opcode_class=cutlass.OpClass.TensorOp,
math_operation=MathOperation.multiply_add
)
tile_description = TileDescription(
threadblock_shape=[256, 128, 32],
stages=3, warp_count=[4, 2, 1],
math_instruction=math_inst
)
A = TensorDescription(
element=cutlass.float16, layout=cutlass.RowMajor,
alignment=4
)
B = TensorDescription(
element=cutlass.float16, layout=cutlass.RowMajor,
alignment=4
)
C = TensorDescription(
element=cutlass.float32, layout=cutlass.ColumnMajor,
alignment=4
)
element_epilogue = cutlass.float32
epilogue_functor = LinearCombination(cutlass.float32, 4, cutlass.float32, cutlass.float32)
swizzling_functor = cutlass.IdentitySwizzle1
operation = GemmOperationUniversal(
arch=80, tile_description=tile_description,
A=A, B=B, C=C, element_epilogue=element_epilogue,
epilogue_functor=epilogue_functor, swizzling_functor=swizzling_functor
)
profiler = GemmUniversalLauncher(operation, verification=False, profiling=True)
python_runtime = profiler.run(
mode=cutlass.gemm.Mode.Gemm,
problem_size=cutlass.gemm.GemmCoord(4096, 4096, 4096)
)
cpp_runtime = profiler.run_cutlass_profiler(
mode=cutlass.gemm.Mode.Gemm,
problem_size=cutlass.gemm.GemmCoord(4096, 4096, 4096),
)
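# As above, this prints the cutlass_profiler runtime divided by the Python
# runtime; values above 1.0 favor the Python launcher.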
print(cpp_runtime / python_runtime)
| warp-main | warp/native/cutlass/tools/library/scripts/pycutlass/profile/gemm/gemm_f32_sm80.py |
#################################################################################################
#
# Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
import ctypes
from pycutlass.library import *
# 12B: mirrors cutlass::gemm::GemmCoord (three 32-bit ints)
class GemmCoord_(ctypes.Structure):
_fields_ = [
("m", ctypes.c_int),
("n", ctypes.c_int),
("k", ctypes.c_int)
]
def __init__(self, gemm_coord) -> None:
for field_name, _ in self._fields_:
setattr(self, field_name, getattr(gemm_coord, field_name)())
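# Sanity-check sketch (illustrative, not part of the original file): three
# 32-bit fields should mirror the 12-byte cutlass::gemm::GemmCoord exactly.
assert ctypes.sizeof(GemmCoord_) == 12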
class MatrixCoord_(ctypes.Structure):
_fields_ = [
("row", ctypes.c_int),
("column", ctypes.c_int)
]
dtype2ctype = {
cutlass.float16: ctypes.c_uint16,
cutlass.float32: ctypes.c_float,
cutlass.float64: ctypes.c_double,
cutlass.int32: ctypes.c_int32
}
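# Minimal usage sketch (hypothetical helper, not in the original file): wrap a
# Python scalar in the ctypes mirror of its cutlass dtype, e.g. when packing
# alpha/beta epilogue scalars.
def as_ctype_scalar(dtype, value):
    # as_ctype_scalar(cutlass.float32, 1.0) -> ctypes.c_float(1.0)
    return dtype2ctype[dtype](value)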
def get_gemm_arguments(epilogue_functor):
_EpilogueOutputOpParams = epilogue_functor.epilogue_type
class _GemmArguments(ctypes.Structure):
_fields_ = [
# Arguments from UniversalArgumentsBase
("mode", ctypes.c_int),
("problem_size", GemmCoord_),
("batch_count", ctypes.c_int),
("batch_stride_D", ctypes.c_longlong),
# Remaining arguments
("epilogue", _EpilogueOutputOpParams),
("ptr_A", ctypes.c_void_p),
("ptr_B", ctypes.c_void_p),
("ptr_C", ctypes.c_void_p),
("ptr_D", ctypes.c_void_p),
("batch_stride_A", ctypes.c_longlong),
("batch_stride_B", ctypes.c_longlong),
("batch_stride_C", ctypes.c_longlong),
("stride_a", ctypes.c_longlong),
("stride_b", ctypes.c_longlong),
("stride_c", ctypes.c_longlong),
("stride_d", ctypes.c_longlong),
("lda", ctypes.c_longlong),
("ldb", ctypes.c_longlong),
("ldc", ctypes.c_longlong),
("ldd", ctypes.c_longlong),
("ptr_gather_A_indices", ctypes.c_void_p),
("ptr_gether_B_indices", ctypes.c_void_p),
("ptr_scatter_D_indices", ctypes.c_void_p)
]
return _GemmArguments, _EpilogueOutputOpParams
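# Debug sketch (hypothetical helper, not in the original file): list each field
# of the generated arguments struct with its byte offset, useful for checking
# the layout against the C++ GemmUniversal::Arguments definition.
def _gemm_argument_field_offsets(epilogue_functor):
    arguments_type, _ = get_gemm_arguments(epilogue_functor)
    return [(name, getattr(arguments_type, name).offset)
            for name, _ in arguments_type._fields_]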
###########################################################################################
# GEMM Grouped
###########################################################################################
# include/cutlass/gemm/kernel/gemm_grouped.h
def get_gemm_grouped_arguments(epilogue_functor):
_EpilogueOutputOpParams = epilogue_functor.epilogue_type
class _GEMMGroupedArguments(ctypes.Structure):
_fields_ = [
("problem_sizes", ctypes.c_void_p),
("problem_count", ctypes.c_int),
("threadblock_count", ctypes.c_int),
("output_op", _EpilogueOutputOpParams),
("ptr_A", ctypes.c_void_p),
("ptr_B", ctypes.c_void_p),
("ptr_C", ctypes.c_void_p),
("ptr_D", ctypes.c_void_p),
("lda", ctypes.c_void_p),
("ldb", ctypes.c_void_p),
("ldc", ctypes.c_void_p),
("ldd", ctypes.c_void_p),
("host_problem_sizes", ctypes.c_void_p)
]
return _GEMMGroupedArguments, _EpilogueOutputOpParams
############################################################################################
# Convolution2D
############################################################################################
# We use the arguments as the interface
# include/cutlass/conv/conv2d_problem_size.h
# 64B
class Conv2DProblemSize(ctypes.Structure):
_fields_ = [
("N", ctypes.c_int),
("H", ctypes.c_int),
("W", ctypes.c_int),
("C", ctypes.c_int),
("P", ctypes.c_int),
("Q", ctypes.c_int),
("K", ctypes.c_int),
("R", ctypes.c_int),
("S", ctypes.c_int),
("pad_h", ctypes.c_int),
("pad_w", ctypes.c_int),
("stride_h", ctypes.c_int),
("stride_w", ctypes.c_int),
("dilation_h", ctypes.c_int),
("dilation_w", ctypes.c_int),
("mode", ctypes.c_int), # kCrossCorrelation: 0, kConvolution: 1
("split_k_slices", ctypes.c_int),
("groups", ctypes.c_int)
]
def __init__(self, problem_size) -> None:
for field_name, _ in self._fields_:
setattr(self, field_name, getattr(problem_size, field_name))
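    # The output extents follow the usual convolution output-size formula,
    # e.g. for the height dimension:
    #   P = (H + 2 * pad_h - dilation_h * (R - 1) - 1) // stride_h + 1
    # For instance H=56, R=3, pad_h=1, stride_h=1, dilation_h=1 gives P=56.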
# include/cutlass/layout/tensor.h
# 12B
class Layout4D(ctypes.Structure):
_fields_ = [
("stride", ctypes.c_int * 3)
]
def __init__(self, tensor_ref):
stride = tensor_ref.stride()
setattr(self, "stride", (stride.at(0), stride.at(1), stride.at(2)))
# TODO: Tensor 5-D takes ("stride", ctypes.c_int * 4)
# include/cutlass/conv/threadblock/conv2d_dgrad_filter_tile_access_iterator_optimized.h
# TensorRef is basically cutlass::TensorRef<Element, Layout>;
# include/cutlass/tensor_ref.h
# 24B
class TensorRef_(ctypes.Structure):
_fields_ = [
("ptr", ctypes.c_void_p),
("layout", Layout4D)
]
def __init__(self, tensor_ref):
setattr(self, "ptr", tensor_ref.data())
setattr(self, "layout", Layout4D(tensor_ref.layout()))
class TensorRef2D_(ctypes.Structure):
_fields_ = [
("ptr", ctypes.c_void_p),
("stride", ctypes.c_int)
]
# include/cutlass/conv/kernel/implicit_gemm_convolution.h
# split_k_mode: kNone: 0, kSerial: 1, kParallel: 2, kParallelSerial: 3, kInvalid: 4
def get_conv2d_arguments(epilogue_functor):
_EpilogueOutputOpParams = epilogue_functor.epilogue_type
class _Conv2dArguments(ctypes.Structure):
_fields_ = [
("problem_size", Conv2DProblemSize), # 0
("ref_A", TensorRef_), # 72
("ref_B", TensorRef_), # 96
("ref_C", TensorRef_), # 120
("ref_D", TensorRef_), # 144
("output_op", _EpilogueOutputOpParams), # 168
("split_k_mode", ctypes.c_int) # 192
]
return _Conv2dArguments, _EpilogueOutputOpParams
############################################################################################
# Reduction
############################################################################################
def get_reduction_params(epilogue_functor):
_EpilogueOutputParams = epilogue_functor.epilogue_type
class _ReductionParams(ctypes.Structure):
_fields_ = [
("problem_size", MatrixCoord_),
("partitions", ctypes.c_int),
("partition_stride", ctypes.c_longlong),
("workspace", TensorRef2D_),
("destination", TensorRef2D_),
("source", TensorRef2D_),
("output_op", _EpilogueOutputParams)
]
return _ReductionParams, _EpilogueOutputParams
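# These parameters drive the device-side reduction used by split-K parallel
# GEMM: the partial products written to `workspace` are reduced into
# `destination`, with `source` supplying the original C operand.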
| warp-main | warp/native/cutlass/tools/library/scripts/pycutlass/src/pycutlass/c_types.py |
#################################################################################################
#
# Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
import rmm
import numpy as np
class PoolMemoryManager:
def __init__(self, init_pool_size: int, max_pool_size: int) -> None:
self.pool = rmm.mr.PoolMemoryResource(
rmm.mr.CudaMemoryResource(),
initial_pool_size=init_pool_size,
maximum_pool_size=max_pool_size
)
self.mr = rmm.mr.TrackingResourceAdaptor(self.pool)
rmm.mr.set_current_device_resource(self.mr)
def get_allocated_size(self):
return self.mr.get_allocated_bytes()
def pool_size(self):
return self.pool.pool_size()
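# Usage sketch (sizes are illustrative): reserve a 1 GiB pool that may grow
# to 4 GiB, then allocate through the helpers below.
#
#   mm = PoolMemoryManager(init_pool_size=2**30, max_pool_size=2**32)
#   buf = device_mem_alloc(align_size(1000))  # 1024-byte allocation
#   print(mm.get_allocated_size())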
def todevice(host_data, dtype=np.float32):
"""
Pass the host_data to device memory
"""
if isinstance(host_data, list):
return rmm.DeviceBuffer.to_device(np.array(host_data, dtype=dtype).tobytes())
    elif isinstance(host_data, np.ndarray):
        return rmm.DeviceBuffer.to_device(host_data.tobytes())
    else:
        raise TypeError("host_data must be a list or a numpy.ndarray")
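# Example (values illustrative): both calls below copy 12 bytes to the device.
#
#   buf = todevice([1.0, 2.0, 3.0])                 # list -> float32 array
#   buf = todevice(np.arange(3, dtype=np.float32))  # ndarray used as-is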
def device_mem_alloc(size):
return rmm.DeviceBuffer(size=size)
def align_size(size, alignment=256):
return ((size + alignment - 1) // alignment) * alignment
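# Example: align_size(1000) == 1024, since ((1000 + 255) // 256) * 256 = 1024.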
def get_allocated_size():
device_resource = rmm.mr.get_current_device_resource()
return device_resource.get_allocated_bytes()
| warp-main | warp/native/cutlass/tools/library/scripts/pycutlass/src/pycutlass/memory_manager.py |
#################################################################################################
#
# Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
from pycutlass import *
import cutlass
from cuda import cuda
from cuda import nvrtc
import tempfile
import os
import ctypes
#
import json
import sqlite3
IncludeTemplate = r'''#include "${include}"
'''
#
class CompilationOptions:
'''
Compilation options.
'''
#
def __init__(self, flags, architectures=[80], include_paths=[]):
self.includes = []
self.include_paths = include_paths
self.flags = flags
self.architectures = architectures
def get_str(self):
options = ""
for flag in self.flags:
options += " " + flag
for incl in self.include_paths:
options += ' --include-path=%s' % incl
arch_list = "-arch="
for idx, arch in enumerate(self.architectures):
if idx:
arch_list += ","
arch_list += "sm_%d" % arch
options += " " + arch_list
return options
#
def get(self):
options = []
for flag in self.flags:
options.append(bytes(str.encode(flag)))
for incl in self.include_paths:
options.append(bytes(str.encode('--include-path=%s' % incl)))
arch_list = "-arch="
for idx, arch in enumerate(self.architectures):
if idx:
arch_list += ","
arch_list += "sm_%d" % arch
options.append(bytes(str.encode(arch_list)))
return options
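# Example (paths illustrative):
#
#   opts = CompilationOptions(['-O3'], architectures=[80],
#                             include_paths=['/opt/cutlass/include'])
#   opts.get_str()
#   # -> ' -O3 --include-path=/opt/cutlass/include -arch=sm_80'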
def convertToBinaryData(filename):
with open(filename, 'rb') as file:
blobData = file.read()
return blobData
def CDLLBin(host_binary):
tempfile.tempdir = "./"
temp_so = tempfile.NamedTemporaryFile(
prefix='host_func', suffix='.so', delete=True)
with open(temp_so.name, 'wb') as file:
file.write(host_binary)
host_lib = ctypes.CDLL(temp_so.name)
return host_lib
class ArtifactManager:
"""
Artifact manager
"""
def __init__(self) -> None:
try:
connection = sqlite3.connect("./compiled_cache.db")
cursor = connection.cursor()
sqlite_create_table_query = """CREATE TABLE compiled_operations(op_key TEXT NOT NULL UNIQUE, cubin BLOB NOT NULL, hostbin BLOB NOT NULL, op_name TEXT NOT NULL, op_attrs TEXT NOT NULL)"""
cursor.execute(sqlite_create_table_query)
connection.commit()
cursor.close()
        except sqlite3.OperationalError:
            # the table already exists from a previous run
            pass
self.nvcc()
self.compiled_cache_device = cutlass.CompileCache()
self.compiled_cache_host = cutlass.CompileCache()
def nvrtc(self):
self.backend = "nvrtc"
self.default_compile_options = [
'-std=c++11', '-default-device',
]
def nvcc(self):
self.backend = "nvcc"
self.default_compile_options = [
'-std=c++11',
]
def insert_operation(self, op_key, cubin, hostfile, op_name, op_attrs):
connection = sqlite3.connect("./compiled_cache.db")
cursor = connection.cursor()
sqlite_insert_blob_query = """ INSERT OR IGNORE INTO compiled_operations (op_key, cubin, hostbin, op_name, op_attrs) VALUES (?, ?, ?, ?, ?)"""
hostbin = convertToBinaryData(hostfile)
data_tuple = (op_key, cubin, hostbin, op_name, json.dumps(op_attrs))
cursor.execute(sqlite_insert_blob_query, data_tuple)
connection.commit()
cursor.close()
def load_operation(self, op_key):
connection = sqlite3.connect("./compiled_cache.db")
cursor = connection.cursor()
sqlite_fetch_blob_query = """SELECT * from compiled_operations where op_key = ?"""
cursor.execute(sqlite_fetch_blob_query, (op_key, ))
record = cursor.fetchall()
if len(record) == 0:
return False
for row in record:
key, cubin_image, host_binary, operation_name, op_attr = row
op_attr = json.loads(op_attr)
err, module = cuda.cuModuleLoadData(cubin_image)
if err != cuda.CUresult.CUDA_SUCCESS:
raise RuntimeError('Cuda Error: {}'.format(err))
err, kernel = cuda.cuModuleGetFunction(
module, bytes(str.encode(operation_name)))
self.compiled_cache_device.insert(key, kernel)
compiled_host_fns = {}
host_lib = CDLLBin(host_binary)
func_name = operation_name + '_get_params'
func = getattr(host_lib, func_name)
func.restype = ctypes.POINTER(ctypes.c_char * op_attr[0])
compiled_host_fns['get_args'] = func
func_name = operation_name + '_shared_memory_size'
func = getattr(host_lib, func_name)
compiled_host_fns['shared_memory_capacity'] = func()
for attr in op_attr:
if isinstance(attr, str):
func_name = operation_name + '_' + attr
func = getattr(host_lib, func_name)
compiled_host_fns[attr] = func
self.compiled_cache_host.insert(key, compiled_host_fns)
return True
def emit_compile_(self, operation_list, compilation_options):
"""
Compile a list of kernels and store them into database
"""
source_buffer_device = ""
source_buffer_host = ""
# 1. include
includes = []
for operation in operation_list:
for incl in operation.emitter.includes:
if incl not in includes:
includes.append(incl)
includes_host = [
"builtin_types.h", "device_launch_parameters.h", "stddef.h"] + includes
for incl in includes:
source_buffer_device += SubstituteTemplate(
IncludeTemplate, {'include': incl})
for incl in includes_host:
if "/device/" not in incl:
source_buffer_host += SubstituteTemplate(
IncludeTemplate, {'include': incl})
# 2. Operations
for operation in operation_list:
source_buffer_device += operation.emit()
source_buffer_host += operation.emit()
values = {
'operation_name': operation.name(),
'operation_suffix': operation.emitter.operation_suffix
}
source_buffer_device += SubstituteTemplate(
operation.KernelTemplate, values)
source_buffer_host += SubstituteTemplate(
operation.HostTemplate, values)
if self.backend == "nvrtc":
# 3. compile
err, program = nvrtc.nvrtcCreateProgram(
str.encode(source_buffer_device),
bytes(str.encode("module.cu")),
0, [], [])
if err != nvrtc.nvrtcResult.NVRTC_SUCCESS:
raise RuntimeError('NVRTC Error: {}'.format(err))
# Compile program
options = compilation_options.get()
err, = nvrtc.nvrtcCompileProgram(program, len(options), options)
if err != nvrtc.nvrtcResult.NVRTC_SUCCESS:
error_string = 'NVRTC Error: {}\n'.format(err)
# Get log from compilation
err, logSize = nvrtc.nvrtcGetProgramLogSize(program)
if err != nvrtc.nvrtcResult.NVRTC_SUCCESS:
raise RuntimeError('NVRTC Error: {}'.format(err))
log = b' ' * logSize
err, = nvrtc.nvrtcGetProgramLog(program, log)
if err != nvrtc.nvrtcResult.NVRTC_SUCCESS:
raise RuntimeError('NVRTC Error: {}'.format(err))
raise RuntimeError(
error_string + log.decode() + source_buffer_device)
# Get data from compilation
err, dataSize = nvrtc.nvrtcGetCUBINSize(program)
if err != nvrtc.nvrtcResult.NVRTC_SUCCESS:
raise RuntimeError('NVRTC Error: {}'.format(err))
cubin_image = b' ' * dataSize
err, = nvrtc.nvrtcGetCUBIN(program, cubin_image)
if err != nvrtc.nvrtcResult.NVRTC_SUCCESS:
raise RuntimeError('NVRTC Error: {}'.format(err))
else: # with nvcc backend
# emit code
tempfile.tempdir = "./"
temp_cu = tempfile.NamedTemporaryFile(
prefix='kernel', suffix='.cu', delete=True)
temp_cubin = tempfile.NamedTemporaryFile(
prefix='kernel', suffix='.cubin', delete=True)
with open(temp_cu.name, 'w') as file:
file.write(source_buffer_device)
# compile with nvcc
cuda_install_path = os.getenv('CUDA_INSTALL_PATH')
assert cuda_install_path is not None, "Environment variable 'CUDA_INSTALL_PATH' is not defined."
cmd_template = "${cuda_install_path}/bin/nvcc ${options} -cubin ${srcfile} -o ${tarfile}"
values = {
"cuda_install_path": cuda_install_path,
"options": compilation_options.get_str(),
"srcfile": temp_cu.name,
"tarfile": temp_cubin.name
}
cmd = SubstituteTemplate(cmd_template, values)
os.system(cmd)
# load the cubin image
with open(temp_cubin.name, 'rb') as file:
cubin_image = file.read()
# compile the host code
options = compilation_options.get()
cmd = "echo '%s'|g++ -x c++ -fpermissive -w -fPIC" % source_buffer_host
for opt in options:
opt = opt.decode("utf-8")
if opt not in ['-default-device', '-std=c++11', '-Xcicc', '-Xllc'] and '-arch=sm_' not in opt:
if '--include-path=' in opt:
cmd += " " + opt.replace('--include-path=', '-I')
else:
cmd += " " + opt
tempfile.tempdir = "./"
temp = tempfile.NamedTemporaryFile(
prefix='host_func', suffix='.so', delete=True)
cmd += ' - -shared -o %s' % temp.name
os.system(cmd)
host_lib = ctypes.CDLL(temp.name)
return cubin_image, host_lib, temp
def add_module(self, operations, compile_options=None):
"""
Insert a new compiled device module
"""
if compile_options is None:
cutlass_path = os.getenv('CUTLASS_PATH')
assert cutlass_path is not None, "Environment variable 'CUTLASS_PATH' is not defined."
cuda_install_path = os.getenv('CUDA_INSTALL_PATH')
assert cuda_install_path is not None, "Environment variable 'CUDA_INSTALL_PATH' is not defined."
architectures = []
for operation in operations:
if hasattr(operation, "tile_description"):
cc = operation.arch
if cc not in architectures:
architectures.append(cc)
include_paths = [
cuda_install_path + '/include',
cutlass_path + '/include',
cutlass_path + '/tools/util/include',
cutlass_path + '/tools/library/scripts/pycutlass/src/cpp/include'
]
compile_options = CompilationOptions(
self.default_compile_options, architectures, include_paths)
# save the cubin
operation_key = []
operation_list = []
for operation in operations:
# step 1: get kernel string as key
key = operation.rt_module.emit() + operation.procedural_name() + self.backend
            # step 2: check if the operation is in cache
compiled_kernel = self.compiled_cache_device.at(key)
if compiled_kernel is None:
hit = self.load_operation(key)
if hit:
compiled_kernel = self.compiled_cache_device.at(key)
assert compiled_kernel is not None
if compiled_kernel is not None:
operation.rt_module.kernel = compiled_kernel
compiled_host_fns = self.compiled_cache_host.at(key)
assert compiled_host_fns is not None
                for fn_name in compiled_host_fns.keys():
                    setattr(operation.rt_module, fn_name, compiled_host_fns[fn_name])
operation.rt_module.initialize()
else:
operation_list.append(operation.rt_module)
operation_key.append(key)
if len(operation_list) > 0:
cubin_image, host_lib, host_file = self.emit_compile_(
operation_list, compile_options)
err, module = cuda.cuModuleLoadData(cubin_image)
if err != cuda.CUresult.CUDA_SUCCESS:
raise RuntimeError('Cuda Error: {}'.format(err))
operation_name = []
operation_attr = []
for operation, key in zip(operation_list, operation_key):
# get device kernels
err, operation.kernel = cuda.cuModuleGetFunction(
module,
bytes(str.encode(operation.name()))
)
operation_name.append(operation.name())
self.compiled_cache_device.insert(key, operation.kernel)
# get host functions
compiled_host_fns = {}
op_attr = []
# get param size
func_name = operation.name() + '_get_param_size'
func = getattr(host_lib, func_name)
param_size = func()
func_name = operation.name() + '_get_params'
func = getattr(host_lib, func_name)
func.argtype = operation.argtype
func.restype = ctypes.POINTER(ctypes.c_char * param_size)
setattr(operation, 'get_args', func)
compiled_host_fns['get_args'] = func
# set shared memory size
func_name = operation.name() + '_shared_memory_size'
func = getattr(host_lib, func_name)
setattr(operation, 'shared_memory_capacity', func())
compiled_host_fns['shared_memory_capacity'] = func()
# set the maximum dynamic shared size
operation.initialize()
# get extra functions
op_attr.append(param_size)
if hasattr(operation, "extra_funcs"):
for suffix in operation.extra_funcs:
func_name = operation.name() + '_' + suffix
func = getattr(host_lib, func_name)
setattr(operation, suffix, func)
compiled_host_fns[suffix] = func
op_attr.append(suffix)
operation_attr.append(op_attr)
self.compiled_cache_host.insert(key, compiled_host_fns)
            for key, op_name, op_attr in zip(operation_key, operation_name, operation_attr):
                self.insert_operation(
                    key, cubin_image, host_file.name, op_name, op_attr)
| warp-main | warp/native/cutlass/tools/library/scripts/pycutlass/src/pycutlass/compiler.py |
################################################################################
#
# Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
################################################################################
import enum
import copy
import numpy as np
from typeguard import typechecked
import cutlass
from pycutlass import *
from cuda import cuda
from cuda import cudart
################################################################################
#
# Data structure modeling a GEMM operation
#
################################################################################
def transpose_layout(layout: cutlass.layout):
if layout == cutlass.ColumnMajor:
return cutlass.RowMajor
elif layout == cutlass.RowMajor:
return cutlass.ColumnMajor
else:
raise ValueError("unsupported Layout {}".format(layout))
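# A column-major C is handled by solving the transposed problem instead:
# C = A @ B  <=>  C^T = B^T @ A^T. GemmOperationBase below swaps A and B and
# transposes all three layouts (see its `switched` flag), leaving the emitted
# kernel with a row-major C, which is why transpose_layout() is needed.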
# @typechecked
class GemmArguments(ArgumentBase):
"""
Argument wrapper for GEMM. It encodes problem information and
    user-provided tensors into the kernel's arguments
:param operation: the GEMM operation to take the argument
:type operation: :class:`pycutlass.GemmOperationUniversal` |
:class:`pycutlass.GemmOperationGrouped`
:param problem_size: GEMM problem size gemm(M, N, K)
    :type problem_size: :class:`cutlass.gemm.GemmCoord`
:param A: tensor A
:type A: cuda.CUdeviceptr | numpy.ndarray | torch.Tensor | cupy.ndarray
:param B: tensor B
:type B: cuda.CUdeviceptr | numpy.ndarray | torch.Tensor | cupy.ndarray
:param C: tensor C
:type C: cuda.CUdeviceptr | numpy.ndarray | torch.Tensor | cupy.ndarray
:param D: tensor D
:type D: cuda.CUdeviceptr | numpy.ndarray | torch.Tensor | cupy.ndarray
:param gemm_mode: GEMM mode
:type gemm_mode: :class:`cutlass.gemm.Mode`
:param output_op: output operator, optional
:type output_op: :class:`pycutlass.LinearCombinationFunctorArguments`
"""
def __init__(
self, operation: 'GemmOperation', problem_size: 'cutlass.gemm.GemmCoord',
A: 'Tensor', B: 'Tensor', C: 'Tensor', D: 'Tensor',
gemm_mode: 'cutlass.gemm.Mode'=cutlass.gemm.Mode.Gemm, **kwargs):
self.operation = operation
self.layout_A: cutlass.layout = operation.A.layout
self.layout_B: cutlass.layout = operation.B.layout
self.layout_C: cutlass.layout = operation.C.layout
self.element_A = operation.A.element
self.element_B = operation.B.element
self.element_C = operation.C.element
if (operation.C.layout in
[cutlass.RowMajorInterleaved32, cutlass.ColumnMajorInterleaved32]):
# reorder tensor B for interleaved layout output
B = self.reorder_tensor_B(B, problem_size)
super().__init__(A, B, C, D, **kwargs)
if operation.switched:
self.problem_size = cutlass.gemm.GemmCoord(
problem_size.n(), problem_size.m(), problem_size.k())
self.ptr_A, self.ptr_B = self.ptr_B, self.ptr_A
else:
self.problem_size = cutlass.gemm.GemmCoord(
problem_size.m(), problem_size.n(), problem_size.k())
        # if the number of elements in C equals problem_size.n(),
        # C is treated as the bias vector
        if hasattr(self, "tensor_c_numel"):
            if (self.tensor_c_numel == self.problem_size.n() and
                    self.problem_size.m() != 1):
                self.bias = True
# get the leading dimension
self.lda = operation.A.layout.packed(self.problem_size.mk()).stride()
self.ldb = operation.B.layout.packed(self.problem_size.kn()).stride()
self.ldc = operation.C.layout.packed(self.problem_size.mn()).stride()
self.ldd = self.ldc
# stride 0 trick
if self.bias:
self.ldc = 0
if 'output_op' in kwargs.keys() and \
gemm_mode != cutlass.gemm.Mode.GemmSplitKParallel:
self.output_op = kwargs['output_op']
else:
self.output_op = self.operation.epilogue_type(1.0, 0.0)
# get number of slices on k dimension
self.gemm_mode = gemm_mode
if gemm_mode in [cutlass.gemm.Mode.Gemm, cutlass.gemm.Mode.GemmSplitKParallel]:
if 'split_k_slices' in kwargs.keys():
self.batch_count = kwargs['split_k_slices']
else:
self.batch_count = 1
self.split_k_slices = self.batch_count
if gemm_mode in [cutlass.gemm.Mode.Batched, cutlass.gemm.Mode.Array]:
if 'batch' in kwargs.keys():
self.batch_count = kwargs['batch']
else:
self.batch_count = 1
self.batched_stride_A = self.problem_size.m() * self.problem_size.k()
self.batched_stride_B = self.problem_size.n() * self.problem_size.k()
self.batched_stride_C = self.problem_size.m() * self.problem_size.n()
self.batched_stride_D = self.problem_size.m() * self.problem_size.n()
if self.bias:
self.batched_stride_C = self.problem_size.n()
# support GEMM Mode Array
if gemm_mode == cutlass.gemm.Mode.Array:
self.ptr_A_array = []
self.ptr_B_array = []
self.ptr_C_array = []
self.ptr_D_array = []
ptr_A_addr = int(self.ptr_A)
ptr_B_addr = int(self.ptr_B)
ptr_C_addr = int(self.ptr_C)
ptr_D_addr = int(self.ptr_D)
stride_A = self.batched_stride_A * DataTypeSize[self.element_A] // 8
stride_B = self.batched_stride_B * DataTypeSize[self.element_B] // 8
stride_C = self.batched_stride_C * DataTypeSize[self.element_C] // 8
stride_D = self.batched_stride_D * DataTypeSize[self.element_C] // 8
for _ in range(self.batch_count):
self.ptr_A_array.append(ptr_A_addr)
self.ptr_B_array.append(ptr_B_addr)
self.ptr_C_array.append(ptr_C_addr)
self.ptr_D_array.append(ptr_D_addr)
ptr_A_addr += stride_A
ptr_B_addr += stride_B
ptr_C_addr += stride_C
ptr_D_addr += stride_D
self.ptr_A_array_buffer = todevice(self.ptr_A_array, dtype=np.int64)
self.ptr_B_array_buffer = todevice(self.ptr_B_array, dtype=np.int64)
self.ptr_C_array_buffer = todevice(self.ptr_C_array, dtype=np.int64)
self.ptr_D_array_buffer = todevice(self.ptr_D_array, dtype=np.int64)
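            # Mode.Array expects one device pointer per batch. Here the
            # pointer arrays are derived from a single strided allocation,
            # so Array mode reproduces the Batched layout while exercising
            # the pointer-array code path.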
if isinstance(self.operation, GemmOperationUniversal):
self.initialize()
def reorder_tensor_B(self, tensor_B: 'np.ndarray',
problem_size: 'cutlass.gemm.GemmCoord'):
"""
Reorder tensor_B for interleaved layout
:param tensor_B: input tensor B
:type tensor_B: numpy.ndarray
:param problem_size: GEMM problem size
:type problem_size: :class:`cutlass.gemm.GemmCoord`
:return: reordered tensor B
:rtype: numpy.ndarray
"""
reordered_tensor_B = np.empty_like(tensor_B)
tensor_ref_B = self.get_tensor_ref(
tensor_B, self.element_B, self.layout_B, problem_size, "b"
)
reordered_tensor_ref_B = self.get_tensor_ref(
reordered_tensor_B, self.element_B, self.layout_B, problem_size, "b"
)
cutlass.gemm.host.reorder_column(
tensor_ref_B, reordered_tensor_ref_B, problem_size)
return reordered_tensor_B
def get_tensor_ref(
self, tensor, dtype, tensor_layout, problem_size, operand):
if operand == "a":
tensor_coord = problem_size.mk()
elif operand == "b":
tensor_coord = problem_size.kn()
elif operand in ["c", "d"]:
tensor_coord = problem_size.mn()
else:
raise ValueError("unknown operand: " + operand)
layout = tensor_layout.packed(tensor_coord)
return TensorRef(tensor, dtype, layout).tensor_ref
def get_arguments(self):
problem_size_ = GemmCoord_(self.problem_size)
grid_tiled_shape_ = GemmCoord_(
cutlass.gemm.GemmCoord(
self.grid_tiled_shape.x, self.grid_tiled_shape.y,
self.grid_tiled_shape.z
)
)
if self.gemm_mode == cutlass.gemm.Mode.Array:
arguments = self.operation.argument_type(
# Arguments from UniversalArgumentsBase
self.gemm_mode, problem_size_, self.batch_count, 0,
# Remaining arguments
self.output_op,
int(self.ptr_A_array_buffer.ptr),
int(self.ptr_B_array_buffer.ptr),
int(self.ptr_C_array_buffer.ptr),
int(self.ptr_D_array_buffer.ptr),
0, 0, 0,
self.lda, self.ldb, self.ldc, self.ldd,
self.lda, self.ldb, self.ldc, self.ldd,
0, 0, 0
)
else:
arguments = self.operation.argument_type(
# Arguments from UniversalArgumentsBase
self.gemm_mode, problem_size_, self.batch_count, self.batched_stride_D,
# Remaining arguments
self.output_op,
int(self.ptr_A), int(self.ptr_B), int(self.ptr_C), int(self.ptr_D),
self.batched_stride_A, self.batched_stride_B, self.batched_stride_C,
self.lda, self.ldb, self.ldc, self.ldd,
self.lda, self.ldb, self.ldc, self.ldd,
0, 0, 0
)
self.arguments = arguments, grid_tiled_shape_, self.gemm_k_size
def initialize(self):
# get launch configuration
launch_config = self.operation.rt_module.plan(self)
        # get the host and device workspace
device_workspace_size = \
self.operation.rt_module.get_device_workspace_size(self)
if device_workspace_size > 0:
self.workspace_buffer = device_mem_alloc(device_workspace_size)
workspace_ptr = self.workspace_buffer.ptr
err, = cuda.cuMemsetD32(
workspace_ptr, 0, device_workspace_size // 4)
else:
workspace_ptr = None
device_workspace = 0
if (workspace_ptr is not None and
self.gemm_mode == cutlass.gemm.Mode.GemmSplitKParallel):
            # in GEMM split-K parallel, the D pointer is redirected
            # to the workspace
self.ptr_D = cuda.CUdeviceptr(workspace_ptr)
elif (workspace_ptr is not None and
self.gemm_mode == cutlass.gemm.Mode.Gemm):
# in GEMM split-K serial
device_workspace = workspace_ptr
self.get_arguments()
arguments, grid_tiled_shape, gemm_k_size = self.arguments
res_arg = self.operation.rt_module.get_args(
ctypes.byref(arguments), ctypes.c_void_p(int(device_workspace)))
host_workspace = bytearray(res_arg.contents)
device_workspace = None
self.host_workspace = host_workspace
self.device_workspace = device_workspace
self.launch_config = launch_config
class GemmGroupedArguments:
"""
Argument wrapper for GEMM Grouped. It encodes problem information and
    user-provided tensors into the kernel's arguments
:param operation: the GEMM Grouped operation to take the argument
:type operation: :class:`pycutlass.GemmOperationGrouped`
    :param problem_sizes: list of GEMM problem sizes gemm(M, N, K)
    :type problem_sizes: list[:class:`cutlass.gemm.GemmCoord`]
:param A: list of tensor A
:type A: list[cuda.CUdeviceptr | numpy.ndarray | torch.Tensor | cupy.ndarray]
:param B: list of tensor B
:type B: list[cuda.CUdeviceptr | numpy.ndarray | torch.Tensor | cupy.ndarray]
:param C: list of tensor C
:type C: list[cuda.CUdeviceptr | numpy.ndarray | torch.Tensor | cupy.ndarray]
:param D: list of tensor D
:type D: list[cuda.CUdeviceptr | numpy.ndarray | torch.Tensor | cupy.ndarray]
:param output_op: output operator, optional
:type output_op: :class:`pycutlass.LinearCombinationFunctorArguments`
"""
def __init__(
self, operation: 'GemmOperationGrouped',
problem_sizes: 'list[cutlass.gemm.GemmCoord]',
        A: 'list[Tensor]', B: 'list[Tensor]', C: 'list[Tensor]',
D: 'list[Tensor]', **kwargs):
# get number of problems in the group
self.problem_count = len(problem_sizes)
# check the input arguments
assert len(A) == self.problem_count
assert len(B) == self.problem_count
assert len(C) == self.problem_count
assert len(D) == self.problem_count
problem_size_host = []
self.ptr_A_host = []
self.ptr_B_host = []
self.ptr_C_host = []
self.ptr_D_host = []
lda_host = []
ldb_host = []
ldc_host = []
ldd_host = []
self.partitions = 1
self.operation = operation
# get the threadblock
threadblock_shape = operation.tile_description.threadblock_shape
self.threadblock_shape = cutlass.gemm.GemmCoord(
threadblock_shape[0], threadblock_shape[1], threadblock_shape[2])
self.threadblock_swizzle = operation.swizzling_functor
self.total_tiles = 0
self.gemm_arguments = []
# process the input arguments
for idx, problem_size in enumerate(problem_sizes):
M, N, K = problem_size.m(), problem_size.n(), problem_size.k()
temp_argument = GemmArguments(
operation=operation,
problem_size=cutlass.gemm.GemmCoord(M, N, K),
A=A[idx], B=B[idx], C=C[idx], D=D[idx],
)
self.gemm_arguments.append(temp_argument)
problem_size_host.append(
[temp_argument.problem_size.m(),
temp_argument.problem_size.n(),
temp_argument.problem_size.k()]
)
self.ptr_A_host.append(int(temp_argument.ptr_A))
lda_host.append(temp_argument.lda)
self.ptr_B_host.append(int(temp_argument.ptr_B))
ldb_host.append(temp_argument.ldb)
self.ptr_C_host.append(int(temp_argument.ptr_C))
ldc_host.append(temp_argument.ldc)
self.ptr_D_host.append(int(temp_argument.ptr_D))
ldd_host.append(temp_argument.ldd)
# get number of tiles
grid = self.threadblock_swizzle.get_grid_shape(
self.threadblock_swizzle.get_tiled_shape(
temp_argument.problem_size, self.threadblock_shape,
temp_argument.batch_count)
)
self.total_tiles += grid.x * grid.y * grid.z
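        # total_tiles becomes the 1-D grid size used by GemmRTGrouped.plan():
        # one threadblock is launched per output tile across all problems.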
self.problem_size_buffer = todevice(problem_size_host, np.int32)
self.ptr_A_buffer = todevice(self.ptr_A_host, np.int64)
self.ptr_B_buffer = todevice(self.ptr_B_host, np.int64)
self.ptr_C_buffer = todevice(self.ptr_C_host, np.int64)
self.ptr_D_buffer = todevice(self.ptr_D_host, np.int64)
self.lda_buffer = todevice(lda_host, np.int64)
self.ldb_buffer = todevice(ldb_host, np.int64)
self.ldc_buffer = todevice(ldc_host, np.int64)
self.ldd_buffer = todevice(ldd_host, np.int64)
        if 'output_op' in kwargs.keys():
            self.output_op = kwargs['output_op']
            self.alpha = self.output_op.alpha
            self.beta = self.output_op.beta
        else:
            self.output_op = self.operation.epilogue_type(1.0, 0.0)
            self.alpha = 1.0
            self.beta = 0.0
        # get host problem size
        # (keep a reference to the host array so the raw pointer below
        # remains valid for the lifetime of these arguments)
        self.problem_sizes_host = np.array(problem_size_host, dtype=np.int32)
        self.host_problem_size_ptr = \
            self.problem_sizes_host.__array_interface__['data'][0]
self.arguments = self.get_arguments()
self.initialize()
def get_arguments(self):
return self.operation.argument_type(
self.problem_size_buffer.ptr, self.problem_count, self.total_tiles,
self.output_op, self.ptr_A_buffer.ptr, self.ptr_B_buffer.ptr,
self.ptr_C_buffer.ptr, self.ptr_D_buffer.ptr, self.lda_buffer.ptr,
self.ldb_buffer.ptr, self.ldc_buffer.ptr, self.ldd_buffer.ptr,
ctypes.c_void_p(int(self.host_problem_size_ptr))
)
def initialize(self):
# get launch configuration
launch_config = self.operation.rt_module.plan(self)
        # get the host and device workspace
device_workspace_size = \
self.operation.rt_module.get_device_workspace_size(self)
if device_workspace_size > 0:
self.workspace_buffer = device_mem_alloc(device_workspace_size)
workspace_ptr = self.workspace_buffer.ptr
err, = cuda.cuMemsetD32(
workspace_ptr, 0, device_workspace_size // 4)
else:
workspace_ptr = None
if self.operation.precompute_mode == SchedulerMode.Host:
device_workspace_ptr = self.operation.rt_module.host_precompute(
self, self.operation.rt_module.get_workspace_size(self))
else:
device_workspace_ptr = 0
result = self.operation.rt_module.get_args(
ctypes.byref(self.arguments), self.total_tiles,
ctypes.c_void_p(int(device_workspace_ptr))
)
host_workspace = bytearray(result.contents)
device_workspace = None
self.host_workspace = host_workspace
self.device_workspace = device_workspace
self.launch_config = launch_config
def sync(self):
err, = cudart.cudaDeviceSynchronize()
        if err != cudart.cudaError_t.cudaSuccess:
            raise RuntimeError("CUDA Error %s" % str(err))
for arg in self.gemm_arguments:
arg.sync(stream_sync=False)
################################################################################
# Base class for GEMM runtime module
################################################################################
class GemmRTbase(ExecutableOperation):
"""
GemmRT manages the CUTLASS runtime components
"""
KernelTemplate = r'''
extern "C"
__global__ void
${operation_name}(${operation_name}${operation_suffix}::Params params) {
// Dynamic shared memory base pointer
extern __shared__ int SharedStorageBase[];
// Declare pointer to dynamic shared memory.
${operation_name}${operation_suffix}::SharedStorage *shared_storage =
reinterpret_cast<${operation_name}${operation_suffix}::SharedStorage *>(SharedStorageBase);
${operation_name}${operation_suffix} op;
op(params, *shared_storage);
}
'''
def __init__(self, operation: 'GemmOperation'):
super().__init__(operation)
self.operation = operation
threadblock_shape = operation.tile_description.threadblock_shape
self.threadblock_shape = cutlass.gemm.GemmCoord(
threadblock_shape[0], threadblock_shape[1], threadblock_shape[2])
self.threadblock_swizzle = operation.swizzling_functor
#: number of threads per threadblock
self.threads: int = operation.tile_description.num_threads
#
def emit(self):
return self.emitter.emit(self.operation)
#
def can_implement(self, configuration, arguments):
raise NotImplementedError()
#
def get_host_workspace_size(self, arguments):
raise NotImplementedError()
#
def get_device_workspace_size(self, arguments):
return 0
#
def initialize(self):
err, = cuda.cuFuncSetAttribute(
self.kernel,
attrib=cuda.CUfunction_attribute.CU_FUNC_ATTRIBUTE_MAX_DYNAMIC_SHARED_SIZE_BYTES,
value=self.shared_memory_capacity)
if err != cuda.CUresult.CUDA_SUCCESS:
raise RuntimeError('Cuda Error: {}'.format(err))
################################################################################
# Runtime module for GEMM Universal
################################################################################
class GemmRTUniversal(GemmRTbase):
"""
GemmRTUniversal manages the CUTLASS runtime components
"""
HostTemplate = r'''
extern "C" {
// Get the size of params in bytes
int ${operation_name}_get_param_size(){
return sizeof(${operation_name}${operation_suffix}::Params);
}
// Get the size of dynamic shared memory in bytes
int ${operation_name}_shared_memory_size() {
return int(sizeof(${operation_name}${operation_suffix}::SharedStorage));
}
// Get the params as byte array
char* ${operation_name}_get_params(${operation_name}_base::Arguments* argument, int* workspace){
${operation_name}_base::Params* params;
params = new ${operation_name}_base::Params(*argument,
-1, // SM count. Only used for stream-K
-1 // Occupancy. Only used for stream-K
);
// Semaphore holds the pointer to the workspace in the Params struct
params->semaphore = workspace;
char *bytes = ((char*)(params));
char *output = new char[sizeof(${operation_name}_base::Params)];
for (unsigned int i = 0; i < sizeof(${operation_name}_base::Params); i ++)
output[i] = bytes[i];
return output;
}
}
'''
def __init__(self, operation: 'GemmOperation'):
super(GemmRTUniversal, self).__init__(operation)
self.emitter = EmitGemmUniversalInstance(
'_type', operation.direct_store, operation.visitor)
self.argument_type, self.epilogue_type = get_gemm_arguments(operation.epilogue_functor)
self.argtype = [
ctypes.POINTER(self.argument_type),
ctypes.POINTER(GemmCoord_), ctypes.c_int, ctypes.c_void_p
]
def plan(self, arguments):
grid = self.threadblock_swizzle.get_tiled_shape(
arguments.problem_size, self.threadblock_shape, arguments.batch_count
)
gemm_k_size = arguments.problem_size.k()
if (arguments.gemm_mode in
[cutlass.gemm.Mode.Gemm, cutlass.gemm.Mode.GemmSplitKParallel]):
#
alignk = max(max(128 // DataTypeSize[self.operation.A.element],
128 // DataTypeSize[self.operation.B.element]), 1)
gemm_k_size = (((arguments.problem_size.k() + arguments.batch_count - 1) //
arguments.batch_count + alignk - 1) // alignk) * alignk
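            # Worked example (illustrative): K=1024, split_k_slices=3 and
            # fp16 A/B give alignk = 8, so
            #   gemm_k_size = ceil(ceil(1024 / 3) / 8) * 8 = ceil(342 / 8) * 8 = 344
            # and grid_z = ceil(1024 / 344) = 3 k-slices.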
if gemm_k_size:
grid_z = (arguments.problem_size.k() +
gemm_k_size - 1) // gemm_k_size
grid = cutlass.gemm.GemmCoord(grid.m(), grid.n(), grid_z)
arguments.grid_tiled_shape = cutlass.dim3(grid.m(), grid.n(), grid.k())
grid = self.threadblock_swizzle.get_grid_shape(grid)
arguments.gemm_k_size = gemm_k_size
return LaunchConfiguration(
[grid.x, grid.y, grid.z],
[self.threads, 1, 1],
self.shared_memory_capacity)
#
def get_device_workspace_size(self, arguments: GemmArguments):
workspace_bytes = 0
if arguments.gemm_mode == cutlass.gemm.Mode.GemmSplitKParallel:
workspace_bytes = (DataTypeSize[arguments.operation.C.element]
* arguments.batched_stride_D * arguments.grid_tiled_shape.z // 8)
elif (arguments.gemm_mode == cutlass.gemm.Mode.Gemm and
arguments.split_k_slices > 1):
#
workspace_bytes = 4 * arguments.grid_tiled_shape.x * arguments.grid_tiled_shape.y
# TODO: get extra workspace size
# see https://github.com/NVIDIA/cutlass/blob/master/include/cutlass/gemm/device/gemm_universal_base.h
return workspace_bytes
###################################################################################################
# Runtime module for GEMM Grouped
###################################################################################################
class GemmRTGrouped(GemmRTbase):
"""
GemmRTGrouped manages the CUTLASS runtime components
"""
HostTemplate = r'''
extern "C" {
// precompute scheduling information
char * ${operation_name}_precompute(${operation_name}_base::Arguments const &args, int tile_count, size_t workspace_bytes) {
char* host_workspace = new char[workspace_bytes];
${operation_name}_base::ProblemVisitor::host_precompute(
args.host_problem_sizes,
args.problem_count,
args.threadblock_count,
(void*)host_workspace
);
return host_workspace;
}
// Get the size of params in bytes
int ${operation_name}_get_param_size(){
return sizeof(${operation_name}${operation_suffix}::Params);
}
// Get the size of dynamic shared memory in bytes
int ${operation_name}_shared_memory_size() {
return int(sizeof(${operation_name}${operation_suffix}::SharedStorage));
}
// Get the params as byte array
char* ${operation_name}_get_params(${operation_name}_base::Arguments* argument, int tile_count, void* workspace=nullptr){
${operation_name}_base::Params* params;
params = new ${operation_name}_base::Params(*argument, workspace, tile_count);
char *bytes = ((char*)(params));
char *output = new char[sizeof(${operation_name}_base::Params)];
for (unsigned int i = 0; i < sizeof(${operation_name}_base::Params); i ++)
output[i] = bytes[i];
return output;
}
}
'''
def __init__(self, operation: 'GemmOperation'):
super(GemmRTGrouped, self).__init__(operation)
self.extra_funcs = ['precompute']
self.emitter = EmitGemmGroupedInstance('_type')
self.argument_type, self.epilogue_type = get_gemm_grouped_arguments(operation.epilogue_functor)
self.argtype = [ctypes.POINTER(self.argument_type), ctypes.c_int, ctypes.c_void_p]
def host_precompute(self, arguments, workspace_bytes):
self.precompute.argtype = [
self.argtype[0], ctypes.c_int, ctypes.c_longlong]
self.precompute.restype = ctypes.POINTER(
ctypes.c_byte * workspace_bytes)
problem_info = self.precompute(ctypes.byref(
arguments.arguments), arguments.total_tiles, workspace_bytes)
problem_info_array = bytearray(problem_info.contents)
# copy to device memory
return rmm.DeviceBuffer.to_device(problem_info_array).ptr
def plan(self, arguments):
return LaunchConfiguration(
[arguments.total_tiles, 1, 1],
[self.threads, 1, 1], self.shared_memory_capacity)
def get_workspace_size(self, arguments):
if self.operation.precompute_mode == SchedulerMode.Device:
return 0
elif self.operation.precompute_mode == SchedulerMode.Host:
total_tiles = arguments.total_tiles
entries_per_block = 1
return 8 * entries_per_block * total_tiles # three int32_t
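    # With SchedulerMode.Host the tile-to-problem schedule is precomputed on
    # the CPU by host_precompute() and copied into an 8-byte-per-tile device
    # buffer; with SchedulerMode.Device the kernel derives the schedule on
    # the fly and needs no workspace.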
################################################################################
# GEMM operation classes and emitters
################################################################################
#
class GemmOperationBase:
"""
CUTLASS GEMM operation
"""
#
def __init__(
self, gemm_kind, arch, tile_description: TileDescription,
A: TensorDescription, B: TensorDescription, C: TensorDescription,
epilogue_functor,
swizzling_functor=cutlass.IdentitySwizzle1, **kwargs):
#: operation kind
self.operation_kind: OperationKind = OperationKind.Gemm
#: compute capability
self.arch: int = arch
#: tile description object
self.tile_description: TileDescription = tile_description
#: gemm kind
self.gemm_kind: GemmKind = gemm_kind
        # use deep copy to avoid overwriting the original TensorDescription
if C.layout == cutlass.ColumnMajor:
#: Operand A
self.A: TensorDescription = copy.deepcopy(B)
#: Operand B
self.B: TensorDescription = copy.deepcopy(A)
#: Operand C
self.C: TensorDescription = copy.deepcopy(C)
self.A.layout = transpose_layout(self.A.layout)
self.B.layout = transpose_layout(self.B.layout)
self.C.layout = transpose_layout(self.C.layout)
self.switched = True
else:
#: Operand A
self.A: TensorDescription = copy.deepcopy(A)
#: Operand B
self.B: TensorDescription = copy.deepcopy(B)
#: Operand C
self.C: TensorDescription = copy.deepcopy(C)
self.switched = False
self.epilogue_functor = epilogue_functor
self.swizzling_functor = swizzling_functor()
if "direct_store" in kwargs:
self.direct_store = kwargs["direct_store"]
else:
self.direct_store = False
if "visitor" in kwargs:
self.visitor = kwargs["visitor"]
else:
self.visitor = False
def run(self, arguments: GemmArguments) -> cuda.CUresult:
"""
Configure and launch the cuda kernel with input arguments
"""
err = self.rt_module.run(
arguments.host_workspace,
arguments.device_workspace,
arguments.launch_config)
if err != cuda.CUresult.CUDA_SUCCESS:
raise RuntimeError('CUDA Error %s' % str(err))
return err
def free(self):
if hasattr(self, "workspace_buffer"):
del self.workspace_buffer
#
def is_complex(self):
complex_operators = [
MathOperation.multiply_add_complex,
MathOperation.multiply_add_complex_gaussian,
MathOperation.multiply_add_complex_fast_f32
]
return self.tile_description.math_instruction.math_operation in complex_operators
#
def is_planar_complex(self):
return self.gemm_kind in (GemmKind.PlanarComplex, GemmKind.PlanarComplexArray)
#
def accumulator_type(self):
accum = self.tile_description.math_instruction.element_accumulator
if self.is_complex():
return get_complex_from_real(accum)
return accum
#
def short_math_name(self):
if self.tile_description.math_instruction.math_operation == MathOperation.multiply_add_complex_gaussian:
return "g%s" % ShortDataTypeNames[self.accumulator_type()]
return ShortDataTypeNames[self.accumulator_type()]
#
def core_name(self):
''' The basic operation kind is prefixed with a letter indicating the accumulation type. '''
inst_shape = ''
inst_operation = ''
intermediate_type = ''
math_operations_map = {
MathOperation.xor_popc: 'xor',
}
if self.tile_description.math_instruction.opcode_class == cutlass.OpClass.TensorOp or \
self.tile_description.math_instruction.opcode_class == cutlass.OpClass.WmmaTensorOp:
math_op = self.tile_description.math_instruction.math_operation
math_op_string = math_operations_map[math_op] if math_op in math_operations_map.keys(
) else ''
inst_shape = "%d%d%d" % tuple(
self.tile_description.math_instruction.instruction_shape)
inst_shape += math_op_string
if self.tile_description.math_instruction.element_a != self.A.element and \
self.tile_description.math_instruction.element_a != self.tile_description.math_instruction.element_accumulator:
intermediate_type = DataTypeNames[self.tile_description.math_instruction.element_a]
return "%s%s%s%s" % (self.short_math_name(), inst_shape, intermediate_type, GemmKindNames[self.gemm_kind])
#
def extended_name(self):
''' Append data types if they differ from compute type. '''
if self.is_complex():
extended_name = "${core_name}"
else:
if self.C.element != self.tile_description.math_instruction.element_accumulator and \
self.A.element != self.tile_description.math_instruction.element_accumulator:
extended_name = "${element_c}_${core_name}_${element_a}"
elif self.C.element == self.tile_description.math_instruction.element_accumulator and \
self.A.element != self.tile_description.math_instruction.element_accumulator:
extended_name = "${core_name}_${element_a}"
else:
extended_name = "${core_name}"
extended_name = SubstituteTemplate(extended_name, {
'element_a': DataTypeNames[self.A.element],
'element_c': DataTypeNames[self.C.element],
'core_name': self.core_name()
})
return extended_name
#
def layout_name(self):
if self.is_complex() or self.is_planar_complex():
return "%s%s" % (
ShortComplexLayoutNames[(
self.A.layout, self.A.complex_transform)],
ShortComplexLayoutNames[(
self.B.layout, self.B.complex_transform)]
)
return "%s%s" % (ShortLayoutTypeNames[self.A.layout], ShortLayoutTypeNames[self.B.layout])
#
def procedural_name(self):
''' The full procedural name indicates architecture, extended name, tile size, and layout. '''
threadblock = self.tile_description.procedural_name()
opcode_class_name = OpcodeClassNames[self.tile_description.math_instruction.opcode_class]
alignment = max([self.A.alignment, self.B.alignment, self.C.alignment])
return SubstituteTemplate(
"cutlass_${opcode_class}_${extended_name}_${threadblock}_${layout}_align${alignment}",
{
'opcode_class': opcode_class_name,
'extended_name': self.extended_name(),
'threadblock': threadblock,
'layout': self.layout_name(),
'alignment': "%d" % self.A.alignment,
}
)
#
def configuration_name(self):
''' The full procedural name indicates architecture, extended name, tile size, and layout. '''
return self.procedural_name()
class GemmOperationUniversal(GemmOperationBase):
def __init__(self, arch, tile_description: TileDescription, A: TensorDescription, B, C,
epilogue_functor, swizzling_functor=cutlass.IdentitySwizzle1, **kwargs):
super(GemmOperationUniversal, self).__init__(GemmKind.Universal, arch, tile_description,
A, B, C, epilogue_functor, swizzling_functor, **kwargs)
self.rt_module = GemmRTUniversal(self)
self.argument_type = self.rt_module.argument_type
self.epilogue_type = self.rt_module.epilogue_type
class GemmOperationGrouped(GemmOperationBase):
def __init__(self, arch, tile_description: TileDescription, A: TensorDescription, B, C,
epilogue_functor, swizzling_functor=cutlass.IdentitySwizzle1, **kwargs):
super(GemmOperationGrouped, self).__init__(GemmKind.Grouped, arch, tile_description,
A, B, C, epilogue_functor, swizzling_functor, **kwargs)
assert "precompute_mode" in kwargs.keys(
), "missing keyword arguement 'precompute_mode'."
self.precompute_mode = kwargs["precompute_mode"]
self.rt_module = GemmRTGrouped(self)
self.argument_type = self.rt_module.argument_type
self.epilogue_type = self.rt_module.epilogue_type
###################################################################################################
#
# Emits single instances of a CUTLASS device-wide operator
#
###################################################################################################
#
class EmitGemmUniversalInstance:
''' Responsible for emitting a CUTLASS template definition'''
def __init__(self, operation_suffix='', direct_store=False, visitor=False):
self.operation_suffix = operation_suffix
self.direct_store = direct_store
self.visitor = visitor
self.includes = [
"cutlass/cutlass.h",
"cutlass/numeric_types.h",
"cutlass/arch/arch.h",
"cutlass/arch/mma.h",
"cutlass/layout/matrix.h",
"cutlass/gemm/device/gemm.h",
"cutlass/gemm/device/gemm_universal_adapter.h",
"cutlass/gemm/kernel/default_gemm_universal.h",
]
if self.visitor:
self.includes += [
"gemm/gemm_universal_with_visitor.h",
"epilogue/epilogue_visitor_with_layernorm.h",
"epilogue/epilogue_visitor_generic.h"
]
if self.direct_store:
self.includes.append(
"cutlass/epilogue/threadblock/default_epilogue_direct_store.h")
self.gemm_template_interleaved = """
// Gemm operator ${operation_name}
using ${operation_name}_base =
typename cutlass::gemm::kernel::DefaultGemmUniversal<
${element_a}, ${layout_a}, ${transform_a}, ${align_a},
${element_b}, ${layout_b}, ${transform_b}, ${align_b},
${element_c}, ${layout_c},
${element_accumulator},
${opcode_class},
${arch},
cutlass::gemm::GemmShape<${threadblock_shape_m}, ${threadblock_shape_n}, ${threadblock_shape_k}>,
cutlass::gemm::GemmShape<${warp_shape_m}, ${warp_shape_n}, ${warp_shape_k}>,
cutlass::gemm::GemmShape<${instruction_shape_m}, ${instruction_shape_n}, ${instruction_shape_k}>,
${epilogue_functor},
${swizzling_functor},
${stages},
${math_operation}
>::GemmKernel;
// Define named type
struct ${operation_name}${operation_suffix} :
public ${operation_name}_base { };
"""
self.gemm_template_direct_store = """
// Gemm operator ${operation_name}
using ${operation_name}_default =
typename cutlass::gemm::kernel::DefaultGemmUniversal<
${element_a}, ${layout_a}, ${transform_a}, ${align_a},
${element_b}, ${layout_b}, ${transform_b}, ${align_b},
${element_c}, ${layout_c},
${element_accumulator},
${opcode_class},
${arch},
cutlass::gemm::GemmShape<${threadblock_shape_m}, ${threadblock_shape_n}, ${threadblock_shape_k}>,
cutlass::gemm::GemmShape<${warp_shape_m}, ${warp_shape_n}, ${warp_shape_k}>,
cutlass::gemm::GemmShape<${instruction_shape_m}, ${instruction_shape_n}, ${instruction_shape_k}>,
${epilogue_functor},
${swizzling_functor},
${stages},
${math_operation}
>::GemmKernel;
using ${operation_name}_base =
cutlass::gemm::kernel::GemmUniversal<
${operation_name}_default::Mma,
cutlass::epilogue::threadblock::DefaultEpilogueDirectStore<
${operation_name}_default::Epilogue
>::Epilogue,
${operation_name}_default::ThreadblockSwizzle
>;
// Define named type
struct ${operation_name}${operation_suffix} :
public ${operation_name}_base { };
"""
self.gemm_template_visitor = """
// Gemm operator ${operation_name}
using ${operation_name}_default =
typename cutlass::gemm::kernel::DefaultGemmUniversal<
${element_a}, ${layout_a}, ${transform_a}, ${align_a},
${element_b}, ${layout_b}, ${transform_b}, ${align_b},
${element_c}, ${layout_c},
${element_accumulator},
${opcode_class},
${arch},
cutlass::gemm::GemmShape<${threadblock_shape_m}, ${threadblock_shape_n}, ${threadblock_shape_k}>,
cutlass::gemm::GemmShape<${warp_shape_m}, ${warp_shape_n}, ${warp_shape_k}>,
cutlass::gemm::GemmShape<${instruction_shape_m}, ${instruction_shape_n}, ${instruction_shape_k}>,
${elementwise_epilogue_functor},
${swizzling_functor},
${stages},
${math_operation}
>::GemmKernel;
${epilogue_visitor}
using ${operation_name}_Epilogue = typename cutlass::epilogue::threadblock::EpilogueWithVisitorFromExistingEpilogue<
${operation_name}_EpilogueVisitor,
typename ${operation_name}_default::Epilogue>::Epilogue;
using ${operation_name}_base =
cutlass::gemm::kernel::GemmUniversalwithEpilogueVisitor<
${operation_name}_default::Mma,
${operation_name}_Epilogue,
${operation_name}_default::ThreadblockSwizzle
>;
// Define named type
struct ${operation_name}${operation_suffix} :
public ${operation_name}_base { };
"""
#
def instance_template(self):
return """
${compile_guard_start}
manifest.append(new ${gemm_kind}<
cutlass::gemm::device::GemmUniversalAdapter<${operation_name}>
>("${operation_name}"));
${compile_guard_end}
"""
#
def emit(self, operation):
threadblock_shape = operation.tile_description.threadblock_shape
warp_count = operation.tile_description.warp_count
warp_shape = [threadblock_shape[idx] // warp_count[idx]
for idx in range(3)]
# transpose_layouts = {
        #     cutlass.layout.ColumnMajor: cutlass.layout.ColumnMajor,
        #     cutlass.layout.RowMajor: cutlass.layout.RowMajor
# }
# if operation.A.layout in transpose_layouts.keys() and \
# operation.B.layout in transpose_layouts.keys() and \
# operation.C.layout in transpose_layouts.keys():
# instance_layout_A = transpose_layouts[operation.A.layout]
# instance_layout_B = transpose_layouts[operation.B.layout]
# instance_layout_C = transpose_layouts[operation.C.layout]
# gemm_template = self.gemm_template
# else:
instance_layout_A, instance_layout_B, instance_layout_C = \
(operation.A.layout, operation.B.layout, operation.C.layout)
if self.direct_store:
gemm_template = self.gemm_template_direct_store
elif self.visitor:
gemm_template = self.gemm_template_visitor
else:
gemm_template = self.gemm_template_interleaved
#
values = {
'operation_name': operation.procedural_name(),
'operation_suffix': self.operation_suffix,
'element_a': DataTypeTag[operation.A.element],
'layout_a': LayoutTag[instance_layout_A],
'element_b': DataTypeTag[operation.B.element],
'layout_b': LayoutTag[instance_layout_B],
'element_c': DataTypeTag[operation.C.element],
'layout_c': LayoutTag[instance_layout_C],
'element_accumulator': DataTypeTag[operation.accumulator_type()],
'opcode_class': OpcodeClassTag[operation.tile_description.math_instruction.opcode_class],
'arch': "cutlass::arch::Sm%d" % operation.arch,
'threadblock_shape_m': str(operation.tile_description.threadblock_shape[0]),
'threadblock_shape_n': str(operation.tile_description.threadblock_shape[1]),
'threadblock_shape_k': str(operation.tile_description.threadblock_shape[2]),
'warp_shape_m': str(warp_shape[0]),
'warp_shape_n': str(warp_shape[1]),
'warp_shape_k': str(warp_shape[2]),
'instruction_shape_m': str(operation.tile_description.math_instruction.instruction_shape[0]),
'instruction_shape_n': str(operation.tile_description.math_instruction.instruction_shape[1]),
'instruction_shape_k': str(operation.tile_description.math_instruction.instruction_shape[2]),
'swizzling_functor': operation.swizzling_functor.tag(),
'stages': str(operation.tile_description.stages),
'align_a': str(operation.A.alignment),
'align_b': str(operation.B.alignment),
'transform_a': ComplexTransformTag[operation.A.complex_transform],
'transform_b': ComplexTransformTag[operation.B.complex_transform],
'math_operation': MathOperationTag[operation.tile_description.math_instruction.math_operation]
}
if self.visitor:
values['epilogue_visitor'] = operation.epilogue_functor.emit(operation)
values['elementwise_epilogue_functor'] = operation.epilogue_functor.elementwise_functor.emit()
else:
values['epilogue_functor'] = operation.epilogue_functor.emit()
return SubstituteTemplate(gemm_template, values)
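# --- Illustrative sketch (not part of the original module) ---
# Assumption: SubstituteTemplate, defined elsewhere in pycutlass, performs
# plain ${key} string substitution over the C++ templates above. A minimal
# stand-in with that assumed behavior:
import re

def _substitute_template_sketch(template: str, values: dict) -> str:
    # Replace every ${key} with str(values[key]); leave unknown keys intact.
    return re.sub(r"\$\{(\w+)\}",
                  lambda m: str(values.get(m.group(1), m.group(0))),
                  template)

# e.g. _substitute_template_sketch("struct ${operation_name} {};",
#                                  {"operation_name": "gemm_f16"})
# returns "struct gemm_f16 {};"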
###################################################################################################
#
class EmitGemmGroupedInstance:
''' Responsible for emitting a CUTLASS template definition'''
def __init__(self, operation_suffix=''):
self.operation_suffix = operation_suffix
self.includes = [
"cutlass/cutlass.h",
"cutlass/numeric_types.h",
"cutlass/arch/arch.h",
"cutlass/arch/mma.h",
"cutlass/layout/matrix.h",
"cutlass/gemm/kernel/gemm_grouped.h",
"cutlass/gemm/kernel/default_gemm_grouped.h"
]
self.gemm_template = """
// Gemm operator ${operation_name}
using ${operation_name}_base =
typename cutlass::gemm::kernel::DefaultGemmGrouped<
${element_a}, ${layout_a}, ${transform_a}, ${align_a},
${element_b}, ${layout_b}, ${transform_b}, ${align_b},
${element_c}, ${layout_c},
${element_accumulator},
${opcode_class},
${arch},
cutlass::gemm::GemmShape<${threadblock_shape_m}, ${threadblock_shape_n}, ${threadblock_shape_k}>,
cutlass::gemm::GemmShape<${warp_shape_m}, ${warp_shape_n}, ${warp_shape_k}>,
cutlass::gemm::GemmShape<${instruction_shape_m}, ${instruction_shape_n}, ${instruction_shape_k}>,
${epilogue_functor},
${swizzling_functor},
${stages},
${precompute_mode},
${math_operation}
>::GemmKernel;
// Define named type
struct ${operation_name}${operation_suffix} :
public ${operation_name}_base { };
"""
#
def instance_template(self):
return """
${compile_guard_start}
manifest.append(new ${gemm_kind}<
cutlass::gemm::device::GemmGrouped<${operation_name}>
>("${operation_name}"));
${compile_guard_end}
"""
#
def emit(self, operation):
threadblock_shape = operation.tile_description.threadblock_shape
warp_count = operation.tile_description.warp_count
warp_shape = [threadblock_shape[idx] // warp_count[idx]
for idx in range(3)]
instance_layout_A, instance_layout_B, instance_layout_C = \
(operation.A.layout, operation.B.layout, operation.C.layout)
#
# Support built-in epilogue functors or user-defined functions
epilogue_functor = operation.epilogue_functor.emit()
values = {
'operation_name': operation.procedural_name(),
'operation_suffix': self.operation_suffix,
'element_a': DataTypeTag[operation.A.element],
'layout_a': LayoutTag[instance_layout_A],
'element_b': DataTypeTag[operation.B.element],
'layout_b': LayoutTag[instance_layout_B],
'element_c': DataTypeTag[operation.C.element],
'layout_c': LayoutTag[instance_layout_C],
'element_accumulator': DataTypeTag[operation.accumulator_type()],
'opcode_class': OpcodeClassTag[operation.tile_description.math_instruction.opcode_class],
'arch': "cutlass::arch::Sm%d" % operation.arch,
'threadblock_shape_m': str(operation.tile_description.threadblock_shape[0]),
'threadblock_shape_n': str(operation.tile_description.threadblock_shape[1]),
'threadblock_shape_k': str(operation.tile_description.threadblock_shape[2]),
'warp_shape_m': str(warp_shape[0]),
'warp_shape_n': str(warp_shape[1]),
'warp_shape_k': str(warp_shape[2]),
'instruction_shape_m': str(operation.tile_description.math_instruction.instruction_shape[0]),
'instruction_shape_n': str(operation.tile_description.math_instruction.instruction_shape[1]),
'instruction_shape_k': str(operation.tile_description.math_instruction.instruction_shape[2]),
'epilogue_functor': epilogue_functor,
'swizzling_functor': operation.swizzling_functor.tag(),
'stages': str(operation.tile_description.stages),
'align_a': str(operation.A.alignment),
'align_b': str(operation.B.alignment),
'transform_a': ComplexTransformTag[operation.A.complex_transform],
'transform_b': ComplexTransformTag[operation.B.complex_transform],
'precompute_mode': SchedulerModeTag[operation.precompute_mode],
'math_operation': MathOperationTag[operation.tile_description.math_instruction.math_operation]
}
return SubstituteTemplate(self.gemm_template, values)
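# Illustrative usage only (hypothetical driver code; names are assumed):
#   emitter = EmitGemmGroupedInstance(operation_suffix='_type')
#   cpp_source = emitter.emit(grouped_gemm_operation)
#   # cpp_source, together with the headers in emitter.includes, is then
#   # compiled (e.g. via NVRTC) to obtain the grouped GEMM kernel.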
| warp-main | warp/native/cutlass/tools/library/scripts/pycutlass/src/pycutlass/gemm_operation.py |
#################################################################################################
#
# Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
import re
###################################################################################################
import enum
import cutlass
# The following block implements enum.auto() for Python 3.5 variants that don't include it such
# as the default 3.5.2 on Ubuntu 16.04.
#
# https://codereview.stackexchange.com/questions/177309/reimplementing-pythons-enum-auto-for-compatibility
try:
from enum import auto as enum_auto
except ImportError:
__cutlass_library_auto_enum = 0
def enum_auto() -> int:
global __cutlass_library_auto_enum
i = __cutlass_library_auto_enum
__cutlass_library_auto_enum += 1
return i
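# With this fallback, successive enum_auto() calls yield 0, 1, 2, ...;
# distinct values are all that the enum definitions below require.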
###################################################################################################
#
class GeneratorTarget(enum.Enum):
Library = enum_auto()
#
GeneratorTargetNames = {
GeneratorTarget.Library: 'library',
}
#
###################################################################################################
#
ShortDataTypeNames = {
cutlass.int32: 'i',
cutlass.float16: 'h',
cutlass.float32: 's',
cutlass.float64: 'd',
cutlass.dtype.cf32: 'c',
cutlass.dtype.cf64: 'z',
}
#
DataTypeNames = {
cutlass.dtype.b1: "b1",
cutlass.dtype.u4: "u4",
cutlass.dtype.u8: "u8",
cutlass.dtype.u16: "u16",
cutlass.dtype.u32: "u32",
cutlass.dtype.u64: "u64",
cutlass.dtype.s4: "s4",
cutlass.int8: "s8",
cutlass.dtype.s16: "s16",
cutlass.int32: "s32",
cutlass.dtype.s64: "s64",
cutlass.float16: "f16",
cutlass.bfloat16: "bf16",
cutlass.float32: "f32",
cutlass.tfloat32: "tf32",
cutlass.float64: "f64",
cutlass.dtype.cf16: "cf16",
cutlass.dtype.cbf16: "cbf16",
cutlass.dtype.cf32: "cf32",
cutlass.dtype.ctf32: "ctf32",
cutlass.dtype.cf64: "cf64",
cutlass.dtype.cu4: "cu4",
cutlass.dtype.cu8: "cu8",
cutlass.dtype.cu16: "cu16",
cutlass.dtype.cu32: "cu32",
cutlass.dtype.cu64: "cu64",
cutlass.dtype.cs4: "cs4",
cutlass.dtype.cs8: "cs8",
cutlass.dtype.cs16: "cs16",
cutlass.dtype.cs32: "cs32",
cutlass.dtype.cs64: "cs64",
}
DataTypeTag = {
cutlass.dtype.b1: "cutlass::uint1b_t",
cutlass.dtype.u4: "cutlass::uint4b_t",
cutlass.dtype.u8: "uint8_t",
cutlass.dtype.u16: "uint16_t",
cutlass.dtype.u32: "uint32_t",
cutlass.dtype.u64: "uint64_t",
cutlass.dtype.s4: "cutlass::int4b_t",
cutlass.int8: "int8_t",
cutlass.dtype.s16: "int16_t",
cutlass.int32: "int32_t",
cutlass.dtype.s64: "int64_t",
cutlass.float16: "cutlass::half_t",
cutlass.bfloat16: "cutlass::bfloat16_t",
cutlass.float32: "float",
cutlass.tfloat32: "cutlass::tfloat32_t",
cutlass.float64: "double",
cutlass.dtype.cf16: "cutlass::complex<cutlass::half_t>",
cutlass.dtype.cbf16: "cutlass::complex<cutlass::bfloat16_t>",
cutlass.dtype.cf32: "cutlass::complex<float>",
cutlass.dtype.ctf32: "cutlass::complex<cutlass::tfloat32_t>",
cutlass.dtype.cf64: "cutlass::complex<double>",
cutlass.dtype.cu4: "cutlass::complex<cutlass::uint4b_t>",
cutlass.dtype.cu8: "cutlass::complex<cutlass::uint8_t>",
cutlass.dtype.cu16: "cutlass::complex<cutlass::uint16_t>",
cutlass.dtype.cu32: "cutlass::complex<cutlass::uint32_t>",
cutlass.dtype.cu64: "cutlass::complex<cutlass::uint64_t>",
cutlass.dtype.cs4: "cutlass::complex<cutlass::int4b_t>",
cutlass.dtype.cs8: "cutlass::complex<cutlass::int8_t>",
cutlass.dtype.cs16: "cutlass::complex<cutlass::int16_t>",
cutlass.dtype.cs32: "cutlass::complex<cutlass::int32_t>",
cutlass.dtype.cs64: "cutlass::complex<cutlass::int64_t>",
}
DataTypeSize = {
cutlass.dtype.b1: 1,
cutlass.dtype.u4: 4,
cutlass.dtype.u8: 8,
cutlass.dtype.u16: 16,
cutlass.dtype.u32: 32,
cutlass.dtype.u64: 64,
cutlass.dtype.s4: 4,
cutlass.int8: 8,
cutlass.dtype.s16: 16,
cutlass.int32: 32,
cutlass.dtype.s64: 64,
cutlass.float16: 16,
cutlass.bfloat16: 16,
cutlass.float32: 32,
cutlass.tfloat32: 32,
cutlass.float64: 64,
cutlass.dtype.cf16: 32,
cutlass.dtype.cbf16: 32,
cutlass.dtype.cf32: 64,
cutlass.dtype.ctf32: 32,
cutlass.dtype.cf64: 128,
cutlass.dtype.cu4: 8,
cutlass.dtype.cu8: 16,
cutlass.dtype.cu16: 32,
cutlass.dtype.cu32: 64,
cutlass.dtype.cu64: 128,
cutlass.dtype.cs4: 8,
cutlass.dtype.cs8: 16,
cutlass.dtype.cs16: 32,
cutlass.dtype.cs32: 64,
cutlass.dtype.cs64: 128,
}
###################################################################################################
#
class BlasMode(enum.Enum):
symmetric = enum_auto()
hermitian = enum_auto()
#
BlasModeTag = {
BlasMode.symmetric: 'cutlass::BlasMode::kSymmetric',
BlasMode.hermitian: 'cutlass::BlasMode::kHermitian',
}
#
ComplexTransformTag = {
cutlass.complex_transform.none: 'cutlass::ComplexTransform::kNone',
cutlass.complex_transform.conj: 'cutlass::ComplexTransform::kConjugate',
}
#
RealComplexBijection = [
(cutlass.float16, cutlass.dtype.cf16),
(cutlass.float32, cutlass.dtype.cf32),
(cutlass.float64, cutlass.dtype.cf64),
]
#
def is_complex(data_type):
for r, c in RealComplexBijection:
if data_type == c:
return True
return False
#
def get_complex_from_real(real_type):
for r, c in RealComplexBijection:
if real_type == r:
return c
return cutlass.dtype.invalid
#
def get_real_from_complex(complex_type):
for r, c in RealComplexBijection:
if complex_type == c:
return r
return cutlass.dtype.invalid
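# Examples (derived from RealComplexBijection above):
#   get_complex_from_real(cutlass.float32)    -> cutlass.dtype.cf32
#   get_real_from_complex(cutlass.dtype.cf64) -> cutlass.float64
#   is_complex(cutlass.float16)               -> False (f16 is a real type)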
#
class ComplexMultiplyOp(enum.Enum):
multiply_add = enum_auto()
gaussian = enum_auto()
###################################################################################################
#
class MathOperation(enum.Enum):
multiply_add = enum_auto()
multiply_add_saturate = enum_auto()
xor_popc = enum_auto()
multiply_add_fast_bf16 = enum_auto()
multiply_add_fast_f16 = enum_auto()
multiply_add_fast_f32 = enum_auto()
multiply_add_complex_fast_f32 = enum_auto()
multiply_add_complex = enum_auto()
multiply_add_complex_gaussian = enum_auto()
#
MathOperationNames = {
MathOperation.multiply_add: 'multiply_add',
MathOperation.multiply_add_saturate: 'multiply_add_saturate',
MathOperation.xor_popc: 'xor_popc',
MathOperation.multiply_add_fast_bf16: 'multiply_add_fast_bf16',
MathOperation.multiply_add_fast_f16: 'multiply_add_fast_f16',
MathOperation.multiply_add_fast_f32: 'multiply_add_fast_f32',
MathOperation.multiply_add_complex_fast_f32: 'multiply_add_complex_fast_f32',
MathOperation.multiply_add_complex: 'multiply_add_complex',
MathOperation.multiply_add_complex_gaussian: 'multiply_add_complex_gaussian',
}
#
MathOperationTag = {
MathOperation.multiply_add: 'cutlass::arch::OpMultiplyAdd',
MathOperation.multiply_add_saturate: 'cutlass::arch::OpMultiplyAddSaturate',
MathOperation.xor_popc: 'cutlass::arch::OpXorPopc',
MathOperation.multiply_add_fast_bf16: 'cutlass::arch::OpMultiplyAddFastBF16',
MathOperation.multiply_add_fast_f16: 'cutlass::arch::OpMultiplyAddFastF16',
MathOperation.multiply_add_fast_f32: 'cutlass::arch::OpMultiplyAddFastF32',
MathOperation.multiply_add_complex_fast_f32: 'cutlass::arch::OpMultiplyAddComplexFastF32',
MathOperation.multiply_add_complex: 'cutlass::arch::OpMultiplyAddComplex',
MathOperation.multiply_add_complex_gaussian: 'cutlass::arch::OpMultiplyAddGaussianComplex',
}
###################################################################################################
#
LayoutTag = {
cutlass.ColumnMajor: 'cutlass::layout::ColumnMajor',
cutlass.RowMajor: 'cutlass::layout::RowMajor',
cutlass.layout.ColumnMajorInterleaved2: 'cutlass::layout::ColumnMajorInterleaved<2>',
cutlass.layout.RowMajorInterleaved2: 'cutlass::layout::RowMajorInterleaved<2>',
cutlass.ColumnMajorInterleaved32: 'cutlass::layout::ColumnMajorInterleaved<32>',
cutlass.RowMajorInterleaved32: 'cutlass::layout::RowMajorInterleaved<32>',
cutlass.layout.ColumnMajorInterleaved64: 'cutlass::layout::ColumnMajorInterleaved<64>',
cutlass.layout.RowMajorInterleaved64: 'cutlass::layout::RowMajorInterleaved<64>',
cutlass.TensorNHWC: 'cutlass::layout::TensorNHWC',
cutlass.layout.TensorNDHWC: 'cutlass::layout::TensorNDHWC',
cutlass.layout.TensorNCHW: 'cutlass::layout::TensorNCHW',
cutlass.layout.TensorNGHWC: 'cutlass::layout::TensorNGHWC',
cutlass.TensorNC32HW32: 'cutlass::layout::TensorNCxHWx<32>',
cutlass.TensorC32RSK32: 'cutlass::layout::TensorCxRSKx<32>',
cutlass.layout.TensorNC64HW64: 'cutlass::layout::TensorNCxHWx<64>',
cutlass.layout.TensorC64RSK64: 'cutlass::layout::TensorCxRSKx<64>',
}
#
TransposedLayout = {
cutlass.ColumnMajor: cutlass.RowMajor,
cutlass.RowMajor: cutlass.ColumnMajor,
cutlass.layout.ColumnMajorInterleaved2: cutlass.layout.RowMajorInterleaved2,
cutlass.layout.RowMajorInterleaved2: cutlass.layout.ColumnMajorInterleaved2,
cutlass.ColumnMajorInterleaved32: cutlass.RowMajorInterleaved32,
cutlass.RowMajorInterleaved32: cutlass.ColumnMajorInterleaved32,
cutlass.layout.ColumnMajorInterleaved64: cutlass.layout.RowMajorInterleaved64,
cutlass.layout.RowMajorInterleaved64: cutlass.layout.ColumnMajorInterleaved64,
cutlass.TensorNHWC: cutlass.TensorNHWC
}
#
ShortLayoutTypeNames = {
cutlass.ColumnMajor: 'n',
cutlass.layout.ColumnMajorInterleaved2: 'n2',
cutlass.ColumnMajorInterleaved32: 'n32',
cutlass.layout.ColumnMajorInterleaved64: 'n64',
cutlass.RowMajor: 't',
cutlass.layout.RowMajorInterleaved2: 't2',
cutlass.RowMajorInterleaved32: 't32',
cutlass.layout.RowMajorInterleaved64: 't64',
cutlass.TensorNHWC: 'nhwc',
cutlass.layout.TensorNDHWC: 'ndhwc',
cutlass.layout.TensorNCHW: 'nchw',
cutlass.layout.TensorNGHWC: 'nghwc',
cutlass.TensorNC32HW32: 'nc32hw32',
cutlass.layout.TensorNC64HW64: 'nc64hw64',
cutlass.TensorC32RSK32: 'c32rsk32',
cutlass.layout.TensorC64RSK64: 'c64rsk64'
}
#
ShortComplexLayoutNames = {
(cutlass.ColumnMajor, cutlass.complex_transform.none): 'n',
(cutlass.ColumnMajor, cutlass.complex_transform.conj): 'c',
(cutlass.RowMajor, cutlass.complex_transform.none): 't',
(cutlass.RowMajor, cutlass.complex_transform.conj): 'h'
}
###################################################################################################
#
class SideMode(enum.Enum):
Left = enum_auto()
Right = enum_auto()
#
SideModeTag = {
SideMode.Left: 'cutlass::SideMode::kLeft',
SideMode.Right: 'cutlass::SideMode::kRight'
}
#
ShortSideModeNames = {
SideMode.Left: 'ls',
SideMode.Right: 'rs'
}
###################################################################################################
#
class FillMode(enum.Enum):
Lower = enum_auto()
Upper = enum_auto()
#
FillModeTag = {
FillMode.Lower: 'cutlass::FillMode::kLower',
FillMode.Upper: 'cutlass::FillMode::kUpper'
}
#
ShortFillModeNames = {
FillMode.Lower: 'l',
FillMode.Upper: 'u'
}
###################################################################################################
#
class DiagType(enum.Enum):
NonUnit = enum_auto()
Unit = enum_auto()
#
DiagTypeTag = {
DiagType.NonUnit: 'cutlass::DiagType::kNonUnit',
DiagType.Unit: 'cutlass::DiagType::kUnit'
}
#
ShortDiagTypeNames = {
DiagType.NonUnit: 'nu',
DiagType.Unit: 'un'
}
###################################################################################################
OpcodeClassNames = {
cutlass.OpClass.Simt: 'simt',
cutlass.OpClass.TensorOp: 'tensorop',
cutlass.OpClass.WmmaTensorOp: 'wmma_tensorop',
cutlass.OpClass.SparseTensorOp: 'sptensorop'
}
OpcodeClassTag = {
cutlass.OpClass.Simt: 'cutlass::arch::OpClassSimt',
cutlass.OpClass.TensorOp: 'cutlass::arch::OpClassTensorOp',
cutlass.OpClass.WmmaTensorOp: 'cutlass::arch::OpClassWmmaTensorOp',
cutlass.OpClass.SparseTensorOp: 'cutlass::arch::OpClassSparseTensorOp'
}
###################################################################################################
#
class OperationKind(enum.Enum):
Gemm = enum_auto()
RankK = enum_auto()
Rank2K = enum_auto()
Trmm = enum_auto()
Symm = enum_auto()
Conv2d = enum_auto()
Conv3d = enum_auto()
#
OperationKindNames = {
OperationKind.Gemm: 'gemm',
OperationKind.RankK: 'rank_k',
OperationKind.Rank2K: 'rank_2k',
OperationKind.Trmm: 'trmm',
OperationKind.Symm: 'symm',
OperationKind.Conv2d: 'conv2d',
OperationKind.Conv3d: 'conv3d'
}
#
ArchitectureNames = {
50: 'maxwell',
60: 'pascal',
61: 'pascal',
70: 'volta',
75: 'turing',
80: 'ampere',
}
#
SharedMemPerCC = {
70: 96, # 96KB of SMEM
72: 96, # 96KB of SMEM
75: 64, # 64KB of SMEM
80: 160, # 164KB of SMEM - 4KB reserved for the driver
86: 100, # 100KB of SMEM
87: 160, # 164KB of SMEM - 4KB reserved for the driver
}
###################################################################################################
class GemmKind(enum.Enum):
Gemm = enum_auto()
Sparse = enum_auto()
Universal = enum_auto()
PlanarComplex = enum_auto()
PlanarComplexArray = enum_auto()
Grouped = enum_auto()
#
GemmKindNames = {
GemmKind.Gemm: "gemm",
GemmKind.Sparse: "spgemm",
GemmKind.Universal: "gemm",
GemmKind.PlanarComplex: "gemm_planar_complex",
GemmKind.PlanarComplexArray: "gemm_planar_complex_array",
GemmKind.Grouped: "gemm_grouped"
}
#
class RankKKind(enum.Enum):
Universal = enum_auto()
#
RankKKindNames = {
RankKKind.Universal: "rank_k"
}
#
class TrmmKind(enum.Enum):
Universal = enum_auto()
#
TrmmKindNames = {
TrmmKind.Universal: "trmm"
}
#
class SymmKind(enum.Enum):
Universal = enum_auto()
#
SymmKindNames = {
SymmKind.Universal: "symm"
}
#
class SwizzlingFunctor(enum.Enum):
Identity1 = enum_auto()
Identity2 = enum_auto()
Identity4 = enum_auto()
Identity8 = enum_auto()
Horizontal = enum_auto()
BatchedIdentity1 = enum_auto()
StridedDgradIdentity1 = enum_auto()
StridedDgradIdentity4 = enum_auto()
StridedDgradHorizontal = enum_auto()
#
SwizzlingFunctorTag = {
cutlass.IdentitySwizzle1: 'cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<1>',
SwizzlingFunctor.Identity2: 'cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<2>',
SwizzlingFunctor.Identity4: 'cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<4>',
SwizzlingFunctor.Identity8: 'cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<8>',
SwizzlingFunctor.Horizontal: 'cutlass::gemm::threadblock::GemmHorizontalThreadblockSwizzle',
SwizzlingFunctor.BatchedIdentity1: "cutlass::gemm::threadblock::GemmBatchedIdentityThreadblockSwizzle",
SwizzlingFunctor.StridedDgradIdentity1: 'cutlass::conv::threadblock::StridedDgradIdentityThreadblockSwizzle<1>',
SwizzlingFunctor.StridedDgradIdentity4: 'cutlass::conv::threadblock::StridedDgradIdentityThreadblockSwizzle<4>',
SwizzlingFunctor.StridedDgradHorizontal: 'cutlass::conv::threadblock::StridedDgradHorizontalThreadblockSwizzle',
}
#
class SchedulerMode(enum.Enum):
Device = enum_auto()
Host = enum_auto()
#
SchedulerModeTag = {
SchedulerMode.Device: 'cutlass::gemm::kernel::GroupScheduleMode::kDeviceOnly',
SchedulerMode.Host: 'cutlass::gemm::kernel::GroupScheduleMode::kHostPrecompute'
}
#
ShortSchedulerModeNames = {
SchedulerMode.Device: 'Device',
SchedulerMode.Host: 'Host'
}
###################################################################################################
#
ConvKindTag = {
cutlass.conv.Operator.fprop: 'cutlass::conv::Operator::kFprop',
cutlass.conv.Operator.dgrad: 'cutlass::conv::Operator::kDgrad',
cutlass.conv.Operator.wgrad: 'cutlass::conv::Operator::kWgrad'
}
ConvKindNames = {
cutlass.conv.Operator.fprop: 'fprop',
cutlass.conv.Operator.dgrad: 'dgrad',
cutlass.conv.Operator.wgrad: 'wgrad',
}
#
IteratorAlgorithmTag = {
cutlass.conv.IteratorAlgorithm.analytic: 'cutlass::conv::IteratorAlgorithm::kAnalytic',
cutlass.conv.IteratorAlgorithm.optimized: 'cutlass::conv::IteratorAlgorithm::kOptimized',
cutlass.conv.IteratorAlgorithm.fixed_channels: 'cutlass::conv::IteratorAlgorithm::kFixedChannels',
cutlass.conv.IteratorAlgorithm.few_channels: 'cutlass::conv::IteratorAlgorithm::kFewChannels'
}
IteratorAlgorithmNames = {
cutlass.conv.IteratorAlgorithm.analytic: 'analytic',
cutlass.conv.IteratorAlgorithm.optimized: 'optimized',
cutlass.conv.IteratorAlgorithm.fixed_channels: 'fixed_channels',
cutlass.conv.IteratorAlgorithm.few_channels: 'few_channels'
}
#
class StrideSupport(enum.Enum):
Strided = enum_auto()
Unity = enum_auto()
#
StrideSupportTag = {
StrideSupport.Strided: 'cutlass::conv::StrideSupport::kStrided',
StrideSupport.Unity: 'cutlass::conv::StrideSupport::kUnity',
}
StrideSupportNames = {
StrideSupport.Strided: '',
StrideSupport.Unity: 'unity_stride',
}
class ConvMode(enum.Enum):
CrossCorrelation = enum_auto()
Convolution = enum_auto()
#
ConvModeTag = {
ConvMode.CrossCorrelation: 'cutlass::conv::Mode::kCrossCorrelation',
ConvMode.Convolution: 'cutlass::conv::Mode::kConvolution'
}
###################################################################################################
#
class MathInstruction:
def __init__(self, instruction_shape, element_a, element_b, element_accumulator, opcode_class=cutlass.OpClass.Simt, math_operation=MathOperation.multiply_add):
self.instruction_shape = instruction_shape
self.element_a = element_a
self.element_b = element_b
self.element_accumulator = element_accumulator
self.opcode_class = opcode_class
self.math_operation = math_operation
#
class TileDescription:
def __init__(self, threadblock_shape, stages, warp_count, math_instruction):
self.threadblock_shape = threadblock_shape
#: number of pipeline stages
self.stages: int = stages
#: number of warps along x, y, z directions
self.warp_count: list[int] = warp_count
self.math_instruction = math_instruction
#: number of threads per threadblock
self.num_threads: int = 32
for cnt in self.warp_count:
self.num_threads *= cnt
def procedural_name(self):
return "%dx%d_%dx%d" % (self.threadblock_shape[0], self.threadblock_shape[1], self.threadblock_shape[2], self.stages)
#
class TensorDescription:
def __init__(self, element, layout, alignment=1, complex_transform=cutlass.complex_transform.none):
self.element = element
self.layout = layout
self.alignment = min(128 // DataTypeSize[self.element], alignment)
self.complex_transform = complex_transform
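# Example (illustrative): for cutlass.float16 (16 bits) the clamp is
# min(128 // 16, alignment), so requesting alignment=16 yields 8, i.e. at
# most one 128-bit vectorized access worth of elements.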
#
class SymmetricTensorDescription:
def __init__(self, element, layout, fill_mode, alignment=1, complex_transform=cutlass.complex_transform.none, side_mode=SideMode.Left):
self.element = element
self.layout = layout
self.fill_mode = fill_mode
self.alignment = alignment
self.complex_transform = complex_transform
self.side_mode = side_mode
#
class TriangularTensorDescription:
def __init__(self, element, layout, side_mode, fill_mode, diag_type, alignment=1, complex_transform=cutlass.complex_transform.none):
self.element = element
self.layout = layout
self.side_mode = side_mode
self.fill_mode = fill_mode
self.diag_type = diag_type
self.alignment = alignment
self.complex_transform = complex_transform
###################################################################################################
#
def CalculateSmemUsage(operation):
cta_shape = operation.tile_description.threadblock_shape
stages = operation.tile_description.stages
if operation.operation_kind == OperationKind.Gemm and operation.gemm_kind == GemmKind.Sparse:
# Elements represented by 8 bits of metadata (based on 4:8, 2:4 or 1:2 sparsity)
if DataTypeSize[operation.A.element] == 32:
elements_per_8b_md = 2
elif DataTypeSize[operation.A.element] == 4:
elements_per_8b_md = 8
else:
elements_per_8b_md = 4
smem_per_stage = DataTypeSize[operation.A.element] * cta_shape[0] * (cta_shape[2] // 2) // 8 + \
DataTypeSize[operation.B.element] * cta_shape[1] * cta_shape[2] // 8 + \
cta_shape[0] * (cta_shape[2] // 2) // elements_per_8b_md
else:
# Few BLAS3 operations only have A tensor
smem_per_stage = DataTypeSize[operation.A.element] * cta_shape[0] * cta_shape[2] // 8 + \
DataTypeSize[operation.A.element] * \
cta_shape[1] * cta_shape[2] // 8
smem_usage = smem_per_stage * stages
return (smem_usage >> 10)
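# Worked example (illustrative, dense-GEMM path): an f16 operation with CTA
# shape [128, 128, 32] and 3 stages gives
#   smem_per_stage = 16*128*32//8 + 16*128*32//8 = 16384 bytes
# so smem_usage = 3 * 16384 = 49152 bytes and the function returns 48 (KB).
# Note that the dense path sizes the B tile with DataTypeSize[operation.A.element],
# which is only correct when A and B share an element type.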
###################################################################################################
| warp-main | warp/native/cutlass/tools/library/scripts/pycutlass/src/pycutlass/library.py |
################################################################################
#
# Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
################################################################################
from typing import Union
from typeguard import typechecked
GemmOperation = 'Union[GemmOperationUniversal, GemmOperationGrouped]'
Tensor = 'Union[cuda.CUdeviceptr, np.ndarray, torch.Tensor, cp.ndarray]'
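# Note: these aliases are deliberately string-valued (forward references),
# presumably so that annotations naming torch/cupy types do not require those
# optional packages to be importable at module load time.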
| warp-main | warp/native/cutlass/tools/library/scripts/pycutlass/src/pycutlass/type_hint.py |
################################################################################
#
# Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
################################################################################
from pycutlass import *
from pycutlass.c_types import get_reduction_params
import cutlass
from cuda import cuda
try:
import torch
torch_available = True
except ImportError:
torch_available = False
import numpy as np
from typing import Union
from cuda import cudart
class ReductionOperation:
pass
class ReductionArguments:
"""
Arguments for the split-K reduction operation
"""
def __init__(self, operation: ReductionOperation,
problem_size: 'list[int]', partitions: int,
workspace: cuda.CUdeviceptr,
destination: 'Union[cuda.CUdeviceptr, np.ndarray, torch.Tensor]',
source: 'Union[cuda.CUdeviceptr, np.ndarray, torch.Tensor]', **kwargs) -> None:
# tensor_C can be interpreted as the bias with bias=True in keyword args
if "bias" in kwargs.keys():
self.bias = kwargs["bias"]
else:
# by default, tensor_C is not bias
self.bias = False
self.operation = operation
#: pointer to the workspace
self.ptr_workspace = workspace
#: number of split-k partitions
self.partitions = partitions
if isinstance(destination, np.ndarray):
self.host_D = destination
self.destination_buffer = NumpyFrontend.argument(destination, True)
self.source_buffer = NumpyFrontend.argument(source, False)
self.ptr_destination = cuda.CUdeviceptr(
self.destination_buffer.ptr)
self.ptr_source = cuda.CUdeviceptr(self.source_buffer.ptr)
elif torch_available and isinstance(destination, torch.Tensor):
self.ptr_destination = TorchFrontend.argument(destination)
self.ptr_source = TorchFrontend.argument(source)
elif isinstance(destination, cuda.CUdeviceptr):
self.ptr_destination = destination
self.ptr_source = source
else:
raise TypeError("unknown Type")
self.problem_size = MatrixCoord_(
problem_size[0], problem_size[1]
)
self.partition_stride = problem_size[0] * \
problem_size[1] * DataTypeSize[operation.C.element] // 8
if "output_op" in kwargs.keys():
self.output_op = kwargs['output_op']
else:
self.output_op = self.operation.epilogue_type(1.0, 0.0)
# get arguments
self.get_arguments()
@staticmethod
def get_tensor_ref(extent: 'tuple[int]', device_ptr: cuda.CUdeviceptr, layout: cutlass.layout):
if layout == cutlass.RowMajor:
return TensorRef2D_(int(device_ptr), extent[1])
else:
raise ValueError("unknonwn layout type")
def get_arguments(self):
ref_workspace = ReductionArguments.get_tensor_ref(
extent=[self.problem_size.row, self.problem_size.column],
device_ptr=self.ptr_workspace, layout=cutlass.RowMajor)
if self.bias:
ref_source = ReductionArguments.get_tensor_ref(
extent=[0, 0],
device_ptr=self.ptr_source, layout=cutlass.RowMajor)
else:
ref_source = ReductionArguments.get_tensor_ref(
extent=[self.problem_size.row, self.problem_size.column],
device_ptr=self.ptr_source, layout=cutlass.RowMajor)
ref_destination = ReductionArguments.get_tensor_ref(
extent=[self.problem_size.row, self.problem_size.column],
device_ptr=self.ptr_destination, layout=cutlass.RowMajor)
self.c_arguments = self.operation.argument_type(
self.problem_size, self.partitions,
self.partition_stride, ref_workspace,
ref_destination, ref_source,
self.output_op
)
params_ = self.operation.rt_module.get_args(
ctypes.byref(self.c_arguments))
self.host_workspace = bytearray(params_.contents)
def sync(self):
err, = cudart.cudaDeviceSynchronize()
if err != cudart.cudaError_t.cudaSuccess:
raise RuntimeError("CUDA Error %s" % str(err))
if hasattr(self, "host_D"):
err, = cuda.cuMemcpyDtoH(
self.host_D, self.ptr_destination, self.host_D.size * self.host_D.itemsize)
if err != cuda.CUresult.CUDA_SUCCESS:
raise RuntimeError("CUDA Error %s" % str(err))
def free(self):
if hasattr(self, "destination_buffer"):
del self.destination_buffer
if hasattr(self, "source_buffer"):
del self.source_buffer
class ReductionRT(ExecutableOperation):
"""
ReductionRT manages the CUTLASS runtime components for reduction
"""
KernelTemplate = r'''
extern "C"
__global__ void
${operation_name}(${operation_name}${operation_suffix}::Params params) {
// Dynamic shared memory base pointer
extern __shared__ int SharedStorageBase[];
// Declare pointer to dynamic shared memory.
${operation_name}${operation_suffix}::SharedStorage *shared_storage =
reinterpret_cast<${operation_name}${operation_suffix}::SharedStorage *>(SharedStorageBase);
${operation_name}${operation_suffix} op;
op(params, *shared_storage);
}
'''
HostTemplate = r'''
extern "C" {
// Get the size of params in bytes
int ${operation_name}_get_param_size(){
return sizeof(${operation_name}${operation_suffix}::Params);
}
// Get the size of dynamic shared memory in bytes
int ${operation_name}_shared_memory_size() {
return int(sizeof(${operation_name}${operation_suffix}::SharedStorage));
}
// Get the params as byte array
char* ${operation_name}_get_params(${operation_name}${operation_suffix}::Params* params){
char *bytes = ((char*)(params));
char *output = new char[sizeof(${operation_name}${operation_suffix}::Params)];
for (unsigned int i = 0; i < sizeof(${operation_name}${operation_suffix}::Params); i ++)
output[i] = bytes[i];
return output;
}
}
'''
def __init__(self, operation: ReductionOperation):
super().__init__(operation)
self.operation: ReductionOperation = operation
self.emitter = EmitReductionInstance('_type')
self.elements_per_access = self.operation.count
self.argument_type, self.epilogue_type = get_reduction_params(operation.epilogue_functor)
self.argtype = [ctypes.POINTER(self.argument_type)]
def emit(self):
return self.emitter.emit(self.operation)
def plan(self, arguments: ReductionArguments):
block_shape = [self.operation.shape.column() // self.elements_per_access,
self.operation.shape.row(), 1]
grid_shape = [
(arguments.problem_size.row + self.operation.shape.row() -
1) // self.operation.shape.row(),
(arguments.problem_size.column + self.operation.shape.column() -
1) // self.operation.shape.column(),
1
]
return LaunchConfiguration(grid_shape, block_shape, self.shared_memory_capacity)
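# Worked example (illustrative): with a 4x32 CTA shape, count=2 elements per
# access, and a 128x96 problem, block_shape = [32 // 2, 4, 1] = [16, 4, 1]
# and grid_shape = [ceil(128 / 4), ceil(96 / 32), 1] = [32, 3, 1].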
def initialize(self):
err, = cuda.cuFuncSetAttribute(
self.kernel,
attrib=cuda.CUfunction_attribute.CU_FUNC_ATTRIBUTE_MAX_DYNAMIC_SHARED_SIZE_BYTES,
value=self.shared_memory_capacity)
if err != cuda.CUresult.CUDA_SUCCESS:
raise RuntimeError('CUDA Error: {}'.format(err))
class ReductionOperation:
"""
CUTLASS Reduction Operation

shape: shape of the threadblock (CTA)
epilogue_functor: output operator applied after the reduction
"""
def __init__(self, shape: cutlass.MatrixCoord, C: TensorDescription,
element_accumulator, element_workspace=None,
element_compute=None, epilogue_functor=None,
count: int = 1, partitions_per_stage: int = 4) -> None:
""" Constructor
"""
self.shape = shape
#: epilogue functor (default: LinearCombination)
self.epilogue_functor = epilogue_functor
#: datatype of accumulator
self.element_accumulator = element_accumulator
if element_workspace is None:
#: datatype of workspace
self.element_workspace = element_accumulator
else:
#: datatype of workspace
self.element_workspace = element_workspace
if element_compute is None:
#: datatype of computation
self.element_compute = element_accumulator
else:
#: datatype of computation
self.element_compute = element_compute
#: datatype of output
self.element_output = C.element
#: operand C
self.C: TensorDescription = C
#: reduce op processing size
self.count: int = count
#: number of partitions to reduce per stage
self.partitions_per_stage: int = partitions_per_stage
self.rt_module: ReductionRT = ReductionRT(self)
self.argument_type = self.rt_module.argument_type
self.epilogue_type = self.rt_module.epilogue_type
#
def extended_name(self):
extend_name = "${element_workspace}_${element_accumulator}_${element_compute}_${element_output}"
return SubstituteTemplate(extend_name,
{
'element_workspace': DataTypeNames[self.element_workspace],
'element_accumulator': DataTypeNames[self.element_accumulator],
'element_compute': DataTypeNames[self.element_compute],
'element_output': DataTypeNames[self.element_output]
})
#
def configuration_name(self):
''' The full procedural name indicates architecture, extended name, tile size'''
configuration_name = "cutlass_reduce_split_k_${extended_name}_${threadblock}"
threadblock = "%dx%d" % (
self.shape.row(),
self.shape.column()
)
return SubstituteTemplate(
configuration_name,
{
'extended_name': self.extended_name(),
'threadblock': threadblock
}
)
#
def procedural_name(self):
''' The full procedural name indicates architecture, extended name, tile size'''
return self.configuration_name()
def run(self, arguments: ReductionArguments) -> cuda.CUresult:
"""
Configure and launch the CUDA kernel with the input arguments
"""
# get launch configuration
launch_config = self.rt_module.plan(arguments)
# get the host and device workspace
host_workspace = arguments.host_workspace
device_workspace = None
# launch the kernel
err = self.rt_module.run(
host_workspace, device_workspace, launch_config)
if err != cuda.CUresult.CUDA_SUCCESS:
raise RuntimeError('CUDA Error %s' % str(err))
return err
class EmitReductionInstance:
def __init__(self, operation_suffix='') -> None:
self.operation_suffix = operation_suffix
self.includes = [
"cutlass/cutlass.h",
"cutlass/numeric_types.h",
"cutlass/arch/arch.h",
"cutlass/arch/mma.h",
"cutlass/layout/matrix.h",
"cutlass/gemm/device/gemm.h",
"cutlass/gemm/device/gemm_universal_adapter.h",
"cutlass/gemm/kernel/default_gemm_universal.h",
"cutlass/reduction/kernel/reduce_split_k.h",
"cutlass/reduction/thread/reduction_operators.h"
]
self.template = """
// Reduction kernel instance
using ${operation_name}_base =
typename cutlass::reduction::kernel::ReduceSplitK<
cutlass::MatrixShape<${shape_row}, ${shape_column}>,
${epilogue_functor},
cutlass::reduction::thread::ReduceAdd<
${element_accumulator},
${element_output},
${count}>,
${partition_per_stage}>;
struct ${operation_name}${operation_suffix}:
public ${operation_name}_base { };
"""
def emit(self, operation: ReductionOperation):
epilogue_vector_length = int(min(
operation.C.alignment * DataTypeSize[operation.C.element], 128) / DataTypeSize[operation.C.element])
values = {
'operation_name': operation.configuration_name(),
'operation_suffix': self.operation_suffix,
'shape_row': str(operation.shape.row()),
'shape_column': str(operation.shape.column()),
'epilogue_functor': operation.epilogue_functor.emit(),
'element_output': DataTypeTag[operation.element_output],
'epilogue_vector_length': str(epilogue_vector_length),
'element_accumulator': DataTypeTag[operation.element_accumulator],
'element_compute': DataTypeTag[operation.element_compute],
'element_workspace': DataTypeTag[operation.element_workspace],
'count': str(operation.count),
'partition_per_stage': str(operation.partitions_per_stage)
}
return SubstituteTemplate(self.template, values)
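# Example (illustrative): a float32 output with C.alignment = 4 gives
# epilogue_vector_length = min(4 * 32, 128) // 32 = 4 elements per 128-bit
# epilogue access.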
| warp-main | warp/native/cutlass/tools/library/scripts/pycutlass/src/pycutlass/reduction_operation.py |
#################################################################################################
#
# Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
from .frontend import CupyFrontend
from typeguard import typechecked
from pycutlass.frontend import *
from typing import Union
import numpy as np
from cuda import cuda
try:
import torch
torch_available = True
except ImportError:
torch_available = False
from cuda import cudart
try:
import cupy as cp
cupy_available = True
except ImportError:
cupy_available = False
# @typechecked
class ArgumentBase:
"""
Base class for operation arguments
"""
def __init__(self,
A: 'Union[cuda.CUdeviceptr, np.ndarray, torch.Tensor, cp.ndarray]',
B: 'Union[cuda.CUdeviceptr, np.ndarray, torch.Tensor, cp.ndarray]',
C: 'Union[cuda.CUdeviceptr, np.ndarray, torch.Tensor, cp.ndarray]',
D: 'Union[cuda.CUdeviceptr, np.ndarray, torch.Tensor, cp.ndarray]',
**kwargs) -> None:
# tensor_C can be interpreted as the bias with bias=True in keyword args
if "bias" in kwargs.keys():
self.bias = kwargs["bias"]
else:
# by default, tensor_C is not bias
self.bias = False
# preprocessing input tensors
if isinstance(A, np.ndarray):
self.host_D = D
self.buffer_A = NumpyFrontend.argument(A, False)
self.buffer_B = NumpyFrontend.argument(B, False)
self.buffer_C = NumpyFrontend.argument(C, False)
self.buffer_D = NumpyFrontend.argument(D, True)
self.ptr_A = self.buffer_A.ptr
self.ptr_B = self.buffer_B.ptr
self.ptr_C = self.buffer_C.ptr
self.ptr_D = self.buffer_D.ptr
# number of elements in C
self.tensor_c_numel = C.size
elif torch_available and isinstance(A, torch.Tensor):
self.ptr_A = TorchFrontend.argument(A)
self.ptr_B = TorchFrontend.argument(B)
self.ptr_C = TorchFrontend.argument(C)
self.ptr_D = TorchFrontend.argument(D)
# number of elements in C
self.tensor_c_numel = C.numel()
elif isinstance(A, cuda.CUdeviceptr):
self.ptr_A = A
self.ptr_B = B
self.ptr_C = C
self.ptr_D = D
elif cupy_available and isinstance(A, cp.ndarray):
self.ptr_A = CupyFrontend.argument(A)
self.ptr_B = CupyFrontend.argument(B)
self.ptr_C = CupyFrontend.argument(C)
self.ptr_D = CupyFrontend.argument(D)
# number of elements in C
self.tensor_c_numel = C.size
else:
raise TypeError(
"Unsupported frontend; expected numpy.ndarray, torch.Tensor, cupy.ndarray, or cuda.CUdeviceptr")
def sync(self, stream_sync=True):
if stream_sync:
err, = cudart.cudaDeviceSynchronize()
if err != cudart.cudaError_t.cudaSuccess:
raise RuntimeError("CUDA Error %s" % str(err))
if hasattr(self, "host_D"):
err, = cuda.cuMemcpyDtoH(
self.host_D, self.ptr_D, self.host_D.size * self.host_D.itemsize)
if err != cuda.CUresult.CUDA_SUCCESS:
raise RuntimeError("CUDA Error %s" % str(err))
| warp-main | warp/native/cutlass/tools/library/scripts/pycutlass/src/pycutlass/arguments.py |