repos/zigpkgs/pkgs/bork/default.nix

{ lib
, stdenv
, fetchFromGitHub
, buildZigProject
}:

buildZigProject rec {
  pname = "bork";
  version = "0.1.0";

  src = fetchFromGitHub {
    owner = "kristoff-it";
    repo = "bork";
    rev = "bcaed8091b6a6452ea4276d11f5e65fc4e01c12a";
    sha256 = "FlZ8jp0NgFWtAGTayFYWJ+s0bLFrNDB1t8OZOLl0jD0=";
  };

  options = [
    "-Drelease-safe"
    "-Dcpu=baseline"
  ];

  meta = with lib; {
    homepage = "https://github.com/kristoff-it/bork";
    description = "A TUI chat client tailored for livecoding on Twitch";
    license = licenses.mit;
    platforms = platforms.linux ++ platforms.darwin;
    maintainers = with maintainers; [ joachimschmidt557 ];
  };
}

repos/zigpkgs/pkgs/zig-doctest/default.nix

{ lib
, stdenv
, fetchFromGitHub
, buildZigProject
}:

buildZigProject rec {
  pname = "zig-doctest-unstable";
  version = "2022-07-28";

  src = fetchFromGitHub {
    owner = "kristoff-it";
    repo = "zig-doctest";
    rev = "809e8625892935a1e09712f5e2bb98881ee1038b";
    sha256 = "Vf2fzCwxeIJaVjABk18t0GC+NV7S2u1qexm4hIyP8xE=";
  };

  options = [
    "-Drelease-safe"
    "-Dcpu=baseline"
  ];

  meta = with lib; {
    homepage = "https://github.com/kristoff-it/zig-doctest";
    description = "A tool for testing snippets of code";
    license = licenses.mit;
    platforms = platforms.linux;
    maintainers = with maintainers; [ joachimschmidt557 ];
    broken = true;
  };
}

repos/zigpkgs/pkgs/aro/default.nix

{ lib
, stdenv
, fetchFromGitHub
, buildZigProject
}:

buildZigProject rec {
  pname = "aro-unstable";
  version = "2022-08-08";

  src = fetchFromGitHub {
    owner = "Vexu";
    repo = "arocc";
    rev = "ddb00676293aa85b8519ae2aac9dcf2513391471";
    sha256 = "0LrBwXlejgMyFlCwJUjgonvziLIxyRlot8yvt5nMlxI=";
  };

  options = [
    "-Drelease-safe"
    "-Dcpu=baseline"
  ];

  postInstall = ''
    mv include/ $out
  '';

  meta = with lib; {
    homepage = "https://github.com/Vexu/arocc";
    description = "A C compiler written in Zig";
    license = licenses.mit;
    platforms = platforms.linux;
    maintainers = with maintainers; [ joachimschmidt557 ];
    broken = true;
  };
}

repos/zigpkgs/pkgs/zigmod/default.nix

{ lib
, git
, cacert
, stdenv
, fetchFromGitHub
, buildZigProject
}:

let
  src = fetchFromGitHub {
    owner = "nektro";
    repo = "zigmod";
    rev = "62469b8511886314757bd7b25b70e3580966ee01";
    sha256 = "NFcFQgzxB5Gm3XweKxBv+pQqKt406YJnu/oTXoVyj6E=";
    fetchSubmodules = true;
  };

  version = "80";

  zigmodBootstrap = import ./bootstrap.nix {
    inherit src version fetchFromGitHub buildZigProject;
  };

  zigmod-deps = stdenv.mkDerivation {
    name = "zigmod-deps";
    inherit src;

    nativeBuildInputs = [ zigmodBootstrap git cacert ];

    buildPhase = ''
      runHook preBuild

      zigmod ci
      find -name .git -print0 | xargs -0 rm -rf

      runHook postBuild
    '';

    installPhase = ''
      runHook preInstall

      mkdir -p $out
      cp deps.zig $out
      cp -r --reflink=auto .zigmod/deps $out

      runHook postInstall
    '';

    outputHashMode = "recursive";
    outputHashAlgo = "sha256";
    outputHash = "1mljl5icrcmdb8idlxyjvxbjy4h3lxrzf0ir1z4wywag44zdxyib";

    dontFixup = true;
  };
in
buildZigProject {
  pname = "zigmod";
  inherit src version;

  preBuild = ''
    mkdir -p .zigmod/deps
    cp ${zigmod-deps}/deps.zig deps.zig
    cp -r --reflink=auto ${zigmod-deps}/deps .zigmod/
  '';

  options = [
    "-Drelease"
    "-Dcpu=baseline"
  ];

  meta = with lib; {
    homepage = "https://github.com/nektro/zigmod";
    description = "A package manager for the Zig programming language";
    license = licenses.mit;
    platforms = platforms.all;
    maintainers = with maintainers; [ joachimschmidt557 ];
    broken = true;
  };
}

repos/zigpkgs/pkgs/zigmod/bootstrap.nix

{ fetchFromGitHub
, buildZigProject
, src
, version
}:

buildZigProject rec {
  pname = "zigmod-bootstrap";
  inherit src version;

  options = [
    "-Dbootstrap"
    "-Drelease"
    "-Dcpu=baseline"
  ];
}

repos/zigpkgs/pkgs/vpkz/default.nix

{ lib
, stdenv
, fetchFromGitHub
, buildZigProject
}:

buildZigProject rec {
  pname = "vpkz-unstable";
  version = "2022-07-11";

  src = fetchFromGitHub {
    owner = "joachimschmidt557";
    repo = "vpkz";
    rev = "cfabceabc05494789860791a449aa5b662fec4c9";
    sha256 = "EHS3mG38YYL9wwzxHxHnTl7D6cuuS6TvOCDWdy5PV3I=";
  };

  options = [
    "-Drelease-safe"
    "-Dcpu=baseline"
  ];

  meta = with lib; {
    homepage = "https://github.com/joachimschmidt557/vpkz";
    description = "Zig library and command-line application for working with Valve VPK files";
    license = licenses.mit;
    platforms = platforms.linux;
    maintainers = with maintainers; [ joachimschmidt557 ];
    broken = true;
  };
}

repos/zigpkgs/pkgs/zigfd/default.nix

{ lib
, stdenv
, fetchFromGitHub
, buildZigProject
}:

buildZigProject {
  pname = "zigfd-unstable";
  version = "2022-07-09";

  src = fetchFromGitHub {
    owner = "joachimschmidt557";
    repo = "zigfd";
    rev = "69914975c82a78d12da08f8a6b4d1157f41c1b06";
    sha256 = "NCUNUbhhN637B9yjHmysOT1cRWrWSHty4Yyful19BPw=";
    fetchSubmodules = true;
  };

  options = [
    "-Drelease-safe"
    "-Dcpu=baseline"
  ];

  meta = with lib; {
    homepage = "https://github.com/joachimschmidt557/zigfd";
    description = "Recursively find files and directories with a regex pattern";
    license = licenses.mit;
    platforms = platforms.all;
    maintainers = with maintainers; [ joachimschmidt557 ];
    broken = true;
  };
}

repos/zigpkgs/pkgs/development/build-zig-project.nix

{ lib, stdenv, zig }:

{ buildFile ? "build.zig"
, steps ? [ "install" ]
, zigLibDir ? "${zig}/lib"
, options ? [ "-Dcpu=baseline" ]
, nativeBuildInputs ? [ ]
, ...
}@args:

stdenv.mkDerivation (args // {
  nativeBuildInputs = [ zig ] ++ nativeBuildInputs;

  dontConfigure = true;

  installPhase = ''
    runHook preInstall

    zig build \
      --global-cache-dir $TMPDIR/zig-cache \
      --zig-lib-dir ${zigLibDir} \
      --build-file ${buildFile} \
      ${lib.concatStringsSep " " options} \
      --prefix $out \
      ${lib.concatStringsSep " " steps}

    runHook postInstall
  '';
})

repos/zigpkgs/pkgs/gyro/default.nix

{ lib
, stdenv
, fetchFromGitHub
, buildZigProject
}:

buildZigProject {
  pname = "gyro-unstable";
  version = "2022-08-13";

  src = fetchFromGitHub {
    owner = "mattnite";
    repo = "gyro";
    rev = "1c841a37f137cf24271bd10e9c9e284d848565bf";
    sha256 = "kE+4qZPar1Vgb/GjtxugCAeD42xJMJQlKR9Dvnwjzr0=";
    fetchSubmodules = true;
  };

  options = [
    "-Drelease-safe"
    # FIXME glibc builds don't work right now, use musl instead. The
    # issue is tracked here:
    # https://github.com/ziglang/zig/issues/9485
    "-Dtarget=native-native-musl"
    "-Dcpu=baseline"
  ];

  meta = with lib; {
    homepage = "https://github.com/mattnite/gyro";
    description = "A Zig package manager with an index, build runner, and build dependencies";
    license = licenses.mit;
    platforms = platforms.linux;
    maintainers = with maintainers; [ joachimschmidt557 ];
    broken = true;
  };
}

repos/zigpkgs/ci/build_all.sh

#!/usr/bin/env bash

for pkg in $(nix flake show --json | jq -r ".packages[\"x86_64-linux\"] | keys | .[]"); do
    echo "Building $pkg"
    nix build ".#$pkg"
done

exit 0

repos/mach-sysgpu/README.md

<a href="https://machengine.org/pkg/mach-sysgpu">
  <picture>
    <source media="(prefers-color-scheme: dark)" srcset="https://machengine.org/assets/mach/sysgpu-full-dark.svg">
    <img alt="mach-sysgpu" src="https://machengine.org/assets/mach/sysgpu-full-light.svg" height="150px">
  </picture>
</a>

Highly experimental, blazingly fast, lean & mean descendant of WebGPU written in Zig

## Moved

This project has moved into the Mach standard library: https://github.com/hexops/mach/tree/main/src/sysgpu

repos/mach-sysgpu/build.zig.zon
.{ .name = "mach-sysgpu", .version = "0.1.0", .paths = .{ "src", "tools", "build.zig", "build.zig.zon", "LICENSE", "LICENSE-APACHE", "LICENSE-MIT", "LICENSE-ZIG", "README.md", }, .dependencies = .{ .vulkan_zig_generated = .{ .url = "https://pkg.machengine.org/vulkan-zig-generated/378bed4e37007cef95b46ad9fadd6e192827ebba.tar.gz", .hash = "1220ccb48181cb849b64363142b78182161b3662959d81a2463df009612011fa5043", }, .mach_gpu = .{ .url = "https://pkg.machengine.org/mach-gpu/7f6792dbc5cd41da5f54b322b6d5be79d6dda2f1.tar.gz", .hash = "122044bcebd28b7d54fb40cbebc04ef6bab0a385952e3193bcf90b37e062ce382495", }, .mach_objc = .{ .url = "https://pkg.machengine.org/mach-objc/2b2a698e7f019e1599edb3eda4a974fa1fb07483.tar.gz", .hash = "1220d708af437c2076d1a0482ac745b3c4507f4f41cc9f248ee78a3c297c41ee7c33", }, .direct3d_headers = .{ .url = "https://pkg.machengine.org/direct3d-headers/74e2cdaf3a7f9485155bd22ea8e6bc5442d3cddb.tar.gz", .hash = "1220d3db004f3ae6ed6e4ad00e59e1b88c4090a152ea19c21ce6e4ead5b702c980e2", }, .opengl_headers = .{ .url = "https://pkg.machengine.org/opengl-headers/5e816d785ebe841506542b2a2ea95aa8ec943fbf.tar.gz", .hash = "122039881e49baf86d75f6089ba126cbabc32f162fb2a27c1ee3ae906823b1f67d29", }, .xcode_frameworks = .{ .url = "https://pkg.machengine.org/xcode-frameworks/e7c2abbe28efbb5ae7f04e6c1ac6578a6d75bb67.tar.gz", .hash = "1220432df3c74dc28bd6cc299e5c8317bc65a95219e6ebf40450c157d8c7a3ac54c0", }, .spirv_cross = .{ .url = "https://pkg.machengine.org/SPIRV-Cross/9866e847a3b01633db38458b1b7429750802a250.tar.gz", .hash = "1220922b722e291ae7f432b3a144863d76bd01e7d1f0f05ca883f3e09e48306103aa", }, .spirv_tools = .{ .url = "https://pkg.machengine.org/SPIRV-Tools/23092f85a721e5297f0f704d95bbd263d5a7a771.tar.gz", .hash = "1220ec815dfda171b5bf65f6078932d79e034508e9ecef51be660f6a89cc7c70aef9", }, }, }
repos/mach-sysgpu/build.zig

const std = @import("std");

pub const Backend = enum {
    default,
    webgpu,
    d3d12,
    metal,
    vulkan,
    opengl,
};

pub fn build(b: *std.Build) !void {
    const optimize = b.standardOptimizeOption(.{});
    const target = b.standardTargetOptions(.{});
    const backend = b.option(Backend, "backend", "API Backend") orelse .default;

    const vulkan_dep = b.dependency("vulkan_zig_generated", .{});
    const mach_gpu_dep = b.dependency("mach_gpu", .{
        .target = target,
        .optimize = optimize,
    });
    const mach_objc_dep = b.dependency("mach_objc", .{
        .target = target,
        .optimize = optimize,
    });

    const build_options = b.addOptions();
    build_options.addOption(Backend, "backend", backend);

    const module = b.addModule("mach-sysgpu", .{
        .target = target,
        .optimize = optimize,
        .root_source_file = .{ .path = "src/main.zig" },
        .imports = &.{
            .{ .name = "vulkan", .module = vulkan_dep.module("vulkan-zig-generated") },
            .{ .name = "gpu", .module = mach_gpu_dep.module("mach-gpu") },
            .{ .name = "objc", .module = mach_objc_dep.module("mach-objc") },
            .{ .name = "build-options", .module = build_options.createModule() },
        },
    });
    link(b, module);

    const lib = b.addStaticLibrary(.{
        .name = "mach-sysgpu",
        .root_source_file = b.addWriteFiles().add("empty.c", ""),
        .target = target,
        .optimize = optimize,
    });
    var iter = module.import_table.iterator();
    while (iter.next()) |e| {
        lib.root_module.addImport(e.key_ptr.*, e.value_ptr.*);
    }
    link(b, &lib.root_module);
    addPaths(&lib.root_module);
    b.installArtifact(lib);

    const test_step = b.step("test", "Run library tests");
    const main_tests = b.addTest(.{
        .name = "sysgpu-tests",
        .root_source_file = .{ .path = "src/main.zig" },
        .target = target,
        .optimize = optimize,
    });
    iter = module.import_table.iterator();
    while (iter.next()) |e| {
        main_tests.root_module.addImport(e.key_ptr.*, e.value_ptr.*);
    }
    link(b, &main_tests.root_module);
    addPaths(&main_tests.root_module);
    b.installArtifact(main_tests);
    test_step.dependOn(&b.addRunArtifact(main_tests).step);
}

fn link(b: *std.Build, module: *std.Build.Module) void {
    module.link_libc = true;

    const target = module.resolved_target.?.result;
    if (target.isDarwin()) {
        module.linkSystemLibrary("objc", .{});
        module.linkFramework("AppKit", .{});
        module.linkFramework("CoreGraphics", .{});
        module.linkFramework("Foundation", .{});
        module.linkFramework("Metal", .{});
        module.linkFramework("QuartzCore", .{});
    }
    if (target.os.tag == .windows) {
        module.linkSystemLibrary("d3d12", .{});
        module.linkSystemLibrary("d3dcompiler_47", .{});
        module.linkSystemLibrary("opengl32", .{});
        module.linkLibrary(b.dependency("direct3d_headers", .{
            .target = module.resolved_target orelse b.host,
            .optimize = module.optimize.?,
        }).artifact("direct3d-headers"));
        @import("direct3d_headers").addLibraryPathToModule(module);
        module.linkLibrary(b.dependency("opengl_headers", .{
            .target = module.resolved_target orelse b.host,
            .optimize = module.optimize.?,
        }).artifact("opengl-headers"));
    }
    module.linkLibrary(b.dependency("spirv_cross", .{
        .target = module.resolved_target orelse b.host,
        .optimize = module.optimize.?,
    }).artifact("spirv-cross"));
    module.linkLibrary(b.dependency("spirv_tools", .{
        .target = module.resolved_target orelse b.host,
        .optimize = module.optimize.?,
    }).artifact("spirv-opt"));
}

pub fn addPaths(mod: *std.Build.Module) void {
    if (mod.resolved_target.?.result.isDarwin()) @import("xcode_frameworks").addPaths(mod);
}

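For context, a minimal sketch of how a downstream build.zig might consume the "mach-sysgpu" module exported above. The dependency name mach_sysgpu, the import alias sysgpu, and the executable layout are assumptions for illustration; the consumer's build.zig.zon would also need a matching .mach_sysgpu entry, which is not shown here.

const std = @import("std");

pub fn build(b: *std.Build) void {
    const target = b.standardTargetOptions(.{});
    const optimize = b.standardOptimizeOption(.{});

    // Resolve the dependency declared in the consumer's build.zig.zon
    // (hypothetical name "mach_sysgpu").
    const sysgpu_dep = b.dependency("mach_sysgpu", .{
        .target = target,
        .optimize = optimize,
    });

    const exe = b.addExecutable(.{
        .name = "demo",
        .root_source_file = .{ .path = "src/main.zig" },
        .target = target,
        .optimize = optimize,
    });
    // Wire up the module that this repository registers via
    // b.addModule("mach-sysgpu", ...).
    exe.root_module.addImport("sysgpu", sysgpu_dep.module("mach-sysgpu"));
    b.installArtifact(exe);
}
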
repos/mach-sysgpu/src/limits.zig

pub const max_texture_dimension1d: u32 = 8192;
pub const max_texture_dimension2d: u32 = 8192;
pub const max_texture_dimension3d: u32 = 2048;
pub const max_texture_array_layers: u32 = 256;
pub const max_bind_groups: u32 = 4;
pub const max_bind_groups_plus_vertex_buffers: u32 = 24;
pub const max_bindings_per_bind_group: u32 = 1000;
pub const max_dynamic_uniform_buffers_per_pipeline_layout: u32 = 8;
pub const max_dynamic_storage_buffers_per_pipeline_layout: u32 = 4;
pub const max_sampled_textures_per_shader_stage: u32 = 16;
pub const max_samplers_per_shader_stage: u32 = 16;
pub const max_storage_buffers_per_shader_stage: u32 = 8;
pub const max_storage_textures_per_shader_stage: u32 = 4;
pub const max_uniform_buffers_per_shader_stage: u32 = 12;
pub const max_uniform_buffer_binding_size: u64 = 65536;
pub const max_storage_buffer_binding_size: u64 = 134217728;
pub const min_uniform_buffer_offset_alignment: u32 = 256;
pub const min_storage_buffer_offset_alignment: u32 = 256;
pub const max_vertex_buffers: u32 = 8;
pub const max_buffer_size: u64 = 268435456;
pub const max_vertex_attributes: u32 = 16;
pub const max_vertex_buffer_array_stride: u32 = 2048;
pub const max_inter_stage_shader_components: u32 = 60;
pub const max_inter_stage_shader_variables: u32 = 16;
pub const max_color_attachments: u32 = 8;
pub const max_color_attachment_bytes_per_sample: u32 = 32;
pub const max_compute_workgroup_storage_size: u32 = 16384;
pub const max_compute_invocations_per_workgroup: u32 = 256;
pub const max_compute_workgroup_size_x: u32 = 256;
pub const max_compute_workgroup_size_y: u32 = 256;
pub const max_compute_workgroup_size_z: u32 = 64;
pub const max_compute_workgroups_per_dimension: u32 = 65535;

pub const max_buffers_per_shader_stage = max_storage_buffers_per_shader_stage + max_uniform_buffers_per_shader_stage;

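These constants mirror the WebGPU default limits. A minimal sketch (the function name and error set are assumptions, not part of this file) of how a backend might check a compute dispatch against them:

const limits = @import("limits.zig");

fn validateWorkgroupSize(x: u32, y: u32, z: u32) !void {
    // Each axis has its own cap...
    if (x > limits.max_compute_workgroup_size_x or
        y > limits.max_compute_workgroup_size_y or
        z > limits.max_compute_workgroup_size_z)
        return error.WorkgroupTooLarge;
    // ...and the total invocation count is capped independently
    // (256 * 256 * 64 still fits in u32, so this cannot overflow here).
    if (x * y * z > limits.max_compute_invocations_per_workgroup)
        return error.TooManyInvocations;
}

test "workgroup within limits" {
    try validateWorkgroupSize(16, 16, 1);
}
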
repos/mach-sysgpu/src/utils.zig

const std = @import("std");
const limits = @import("limits.zig");
const shader = @import("shader.zig");
const sysgpu = @import("sysgpu/main.zig");

pub fn Manager(comptime T: type) type {
    return struct {
        count: u32 = 1,

        pub fn reference(manager: *@This()) void {
            _ = @atomicRmw(u32, &manager.count, .Add, 1, .Monotonic);
        }

        pub fn release(manager: *@This()) void {
            if (@atomicRmw(u32, &manager.count, .Sub, 1, .Release) == 1) {
                @fence(.Acquire);
                const parent = @fieldParentPtr(T, "manager", manager);
                parent.deinit();
            }
        }
    };
}

pub fn findChained(comptime T: type, next_in_chain: ?*const sysgpu.ChainedStruct) ?*const T {
    const search = @as(*align(1) const sysgpu.ChainedStruct, @ptrCast(std.meta.fieldInfo(T, .chain).default_value.?));
    var chain = next_in_chain;
    while (chain) |c| {
        if (c.s_type == search.s_type) {
            return @as(*const T, @ptrCast(c));
        }
        chain = c.next;
    }
    return null;
}

pub fn alignUp(x: usize, a: usize) usize {
    return (x + a - 1) / a * a;
}

pub const FormatType = enum {
    float,
    unorm,
    unorm_srgb,
    snorm,
    uint,
    sint,
    depth,
    stencil,
    depth_stencil,
};

pub fn vertexFormatType(format: sysgpu.VertexFormat) FormatType {
    return switch (format) {
        .undefined => unreachable,
        .uint8x2, .uint8x4, .uint16x2, .uint16x4, .uint32, .uint32x2, .uint32x3, .uint32x4 => .uint,
        .sint8x2, .sint8x4, .sint16x2, .sint16x4, .sint32, .sint32x2, .sint32x3, .sint32x4 => .sint,
        .unorm8x2, .unorm8x4, .unorm16x2, .unorm16x4 => .unorm,
        .snorm8x2, .snorm8x4, .snorm16x2, .snorm16x4 => .snorm,
        .float16x2, .float16x4, .float32, .float32x2, .float32x3, .float32x4 => .float,
    };
}

pub fn textureFormatType(format: sysgpu.Texture.Format) FormatType {
    return switch (format) {
        .undefined => unreachable,
        .r8_unorm => .unorm,
        .r8_snorm => .snorm,
        .r8_uint => .uint,
        .r8_sint => .sint,
        .r16_uint => .uint,
        .r16_sint => .sint,
        .r16_float => .float,
        .rg8_unorm => .unorm,
        .rg8_snorm => .snorm,
        .rg8_uint => .uint,
        .rg8_sint => .sint,
        .r32_float => .float,
        .r32_uint => .uint,
        .r32_sint => .sint,
        .rg16_uint => .uint,
        .rg16_sint => .sint,
        .rg16_float => .float,
        .rgba8_unorm => .unorm,
        .rgba8_unorm_srgb => .unorm_srgb,
        .rgba8_snorm => .snorm,
        .rgba8_uint => .uint,
        .rgba8_sint => .sint,
        .bgra8_unorm => .unorm,
        .bgra8_unorm_srgb => .unorm_srgb,
        .rgb10_a2_unorm => .unorm,
        .rg11_b10_ufloat => .float,
        .rgb9_e5_ufloat => .float,
        .rg32_float => .float,
        .rg32_uint => .uint,
        .rg32_sint => .sint,
        .rgba16_uint => .uint,
        .rgba16_sint => .sint,
        .rgba16_float => .float,
        .rgba32_float => .float,
        .rgba32_uint => .uint,
        .rgba32_sint => .sint,
        .stencil8 => .stencil,
        .depth16_unorm => .depth,
        .depth24_plus => .depth,
        .depth24_plus_stencil8 => .depth_stencil,
        .depth32_float => .depth,
        .depth32_float_stencil8 => .depth_stencil,
        .bc1_rgba_unorm => .unorm,
        .bc1_rgba_unorm_srgb => .unorm_srgb,
        .bc2_rgba_unorm => .unorm,
        .bc2_rgba_unorm_srgb => .unorm_srgb,
        .bc3_rgba_unorm => .unorm,
        .bc3_rgba_unorm_srgb => .unorm_srgb,
        .bc4_runorm => .unorm,
        .bc4_rsnorm => .snorm,
        .bc5_rg_unorm => .unorm,
        .bc5_rg_snorm => .snorm,
        .bc6_hrgb_ufloat => .float,
        .bc6_hrgb_float => .float,
        .bc7_rgba_unorm => .unorm,
        .bc7_rgba_unorm_srgb => .unorm_srgb,
        .etc2_rgb8_unorm => .unorm,
        .etc2_rgb8_unorm_srgb => .unorm_srgb,
        .etc2_rgb8_a1_unorm => .unorm,
        .etc2_rgb8_a1_unorm_srgb => .unorm_srgb,
        .etc2_rgba8_unorm => .unorm,
        .etc2_rgba8_unorm_srgb => .unorm_srgb,
        .eacr11_unorm => .unorm,
        .eacr11_snorm => .snorm,
        .eacrg11_unorm => .unorm,
        .eacrg11_snorm => .snorm,
        .astc4x4_unorm => .unorm,
        .astc4x4_unorm_srgb => .unorm_srgb,
        .astc5x4_unorm => .unorm,
        .astc5x4_unorm_srgb => .unorm_srgb,
        .astc5x5_unorm => .unorm,
        .astc5x5_unorm_srgb => .unorm_srgb,
        .astc6x5_unorm => .unorm,
        .astc6x5_unorm_srgb => .unorm_srgb,
        .astc6x6_unorm => .unorm,
        .astc6x6_unorm_srgb => .unorm_srgb,
        .astc8x5_unorm => .unorm,
        .astc8x5_unorm_srgb => .unorm_srgb,
        .astc8x6_unorm => .unorm,
        .astc8x6_unorm_srgb => .unorm_srgb,
        .astc8x8_unorm => .unorm,
        .astc8x8_unorm_srgb => .unorm_srgb,
        .astc10x5_unorm => .unorm,
        .astc10x5_unorm_srgb => .unorm_srgb,
        .astc10x6_unorm => .unorm,
        .astc10x6_unorm_srgb => .unorm_srgb,
        .astc10x8_unorm => .unorm,
        .astc10x8_unorm_srgb => .unorm_srgb,
        .astc10x10_unorm => .unorm,
        .astc10x10_unorm_srgb => .unorm_srgb,
        .astc12x10_unorm => .unorm,
        .astc12x10_unorm_srgb => .unorm_srgb,
        .astc12x12_unorm => .unorm,
        .astc12x12_unorm_srgb => .unorm_srgb,
        .r8_bg8_biplanar420_unorm => .unorm,
    };
}

pub fn formatHasDepthOrStencil(format: sysgpu.Texture.Format) bool {
    return switch (textureFormatType(format)) {
        .depth, .stencil, .depth_stencil => true,
        else => false,
    };
}

pub fn calcOrigin(dimension: sysgpu.Texture.Dimension, origin: sysgpu.Origin3D) struct {
    x: u32,
    y: u32,
    z: u32,
    array_slice: u32,
} {
    return .{
        .x = origin.x,
        .y = origin.y,
        .z = if (dimension == .dimension_3d) origin.z else 0,
        .array_slice = if (dimension == .dimension_3d) 0 else origin.z,
    };
}

pub fn calcExtent(dimension: sysgpu.Texture.Dimension, extent: sysgpu.Extent3D) struct {
    width: u32,
    height: u32,
    depth: u32,
    array_count: u32,
} {
    return .{
        .width = extent.width,
        .height = extent.height,
        .depth = if (dimension == .dimension_3d) extent.depth_or_array_layers else 1,
        .array_count = if (dimension == .dimension_3d) 0 else extent.depth_or_array_layers,
    };
}

pub const DefaultPipelineLayoutDescriptor = struct {
    pub const Group = std.ArrayListUnmanaged(sysgpu.BindGroupLayout.Entry);

    allocator: std.mem.Allocator,
    groups: std.BoundedArray(Group, limits.max_bind_groups) = .{},

    pub fn init(allocator: std.mem.Allocator) DefaultPipelineLayoutDescriptor {
        return .{ .allocator = allocator };
    }

    pub fn deinit(desc: *DefaultPipelineLayoutDescriptor) void {
        for (desc.groups.slice()) |*group| {
            group.deinit(desc.allocator);
        }
    }

    pub fn addFunction(
        desc: *DefaultPipelineLayoutDescriptor,
        air: *const shader.Air,
        stage: sysgpu.ShaderStageFlags,
        entry_point: [*:0]const u8,
    ) !void {
        if (air.findFunction(std.mem.span(entry_point))) |fn_inst| {
            const global_var_ref_list = air.refToList(fn_inst.global_var_refs);
            for (global_var_ref_list) |global_var_inst_idx| {
                const var_inst = air.getInst(global_var_inst_idx).@"var";
                if (var_inst.addr_space == .workgroup) continue;

                const var_type = air.getInst(var_inst.type);
                const group: u32 = @intCast(air.resolveInt(var_inst.group) orelse return error.ConstExpr);
                const binding: u32 = @intCast(air.resolveInt(var_inst.binding) orelse return error.ConstExpr);

                var entry: sysgpu.BindGroupLayout.Entry = .{ .binding = binding, .visibility = stage };
                switch (var_type) {
                    .sampler_type => entry.sampler.type = .filtering,
                    .comparison_sampler_type => entry.sampler.type = .comparison,
                    .texture_type => |texture| {
                        switch (texture.kind) {
                            .storage_1d,
                            .storage_2d,
                            .storage_2d_array,
                            .storage_3d,
                            => {
                                entry.storage_texture.access = .undefined; // TODO - write_only
                                entry.storage_texture.format = switch (texture.texel_format) {
                                    .none => unreachable,
                                    .rgba8unorm => .rgba8_unorm,
                                    .rgba8snorm => .rgba8_snorm,
                                    .bgra8unorm => .bgra8_unorm,
                                    .rgba16float => .rgba16_float,
                                    .r32float => .r32_float,
                                    .rg32float => .rg32_float,
                                    .rgba32float => .rgba32_float,
                                    .rgba8uint => .rgba8_uint,
                                    .rgba16uint => .rgba16_uint,
                                    .r32uint => .r32_uint,
                                    .rg32uint => .rg32_uint,
                                    .rgba32uint => .rgba32_uint,
                                    .rgba8sint => .rgba8_sint,
                                    .rgba16sint => .rgba16_sint,
                                    .r32sint => .r32_sint,
                                    .rg32sint => .rg32_sint,
                                    .rgba32sint => .rgba32_sint,
                                };
                                entry.storage_texture.view_dimension = switch (texture.kind) {
                                    .storage_1d => .dimension_1d,
                                    .storage_2d => .dimension_2d,
                                    .storage_2d_array => .dimension_2d_array,
                                    .storage_3d => .dimension_3d,
                                    else => unreachable,
                                };
                            },
                            else => {
                                // sample_type
                                entry.texture.sample_type = switch (texture.kind) {
                                    .depth_2d,
                                    .depth_2d_array,
                                    .depth_cube,
                                    .depth_cube_array,
                                    => .depth,
                                    else => switch (texture.texel_format) {
                                        .none => .float, // TODO - is this right?
                                        .rgba8unorm,
                                        .rgba8snorm,
                                        .bgra8unorm,
                                        .rgba16float,
                                        .r32float,
                                        .rg32float,
                                        .rgba32float,
                                        => .float, // TODO - unfilterable
                                        .rgba8uint,
                                        .rgba16uint,
                                        .r32uint,
                                        .rg32uint,
                                        .rgba32uint,
                                        => .uint,
                                        .rgba8sint,
                                        .rgba16sint,
                                        .r32sint,
                                        .rg32sint,
                                        .rgba32sint,
                                        => .sint,
                                    },
                                };
                                entry.texture.view_dimension = switch (texture.kind) {
                                    .sampled_1d, .storage_1d => .dimension_1d,
                                    .sampled_2d, .multisampled_2d, .multisampled_depth_2d, .storage_2d, .depth_2d => .dimension_2d,
                                    .sampled_2d_array, .storage_2d_array, .depth_2d_array => .dimension_2d_array,
                                    .sampled_3d, .storage_3d => .dimension_3d,
                                    .sampled_cube, .depth_cube => .dimension_cube,
                                    .sampled_cube_array, .depth_cube_array => .dimension_cube_array,
                                };
                                entry.texture.multisampled = switch (texture.kind) {
                                    .multisampled_2d, .multisampled_depth_2d => .true,
                                    else => .false,
                                };
                            },
                        }
                    },
                    else => {
                        switch (var_inst.addr_space) {
                            .uniform => entry.buffer.type = .uniform,
                            .storage => {
                                if (var_inst.access_mode == .read) {
                                    entry.buffer.type = .read_only_storage;
                                } else {
                                    entry.buffer.type = .storage;
                                }
                            },
                            else => std.debug.panic("unhandled addr_space\n", .{}),
                        }
                    },
                }

                while (desc.groups.len <= group) {
                    desc.groups.appendAssumeCapacity(.{});
                }

                var append = true;
                var group_entries = &desc.groups.buffer[group];
                for (group_entries.items) |*previous_entry| {
                    if (previous_entry.binding == binding) {
                        // TODO - bitfield or?
                        if (entry.visibility.vertex) previous_entry.visibility.vertex = true;
                        if (entry.visibility.fragment) previous_entry.visibility.fragment = true;
                        if (entry.visibility.compute) previous_entry.visibility.compute = true;

                        if (previous_entry.buffer.min_binding_size < entry.buffer.min_binding_size) {
                            previous_entry.buffer.min_binding_size = entry.buffer.min_binding_size;
                        }

                        if (previous_entry.texture.sample_type != entry.texture.sample_type) {
                            if (previous_entry.texture.sample_type == .unfilterable_float and
                                entry.texture.sample_type == .float)
                            {
                                previous_entry.texture.sample_type = .float;
                            } else if (previous_entry.texture.sample_type == .float and
                                entry.texture.sample_type == .unfilterable_float)
                            {
                                // ignore
                            } else {
                                return error.IncompatibleEntries;
                            }
                        }

                        // TODO - any other differences return error
                        append = false;
                        break;
                    }
                }

                if (append) try group_entries.append(desc.allocator, entry);
            }
        }
    }
};

repos/mach-sysgpu/src/d3d12.zig
const std = @import("std"); const builtin = @import("builtin"); const sysgpu = @import("sysgpu/main.zig"); const limits = @import("limits.zig"); const shader = @import("shader.zig"); const utils = @import("utils.zig"); const c = @import("d3d12/c.zig"); const conv = @import("d3d12/conv.zig"); const gpu_allocator = @import("gpu_allocator.zig"); const log = std.log.scoped(.d3d12); // TODO - need to tweak all these sizes and make a better allocator const general_heap_size = 1024; const general_block_size = 16; const sampler_heap_size = 1024; const sampler_block_size = 16; const rtv_heap_size = 1024; const rtv_block_size = 16; const dsv_heap_size = 1024; const dsv_block_size = 1; const upload_page_size = 64 * 1024 * 1024; // TODO - split writes and/or support large uploads const max_back_buffer_count = 3; var allocator: std.mem.Allocator = undefined; var debug_enabled: bool = undefined; var gpu_validation_enabled: bool = undefined; // workaround c-translation errors const DXGI_PRESENT_ALLOW_TEARING: c.UINT = 0x00000200; pub const InitOptions = struct { debug_enabled: bool = builtin.mode == .Debug, gpu_validation_enabled: bool = builtin.mode == .Debug, }; pub fn init(alloc: std.mem.Allocator, options: InitOptions) !void { allocator = alloc; debug_enabled = options.debug_enabled; gpu_validation_enabled = options.gpu_validation_enabled; } const MapCallback = struct { buffer: *Buffer, callback: sysgpu.Buffer.MapCallback, userdata: ?*anyopaque, }; fn setDebugName(object: *c.ID3D12Object, opt_label: ?[*:0]const u8) void { if (opt_label) |label| { const slice = std.mem.span(label); _ = object.lpVtbl.*.SetPrivateData.?( object, &c.WKPDID_D3DDebugObjectName, @intCast(slice.len), slice.ptr, ); } else { _ = object.lpVtbl.*.SetPrivateData.?( object, &c.WKPDID_D3DDebugObjectName, 0, null, ); } } pub const Instance = struct { manager: utils.Manager(Instance) = .{}, dxgi_factory: *c.IDXGIFactory4, allow_tearing: bool, pub fn init(desc: *const sysgpu.Instance.Descriptor) !*Instance { // TODO _ = desc; var hr: c.HRESULT = undefined; // DXGI Factory var dxgi_factory: *c.IDXGIFactory4 = undefined; hr = c.CreateDXGIFactory2( if (debug_enabled) c.DXGI_CREATE_FACTORY_DEBUG else 0, &c.IID_IDXGIFactory4, @ptrCast(&dxgi_factory), ); if (hr != c.S_OK) { return error.CreateDXGIFactoryFailed; } errdefer _ = dxgi_factory.lpVtbl.*.Release.?(dxgi_factory); var opt_dxgi_factory5: ?*c.IDXGIFactory5 = null; _ = dxgi_factory.lpVtbl.*.QueryInterface.?( dxgi_factory, &c.IID_IDXGIFactory5, @ptrCast(&opt_dxgi_factory5), ); defer _ = if (opt_dxgi_factory5) |dxgi_factory5| dxgi_factory5.lpVtbl.*.Release.?(dxgi_factory5); // Feature support var allow_tearing: c.BOOL = c.FALSE; if (opt_dxgi_factory5) |dxgi_factory5| { hr = dxgi_factory5.lpVtbl.*.CheckFeatureSupport.?( dxgi_factory5, c.DXGI_FEATURE_PRESENT_ALLOW_TEARING, &allow_tearing, @sizeOf(@TypeOf(allow_tearing)), ); } // D3D12 Debug Layer if (debug_enabled) { var debug_controller: *c.ID3D12Debug1 = undefined; hr = c.D3D12GetDebugInterface(&c.IID_ID3D12Debug1, @ptrCast(&debug_controller)); if (hr == c.S_OK) { defer _ = debug_controller.lpVtbl.*.Release.?(debug_controller); debug_controller.lpVtbl.*.EnableDebugLayer.?(debug_controller); if (gpu_validation_enabled) { debug_controller.lpVtbl.*.SetEnableGPUBasedValidation.?( debug_controller, c.TRUE, ); } } } // Result const instance = try allocator.create(Instance); instance.* = .{ .dxgi_factory = dxgi_factory, .allow_tearing = allow_tearing == c.TRUE, }; return instance; } pub fn deinit(instance: *Instance) void { const dxgi_factory = 
instance.dxgi_factory; _ = dxgi_factory.lpVtbl.*.Release.?(dxgi_factory); Instance.reportLiveObjects(); allocator.destroy(instance); } pub fn createSurface(instance: *Instance, desc: *const sysgpu.Surface.Descriptor) !*Surface { return Surface.init(instance, desc); } // Internal pub fn reportLiveObjects() void { var hr: c.HRESULT = undefined; var dxgi_debug: *c.IDXGIDebug = undefined; hr = c.DXGIGetDebugInterface1(0, &c.IID_IDXGIDebug, @ptrCast(&dxgi_debug)); if (hr == c.S_OK) { defer _ = dxgi_debug.lpVtbl.*.Release.?(dxgi_debug); _ = dxgi_debug.lpVtbl.*.ReportLiveObjects.?( dxgi_debug, c.DXGI_DEBUG_ALL, c.DXGI_DEBUG_RLO_ALL, ); } } }; pub const Adapter = struct { manager: utils.Manager(Adapter) = .{}, instance: *Instance, dxgi_adapter: *c.IDXGIAdapter1, d3d_device: *c.ID3D12Device, dxgi_desc: c.DXGI_ADAPTER_DESC1, pub fn init(instance: *Instance, options: *const sysgpu.RequestAdapterOptions) !*Adapter { // TODO - choose appropriate device from options _ = options; const dxgi_factory = instance.dxgi_factory; var hr: c.HRESULT = undefined; var i: u32 = 0; var dxgi_adapter: *c.IDXGIAdapter1 = undefined; while (dxgi_factory.lpVtbl.*.EnumAdapters1.?( dxgi_factory, i, @ptrCast(&dxgi_adapter), ) != c.DXGI_ERROR_NOT_FOUND) : (i += 1) { defer _ = dxgi_adapter.lpVtbl.*.Release.?(dxgi_adapter); var dxgi_desc: c.DXGI_ADAPTER_DESC1 = undefined; hr = dxgi_adapter.lpVtbl.*.GetDesc1.?( dxgi_adapter, &dxgi_desc, ); std.debug.assert(hr == c.S_OK); if ((dxgi_desc.Flags & c.DXGI_ADAPTER_FLAG_SOFTWARE) != 0) continue; var d3d_device: *c.ID3D12Device = undefined; hr = c.D3D12CreateDevice( @ptrCast(dxgi_adapter), c.D3D_FEATURE_LEVEL_11_0, &c.IID_ID3D12Device, @ptrCast(&d3d_device), ); if (hr == c.S_OK) { _ = dxgi_adapter.lpVtbl.*.AddRef.?(dxgi_adapter); const adapter = try allocator.create(Adapter); adapter.* = .{ .instance = instance, .dxgi_adapter = dxgi_adapter, .d3d_device = d3d_device, .dxgi_desc = dxgi_desc, }; return adapter; } } return error.NoAdapterFound; } pub fn deinit(adapter: *Adapter) void { const dxgi_adapter = adapter.dxgi_adapter; const d3d_device = adapter.d3d_device; _ = dxgi_adapter.lpVtbl.*.Release.?(dxgi_adapter); _ = d3d_device.lpVtbl.*.Release.?(d3d_device); allocator.destroy(adapter); } pub fn createDevice(adapter: *Adapter, desc: ?*const sysgpu.Device.Descriptor) !*Device { return Device.init(adapter, desc); } pub fn getProperties(adapter: *Adapter) sysgpu.Adapter.Properties { const dxgi_desc = adapter.dxgi_desc; return .{ .vendor_id = dxgi_desc.VendorId, .vendor_name = "", // TODO .architecture = "", // TODO .device_id = dxgi_desc.DeviceId, .name = "", // TODO - wide to ascii - dxgi_desc.Description .driver_description = "", // TODO .adapter_type = .unknown, .backend_type = .d3d12, .compatibility_mode = .false, }; } }; pub const Surface = struct { manager: utils.Manager(Surface) = .{}, hwnd: c.HWND, pub fn init(instance: *Instance, desc: *const sysgpu.Surface.Descriptor) !*Surface { _ = instance; if (utils.findChained(sysgpu.Surface.DescriptorFromWindowsHWND, desc.next_in_chain.generic)) |win_desc| { // workaround issues with @alignCast panicking as HWND is not a real pointer var hwnd: c.HWND = undefined; @memcpy(std.mem.asBytes(&hwnd), std.mem.asBytes(&win_desc.hwnd)); const surface = try allocator.create(Surface); surface.* = .{ .hwnd = hwnd }; return surface; } else { return error.InvalidDescriptor; } } pub fn deinit(surface: *Surface) void { allocator.destroy(surface); } }; pub const Device = struct { manager: utils.Manager(Device) = .{}, adapter: *Adapter, d3d_device: 
*c.ID3D12Device, queue: *Queue, general_heap: DescriptorHeap = undefined, sampler_heap: DescriptorHeap = undefined, rtv_heap: DescriptorHeap = undefined, dsv_heap: DescriptorHeap = undefined, command_manager: CommandManager = undefined, streaming_manager: StreamingManager = undefined, reference_trackers: std.ArrayListUnmanaged(*ReferenceTracker) = .{}, mem_allocator: MemoryAllocator = undefined, map_callbacks: std.ArrayListUnmanaged(MapCallback) = .{}, lost_cb: ?sysgpu.Device.LostCallback = null, lost_cb_userdata: ?*anyopaque = null, log_cb: ?sysgpu.LoggingCallback = null, log_cb_userdata: ?*anyopaque = null, err_cb: ?sysgpu.ErrorCallback = null, err_cb_userdata: ?*anyopaque = null, pub fn init(adapter: *Adapter, desc: ?*const sysgpu.Device.Descriptor) !*Device { const d3d_device = adapter.d3d_device; var hr: c.HRESULT = undefined; // TODO _ = desc; // Debug Configuration if (debug_enabled) { var info_queue: *c.ID3D12InfoQueue = undefined; hr = d3d_device.lpVtbl.*.QueryInterface.?( d3d_device, &c.IID_ID3D12InfoQueue, @ptrCast(&info_queue), ); if (hr == c.S_OK) { defer _ = info_queue.lpVtbl.*.Release.?(info_queue); var deny_ids = [_]c.D3D12_MESSAGE_ID{ c.D3D12_MESSAGE_ID_CLEARRENDERTARGETVIEW_MISMATCHINGCLEARVALUE, c.D3D12_MESSAGE_ID_CLEARDEPTHSTENCILVIEW_MISMATCHINGCLEARVALUE, 1328, //c.D3D12_MESSAGE_ID_CREATERESOURCE_STATE_IGNORED, // Required for naive barrier strategy, can be removed with render graphs }; var severities = [_]c.D3D12_MESSAGE_SEVERITY{ c.D3D12_MESSAGE_SEVERITY_INFO, c.D3D12_MESSAGE_SEVERITY_MESSAGE, }; var filter = c.D3D12_INFO_QUEUE_FILTER{ .AllowList = .{ .NumCategories = 0, .pCategoryList = null, .NumSeverities = 0, .pSeverityList = null, .NumIDs = 0, .pIDList = null, }, .DenyList = .{ .NumCategories = 0, .pCategoryList = null, .NumSeverities = severities.len, .pSeverityList = &severities, .NumIDs = deny_ids.len, .pIDList = &deny_ids, }, }; hr = info_queue.lpVtbl.*.PushStorageFilter.?( info_queue, &filter, ); std.debug.assert(hr == c.S_OK); } } const queue = try allocator.create(Queue); errdefer allocator.destroy(queue); // Object var device = try allocator.create(Device); device.* = .{ .adapter = adapter, .d3d_device = d3d_device, .queue = queue, }; // Initialize device.queue.* = try Queue.init(device); errdefer queue.deinit(); device.general_heap = try DescriptorHeap.init( device, c.D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV, c.D3D12_DESCRIPTOR_HEAP_FLAG_SHADER_VISIBLE, general_heap_size, general_block_size, ); errdefer device.general_heap.deinit(); device.sampler_heap = try DescriptorHeap.init( device, c.D3D12_DESCRIPTOR_HEAP_TYPE_SAMPLER, c.D3D12_DESCRIPTOR_HEAP_FLAG_SHADER_VISIBLE, sampler_heap_size, sampler_block_size, ); errdefer device.sampler_heap.deinit(); device.rtv_heap = try DescriptorHeap.init( device, c.D3D12_DESCRIPTOR_HEAP_TYPE_RTV, c.D3D12_DESCRIPTOR_HEAP_FLAG_NONE, rtv_heap_size, rtv_block_size, ); errdefer device.rtv_heap.deinit(); device.dsv_heap = try DescriptorHeap.init( device, c.D3D12_DESCRIPTOR_HEAP_TYPE_DSV, c.D3D12_DESCRIPTOR_HEAP_FLAG_NONE, dsv_heap_size, dsv_block_size, ); errdefer device.dsv_heap.deinit(); device.command_manager = CommandManager.init(device); device.streaming_manager = try StreamingManager.init(device); errdefer device.streaming_manager.deinit(); try device.mem_allocator.init(device); return device; } pub fn deinit(device: *Device) void { if (device.lost_cb) |lost_cb| { lost_cb(.destroyed, "Device was destroyed.", device.lost_cb_userdata); } device.queue.waitUntil(device.queue.fence_value); device.processQueuedOperations(); 
device.map_callbacks.deinit(allocator); device.reference_trackers.deinit(allocator); device.streaming_manager.deinit(); device.command_manager.deinit(); device.dsv_heap.deinit(); device.rtv_heap.deinit(); device.sampler_heap.deinit(); device.general_heap.deinit(); device.queue.manager.release(); device.mem_allocator.deinit(); allocator.destroy(device.queue); allocator.destroy(device); } pub fn createBindGroup(device: *Device, desc: *const sysgpu.BindGroup.Descriptor) !*BindGroup { return BindGroup.init(device, desc); } pub fn createBindGroupLayout(device: *Device, desc: *const sysgpu.BindGroupLayout.Descriptor) !*BindGroupLayout { return BindGroupLayout.init(device, desc); } pub fn createBuffer(device: *Device, desc: *const sysgpu.Buffer.Descriptor) !*Buffer { return Buffer.init(device, desc); } pub fn createCommandEncoder(device: *Device, desc: *const sysgpu.CommandEncoder.Descriptor) !*CommandEncoder { return CommandEncoder.init(device, desc); } pub fn createComputePipeline(device: *Device, desc: *const sysgpu.ComputePipeline.Descriptor) !*ComputePipeline { return ComputePipeline.init(device, desc); } pub fn createPipelineLayout(device: *Device, desc: *const sysgpu.PipelineLayout.Descriptor) !*PipelineLayout { return PipelineLayout.init(device, desc); } pub fn createRenderPipeline(device: *Device, desc: *const sysgpu.RenderPipeline.Descriptor) !*RenderPipeline { return RenderPipeline.init(device, desc); } pub fn createSampler(device: *Device, desc: *const sysgpu.Sampler.Descriptor) !*Sampler { return Sampler.init(device, desc); } pub fn createShaderModuleAir(device: *Device, air: *shader.Air, label: [*:0]const u8) !*ShaderModule { _ = label; return ShaderModule.initAir(device, air); } pub fn createShaderModuleSpirv(device: *Device, code: [*]const u32, code_size: u32) !*ShaderModule { _ = code; _ = code_size; _ = device; return error.Unsupported; } pub fn createShaderModuleHLSL(device: *Device, code: []const u8) !*ShaderModule { _ = device; const module = try allocator.create(ShaderModule); module.* = .{ .code = .{ .code = code } }; return module; } pub fn createShaderModuleMSL( device: *Device, label: [*:0]const u8, code: []const u8, workgroup_size: sysgpu.ShaderModule.WorkgroupSize, ) !*ShaderModule { _ = label; _ = code; _ = device; _ = workgroup_size; return error.Unsupported; } pub fn createSwapChain(device: *Device, surface: *Surface, desc: *const sysgpu.SwapChain.Descriptor) !*SwapChain { return SwapChain.init(device, surface, desc); } pub fn createTexture(device: *Device, desc: *const sysgpu.Texture.Descriptor) !*Texture { return Texture.init(device, desc); } pub fn getQueue(device: *Device) !*Queue { return device.queue; } pub fn tick(device: *Device) !void { device.processQueuedOperations(); } // Internal pub fn processQueuedOperations(device: *Device) void { // Reference trackers { const fence = device.queue.fence; const completed_value = fence.lpVtbl.*.GetCompletedValue.?(fence); var i: usize = 0; while (i < device.reference_trackers.items.len) { const reference_tracker = device.reference_trackers.items[i]; if (reference_tracker.fence_value <= completed_value) { reference_tracker.deinit(); _ = device.reference_trackers.swapRemove(i); } else { i += 1; } } } // MapAsync { var i: usize = 0; while (i < device.map_callbacks.items.len) { const map_callback = device.map_callbacks.items[i]; if (map_callback.buffer.gpu_count == 0) { map_callback.buffer.executeMapAsync(map_callback); _ = device.map_callbacks.swapRemove(i); } else { i += 1; } } } } pub fn createD3dBuffer(device: *Device, 
usage: sysgpu.Buffer.UsageFlags, size: u64) !Resource { const resource_size = conv.d3d12ResourceSizeForBuffer(size, usage); const heap_type = conv.d3d12HeapType(usage); const resource_desc = c.D3D12_RESOURCE_DESC{ .Dimension = c.D3D12_RESOURCE_DIMENSION_BUFFER, .Alignment = 0, .Width = resource_size, .Height = 1, .DepthOrArraySize = 1, .MipLevels = 1, .Format = c.DXGI_FORMAT_UNKNOWN, .SampleDesc = .{ .Count = 1, .Quality = 0 }, .Layout = c.D3D12_TEXTURE_LAYOUT_ROW_MAJOR, .Flags = conv.d3d12ResourceFlagsForBuffer(usage), }; const read_state = conv.d3d12ResourceStatesForBufferRead(usage); const initial_state = conv.d3d12ResourceStatesInitial(heap_type, read_state); const create_desc = ResourceCreateDescriptor{ .location = if (usage.map_write) .gpu_to_cpu else if (usage.map_read) .cpu_to_gpu else .gpu_only, .resource_desc = &resource_desc, .clear_value = null, .resource_category = .buffer, .initial_state = initial_state, }; return try device.mem_allocator.createResource(&create_desc); } }; pub const ResourceCategory = enum { buffer, rtv_dsv_texture, other_texture, pub inline fn heapUsable(self: ResourceCategory, heap: HeapCategory) bool { return switch (heap) { .all => true, .buffer => self == .buffer, .rtv_dsv_texture => self == .rtv_dsv_texture, .other_texture => self == .other_texture, }; } }; pub const HeapCategory = enum { all, buffer, rtv_dsv_texture, other_texture, }; pub const AllocationCreateDescriptor = struct { location: MemoryLocation, size: u64, alignment: u64, resource_category: ResourceCategory, }; pub const ResourceCreateDescriptor = struct { location: MemoryLocation, resource_category: ResourceCategory, resource_desc: *const c.D3D12_RESOURCE_DESC, clear_value: ?*const c.D3D12_CLEAR_VALUE, initial_state: c.D3D12_RESOURCE_STATES, }; pub const MemoryLocation = enum { unknown, gpu_only, cpu_to_gpu, gpu_to_cpu, }; pub const AllocationSizes = struct { device_memblock_size: u64 = 256 * 1024 * 1024, host_memblock_size: u64 = 64 * 1024 * 1024, const four_mb = 4 * 1024 * 1024; const two_hundred_fifty_six_mb = 256 * 1024 * 1024; pub fn init( device_memblock_size: u64, host_memblock_size: u64, ) AllocationSizes { var use_device_memblock_size = std.math.clamp( device_memblock_size, four_mb, two_hundred_fifty_six_mb, ); var use_host_memblock_size = std.math.clamp( host_memblock_size, four_mb, two_hundred_fifty_six_mb, ); if (use_device_memblock_size % four_mb != 0) { use_device_memblock_size = four_mb * (@divFloor(use_device_memblock_size, four_mb) + 1); } if (use_host_memblock_size % four_mb != 0) { use_host_memblock_size = four_mb * (@divFloor(use_host_memblock_size, four_mb) + 1); } return .{ .device_memblock_size = use_device_memblock_size, .host_memblock_size = use_host_memblock_size, }; } }; /// Stores a group of heaps pub const MemoryAllocator = struct { const max_memory_groups = 9; device: *Device, memory_groups: std.BoundedArray(MemoryGroup, max_memory_groups), allocation_sizes: AllocationSizes, /// a single heap, /// use the gpu_allocator field to allocate chunks of memory pub const MemoryHeap = struct { index: usize, heap: *c.ID3D12Heap, size: u64, gpu_allocator: gpu_allocator.Allocator, pub fn init( group: *MemoryGroup, index: usize, size: u64, dedicated: bool, ) gpu_allocator.Error!MemoryHeap { const heap = blk: { var desc = c.D3D12_HEAP_DESC{ .SizeInBytes = size, .Properties = group.heap_properties, .Alignment = @intCast(c.D3D12_DEFAULT_MSAA_RESOURCE_PLACEMENT_ALIGNMENT), .Flags = switch (group.heap_category) { .all => c.D3D12_HEAP_FLAG_NONE, .buffer => 
c.D3D12_HEAP_FLAG_ALLOW_ONLY_BUFFERS, .rtv_dsv_texture => c.D3D12_HEAP_FLAG_ALLOW_ONLY_RT_DS_TEXTURES, .other_texture => c.D3D12_HEAP_FLAG_ALLOW_ONLY_NON_RT_DS_TEXTURES, }, }; var heap: ?*c.ID3D12Heap = null; const d3d_device = group.owning_pool.device.d3d_device; const hr = d3d_device.lpVtbl.*.CreateHeap.?( d3d_device, &desc, &c.IID_ID3D12Heap, @ptrCast(&heap), ); if (hr == c.E_OUTOFMEMORY) return gpu_allocator.Error.OutOfMemory; if (hr != c.S_OK) return gpu_allocator.Error.Other; break :blk heap.?; }; return MemoryHeap{ .index = index, .heap = heap, .size = size, .gpu_allocator = if (dedicated) try gpu_allocator.Allocator.initDedicatedBlockAllocator(size) else try gpu_allocator.Allocator.initOffsetAllocator(allocator, @intCast(size), null), }; } pub fn deinit(self: *MemoryHeap) void { _ = self.heap.lpVtbl.*.Release.?(self.heap); self.gpu_allocator.deinit(); } }; /// a group of multiple heaps with a single heap type pub const MemoryGroup = struct { owning_pool: *MemoryAllocator, memory_location: MemoryLocation, heap_category: HeapCategory, heap_properties: c.D3D12_HEAP_PROPERTIES, heaps: std.ArrayListUnmanaged(?MemoryHeap), pub const GroupAllocation = struct { allocation: gpu_allocator.Allocation, heap: *MemoryHeap, size: u64, }; pub fn init( owner: *MemoryAllocator, memory_location: MemoryLocation, category: HeapCategory, properties: c.D3D12_HEAP_PROPERTIES, ) MemoryGroup { return .{ .owning_pool = owner, .memory_location = memory_location, .heap_category = category, .heap_properties = properties, .heaps = .{}, }; } pub fn deinit(self: *MemoryGroup) void { for (self.heaps.items) |*heap| { if (heap.*) |*h| h.deinit(); } self.heaps.deinit(allocator); } pub fn allocate(self: *MemoryGroup, size: u64) gpu_allocator.Error!GroupAllocation { const memblock_size: u64 = if (self.heap_properties.Type == c.D3D12_HEAP_TYPE_DEFAULT) self.owning_pool.allocation_sizes.device_memblock_size else self.owning_pool.allocation_sizes.host_memblock_size; if (size > memblock_size) { return self.allocateDedicated(size); } var empty_heap_index: ?usize = null; for (self.heaps.items, 0..) |*heap, index| { if (heap.*) |*h| { const allocation = h.gpu_allocator.allocate(@intCast(size)) catch |err| switch (err) { gpu_allocator.Error.OutOfMemory => continue, else => return err, }; return GroupAllocation{ .allocation = allocation, .heap = h, .size = size, }; } else if (empty_heap_index == null) { empty_heap_index = index; } } // couldn't allocate, use the empty heap if we got one const heap = try self.addHeap(memblock_size, false, empty_heap_index); const allocation = try heap.gpu_allocator.allocate(@intCast(size)); return GroupAllocation{ .allocation = allocation, .heap = heap, .size = size, }; } fn allocateDedicated(self: *MemoryGroup, size: u64) gpu_allocator.Error!GroupAllocation { const memory_block = try self.addHeap(size, true, blk: { for (self.heaps.items, 0..) 
|heap, index| { if (heap == null) break :blk index; } break :blk null; }); const allocation = try memory_block.gpu_allocator.allocate(@intCast(size)); return GroupAllocation{ .allocation = allocation, .heap = memory_block, .size = size, }; } pub fn free(self: *MemoryGroup, allocation: GroupAllocation) gpu_allocator.Error!void { const heap = allocation.heap; try heap.gpu_allocator.free(allocation.allocation); if (heap.gpu_allocator.isEmpty()) { const index = heap.index; heap.deinit(); self.heaps.items[index] = null; } } fn addHeap(self: *MemoryGroup, size: u64, dedicated: bool, replace: ?usize) gpu_allocator.Error!*MemoryHeap { const heap_index: usize = blk: { if (replace) |index| { if (self.heaps.items[index]) |*heap| { heap.deinit(); } self.heaps.items[index] = null; break :blk index; } else { _ = try self.heaps.addOne(allocator); break :blk self.heaps.items.len - 1; } }; errdefer _ = self.heaps.popOrNull(); const heap = &self.heaps.items[heap_index].?; heap.* = try MemoryHeap.init( self, heap_index, size, dedicated, ); return heap; } }; pub const Allocation = struct { allocation: gpu_allocator.Allocation, heap: *MemoryHeap, size: u64, group: *MemoryGroup, }; pub fn init(self: *MemoryAllocator, device: *Device) !void { const HeapType = struct { location: MemoryLocation, properties: c.D3D12_HEAP_PROPERTIES, }; const heap_types = [_]HeapType{ .{ .location = .gpu_only, .properties = c.D3D12_HEAP_PROPERTIES{ .Type = c.D3D12_HEAP_TYPE_DEFAULT, .CPUPageProperty = c.D3D12_CPU_PAGE_PROPERTY_UNKNOWN, .MemoryPoolPreference = c.D3D12_MEMORY_POOL_UNKNOWN, .CreationNodeMask = 0, .VisibleNodeMask = 0, }, }, .{ .location = .cpu_to_gpu, .properties = c.D3D12_HEAP_PROPERTIES{ .Type = c.D3D12_HEAP_TYPE_CUSTOM, .CPUPageProperty = c.D3D12_CPU_PAGE_PROPERTY_WRITE_COMBINE, .MemoryPoolPreference = c.D3D12_MEMORY_POOL_L0, .CreationNodeMask = 0, .VisibleNodeMask = 0, }, }, .{ .location = .gpu_to_cpu, .properties = c.D3D12_HEAP_PROPERTIES{ .Type = c.D3D12_HEAP_TYPE_CUSTOM, .CPUPageProperty = c.D3D12_CPU_PAGE_PROPERTY_WRITE_BACK, .MemoryPoolPreference = c.D3D12_MEMORY_POOL_L0, .CreationNodeMask = 0, .VisibleNodeMask = 0, }, } }; self.* = .{ .device = device, .memory_groups = std.BoundedArray(MemoryGroup, max_memory_groups).init(0) catch unreachable, .allocation_sizes = .{}, }; var options: c.D3D12_FEATURE_DATA_D3D12_OPTIONS = undefined; const hr = device.d3d_device.lpVtbl.*.CheckFeatureSupport.?( device.d3d_device, c.D3D12_FEATURE_D3D12_OPTIONS, @ptrCast(&options), @sizeOf(c.D3D12_FEATURE_DATA_D3D12_OPTIONS), ); if (hr != c.S_OK) return gpu_allocator.Error.Other; const tier_one_heap = options.ResourceHeapTier == c.D3D12_RESOURCE_HEAP_TIER_1; self.memory_groups = std.BoundedArray(MemoryGroup, max_memory_groups).init(0) catch unreachable; inline for (heap_types) |heap_type| { if (tier_one_heap) { self.memory_groups.appendAssumeCapacity(MemoryGroup.init( self, heap_type.location, .buffer, heap_type.properties, )); self.memory_groups.appendAssumeCapacity(MemoryGroup.init( self, heap_type.location, .rtv_dsv_texture, heap_type.properties, )); self.memory_groups.appendAssumeCapacity(MemoryGroup.init( self, heap_type.location, .other_texture, heap_type.properties, )); } else { self.memory_groups.appendAssumeCapacity(MemoryGroup.init( self, heap_type.location, .all, heap_type.properties, )); } } } pub fn deinit(self: *MemoryAllocator) void { for (self.memory_groups.slice()) |*group| { group.deinit(); } } pub fn reportMemoryLeaks(self: *const MemoryAllocator) void { log.info("memory leaks:", .{}); var total_blocks: u64 = 0; 
for (self.memory_groups.constSlice(), 0..) |mem_group, mem_group_index| { log.info(" memory group {} ({s}, {s}):", .{ mem_group_index, @tagName(mem_group.heap_category), @tagName(mem_group.memory_location), }); for (mem_group.heaps.items, 0..) |block, block_index| { if (block) |found_block| { log.info(" block {}; total size: {}; allocated: {};", .{ block_index, found_block.size, found_block.gpu_allocator.getAllocated(), }); total_blocks += 1; } } } log.info("total blocks: {}", .{total_blocks}); } pub fn allocate(self: *MemoryAllocator, desc: *const AllocationCreateDescriptor) gpu_allocator.Error!Allocation { // TODO: handle alignment for (self.memory_groups.slice()) |*memory_group| { if (memory_group.memory_location != desc.location and desc.location != .unknown) continue; if (!desc.resource_category.heapUsable(memory_group.heap_category)) continue; const allocation = try memory_group.allocate(desc.size); return Allocation{ .allocation = allocation.allocation, .heap = allocation.heap, .size = allocation.size, .group = memory_group, }; } return gpu_allocator.Error.NoCompatibleMemoryFound; } pub fn free(self: *MemoryAllocator, allocation: Allocation) gpu_allocator.Error!void { _ = self; const group = allocation.group; try group.free(MemoryGroup.GroupAllocation{ .allocation = allocation.allocation, .heap = allocation.heap, .size = allocation.size, }); } pub fn createResource(self: *MemoryAllocator, desc: *const ResourceCreateDescriptor) gpu_allocator.Error!Resource { const d3d_device = self.device.d3d_device; const allocation_desc = blk: { var _out_allocation_info: c.D3D12_RESOURCE_ALLOCATION_INFO = undefined; const allocation_info = d3d_device.lpVtbl.*.GetResourceAllocationInfo.?( d3d_device, &_out_allocation_info, 0, 1, @ptrCast(desc.resource_desc), ); break :blk AllocationCreateDescriptor{ .location = desc.location, .size = allocation_info.*.SizeInBytes, .alignment = allocation_info.*.Alignment, .resource_category = desc.resource_category, }; }; const allocation = try self.allocate(&allocation_desc); var d3d_resource: ?*c.ID3D12Resource = null; const hr = d3d_device.lpVtbl.*.CreatePlacedResource.?( d3d_device, allocation.heap.heap, allocation.allocation.offset, desc.resource_desc, desc.initial_state, desc.clear_value, &c.IID_ID3D12Resource, @ptrCast(&d3d_resource), ); if (hr != c.S_OK) return gpu_allocator.Error.Other; return Resource{ .mem_allocator = self, .read_state = desc.initial_state, .allocation = allocation, .d3d_resource = d3d_resource.?, .memory_location = desc.location, .size = allocation.size, }; } pub fn destroyResource(self: *MemoryAllocator, resource: Resource) gpu_allocator.Error!void { if (resource.allocation) |allocation| { try self.free(allocation); } const d3d_resource = resource.d3d_resource; _ = d3d_resource.lpVtbl.*.Release.?(d3d_resource); } }; const DescriptorAllocation = struct { index: u32, }; const DescriptorHeap = struct { // Initial version supports fixed-block size allocation only device: *Device, d3d_heap: *c.ID3D12DescriptorHeap, cpu_base: c.D3D12_CPU_DESCRIPTOR_HANDLE, gpu_base: c.D3D12_GPU_DESCRIPTOR_HANDLE, descriptor_size: u32, descriptor_count: u32, block_size: u32, next_alloc: u32, free_blocks: std.ArrayListUnmanaged(DescriptorAllocation) = .{}, pub fn init( device: *Device, heap_type: c.D3D12_DESCRIPTOR_HEAP_TYPE, flags: c.D3D12_DESCRIPTOR_HEAP_FLAGS, descriptor_count: u32, block_size: u32, ) !DescriptorHeap { const d3d_device = device.d3d_device; var hr: c.HRESULT = undefined; var d3d_heap: *c.ID3D12DescriptorHeap = undefined; hr = 
d3d_device.lpVtbl.*.CreateDescriptorHeap.?( d3d_device, &c.D3D12_DESCRIPTOR_HEAP_DESC{ .Type = heap_type, .NumDescriptors = descriptor_count, .Flags = flags, .NodeMask = 0, }, &c.IID_ID3D12DescriptorHeap, @ptrCast(&d3d_heap), ); if (hr != c.S_OK) { return error.CreateDescriptorHeapFailed; } errdefer _ = d3d_heap.lpVtbl.*.Release.?(d3d_heap); const descriptor_size = d3d_device.lpVtbl.*.GetDescriptorHandleIncrementSize.?( d3d_device, heap_type, ); var cpu_base: c.D3D12_CPU_DESCRIPTOR_HANDLE = undefined; _ = d3d_heap.lpVtbl.*.GetCPUDescriptorHandleForHeapStart.?( d3d_heap, &cpu_base, ); var gpu_base: c.D3D12_GPU_DESCRIPTOR_HANDLE = undefined; if ((flags & c.D3D12_DESCRIPTOR_HEAP_FLAG_SHADER_VISIBLE) != 0) { _ = d3d_heap.lpVtbl.*.GetGPUDescriptorHandleForHeapStart.?( d3d_heap, &gpu_base, ); } else { gpu_base = .{ .ptr = 0 }; } return .{ .device = device, .d3d_heap = d3d_heap, .cpu_base = cpu_base, .gpu_base = gpu_base, .descriptor_size = descriptor_size, .descriptor_count = descriptor_count, .block_size = block_size, .next_alloc = 0, }; } pub fn deinit(heap: *DescriptorHeap) void { const d3d_heap = heap.d3d_heap; heap.free_blocks.deinit(allocator); _ = d3d_heap.lpVtbl.*.Release.?(d3d_heap); } pub fn alloc(heap: *DescriptorHeap) !DescriptorAllocation { // Recycle finished blocks if (heap.free_blocks.items.len == 0) { heap.device.processQueuedOperations(); } // Create new block if (heap.free_blocks.items.len == 0) { if (heap.next_alloc == heap.descriptor_count) return error.OutOfDescriptorMemory; const index = heap.next_alloc; heap.next_alloc += heap.block_size; try heap.free_blocks.append(allocator, .{ .index = index }); } // Result return heap.free_blocks.pop(); } pub fn free(heap: *DescriptorHeap, allocation: DescriptorAllocation) void { heap.free_blocks.append(allocator, allocation) catch { std.debug.panic("OutOfMemory", .{}); }; } pub fn cpuDescriptor(heap: *DescriptorHeap, index: u32) c.D3D12_CPU_DESCRIPTOR_HANDLE { return .{ .ptr = heap.cpu_base.ptr + index * heap.descriptor_size }; } pub fn gpuDescriptor(heap: *DescriptorHeap, index: u32) c.D3D12_GPU_DESCRIPTOR_HANDLE { return .{ .ptr = heap.gpu_base.ptr + index * heap.descriptor_size }; } }; const CommandManager = struct { device: *Device, free_allocators: std.ArrayListUnmanaged(*c.ID3D12CommandAllocator) = .{}, free_command_lists: std.ArrayListUnmanaged(*c.ID3D12GraphicsCommandList) = .{}, pub fn init(device: *Device) CommandManager { return .{ .device = device, }; } pub fn deinit(manager: *CommandManager) void { for (manager.free_allocators.items) |command_allocator| { _ = command_allocator.lpVtbl.*.Release.?(command_allocator); } for (manager.free_command_lists.items) |command_list| { _ = command_list.lpVtbl.*.Release.?(command_list); } manager.free_allocators.deinit(allocator); manager.free_command_lists.deinit(allocator); } pub fn createCommandAllocator(manager: *CommandManager) !*c.ID3D12CommandAllocator { const d3d_device = manager.device.d3d_device; var hr: c.HRESULT = undefined; // Recycle finished allocators if (manager.free_allocators.items.len == 0) { manager.device.processQueuedOperations(); } // Create new command allocator if (manager.free_allocators.items.len == 0) { var command_allocator: *c.ID3D12CommandAllocator = undefined; hr = d3d_device.lpVtbl.*.CreateCommandAllocator.?( d3d_device, c.D3D12_COMMAND_LIST_TYPE_DIRECT, &c.IID_ID3D12CommandAllocator, @ptrCast(&command_allocator), ); if (hr != c.S_OK) { return error.CreateCommandAllocatorFailed; } try manager.free_allocators.append(allocator, command_allocator); } // 
Reset const command_allocator = manager.free_allocators.pop(); hr = command_allocator.lpVtbl.*.Reset.?(command_allocator); if (hr != c.S_OK) { return error.ResetCommandAllocatorFailed; } return command_allocator; } pub fn destroyCommandAllocator(manager: *CommandManager, command_allocator: *c.ID3D12CommandAllocator) void { manager.free_allocators.append(allocator, command_allocator) catch { std.debug.panic("OutOfMemory", .{}); }; } pub fn createCommandList( manager: *CommandManager, command_allocator: *c.ID3D12CommandAllocator, ) !*c.ID3D12GraphicsCommandList { const d3d_device = manager.device.d3d_device; var hr: c.HRESULT = undefined; if (manager.free_command_lists.items.len == 0) { var command_list: *c.ID3D12GraphicsCommandList = undefined; hr = d3d_device.lpVtbl.*.CreateCommandList.?( d3d_device, 0, c.D3D12_COMMAND_LIST_TYPE_DIRECT, command_allocator, null, &c.IID_ID3D12GraphicsCommandList, @ptrCast(&command_list), ); if (hr != c.S_OK) { return error.CreateCommandListFailed; } return command_list; } const command_list = manager.free_command_lists.pop(); hr = command_list.lpVtbl.*.Reset.?( command_list, command_allocator, null, ); if (hr != c.S_OK) { return error.ResetCommandListFailed; } return command_list; } pub fn destroyCommandList(manager: *CommandManager, command_list: *c.ID3D12GraphicsCommandList) void { manager.free_command_lists.append(allocator, command_list) catch std.debug.panic("OutOfMemory", .{}); } }; pub const StreamingManager = struct { device: *Device, free_buffers: std.ArrayListUnmanaged(Resource) = .{}, pub fn init(device: *Device) !StreamingManager { return .{ .device = device, }; } pub fn deinit(manager: *StreamingManager) void { for (manager.free_buffers.items) |*d3d_resource| { d3d_resource.deinit(); } manager.free_buffers.deinit(allocator); } pub fn acquire(manager: *StreamingManager) !Resource { const device = manager.device; // Recycle finished buffers if (manager.free_buffers.items.len == 0) { device.processQueuedOperations(); } // Create new buffer if (manager.free_buffers.items.len == 0) { var resource = try device.createD3dBuffer(.{ .map_write = true }, upload_page_size); errdefer _ = resource.deinit(); setDebugName(@ptrCast(resource.d3d_resource), "upload"); try manager.free_buffers.append(allocator, resource); } // Result return manager.free_buffers.pop(); } pub fn release(manager: *StreamingManager, resource: Resource) void { manager.free_buffers.append(allocator, resource) catch { std.debug.panic("OutOfMemory", .{}); }; } }; pub const SwapChain = struct { manager: utils.Manager(SwapChain) = .{}, device: *Device, surface: *Surface, queue: *Queue, dxgi_swap_chain: *c.IDXGISwapChain3, width: u32, height: u32, back_buffer_count: u32, sync_interval: c.UINT, present_flags: c.UINT, textures: [max_back_buffer_count]*Texture, views: [max_back_buffer_count]*TextureView, fence_values: [max_back_buffer_count]u64, buffer_index: u32 = 0, pub fn init(device: *Device, surface: *Surface, desc: *const sysgpu.SwapChain.Descriptor) !*SwapChain { const instance = device.adapter.instance; const dxgi_factory = instance.dxgi_factory; var hr: c.HRESULT = undefined; device.processQueuedOperations(); // Swap Chain const back_buffer_count: u32 = if (desc.present_mode == .mailbox) 3 else 2; var swap_chain_desc = c.DXGI_SWAP_CHAIN_DESC1{ .Width = desc.width, .Height = desc.height, .Format = conv.dxgiFormatForTexture(desc.format), .Stereo = c.FALSE, .SampleDesc = .{ .Count = 1, .Quality = 0 }, .BufferUsage = conv.dxgiUsage(desc.usage), .BufferCount = back_buffer_count, .Scaling = 
c.DXGI_MODE_SCALING_UNSPECIFIED, .SwapEffect = c.DXGI_SWAP_EFFECT_FLIP_DISCARD, .AlphaMode = c.DXGI_ALPHA_MODE_UNSPECIFIED, .Flags = if (instance.allow_tearing) c.DXGI_SWAP_CHAIN_FLAG_ALLOW_TEARING else 0, }; var dxgi_swap_chain: *c.IDXGISwapChain3 = undefined; hr = dxgi_factory.lpVtbl.*.CreateSwapChainForHwnd.?( dxgi_factory, @ptrCast(device.queue.d3d_command_queue), surface.hwnd, &swap_chain_desc, null, null, @ptrCast(&dxgi_swap_chain), ); if (hr != c.S_OK) { return error.CreateSwapChainFailed; } errdefer _ = dxgi_swap_chain.lpVtbl.*.Release.?(dxgi_swap_chain); // Views var textures = std.BoundedArray(*Texture, max_back_buffer_count){}; var views = std.BoundedArray(*TextureView, max_back_buffer_count){}; var fence_values = std.BoundedArray(u64, max_back_buffer_count){}; errdefer { for (views.slice()) |view| view.manager.release(); for (textures.slice()) |texture| texture.manager.release(); } for (0..back_buffer_count) |i| { var buffer: *c.ID3D12Resource = undefined; hr = dxgi_swap_chain.lpVtbl.*.GetBuffer.?( dxgi_swap_chain, @intCast(i), &c.IID_ID3D12Resource, @ptrCast(&buffer), ); if (hr != c.S_OK) { return error.SwapChainGetBufferFailed; } const texture = try Texture.initForSwapChain(device, desc, buffer); const view = try texture.createView(&sysgpu.TextureView.Descriptor{}); textures.appendAssumeCapacity(texture); views.appendAssumeCapacity(view); fence_values.appendAssumeCapacity(0); } // Result const swapchain = try allocator.create(SwapChain); swapchain.* = .{ .device = device, .surface = surface, .queue = device.queue, .dxgi_swap_chain = dxgi_swap_chain, .width = desc.width, .height = desc.height, .back_buffer_count = back_buffer_count, .sync_interval = if (desc.present_mode == .immediate) 0 else 1, .present_flags = if (desc.present_mode == .immediate and instance.allow_tearing) DXGI_PRESENT_ALLOW_TEARING else 0, .textures = textures.buffer, .views = views.buffer, .fence_values = fence_values.buffer, }; return swapchain; } pub fn deinit(swapchain: *SwapChain) void { const dxgi_swap_chain = swapchain.dxgi_swap_chain; const queue = swapchain.queue; queue.waitUntil(queue.fence_value); for (swapchain.views[0..swapchain.back_buffer_count]) |view| view.manager.release(); for (swapchain.textures[0..swapchain.back_buffer_count]) |texture| texture.manager.release(); _ = dxgi_swap_chain.lpVtbl.*.Release.?(dxgi_swap_chain); allocator.destroy(swapchain); } pub fn getCurrentTextureView(swapchain: *SwapChain) !*TextureView { const dxgi_swap_chain = swapchain.dxgi_swap_chain; const fence_value = swapchain.fence_values[swapchain.buffer_index]; swapchain.queue.waitUntil(fence_value); const index = dxgi_swap_chain.lpVtbl.*.GetCurrentBackBufferIndex.?(dxgi_swap_chain); swapchain.buffer_index = index; // TEMP - resolve reference tracking in main.zig swapchain.views[index].manager.reference(); return swapchain.views[index]; } pub fn present(swapchain: *SwapChain) !void { const dxgi_swap_chain = swapchain.dxgi_swap_chain; const queue = swapchain.queue; var hr: c.HRESULT = undefined; hr = dxgi_swap_chain.lpVtbl.*.Present.?( dxgi_swap_chain, swapchain.sync_interval, swapchain.present_flags, ); if (hr != c.S_OK) { return error.PresentFailed; } queue.fence_value += 1; try queue.signal(); swapchain.fence_values[swapchain.buffer_index] = queue.fence_value; } }; pub const Resource = struct { // NOTE - this is a naive sync solution as a placeholder until render graphs are implemented mem_allocator: ?*MemoryAllocator = null, read_state: c.D3D12_RESOURCE_STATES, allocation: ?MemoryAllocator.Allocation = null, 
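// A sketch of the intended state protocol, inferred from StateTracker further
// below (not spelled out by the original author): `read_state` is the steady
// state a resource returns to between passes; an encoder transitions it for
// writes and endPass() restores it, e.g.:
//
//   try encoder.state_tracker.transition(&buffer.resource, c.D3D12_RESOURCE_STATE_COPY_DEST);
//   encoder.state_tracker.flush(command_list); // emits the queued barriers
//   // ... record the copy/dispatch/draw ...
//   try encoder.state_tracker.endPass(); // schedules the transition back to read_state
//
// This matches the call pattern used by copyBufferToBuffer and writeBuffer below.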
d3d_resource: *c.ID3D12Resource, memory_location: MemoryLocation = .unknown, size: u64 = 0, pub fn init( d3d_resource: *c.ID3D12Resource, read_state: c.D3D12_RESOURCE_STATES, ) Resource { return .{ .d3d_resource = d3d_resource, .read_state = read_state, }; } pub fn deinit(resource: *Resource) void { if (resource.mem_allocator) |mem_allocator| { mem_allocator.destroyResource(resource.*) catch {}; } } }; pub const Buffer = struct { manager: utils.Manager(Buffer) = .{}, device: *Device, resource: Resource, stage_buffer: ?*Buffer, gpu_count: u32 = 0, map: ?[*]u8, // TODO - packed buffer descriptor struct size: u64, usage: sysgpu.Buffer.UsageFlags, pub fn init(device: *Device, desc: *const sysgpu.Buffer.Descriptor) !*Buffer { var hr: c.HRESULT = undefined; var resource = try device.createD3dBuffer(desc.usage, desc.size); errdefer resource.deinit(); if (desc.label) |label| setDebugName(@ptrCast(resource.d3d_resource), label); // Mapped at Creation var stage_buffer: ?*Buffer = null; var map: ?*anyopaque = null; if (desc.mapped_at_creation == .true) { var map_resource: *c.ID3D12Resource = undefined; if (!desc.usage.map_write) { stage_buffer = try Buffer.init(device, &.{ .usage = .{ .copy_src = true, .map_write = true }, .size = desc.size, }); map_resource = stage_buffer.?.resource.d3d_resource; } else { map_resource = resource.d3d_resource; } // TODO - map status in callback instead of failure hr = map_resource.lpVtbl.*.Map.?(map_resource, 0, null, &map); if (hr != c.S_OK) { return error.MapBufferAtCreationFailed; } } // Result const buffer = try allocator.create(Buffer); buffer.* = .{ .device = device, .resource = resource, .stage_buffer = stage_buffer, .map = @ptrCast(map), .size = desc.size, .usage = desc.usage, }; return buffer; } pub fn deinit(buffer: *Buffer) void { if (buffer.stage_buffer) |stage_buffer| stage_buffer.manager.release(); buffer.resource.deinit(); allocator.destroy(buffer); } pub fn getMappedRange(buffer: *Buffer, offset: usize, size: usize) !?*anyopaque { return @ptrCast(buffer.map.?[offset .. 
offset + size]); } pub fn getSize(buffer: *Buffer) u64 { return buffer.size; } pub fn getUsage(buffer: *Buffer) sysgpu.Buffer.UsageFlags { return buffer.usage; } pub fn mapAsync( buffer: *Buffer, mode: sysgpu.MapModeFlags, offset: usize, size: usize, callback: sysgpu.Buffer.MapCallback, userdata: ?*anyopaque, ) !void { _ = size; _ = offset; _ = mode; const map_callback = MapCallback{ .buffer = buffer, .callback = callback, .userdata = userdata }; if (buffer.gpu_count == 0) { buffer.executeMapAsync(map_callback); } else { try buffer.device.map_callbacks.append(allocator, map_callback); } } pub fn setLabel(buffer: *Buffer, label: [*:0]const u8) void { setDebugName(@ptrCast(buffer.resource.d3d_resource), label); } pub fn unmap(buffer: *Buffer) !void { var map_resource: *c.ID3D12Resource = undefined; if (buffer.stage_buffer) |stage_buffer| { map_resource = stage_buffer.resource.d3d_resource; const encoder = try buffer.device.queue.getCommandEncoder(); try encoder.copyBufferToBuffer(stage_buffer, 0, buffer, 0, buffer.size); stage_buffer.manager.release(); buffer.stage_buffer = null; } else { map_resource = buffer.resource.d3d_resource; } map_resource.lpVtbl.*.Unmap.?(map_resource, 0, null); } // Internal pub fn executeMapAsync(buffer: *Buffer, map_callback: MapCallback) void { const d3d_resource = buffer.resource.d3d_resource; var hr: c.HRESULT = undefined; var map: ?*anyopaque = null; hr = d3d_resource.lpVtbl.*.Map.?(d3d_resource, 0, null, &map); if (hr != c.S_OK) { map_callback.callback(.unknown, map_callback.userdata); return; } buffer.map = @ptrCast(map); map_callback.callback(.success, map_callback.userdata); } }; pub const Texture = struct { manager: utils.Manager(Texture) = .{}, device: *Device, resource: Resource, // TODO - packed texture descriptor struct usage: sysgpu.Texture.UsageFlags, dimension: sysgpu.Texture.Dimension, size: sysgpu.Extent3D, format: sysgpu.Texture.Format, mip_level_count: u32, sample_count: u32, pub fn init(device: *Device, desc: *const sysgpu.Texture.Descriptor) !*Texture { const resource_desc = c.D3D12_RESOURCE_DESC{ .Dimension = conv.d3d12ResourceDimension(desc.dimension), .Alignment = 0, .Width = desc.size.width, .Height = desc.size.height, .DepthOrArraySize = @intCast(desc.size.depth_or_array_layers), .MipLevels = @intCast(desc.mip_level_count), .Format = conv.dxgiFormatForTextureResource(desc.format, desc.usage, desc.view_format_count), .SampleDesc = .{ .Count = desc.sample_count, .Quality = 0 }, .Layout = c.D3D12_TEXTURE_LAYOUT_UNKNOWN, .Flags = conv.d3d12ResourceFlagsForTexture(desc.usage, desc.format), }; const read_state = conv.d3d12ResourceStatesForTextureRead(desc.usage); const initial_state = read_state; const clear_value = c.D3D12_CLEAR_VALUE{ .Format = resource_desc.Format }; // TODO: the code below was terribly broken, I rewrote it, Is it correct? 
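// Background for the rewrite (inferred from the D3D12 API, not from the
// original author): ID3D12Device::CreateCommittedResource takes the
// D3D12_RESOURCE_DESC and the optimized D3D12_CLEAR_VALUE as separate
// parameters, and a clear value is only legal for render-target or
// depth-stencil resources. The version below therefore passes &resource_desc
// as the resource desc and gates &clear_value on render_attachment /
// depth-stencil usage; the commented-out original had those two arguments
// swapped. `.resource_category = .buffer` still looks questionable for a
// texture and may deserve its own follow-up.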
// const create_desc = ResourceCreateDescriptor{ // .location = .gpu_only, // .resource_desc = if (utils.formatHasDepthOrStencil(desc.format) or desc.usage.render_attachment) // &clear_value // else // null, // .clear_value = null, // .resource_category = .buffer, // .initial_state = initial_state, // }; const create_desc = ResourceCreateDescriptor{ .location = .gpu_only, .resource_desc = &resource_desc, .clear_value = if (utils.formatHasDepthOrStencil(desc.format) or desc.usage.render_attachment) &clear_value else null, .resource_category = .buffer, .initial_state = initial_state, }; const resource = device.mem_allocator.createResource(&create_desc) catch return error.CreateTextureFailed; if (desc.label) |label| setDebugName(@ptrCast(resource.d3d_resource), label); // Result const texture = try allocator.create(Texture); texture.* = .{ .device = device, .resource = resource, .usage = desc.usage, .dimension = desc.dimension, .size = desc.size, .format = desc.format, .mip_level_count = desc.mip_level_count, .sample_count = desc.sample_count, }; return texture; } pub fn initForSwapChain(device: *Device, desc: *const sysgpu.SwapChain.Descriptor, d3d_resource: *c.ID3D12Resource) !*Texture { const read_state = c.D3D12_RESOURCE_STATE_PRESENT; const texture = try allocator.create(Texture); texture.* = .{ .device = device, .resource = Resource.init(d3d_resource, read_state), .usage = desc.usage, .dimension = .dimension_2d, .size = .{ .width = desc.width, .height = desc.height, .depth_or_array_layers = 1 }, .format = desc.format, .mip_level_count = 1, .sample_count = 1, }; return texture; } pub fn deinit(texture: *Texture) void { texture.resource.deinit(); allocator.destroy(texture); } pub fn createView(texture: *Texture, desc: *const sysgpu.TextureView.Descriptor) !*TextureView { return TextureView.init(texture, desc); } // Internal pub fn calcSubresource(texture: *Texture, mip_level: u32, array_slice: u32) u32 { return mip_level + (array_slice * texture.mip_level_count); } }; pub const TextureView = struct { manager: utils.Manager(TextureView) = .{}, texture: *Texture, format: sysgpu.Texture.Format, dimension: sysgpu.TextureView.Dimension, base_mip_level: u32, mip_level_count: u32, base_array_layer: u32, array_layer_count: u32, aspect: sysgpu.Texture.Aspect, base_subresource: u32, pub fn init(texture: *Texture, desc: *const sysgpu.TextureView.Descriptor) !*TextureView { texture.manager.reference(); const texture_dimension: sysgpu.TextureView.Dimension = switch (texture.dimension) { .dimension_1d => .dimension_1d, .dimension_2d => .dimension_2d, .dimension_3d => .dimension_3d, }; const view = try allocator.create(TextureView); view.* = .{ .texture = texture, .format = if (desc.format != .undefined) desc.format else texture.format, .dimension = if (desc.dimension != .dimension_undefined) desc.dimension else texture_dimension, .base_mip_level = desc.base_mip_level, .mip_level_count = desc.mip_level_count, .base_array_layer = desc.base_array_layer, .array_layer_count = desc.array_layer_count, .aspect = desc.aspect, .base_subresource = texture.calcSubresource(desc.base_mip_level, desc.base_array_layer), }; return view; } pub fn deinit(view: *TextureView) void { view.texture.manager.release(); allocator.destroy(view); } // Internal pub fn width(view: *TextureView) u32 { return @max(1, view.texture.size.width >> @intCast(view.base_mip_level)); } pub fn height(view: *TextureView) u32 { return @max(1, view.texture.size.height >> @intCast(view.base_mip_level)); } pub fn srvDesc(view: *TextureView) 
c.D3D12_SHADER_RESOURCE_VIEW_DESC { var srv_desc: c.D3D12_SHADER_RESOURCE_VIEW_DESC = undefined; srv_desc.Format = conv.dxgiFormatForTextureView(view.format, view.aspect); srv_desc.ViewDimension = conv.d3d12SrvDimension(view.dimension, view.texture.sample_count); srv_desc.Shader4ComponentMapping = c.D3D12_DEFAULT_SHADER_4_COMPONENT_MAPPING; switch (srv_desc.ViewDimension) { c.D3D12_SRV_DIMENSION_TEXTURE1D => srv_desc.unnamed_0.Texture1D = .{ .MostDetailedMip = view.base_mip_level, .MipLevels = view.mip_level_count, .ResourceMinLODClamp = 0.0, }, c.D3D12_SRV_DIMENSION_TEXTURE2D => srv_desc.unnamed_0.Texture2D = .{ .MostDetailedMip = view.base_mip_level, .MipLevels = view.mip_level_count, .PlaneSlice = 0, // TODO .ResourceMinLODClamp = 0.0, }, c.D3D12_SRV_DIMENSION_TEXTURE2DARRAY => srv_desc.unnamed_0.Texture2DArray = .{ .MostDetailedMip = view.base_mip_level, .MipLevels = view.mip_level_count, .FirstArraySlice = view.base_array_layer, .ArraySize = view.array_layer_count, .PlaneSlice = 0, .ResourceMinLODClamp = 0.0, }, c.D3D12_SRV_DIMENSION_TEXTURE2DMS => {}, c.D3D12_SRV_DIMENSION_TEXTURE2DMSARRAY => srv_desc.unnamed_0.Texture2DMSArray = .{ .FirstArraySlice = view.base_array_layer, .ArraySize = view.array_layer_count, }, c.D3D12_SRV_DIMENSION_TEXTURE3D => srv_desc.unnamed_0.Texture3D = .{ .MostDetailedMip = view.base_mip_level, .MipLevels = view.mip_level_count, .ResourceMinLODClamp = 0.0, }, c.D3D12_SRV_DIMENSION_TEXTURECUBE => srv_desc.unnamed_0.TextureCube = .{ .MostDetailedMip = view.base_mip_level, .MipLevels = view.mip_level_count, .ResourceMinLODClamp = 0.0, }, c.D3D12_SRV_DIMENSION_TEXTURECUBEARRAY => srv_desc.unnamed_0.TextureCubeArray = .{ .MostDetailedMip = view.base_mip_level, .MipLevels = view.mip_level_count, .First2DArrayFace = view.base_array_layer, // TODO - does this need a conversion? .NumCubes = view.array_layer_count, // TODO - does this need a conversion? .ResourceMinLODClamp = 0.0, }, else => {}, } return srv_desc; } pub fn uavDesc(view: *TextureView) c.D3D12_UNORDERED_ACCESS_VIEW_DESC { var uav_desc: c.D3D12_UNORDERED_ACCESS_VIEW_DESC = undefined; uav_desc.Format = conv.dxgiFormatForTextureView(view.format, view.aspect); uav_desc.ViewDimension = conv.d3d12UavDimension(view.dimension); switch (uav_desc.ViewDimension) { c.D3D12_UAV_DIMENSION_TEXTURE1D => uav_desc.unnamed_0.Texture1D = .{ .MipSlice = view.base_mip_level, }, c.D3D12_UAV_DIMENSION_TEXTURE2D => uav_desc.unnamed_0.Texture2D = .{ .MipSlice = view.base_mip_level, .PlaneSlice = 0, // TODO }, c.D3D12_UAV_DIMENSION_TEXTURE2DARRAY => uav_desc.unnamed_0.Texture2DArray = .{ .MipSlice = view.base_mip_level, .FirstArraySlice = view.base_array_layer, .ArraySize = view.array_layer_count, .PlaneSlice = 0, }, c.D3D12_UAV_DIMENSION_TEXTURE3D => uav_desc.unnamed_0.Texture3D = .{ .MipSlice = view.base_mip_level, .FirstWSlice = view.base_array_layer, // TODO - ?? .WSize = view.array_layer_count, // TODO - ?? 
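// Assumption behind FirstWSlice/WSize above: for a 3D UAV they select a
// range of depth (W) slices, so reusing base_array_layer/array_layer_count
// mirrors how WebGPU addresses 3D texture views; this mapping is
// unverified, hence the TODOs.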
}, else => {}, } return uav_desc; } }; pub const Sampler = struct { manager: utils.Manager(Sampler) = .{}, d3d_desc: c.D3D12_SAMPLER_DESC, pub fn init(device: *Device, desc: *const sysgpu.Sampler.Descriptor) !*Sampler { _ = device; const d3d_desc = c.D3D12_SAMPLER_DESC{ .Filter = conv.d3d12Filter(desc.mag_filter, desc.min_filter, desc.mipmap_filter, desc.max_anisotropy), .AddressU = conv.d3d12TextureAddressMode(desc.address_mode_u), .AddressV = conv.d3d12TextureAddressMode(desc.address_mode_v), .AddressW = conv.d3d12TextureAddressMode(desc.address_mode_w), .MipLODBias = 0.0, .MaxAnisotropy = desc.max_anisotropy, .ComparisonFunc = if (desc.compare != .undefined) conv.d3d12ComparisonFunc(desc.compare) else c.D3D12_COMPARISON_FUNC_NEVER, .BorderColor = [4]c.FLOAT{ 0.0, 0.0, 0.0, 0.0 }, .MinLOD = desc.lod_min_clamp, .MaxLOD = desc.lod_max_clamp, }; const sampler = try allocator.create(Sampler); sampler.* = .{ .d3d_desc = d3d_desc, }; return sampler; } pub fn deinit(sampler: *Sampler) void { allocator.destroy(sampler); } }; pub const BindGroupLayout = struct { const Entry = struct { binding: u32, visibility: sysgpu.ShaderStageFlags, buffer: sysgpu.Buffer.BindingLayout = .{}, sampler: sysgpu.Sampler.BindingLayout = .{}, texture: sysgpu.Texture.BindingLayout = .{}, storage_texture: sysgpu.StorageTextureBindingLayout = .{}, range_type: c.D3D12_DESCRIPTOR_RANGE_TYPE, table_index: ?u32, dynamic_index: ?u32, }; const DynamicEntry = struct { parameter_type: c.D3D12_ROOT_PARAMETER_TYPE, }; manager: utils.Manager(BindGroupLayout) = .{}, entries: std.ArrayListUnmanaged(Entry), dynamic_entries: std.ArrayListUnmanaged(DynamicEntry), general_table_size: u32, sampler_table_size: u32, pub fn init(device: *Device, desc: *const sysgpu.BindGroupLayout.Descriptor) !*BindGroupLayout { _ = device; var entries = std.ArrayListUnmanaged(Entry){}; errdefer entries.deinit(allocator); var dynamic_entries = std.ArrayListUnmanaged(DynamicEntry){}; errdefer dynamic_entries.deinit(allocator); var general_table_size: u32 = 0; var sampler_table_size: u32 = 0; for (0..desc.entry_count) |entry_index| { const entry = desc.entries.?[entry_index]; var table_index: ?u32 = null; var dynamic_index: ?u32 = null; if (entry.buffer.has_dynamic_offset == .true) { dynamic_index = @intCast(dynamic_entries.items.len); try dynamic_entries.append(allocator, .{ .parameter_type = conv.d3d12RootParameterType(entry), }); } else if (entry.sampler.type != .undefined) { table_index = sampler_table_size; sampler_table_size += 1; } else { table_index = general_table_size; general_table_size += 1; } try entries.append(allocator, .{ .binding = entry.binding, .visibility = entry.visibility, .buffer = entry.buffer, .sampler = entry.sampler, .texture = entry.texture, .storage_texture = entry.storage_texture, .range_type = conv.d3d12DescriptorRangeType(entry), .table_index = table_index, .dynamic_index = dynamic_index, }); } const layout = try allocator.create(BindGroupLayout); layout.* = .{ .entries = entries, .dynamic_entries = dynamic_entries, .general_table_size = general_table_size, .sampler_table_size = sampler_table_size, }; return layout; } pub fn deinit(layout: *BindGroupLayout) void { layout.entries.deinit(allocator); layout.dynamic_entries.deinit(allocator); allocator.destroy(layout); } // Internal pub fn getEntry(layout: *BindGroupLayout, binding: u32) ?*const Entry { for (layout.entries.items) |*entry| { if (entry.binding == binding) return entry; } return null; } }; pub const BindGroup = struct { const ResourceAccess = struct { resource: 
*Resource, uav: bool, }; const DynamicResource = struct { address: c.D3D12_GPU_VIRTUAL_ADDRESS, parameter_type: c.D3D12_ROOT_PARAMETER_TYPE, }; manager: utils.Manager(BindGroup) = .{}, device: *Device, general_allocation: ?DescriptorAllocation, general_table: ?c.D3D12_GPU_DESCRIPTOR_HANDLE, sampler_allocation: ?DescriptorAllocation, sampler_table: ?c.D3D12_GPU_DESCRIPTOR_HANDLE, dynamic_resources: []DynamicResource, buffers: std.ArrayListUnmanaged(*Buffer), textures: std.ArrayListUnmanaged(*Texture), accesses: std.ArrayListUnmanaged(ResourceAccess), pub fn init(device: *Device, desc: *const sysgpu.BindGroup.Descriptor) !*BindGroup { const d3d_device = device.d3d_device; const layout: *BindGroupLayout = @ptrCast(@alignCast(desc.layout)); // General Descriptor Table var general_allocation: ?DescriptorAllocation = null; var general_table: ?c.D3D12_GPU_DESCRIPTOR_HANDLE = null; if (layout.general_table_size > 0) { const allocation = try device.general_heap.alloc(); general_allocation = allocation; general_table = device.general_heap.gpuDescriptor(allocation.index); for (0..desc.entry_count) |i| { const entry = desc.entries.?[i]; const layout_entry = layout.getEntry(entry.binding) orelse return error.UnknownBinding; if (layout_entry.sampler.type != .undefined) continue; if (layout_entry.table_index) |table_index| { const dest_descriptor = device.general_heap.cpuDescriptor(allocation.index + table_index); if (layout_entry.buffer.type != .undefined) { const buffer: *Buffer = @ptrCast(@alignCast(entry.buffer.?)); const d3d_resource = buffer.resource.d3d_resource; const buffer_location = d3d_resource.lpVtbl.*.GetGPUVirtualAddress.?(d3d_resource) + entry.offset; switch (layout_entry.buffer.type) { .undefined => unreachable, .uniform => { const cbv_desc: c.D3D12_CONSTANT_BUFFER_VIEW_DESC = .{ .BufferLocation = buffer_location, .SizeInBytes = @intCast(utils.alignUp(entry.size, limits.min_uniform_buffer_offset_alignment)), }; d3d_device.lpVtbl.*.CreateConstantBufferView.?( d3d_device, &cbv_desc, dest_descriptor, ); }, .storage => { // TODO - switch to RWByteAddressBuffer after using DXC const stride = entry.elem_size; const uav_desc: c.D3D12_UNORDERED_ACCESS_VIEW_DESC = .{ .Format = c.DXGI_FORMAT_UNKNOWN, .ViewDimension = c.D3D12_UAV_DIMENSION_BUFFER, .unnamed_0 = .{ .Buffer = .{ .FirstElement = @intCast(entry.offset / stride), .NumElements = @intCast(entry.size / stride), .StructureByteStride = stride, .CounterOffsetInBytes = 0, .Flags = 0, }, }, }; d3d_device.lpVtbl.*.CreateUnorderedAccessView.?( d3d_device, d3d_resource, null, &uav_desc, dest_descriptor, ); }, .read_only_storage => { // TODO - switch to ByteAddressBuffer after using DXC const stride = entry.elem_size; const srv_desc: c.D3D12_SHADER_RESOURCE_VIEW_DESC = .{ .Format = c.DXGI_FORMAT_UNKNOWN, .ViewDimension = c.D3D12_SRV_DIMENSION_BUFFER, .Shader4ComponentMapping = c.D3D12_DEFAULT_SHADER_4_COMPONENT_MAPPING, .unnamed_0 = .{ .Buffer = .{ .FirstElement = @intCast(entry.offset / stride), .NumElements = @intCast(entry.size / stride), .StructureByteStride = stride, .Flags = 0, }, }, }; d3d_device.lpVtbl.*.CreateShaderResourceView.?( d3d_device, d3d_resource, &srv_desc, dest_descriptor, ); }, } } else if (layout_entry.texture.sample_type != .undefined) { const texture_view: *TextureView = @ptrCast(@alignCast(entry.texture_view.?)); const d3d_resource = texture_view.texture.resource.d3d_resource; d3d_device.lpVtbl.*.CreateShaderResourceView.?( d3d_device, d3d_resource, &texture_view.srvDesc(), dest_descriptor, ); } else if 
(layout_entry.storage_texture.format != .undefined) { const texture_view: *TextureView = @ptrCast(@alignCast(entry.texture_view.?)); const d3d_resource = texture_view.texture.resource.d3d_resource; d3d_device.lpVtbl.*.CreateUnorderedAccessView.?( d3d_device, d3d_resource, null, &texture_view.uavDesc(), dest_descriptor, ); } } } } // Sampler Descriptor Table var sampler_allocation: ?DescriptorAllocation = null; var sampler_table: ?c.D3D12_GPU_DESCRIPTOR_HANDLE = null; if (layout.sampler_table_size > 0) { const allocation = try device.sampler_heap.alloc(); sampler_allocation = allocation; sampler_table = device.sampler_heap.gpuDescriptor(allocation.index); for (0..desc.entry_count) |i| { const entry = desc.entries.?[i]; const layout_entry = layout.getEntry(entry.binding) orelse return error.UnknownBinding; if (layout_entry.sampler.type == .undefined) continue; if (layout_entry.table_index) |table_index| { const dest_descriptor = device.sampler_heap.cpuDescriptor(allocation.index + table_index); const sampler: *Sampler = @ptrCast(@alignCast(entry.sampler.?)); d3d_device.lpVtbl.*.CreateSampler.?( d3d_device, &sampler.d3d_desc, dest_descriptor, ); } } } // Resource tracking and dynamic resources var dynamic_resources = try allocator.alloc(DynamicResource, layout.dynamic_entries.items.len); errdefer allocator.free(dynamic_resources); var buffers = std.ArrayListUnmanaged(*Buffer){}; errdefer buffers.deinit(allocator); var textures = std.ArrayListUnmanaged(*Texture){}; errdefer textures.deinit(allocator); var accesses = std.ArrayListUnmanaged(ResourceAccess){}; errdefer accesses.deinit(allocator); for (0..desc.entry_count) |i| { const entry = desc.entries.?[i]; const layout_entry = layout.getEntry(entry.binding) orelse return error.UnknownBinding; if (layout_entry.buffer.type != .undefined) { const buffer: *Buffer = @ptrCast(@alignCast(entry.buffer.?)); const d3d_resource = buffer.resource.d3d_resource; try buffers.append(allocator, buffer); buffer.manager.reference(); const buffer_location = d3d_resource.lpVtbl.*.GetGPUVirtualAddress.?(d3d_resource) + entry.offset; if (layout_entry.dynamic_index) |dynamic_index| { const layout_dynamic_entry = layout.dynamic_entries.items[dynamic_index]; dynamic_resources[dynamic_index] = .{ .address = buffer_location, .parameter_type = layout_dynamic_entry.parameter_type, }; } try accesses.append(allocator, .{ .resource = &buffer.resource, .uav = layout_entry.buffer.type == .storage }); } else if (layout_entry.sampler.type != .undefined) {} else if (layout_entry.texture.sample_type != .undefined) { const texture_view: *TextureView = @ptrCast(@alignCast(entry.texture_view.?)); const texture = texture_view.texture; try textures.append(allocator, texture); texture.manager.reference(); try accesses.append(allocator, .{ .resource = &texture.resource, .uav = false }); } else if (layout_entry.storage_texture.format != .undefined) { const texture_view: *TextureView = @ptrCast(@alignCast(entry.texture_view.?)); const texture = texture_view.texture; try textures.append(allocator, texture); texture.manager.reference(); try accesses.append(allocator, .{ .resource = &texture.resource, .uav = true }); } } const group = try allocator.create(BindGroup); group.* = .{ .device = device, .general_allocation = general_allocation, .general_table = general_table, .sampler_allocation = sampler_allocation, .sampler_table = sampler_table, .dynamic_resources = dynamic_resources, .buffers = buffers, .textures = textures, .accesses = accesses, }; return group; } pub fn deinit(group: 
*BindGroup) void { if (group.general_allocation) |allocation| group.device.general_heap.free(allocation); if (group.sampler_allocation) |allocation| group.device.sampler_heap.free(allocation); for (group.buffers.items) |buffer| buffer.manager.release(); for (group.textures.items) |texture| texture.manager.release(); group.buffers.deinit(allocator); group.textures.deinit(allocator); group.accesses.deinit(allocator); allocator.free(group.dynamic_resources); allocator.destroy(group); } }; pub const PipelineLayout = struct { pub const Function = struct { stage: sysgpu.ShaderStageFlags, shader_module: *ShaderModule, entry_point: [*:0]const u8, }; manager: utils.Manager(PipelineLayout) = .{}, root_signature: *c.ID3D12RootSignature, group_layouts: []*BindGroupLayout, group_parameter_indices: std.BoundedArray(u32, limits.max_bind_groups), pub fn init(device: *Device, desc: *const sysgpu.PipelineLayout.Descriptor) !*PipelineLayout { const d3d_device = device.d3d_device; var hr: c.HRESULT = undefined; // Per Bind Group: // - up to 1 descriptor table for CBV/SRV/UAV // - up to 1 descriptor table for Sampler // - 1 root descriptor per dynamic resource // Root signature 1.1 hints not supported yet var group_layouts = try allocator.alloc(*BindGroupLayout, desc.bind_group_layout_count); errdefer allocator.free(group_layouts); var group_parameter_indices = std.BoundedArray(u32, limits.max_bind_groups){}; var parameter_count: u32 = 0; var range_count: u32 = 0; for (0..desc.bind_group_layout_count) |i| { const layout: *BindGroupLayout = @ptrCast(@alignCast(desc.bind_group_layouts.?[i])); layout.manager.reference(); group_layouts[i] = layout; group_parameter_indices.appendAssumeCapacity(parameter_count); var general_entry_count: u32 = 0; var sampler_entry_count: u32 = 0; for (layout.entries.items) |entry| { if (entry.dynamic_index) |_| { parameter_count += 1; } else if (entry.sampler.type != .undefined) { sampler_entry_count += 1; range_count += 1; } else { general_entry_count += 1; range_count += 1; } } if (general_entry_count > 0) parameter_count += 1; if (sampler_entry_count > 0) parameter_count += 1; } var parameters = try std.ArrayListUnmanaged(c.D3D12_ROOT_PARAMETER).initCapacity(allocator, parameter_count); defer parameters.deinit(allocator); var ranges = try std.ArrayListUnmanaged(c.D3D12_DESCRIPTOR_RANGE).initCapacity(allocator, range_count); defer ranges.deinit(allocator); for (0..desc.bind_group_layout_count) |group_index| { const layout: *BindGroupLayout = group_layouts[group_index]; // General Table { const entry_range_base = ranges.items.len; for (layout.entries.items) |entry| { if (entry.dynamic_index == null and entry.sampler.type == .undefined) { ranges.appendAssumeCapacity(.{ .RangeType = entry.range_type, .NumDescriptors = 1, .BaseShaderRegister = entry.binding, .RegisterSpace = @intCast(group_index), .OffsetInDescriptorsFromTableStart = c.D3D12_DESCRIPTOR_RANGE_OFFSET_APPEND, }); } } const entry_range_count = ranges.items.len - entry_range_base; if (entry_range_count > 0) { parameters.appendAssumeCapacity(.{ .ParameterType = c.D3D12_ROOT_PARAMETER_TYPE_DESCRIPTOR_TABLE, .unnamed_0 = .{ .DescriptorTable = .{ .NumDescriptorRanges = @intCast(entry_range_count), .pDescriptorRanges = &ranges.items[entry_range_base], }, }, .ShaderVisibility = c.D3D12_SHADER_VISIBILITY_ALL, }); } } // Sampler Table { const entry_range_base = ranges.items.len; for (layout.entries.items) |entry| { if (entry.dynamic_index == null and entry.sampler.type != .undefined) { ranges.appendAssumeCapacity(.{ .RangeType = 
entry.range_type, .NumDescriptors = 1, .BaseShaderRegister = entry.binding, .RegisterSpace = @intCast(group_index), .OffsetInDescriptorsFromTableStart = c.D3D12_DESCRIPTOR_RANGE_OFFSET_APPEND, }); } } const entry_range_count = ranges.items.len - entry_range_base; if (entry_range_count > 0) { parameters.appendAssumeCapacity(.{ .ParameterType = c.D3D12_ROOT_PARAMETER_TYPE_DESCRIPTOR_TABLE, .unnamed_0 = .{ .DescriptorTable = .{ .NumDescriptorRanges = @intCast(entry_range_count), .pDescriptorRanges = &ranges.items[entry_range_base], }, }, .ShaderVisibility = c.D3D12_SHADER_VISIBILITY_ALL, }); } } // Dynamic Resources for (layout.entries.items) |entry| { if (entry.dynamic_index) |dynamic_index| { const layout_dynamic_entry = layout.dynamic_entries.items[dynamic_index]; parameters.appendAssumeCapacity(.{ .ParameterType = layout_dynamic_entry.parameter_type, .unnamed_0 = .{ .Descriptor = .{ .ShaderRegister = entry.binding, .RegisterSpace = @intCast(group_index), }, }, .ShaderVisibility = c.D3D12_SHADER_VISIBILITY_ALL, }); } } } var root_signature_blob: *c.ID3DBlob = undefined; var opt_errors: ?*c.ID3DBlob = null; hr = c.D3D12SerializeRootSignature( &c.D3D12_ROOT_SIGNATURE_DESC{ .NumParameters = @intCast(parameters.items.len), .pParameters = parameters.items.ptr, .NumStaticSamplers = 0, .pStaticSamplers = null, .Flags = c.D3D12_ROOT_SIGNATURE_FLAG_ALLOW_INPUT_ASSEMBLER_INPUT_LAYOUT, // TODO - would like a flag for this }, c.D3D_ROOT_SIGNATURE_VERSION_1, @ptrCast(&root_signature_blob), @ptrCast(&opt_errors), ); if (opt_errors) |errors| { const message: [*:0]const u8 = @ptrCast(errors.lpVtbl.*.GetBufferPointer.?(errors).?); std.debug.print("{s}\n", .{message}); _ = errors.lpVtbl.*.Release.?(errors); } if (hr != c.S_OK) { return error.SerializeRootSignatureFailed; } defer _ = root_signature_blob.lpVtbl.*.Release.?(root_signature_blob); var root_signature: *c.ID3D12RootSignature = undefined; hr = d3d_device.lpVtbl.*.CreateRootSignature.?( d3d_device, 0, root_signature_blob.lpVtbl.*.GetBufferPointer.?(root_signature_blob), root_signature_blob.lpVtbl.*.GetBufferSize.?(root_signature_blob), &c.IID_ID3D12RootSignature, @ptrCast(&root_signature), ); if (hr != c.S_OK) { return error.CreateRootSignatureFailed; } errdefer _ = root_signature.lpVtbl.*.Release.?(root_signature); // Result const layout = try allocator.create(PipelineLayout); layout.* = .{ .root_signature = root_signature, .group_layouts = group_layouts, .group_parameter_indices = group_parameter_indices, }; return layout; } pub fn initDefault(device: *Device, default_pipeline_layout: utils.DefaultPipelineLayoutDescriptor) !*PipelineLayout { const groups = default_pipeline_layout.groups; var bind_group_layouts = std.BoundedArray(*sysgpu.BindGroupLayout, limits.max_bind_groups){}; defer { for (bind_group_layouts.slice()) |bind_group_layout| bind_group_layout.release(); } for (groups.slice()) |entries| { const bind_group_layout = try device.createBindGroupLayout( &sysgpu.BindGroupLayout.Descriptor.init(.{ .entries = entries.items }), ); bind_group_layouts.appendAssumeCapacity(@ptrCast(bind_group_layout)); } return device.createPipelineLayout( &sysgpu.PipelineLayout.Descriptor.init(.{ .bind_group_layouts = bind_group_layouts.slice() }), ); } pub fn deinit(layout: *PipelineLayout) void { const root_signature = layout.root_signature; for (layout.group_layouts) |group_layout| group_layout.manager.release(); _ = root_signature.lpVtbl.*.Release.?(root_signature); allocator.free(layout.group_layouts); allocator.destroy(layout); } }; pub const ShaderModule = struct { manager: utils.Manager(ShaderModule) = .{}, code: 
union(enum) { code: []const u8, air: *shader.Air, }, pub fn initAir(device: *Device, air: *shader.Air) !*ShaderModule { _ = device; const module = try allocator.create(ShaderModule); module.* = .{ .code = .{ .air = air } }; return module; } pub fn deinit(module: *ShaderModule) void { if (module.code == .air) { module.code.air.deinit(allocator); allocator.destroy(module.code.air); } allocator.destroy(module); } // Internal fn compile(module: *ShaderModule, entrypoint: [*:0]const u8, target: [*:0]const u8) !*c.ID3DBlob { var hr: c.HRESULT = undefined; const code = switch (module.code) { .air => |air| try shader.CodeGen.generate(allocator, air, .hlsl, false, .{ .emit_source_file = "" }, null, null, null), .code => |code| code, }; defer if (module.code == .air) allocator.free(code); var flags: u32 = 0; if (debug_enabled) flags |= c.D3DCOMPILE_DEBUG | c.D3DCOMPILE_SKIP_OPTIMIZATION; var shader_blob: *c.ID3DBlob = undefined; var opt_errors: ?*c.ID3DBlob = null; hr = c.D3DCompile( code.ptr, code.len, null, null, null, entrypoint, target, flags, 0, @ptrCast(&shader_blob), @ptrCast(&opt_errors), ); if (opt_errors) |errors| { const message: [*:0]const u8 = @ptrCast(errors.lpVtbl.*.GetBufferPointer.?(errors).?); std.debug.print("{s}\n", .{message}); _ = errors.lpVtbl.*.Release.?(errors); } if (hr != c.S_OK) { return error.CompileShaderFailed; } return shader_blob; } }; pub const ComputePipeline = struct { manager: utils.Manager(ComputePipeline) = .{}, device: *Device, d3d_pipeline: *c.ID3D12PipelineState, layout: *PipelineLayout, pub fn init(device: *Device, desc: *const sysgpu.ComputePipeline.Descriptor) !*ComputePipeline { const d3d_device = device.d3d_device; var hr: c.HRESULT = undefined; const compute_module: *ShaderModule = @ptrCast(@alignCast(desc.compute.module)); // Pipeline Layout var layout: *PipelineLayout = undefined; if (desc.layout) |layout_raw| { layout = @ptrCast(@alignCast(layout_raw)); layout.manager.reference(); } else if (compute_module.code == .air) { var layout_desc = utils.DefaultPipelineLayoutDescriptor.init(allocator); defer layout_desc.deinit(); try layout_desc.addFunction(compute_module.code.air, .{ .compute = true }, desc.compute.entry_point); layout = try PipelineLayout.initDefault(device, layout_desc); } else { @panic( \\Cannot create pipeline layout automatically. \\Please provide it yourself or write the shader in WGSL. 
); } errdefer layout.manager.release(); // Shaders const compute_shader = try compute_module.compile(desc.compute.entry_point, "cs_5_1"); defer _ = compute_shader.lpVtbl.*.Release.?(compute_shader); // PSO var d3d_pipeline: *c.ID3D12PipelineState = undefined; hr = d3d_device.lpVtbl.*.CreateComputePipelineState.?( d3d_device, &c.D3D12_COMPUTE_PIPELINE_STATE_DESC{ .pRootSignature = layout.root_signature, .CS = conv.d3d12ShaderBytecode(compute_shader), .NodeMask = 0, .CachedPSO = .{ .pCachedBlob = null, .CachedBlobSizeInBytes = 0 }, .Flags = c.D3D12_PIPELINE_STATE_FLAG_NONE, }, &c.IID_ID3D12PipelineState, @ptrCast(&d3d_pipeline), ); if (hr != c.S_OK) { return error.CreateComputePipelineFailed; } errdefer _ = d3d_pipeline.lpVtbl.*.Release.?(d3d_pipeline); if (desc.label) |label| setDebugName(@ptrCast(d3d_pipeline), label); // Result const pipeline = try allocator.create(ComputePipeline); pipeline.* = .{ .device = device, .d3d_pipeline = d3d_pipeline, .layout = layout, }; return pipeline; } pub fn deinit(pipeline: *ComputePipeline) void { const d3d_pipeline = pipeline.d3d_pipeline; pipeline.layout.manager.release(); _ = d3d_pipeline.lpVtbl.*.Release.?(d3d_pipeline); allocator.destroy(pipeline); } pub fn getBindGroupLayout(pipeline: *ComputePipeline, group_index: u32) *BindGroupLayout { return @ptrCast(pipeline.layout.group_layouts[group_index]); } }; pub const RenderPipeline = struct { manager: utils.Manager(RenderPipeline) = .{}, device: *Device, d3d_pipeline: *c.ID3D12PipelineState, layout: *PipelineLayout, topology: c.D3D12_PRIMITIVE_TOPOLOGY_TYPE, vertex_strides: std.BoundedArray(c.UINT, limits.max_vertex_buffers), pub fn init(device: *Device, desc: *const sysgpu.RenderPipeline.Descriptor) !*RenderPipeline { const d3d_device = device.d3d_device; var hr: c.HRESULT = undefined; const vertex_module: *ShaderModule = @ptrCast(@alignCast(desc.vertex.module)); // Pipeline Layout var layout: *PipelineLayout = undefined; if (desc.layout) |layout_raw| { layout = @ptrCast(@alignCast(layout_raw)); layout.manager.reference(); } else if (vertex_module.code == .air) { var layout_desc = utils.DefaultPipelineLayoutDescriptor.init(allocator); defer layout_desc.deinit(); try layout_desc.addFunction(vertex_module.code.air, .{ .vertex = true }, desc.vertex.entry_point); if (desc.fragment) |frag| { const frag_module: *ShaderModule = @ptrCast(@alignCast(frag.module)); if (frag_module.code == .air) { try layout_desc.addFunction(frag_module.code.air, .{ .fragment = true }, frag.entry_point); } else { @panic( \\Cannot create pipeline layout automatically. \\Please provide it yourself or write the shader in WGSL. ); } } layout = try PipelineLayout.initDefault(device, layout_desc); } else { @panic( \\Cannot create pipeline layout automatically. \\Please provide it yourself or write the shader in WGSL. 
); } errdefer layout.manager.release(); // Shaders const vertex_shader = try vertex_module.compile(desc.vertex.entry_point, "vs_5_1"); defer _ = vertex_shader.lpVtbl.*.Release.?(vertex_shader); var opt_pixel_shader: ?*c.ID3DBlob = null; if (desc.fragment) |frag| { const frag_module: *ShaderModule = @ptrCast(@alignCast(frag.module)); opt_pixel_shader = try frag_module.compile(frag.entry_point, "ps_5_1"); } defer if (opt_pixel_shader) |pixel_shader| { _ = pixel_shader.lpVtbl.*.Release.?(pixel_shader); }; // PSO var input_elements = std.BoundedArray(c.D3D12_INPUT_ELEMENT_DESC, limits.max_vertex_buffers){}; var vertex_strides = std.BoundedArray(c.UINT, limits.max_vertex_buffers){}; for (0..desc.vertex.buffer_count) |i| { const buffer = desc.vertex.buffers.?[i]; for (0..buffer.attribute_count) |j| { const attr = buffer.attributes.?[j]; input_elements.appendAssumeCapacity(conv.d3d12InputElementDesc(i, buffer, attr)); } vertex_strides.appendAssumeCapacity(@intCast(buffer.array_stride)); } var num_render_targets: usize = 0; var rtv_formats = [_]c.DXGI_FORMAT{c.DXGI_FORMAT_UNKNOWN} ** limits.max_color_attachments; if (desc.fragment) |frag| { num_render_targets = frag.target_count; for (0..frag.target_count) |i| { const target = frag.targets.?[i]; rtv_formats[i] = conv.dxgiFormatForTexture(target.format); } } var d3d_pipeline: *c.ID3D12PipelineState = undefined; hr = d3d_device.lpVtbl.*.CreateGraphicsPipelineState.?( d3d_device, &c.D3D12_GRAPHICS_PIPELINE_STATE_DESC{ .pRootSignature = layout.root_signature, .VS = conv.d3d12ShaderBytecode(vertex_shader), .PS = conv.d3d12ShaderBytecode(opt_pixel_shader), .DS = conv.d3d12ShaderBytecode(null), .HS = conv.d3d12ShaderBytecode(null), .GS = conv.d3d12ShaderBytecode(null), .StreamOutput = conv.d3d12StreamOutputDesc(), .BlendState = conv.d3d12BlendDesc(desc), .SampleMask = desc.multisample.mask, .RasterizerState = conv.d3d12RasterizerDesc(desc), .DepthStencilState = conv.d3d12DepthStencilDesc(desc.depth_stencil), .InputLayout = .{ .pInputElementDescs = if (desc.vertex.buffer_count > 0) &input_elements.buffer else null, .NumElements = @intCast(input_elements.len), }, .IBStripCutValue = conv.d3d12IndexBufferStripCutValue(desc.primitive.strip_index_format), .PrimitiveTopologyType = conv.d3d12PrimitiveTopologyType(desc.primitive.topology), .NumRenderTargets = @intCast(num_render_targets), .RTVFormats = rtv_formats, .DSVFormat = if (desc.depth_stencil) |ds| conv.dxgiFormatForTexture(ds.format) else c.DXGI_FORMAT_UNKNOWN, .SampleDesc = .{ .Count = desc.multisample.count, .Quality = 0 }, .NodeMask = 0, .CachedPSO = .{ .pCachedBlob = null, .CachedBlobSizeInBytes = 0 }, .Flags = c.D3D12_PIPELINE_STATE_FLAG_NONE, }, &c.IID_ID3D12PipelineState, @ptrCast(&d3d_pipeline), ); if (hr != c.S_OK) { return error.CreateRenderPipelineFailed; } errdefer _ = d3d_pipeline.lpVtbl.*.Release.?(d3d_pipeline); if (desc.label) |label| setDebugName(@ptrCast(d3d_pipeline), label); // Result const pipeline = try allocator.create(RenderPipeline); pipeline.* = .{ .d3d_pipeline = d3d_pipeline, .device = device, .layout = layout, .topology = conv.d3d12PrimitiveTopology(desc.primitive.topology), .vertex_strides = vertex_strides, }; return pipeline; } pub fn deinit(pipeline: *RenderPipeline) void { const d3d_pipeline = pipeline.d3d_pipeline; pipeline.layout.manager.release(); _ = d3d_pipeline.lpVtbl.*.Release.?(d3d_pipeline); allocator.destroy(pipeline); } pub fn getBindGroupLayout(pipeline: *RenderPipeline, group_index: u32) *BindGroupLayout { return 
@ptrCast(pipeline.layout.group_layouts[group_index]); } }; pub const CommandBuffer = struct { pub const StreamingResult = struct { d3d_resource: *c.ID3D12Resource, map: [*]u8, offset: u32, }; manager: utils.Manager(CommandBuffer) = .{}, device: *Device, command_allocator: *c.ID3D12CommandAllocator, command_list: *c.ID3D12GraphicsCommandList, reference_tracker: *ReferenceTracker, rtv_allocation: DescriptorAllocation = .{ .index = 0 }, rtv_next_index: u32 = rtv_block_size, upload_buffer: ?*c.ID3D12Resource = null, upload_map: ?[*]u8 = null, upload_next_offset: u32 = upload_page_size, pub fn init(device: *Device) !*CommandBuffer { const command_allocator = try device.command_manager.createCommandAllocator(); errdefer device.command_manager.destroyCommandAllocator(command_allocator); const command_list = try device.command_manager.createCommandList(command_allocator); errdefer device.command_manager.destroyCommandList(command_list); const heaps = [2]*c.ID3D12DescriptorHeap{ device.general_heap.d3d_heap, device.sampler_heap.d3d_heap }; command_list.lpVtbl.*.SetDescriptorHeaps.?( command_list, 2, &heaps, ); const reference_tracker = try ReferenceTracker.init(device, command_allocator); errdefer reference_tracker.deinit(); const command_buffer = try allocator.create(CommandBuffer); command_buffer.* = .{ .device = device, .command_allocator = command_allocator, .command_list = command_list, .reference_tracker = reference_tracker, }; return command_buffer; } pub fn deinit(command_buffer: *CommandBuffer) void { // reference_tracker lifetime is managed externally // command_allocator lifetime is managed externally // command_list lifetime is managed externally allocator.destroy(command_buffer); } // Internal pub fn upload(command_buffer: *CommandBuffer, size: u64) !StreamingResult { if (command_buffer.upload_next_offset + size > upload_page_size) { const streaming_manager = &command_buffer.device.streaming_manager; var hr: c.HRESULT = undefined; std.debug.assert(size <= upload_page_size); // TODO - support large uploads const resource = try streaming_manager.acquire(); const d3d_resource = resource.d3d_resource; try command_buffer.reference_tracker.referenceUploadPage(resource); command_buffer.upload_buffer = d3d_resource; var map: ?*anyopaque = null; hr = d3d_resource.lpVtbl.*.Map.?(d3d_resource, 0, null, &map); if (hr != c.S_OK) { return error.MapForUploadFailed; } command_buffer.upload_map = @ptrCast(map); command_buffer.upload_next_offset = 0; } const offset = command_buffer.upload_next_offset; command_buffer.upload_next_offset = @intCast(utils.alignUp(offset + size, limits.min_uniform_buffer_offset_alignment)); return StreamingResult{ .d3d_resource = command_buffer.upload_buffer.?, .map = command_buffer.upload_map.? 
+ offset, .offset = offset, }; } pub fn allocateRtvDescriptors(command_buffer: *CommandBuffer, count: usize) !c.D3D12_CPU_DESCRIPTOR_HANDLE { if (count == 0) return .{ .ptr = 0 }; var rtv_heap = &command_buffer.device.rtv_heap; if (command_buffer.rtv_next_index + count > rtv_block_size) { command_buffer.rtv_allocation = try rtv_heap.alloc(); try command_buffer.reference_tracker.referenceRtvDescriptorBlock(command_buffer.rtv_allocation); command_buffer.rtv_next_index = 0; } const index = command_buffer.rtv_next_index; command_buffer.rtv_next_index = @intCast(index + count); return rtv_heap.cpuDescriptor(command_buffer.rtv_allocation.index + index); } pub fn allocateDsvDescriptor(command_buffer: *CommandBuffer) !c.D3D12_CPU_DESCRIPTOR_HANDLE { var dsv_heap = &command_buffer.device.dsv_heap; const allocation = try dsv_heap.alloc(); try command_buffer.reference_tracker.referenceDsvDescriptorBlock(allocation); return dsv_heap.cpuDescriptor(allocation.index); } }; pub const ReferenceTracker = struct { device: *Device, command_allocator: *c.ID3D12CommandAllocator, fence_value: u64 = 0, buffers: std.ArrayListUnmanaged(*Buffer) = .{}, textures: std.ArrayListUnmanaged(*Texture) = .{}, bind_groups: std.ArrayListUnmanaged(*BindGroup) = .{}, compute_pipelines: std.ArrayListUnmanaged(*ComputePipeline) = .{}, render_pipelines: std.ArrayListUnmanaged(*RenderPipeline) = .{}, rtv_descriptor_blocks: std.ArrayListUnmanaged(DescriptorAllocation) = .{}, dsv_descriptor_blocks: std.ArrayListUnmanaged(DescriptorAllocation) = .{}, upload_pages: std.ArrayListUnmanaged(Resource) = .{}, pub fn init(device: *Device, command_allocator: *c.ID3D12CommandAllocator) !*ReferenceTracker { const tracker = try allocator.create(ReferenceTracker); tracker.* = .{ .device = device, .command_allocator = command_allocator, }; return tracker; } pub fn deinit(tracker: *ReferenceTracker) void { const device = tracker.device; device.command_manager.destroyCommandAllocator(tracker.command_allocator); for (tracker.buffers.items) |buffer| { buffer.gpu_count -= 1; buffer.manager.release(); } for (tracker.textures.items) |texture| { texture.manager.release(); } for (tracker.bind_groups.items) |group| { for (group.buffers.items) |buffer| buffer.gpu_count -= 1; group.manager.release(); } for (tracker.compute_pipelines.items) |pipeline| { pipeline.manager.release(); } for (tracker.render_pipelines.items) |pipeline| { pipeline.manager.release(); } for (tracker.rtv_descriptor_blocks.items) |block| { device.rtv_heap.free(block); } for (tracker.dsv_descriptor_blocks.items) |block| { device.dsv_heap.free(block); } for (tracker.upload_pages.items) |resource| { device.streaming_manager.release(resource); } tracker.buffers.deinit(allocator); tracker.textures.deinit(allocator); tracker.bind_groups.deinit(allocator); tracker.compute_pipelines.deinit(allocator); tracker.render_pipelines.deinit(allocator); tracker.rtv_descriptor_blocks.deinit(allocator); tracker.dsv_descriptor_blocks.deinit(allocator); tracker.upload_pages.deinit(allocator); allocator.destroy(tracker); } pub fn referenceBuffer(tracker: *ReferenceTracker, buffer: *Buffer) !void { buffer.manager.reference(); try tracker.buffers.append(allocator, buffer); } pub fn referenceTexture(tracker: *ReferenceTracker, texture: *Texture) !void { texture.manager.reference(); try tracker.textures.append(allocator, texture); } pub fn referenceBindGroup(tracker: *ReferenceTracker, group: *BindGroup) !void { group.manager.reference(); try tracker.bind_groups.append(allocator, group); } pub fn 
referenceComputePipeline(tracker: *ReferenceTracker, pipeline: *ComputePipeline) !void { pipeline.manager.reference(); try tracker.compute_pipelines.append(allocator, pipeline); } pub fn referenceRenderPipeline(tracker: *ReferenceTracker, pipeline: *RenderPipeline) !void { pipeline.manager.reference(); try tracker.render_pipelines.append(allocator, pipeline); } pub fn referenceRtvDescriptorBlock(tracker: *ReferenceTracker, block: DescriptorAllocation) !void { try tracker.rtv_descriptor_blocks.append(allocator, block); } pub fn referenceDsvDescriptorBlock(tracker: *ReferenceTracker, block: DescriptorAllocation) !void { try tracker.dsv_descriptor_blocks.append(allocator, block); } pub fn referenceUploadPage(tracker: *ReferenceTracker, upload_page: Resource) !void { try tracker.upload_pages.append(allocator, upload_page); } pub fn submit(tracker: *ReferenceTracker, queue: *Queue) !void { tracker.fence_value = queue.fence_value; for (tracker.buffers.items) |buffer| { buffer.gpu_count += 1; } for (tracker.bind_groups.items) |group| { for (group.buffers.items) |buffer| buffer.gpu_count += 1; } try tracker.device.reference_trackers.append(allocator, tracker); } }; pub const CommandEncoder = struct { manager: utils.Manager(CommandEncoder) = .{}, device: *Device, command_buffer: *CommandBuffer, reference_tracker: *ReferenceTracker, state_tracker: StateTracker = .{}, pub fn init(device: *Device, desc: ?*const sysgpu.CommandEncoder.Descriptor) !*CommandEncoder { // TODO _ = desc; const command_buffer = try CommandBuffer.init(device); var encoder = try allocator.create(CommandEncoder); encoder.* = .{ .device = device, .command_buffer = command_buffer, .reference_tracker = command_buffer.reference_tracker, }; encoder.state_tracker.init(device); return encoder; } pub fn deinit(encoder: *CommandEncoder) void { encoder.state_tracker.deinit(); encoder.command_buffer.manager.release(); allocator.destroy(encoder); } pub fn beginComputePass(encoder: *CommandEncoder, desc: *const sysgpu.ComputePassDescriptor) !*ComputePassEncoder { return ComputePassEncoder.init(encoder, desc); } pub fn beginRenderPass(encoder: *CommandEncoder, desc: *const sysgpu.RenderPassDescriptor) !*RenderPassEncoder { try encoder.state_tracker.endPass(); return RenderPassEncoder.init(encoder, desc); } pub fn copyBufferToBuffer( encoder: *CommandEncoder, source: *Buffer, source_offset: u64, destination: *Buffer, destination_offset: u64, size: u64, ) !void { const command_list = encoder.command_buffer.command_list; try encoder.reference_tracker.referenceBuffer(source); try encoder.reference_tracker.referenceBuffer(destination); try encoder.state_tracker.transition(&source.resource, source.resource.read_state); try encoder.state_tracker.transition(&destination.resource, c.D3D12_RESOURCE_STATE_COPY_DEST); encoder.state_tracker.flush(command_list); command_list.lpVtbl.*.CopyBufferRegion.?( command_list, destination.resource.d3d_resource, destination_offset, source.resource.d3d_resource, source_offset, size, ); } pub fn copyBufferToTexture( encoder: *CommandEncoder, source: *const sysgpu.ImageCopyBuffer, destination: *const sysgpu.ImageCopyTexture, copy_size_raw: *const sysgpu.Extent3D, ) !void { const command_list = encoder.command_buffer.command_list; const source_buffer: *Buffer = @ptrCast(@alignCast(source.buffer)); const destination_texture: *Texture = @ptrCast(@alignCast(destination.texture)); try encoder.reference_tracker.referenceBuffer(source_buffer); try encoder.reference_tracker.referenceTexture(destination_texture); try 
encoder.state_tracker.transition(&source_buffer.resource, source_buffer.resource.read_state); try encoder.state_tracker.transition(&destination_texture.resource, c.D3D12_RESOURCE_STATE_COPY_DEST); encoder.state_tracker.flush(command_list); const copy_size = utils.calcExtent(destination_texture.dimension, copy_size_raw.*); const destination_origin = utils.calcOrigin(destination_texture.dimension, destination.origin); const destination_subresource_index = destination_texture.calcSubresource(destination.mip_level, destination_origin.array_slice); std.debug.assert(copy_size.array_count == 1); // TODO command_list.lpVtbl.*.CopyTextureRegion.?( command_list, &.{ .pResource = destination_texture.resource.d3d_resource, .Type = c.D3D12_TEXTURE_COPY_TYPE_SUBRESOURCE_INDEX, .unnamed_0 = .{ .SubresourceIndex = destination_subresource_index, }, }, destination_origin.x, destination_origin.y, destination_origin.z, &.{ .pResource = source_buffer.resource.d3d_resource, .Type = c.D3D12_TEXTURE_COPY_TYPE_PLACED_FOOTPRINT, .unnamed_0 = .{ .PlacedFootprint = .{ .Offset = source.layout.offset, .Footprint = .{ .Format = conv.dxgiFormatForTexture(destination_texture.format), .Width = copy_size.width, .Height = copy_size.height, .Depth = copy_size.depth, .RowPitch = source.layout.bytes_per_row, }, }, }, }, null, ); } pub fn copyTextureToTexture( encoder: *CommandEncoder, source: *const sysgpu.ImageCopyTexture, destination: *const sysgpu.ImageCopyTexture, copy_size_raw: *const sysgpu.Extent3D, ) !void { const command_list = encoder.command_buffer.command_list; const source_texture: *Texture = @ptrCast(@alignCast(source.texture)); const destination_texture: *Texture = @ptrCast(@alignCast(destination.texture)); try encoder.reference_tracker.referenceTexture(source_texture); try encoder.reference_tracker.referenceTexture(destination_texture); try encoder.state_tracker.transition(&source_texture.resource, source_texture.resource.read_state); try encoder.state_tracker.transition(&destination_texture.resource, c.D3D12_RESOURCE_STATE_COPY_DEST); encoder.state_tracker.flush(command_list); const copy_size = utils.calcExtent(destination_texture.dimension, copy_size_raw.*); const source_origin = utils.calcOrigin(source_texture.dimension, source.origin); const destination_origin = utils.calcOrigin(destination_texture.dimension, destination.origin); const source_subresource_index = source_texture.calcSubresource(source.mip_level, source_origin.array_slice); const destination_subresource_index = destination_texture.calcSubresource(destination.mip_level, destination_origin.array_slice); std.debug.assert(copy_size.array_count == 1); // TODO command_list.lpVtbl.*.CopyTextureRegion.?( command_list, &.{ .pResource = destination_texture.resource.d3d_resource, .Type = c.D3D12_TEXTURE_COPY_TYPE_SUBRESOURCE_INDEX, .unnamed_0 = .{ .SubresourceIndex = destination_subresource_index, }, }, destination_origin.x, destination_origin.y, destination_origin.z, &.{ .pResource = source_texture.resource.d3d_resource, .Type = c.D3D12_TEXTURE_COPY_TYPE_SUBRESOURCE_INDEX, .unnamed_0 = .{ .SubresourceIndex = source_subresource_index, }, }, &.{ .left = source_origin.x, .top = source_origin.y, .front = source_origin.z, .right = source_origin.x + copy_size.width, .bottom = source_origin.y + copy_size.height, .back = source_origin.z + copy_size.depth, }, ); } pub fn finish(encoder: *CommandEncoder, desc: *const sysgpu.CommandBuffer.Descriptor) !*CommandBuffer { const command_list = encoder.command_buffer.command_list; var hr: c.HRESULT = undefined; try 
encoder.state_tracker.endPass(); encoder.state_tracker.flush(command_list); hr = command_list.lpVtbl.*.Close.?(command_list); if (hr != c.S_OK) { return error.CommandListCloseFailed; } if (desc.label) |label| setDebugName(@ptrCast(command_list), label); return encoder.command_buffer; } pub fn writeBuffer(encoder: *CommandEncoder, buffer: *Buffer, offset: u64, data: [*]const u8, size: u64) !void { const command_list = encoder.command_buffer.command_list; const stream = try encoder.command_buffer.upload(size); @memcpy(stream.map[0..size], data[0..size]); try encoder.reference_tracker.referenceBuffer(buffer); try encoder.state_tracker.transition(&buffer.resource, c.D3D12_RESOURCE_STATE_COPY_DEST); encoder.state_tracker.flush(command_list); command_list.lpVtbl.*.CopyBufferRegion.?( command_list, buffer.resource.d3d_resource, offset, stream.d3d_resource, stream.offset, size, ); } pub fn writeTexture( encoder: *CommandEncoder, destination: *const sysgpu.ImageCopyTexture, data: [*]const u8, data_size: usize, data_layout: *const sysgpu.Texture.DataLayout, write_size_raw: *const sysgpu.Extent3D, ) !void { const command_list = encoder.command_buffer.command_list; const destination_texture: *Texture = @ptrCast(@alignCast(destination.texture)); const stream = try encoder.command_buffer.upload(data_size); @memcpy(stream.map[0..data_size], data[0..data_size]); try encoder.reference_tracker.referenceTexture(destination_texture); try encoder.state_tracker.transition(&destination_texture.resource, c.D3D12_RESOURCE_STATE_COPY_DEST); encoder.state_tracker.flush(command_list); const write_size = utils.calcExtent(destination_texture.dimension, write_size_raw.*); const destination_origin = utils.calcOrigin(destination_texture.dimension, destination.origin); const destination_subresource_index = destination_texture.calcSubresource(destination.mip_level, destination_origin.array_slice); std.debug.assert(write_size.array_count == 1); // TODO command_list.lpVtbl.*.CopyTextureRegion.?( command_list, &.{ .pResource = destination_texture.resource.d3d_resource, .Type = c.D3D12_TEXTURE_COPY_TYPE_SUBRESOURCE_INDEX, .unnamed_0 = .{ .SubresourceIndex = destination_subresource_index, }, }, destination_origin.x, destination_origin.y, destination_origin.z, &.{ .pResource = stream.d3d_resource, .Type = c.D3D12_TEXTURE_COPY_TYPE_PLACED_FOOTPRINT, .unnamed_0 = .{ .PlacedFootprint = .{ .Offset = stream.offset, .Footprint = .{ .Format = conv.dxgiFormatForTexture(destination_texture.format), .Width = write_size.width, .Height = write_size.height, .Depth = write_size.depth, .RowPitch = data_layout.bytes_per_row, }, }, }, }, null, ); } }; pub const StateTracker = struct { device: *Device = undefined, written_set: std.AutoArrayHashMapUnmanaged(*Resource, c.D3D12_RESOURCE_STATES) = .{}, barriers: std.ArrayListUnmanaged(c.D3D12_RESOURCE_BARRIER) = .{}, pub fn init(tracker: *StateTracker, device: *Device) void { tracker.device = device; } pub fn deinit(tracker: *StateTracker) void { tracker.written_set.deinit(allocator); tracker.barriers.deinit(allocator); } pub fn transition(tracker: *StateTracker, resource: *Resource, new_state: c.D3D12_RESOURCE_STATES) !void { const current_state = tracker.written_set.get(resource) orelse resource.read_state; if (current_state == c.D3D12_RESOURCE_STATE_UNORDERED_ACCESS and new_state == c.D3D12_RESOURCE_STATE_UNORDERED_ACCESS) { try tracker.addUavBarrier(resource); } else if (current_state != new_state) { try tracker.written_set.put(allocator, resource, new_state); try 
pub const StateTracker = struct {
    device: *Device = undefined,
    written_set: std.AutoArrayHashMapUnmanaged(*Resource, c.D3D12_RESOURCE_STATES) = .{},
    barriers: std.ArrayListUnmanaged(c.D3D12_RESOURCE_BARRIER) = .{},

    pub fn init(tracker: *StateTracker, device: *Device) void {
        tracker.device = device;
    }

    pub fn deinit(tracker: *StateTracker) void {
        tracker.written_set.deinit(allocator);
        tracker.barriers.deinit(allocator);
    }

    pub fn transition(tracker: *StateTracker, resource: *Resource, new_state: c.D3D12_RESOURCE_STATES) !void {
        const current_state = tracker.written_set.get(resource) orelse resource.read_state;

        if (current_state == c.D3D12_RESOURCE_STATE_UNORDERED_ACCESS and new_state == c.D3D12_RESOURCE_STATE_UNORDERED_ACCESS) {
            try tracker.addUavBarrier(resource);
        } else if (current_state != new_state) {
            try tracker.written_set.put(allocator, resource, new_state);
            try tracker.addTransitionBarrier(resource, current_state, new_state);
        }
    }

    pub fn flush(tracker: *StateTracker, command_list: *c.ID3D12GraphicsCommandList) void {
        if (tracker.barriers.items.len > 0) {
            command_list.lpVtbl.*.ResourceBarrier.?(
                command_list,
                @intCast(tracker.barriers.items.len),
                tracker.barriers.items.ptr,
            );
            tracker.barriers.clearRetainingCapacity();
        }
    }

    pub fn endPass(tracker: *StateTracker) !void {
        var it = tracker.written_set.iterator();
        while (it.next()) |entry| {
            const resource = entry.key_ptr.*;
            const current_state = entry.value_ptr.*;

            if (current_state != resource.read_state)
                try tracker.addTransitionBarrier(resource, current_state, resource.read_state);
        }

        tracker.written_set.clearRetainingCapacity();
    }

    fn addUavBarrier(tracker: *StateTracker, resource: *Resource) !void {
        try tracker.barriers.append(allocator, .{
            .Type = c.D3D12_RESOURCE_BARRIER_TYPE_UAV,
            .Flags = c.D3D12_RESOURCE_BARRIER_FLAG_NONE,
            .unnamed_0 = .{
                .UAV = .{
                    .pResource = resource.d3d_resource,
                },
            },
        });
    }

    fn addTransitionBarrier(
        tracker: *StateTracker,
        resource: *Resource,
        state_before: c.D3D12_RESOURCE_STATES,
        state_after: c.D3D12_RESOURCE_STATES,
    ) !void {
        try tracker.barriers.append(allocator, .{
            .Type = c.D3D12_RESOURCE_BARRIER_TYPE_TRANSITION,
            .Flags = c.D3D12_RESOURCE_BARRIER_FLAG_NONE,
            .unnamed_0 = .{
                .Transition = .{
                    .pResource = resource.d3d_resource,
                    .Subresource = c.D3D12_RESOURCE_BARRIER_ALL_SUBRESOURCES,
                    .StateBefore = state_before,
                    .StateAfter = state_after,
                },
            },
        });
    }
};
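// ComputePassEncoder defers resource transitions to dispatch time: the
// accesses of every bound group are walked in dispatchWorkgroups() so that
// storage bindings reach UNORDERED_ACCESS and read-only bindings return to
// their read state before the dispatch is recorded.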
pub const ComputePassEncoder = struct {
    manager: utils.Manager(ComputePassEncoder) = .{},
    command_list: *c.ID3D12GraphicsCommandList,
    reference_tracker: *ReferenceTracker,
    state_tracker: *StateTracker,
    bind_groups: [limits.max_bind_groups]*BindGroup = undefined,
    group_parameter_indices: []u32 = undefined,

    pub fn init(cmd_encoder: *CommandEncoder, desc: *const sysgpu.ComputePassDescriptor) !*ComputePassEncoder {
        _ = desc;
        const command_list = cmd_encoder.command_buffer.command_list;

        const encoder = try allocator.create(ComputePassEncoder);
        encoder.* = .{
            .command_list = command_list,
            .reference_tracker = cmd_encoder.reference_tracker,
            .state_tracker = &cmd_encoder.state_tracker,
        };
        return encoder;
    }

    pub fn deinit(encoder: *ComputePassEncoder) void {
        allocator.destroy(encoder);
    }

    pub fn dispatchWorkgroups(
        encoder: *ComputePassEncoder,
        workgroup_count_x: u32,
        workgroup_count_y: u32,
        workgroup_count_z: u32,
    ) !void {
        const command_list = encoder.command_list;

        const bind_group_count = encoder.group_parameter_indices.len;
        for (encoder.bind_groups[0..bind_group_count]) |group| {
            for (group.accesses.items) |access| {
                if (access.uav) {
                    try encoder.state_tracker.transition(access.resource, c.D3D12_RESOURCE_STATE_UNORDERED_ACCESS);
                } else {
                    try encoder.state_tracker.transition(access.resource, access.resource.read_state);
                }
            }
        }
        encoder.state_tracker.flush(command_list);

        command_list.lpVtbl.*.Dispatch.?(
            command_list,
            workgroup_count_x,
            workgroup_count_y,
            workgroup_count_z,
        );
    }

    pub fn end(encoder: *ComputePassEncoder) void {
        _ = encoder;
    }

    pub fn setBindGroup(
        encoder: *ComputePassEncoder,
        group_index: u32,
        group: *BindGroup,
        dynamic_offset_count: usize,
        dynamic_offsets: ?[*]const u32,
    ) !void {
        const command_list = encoder.command_list;

        try encoder.reference_tracker.referenceBindGroup(group);
        encoder.bind_groups[group_index] = group;

        var parameter_index = encoder.group_parameter_indices[group_index];

        if (group.general_table) |table| {
            command_list.lpVtbl.*.SetComputeRootDescriptorTable.?(
                command_list,
                parameter_index,
                table,
            );
            parameter_index += 1;
        }

        if (group.sampler_table) |table| {
            command_list.lpVtbl.*.SetComputeRootDescriptorTable.?(
                command_list,
                parameter_index,
                table,
            );
            parameter_index += 1;
        }

        for (0..dynamic_offset_count) |i| {
            const dynamic_resource = group.dynamic_resources[i];
            const dynamic_offset = dynamic_offsets.?[i];

            switch (dynamic_resource.parameter_type) {
                c.D3D12_ROOT_PARAMETER_TYPE_CBV => command_list.lpVtbl.*.SetComputeRootConstantBufferView.?(
                    command_list,
                    parameter_index,
                    dynamic_resource.address + dynamic_offset,
                ),
                c.D3D12_ROOT_PARAMETER_TYPE_SRV => command_list.lpVtbl.*.SetComputeRootShaderResourceView.?(
                    command_list,
                    parameter_index,
                    dynamic_resource.address + dynamic_offset,
                ),
                c.D3D12_ROOT_PARAMETER_TYPE_UAV => command_list.lpVtbl.*.SetComputeRootUnorderedAccessView.?(
                    command_list,
                    parameter_index,
                    dynamic_resource.address + dynamic_offset,
                ),
                else => {},
            }

            parameter_index += 1;
        }
    }

    pub fn setPipeline(encoder: *ComputePassEncoder, pipeline: *ComputePipeline) !void {
        const command_list = encoder.command_list;

        try encoder.reference_tracker.referenceComputePipeline(pipeline);

        encoder.group_parameter_indices = pipeline.layout.group_parameter_indices.slice();

        command_list.lpVtbl.*.SetComputeRootSignature.?(
            command_list,
            pipeline.layout.root_signature,
        );

        command_list.lpVtbl.*.SetPipelineState.?(
            command_list,
            pipeline.d3d_pipeline,
        );
    }
};
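// RenderPassEncoder allocates RTV/DSV descriptors per pass, transitions the
// attachments to RENDER_TARGET / DEPTH_WRITE, records clears according to each
// attachment's load_op, and defaults viewport and scissor to the full target.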
pub const RenderPassEncoder = struct {
    manager: utils.Manager(RenderPassEncoder) = .{},
    command_list: *c.ID3D12GraphicsCommandList,
    reference_tracker: *ReferenceTracker,
    state_tracker: *StateTracker,
    color_attachments: std.BoundedArray(sysgpu.RenderPassColorAttachment, limits.max_color_attachments) = .{},
    depth_attachment: ?sysgpu.RenderPassDepthStencilAttachment,
    group_parameter_indices: []u32 = undefined,
    vertex_apply_count: u32 = 0,
    vertex_buffer_views: [limits.max_vertex_buffers]c.D3D12_VERTEX_BUFFER_VIEW,
    vertex_strides: []c.UINT = undefined,

    pub fn init(cmd_encoder: *CommandEncoder, desc: *const sysgpu.RenderPassDescriptor) !*RenderPassEncoder {
        const d3d_device = cmd_encoder.device.d3d_device;
        const command_list = cmd_encoder.command_buffer.command_list;

        var width: u32 = 0;
        var height: u32 = 0;

        var color_attachments: std.BoundedArray(sysgpu.RenderPassColorAttachment, limits.max_color_attachments) = .{};
        var rtv_handles = try cmd_encoder.command_buffer.allocateRtvDescriptors(desc.color_attachment_count);
        const descriptor_size = cmd_encoder.device.rtv_heap.descriptor_size;

        var rtv_handle = rtv_handles;
        for (0..desc.color_attachment_count) |i| {
            const attach = desc.color_attachments.?[i];
            if (attach.view) |view_raw| {
                const view: *TextureView = @ptrCast(@alignCast(view_raw));
                const texture = view.texture;

                try cmd_encoder.reference_tracker.referenceTexture(texture);
                try cmd_encoder.state_tracker.transition(&texture.resource, c.D3D12_RESOURCE_STATE_RENDER_TARGET);

                width = view.width();
                height = view.height();
                color_attachments.appendAssumeCapacity(attach);

                // TODO - rtvDesc()
                d3d_device.lpVtbl.*.CreateRenderTargetView.?(
                    d3d_device,
                    texture.resource.d3d_resource,
                    null,
                    rtv_handle,
                );
            } else {
                d3d_device.lpVtbl.*.CreateRenderTargetView.?(
                    d3d_device,
                    null,
                    &.{
                        .Format = c.DXGI_FORMAT_R8G8B8A8_UNORM,
                        .ViewDimension = c.D3D12_RTV_DIMENSION_TEXTURE2D,
                        .unnamed_0 = .{ .Texture2D = .{ .MipSlice = 0, .PlaneSlice = 0 } },
                    },
                    rtv_handle,
                );
            }
            rtv_handle.ptr += descriptor_size;
        }

        var depth_attachment: ?sysgpu.RenderPassDepthStencilAttachment = null;
        var dsv_handle: c.D3D12_CPU_DESCRIPTOR_HANDLE = .{ .ptr = 0 };
        if (desc.depth_stencil_attachment) |attach| {
            const view: *TextureView = @ptrCast(@alignCast(attach.view));
            const texture = view.texture;

            try cmd_encoder.reference_tracker.referenceTexture(texture);
            try cmd_encoder.state_tracker.transition(&texture.resource, c.D3D12_RESOURCE_STATE_DEPTH_WRITE);

            width = view.width();
            height = view.height();
            depth_attachment = attach.*;

            dsv_handle = try cmd_encoder.command_buffer.allocateDsvDescriptor();

            d3d_device.lpVtbl.*.CreateDepthStencilView.?(
                d3d_device,
                texture.resource.d3d_resource,
                null,
                dsv_handle,
            );
        }

        cmd_encoder.state_tracker.flush(command_list);

        command_list.lpVtbl.*.OMSetRenderTargets.?(
            command_list,
            @intCast(desc.color_attachment_count),
            &rtv_handles,
            c.TRUE,
            if (desc.depth_stencil_attachment != null) &dsv_handle else null,
        );

        rtv_handle = rtv_handles;
        for (0..desc.color_attachment_count) |i| {
            const attach = desc.color_attachments.?[i];

            if (attach.load_op == .clear) {
                const clear_color = [4]f32{
                    @floatCast(attach.clear_value.r),
                    @floatCast(attach.clear_value.g),
                    @floatCast(attach.clear_value.b),
                    @floatCast(attach.clear_value.a),
                };
                command_list.lpVtbl.*.ClearRenderTargetView.?(
                    command_list,
                    rtv_handle,
                    &clear_color,
                    0,
                    null,
                );
            }
            rtv_handle.ptr += descriptor_size;
        }

        if (desc.depth_stencil_attachment) |attach| {
            var clear_flags: c.D3D12_CLEAR_FLAGS = 0;

            if (attach.depth_load_op == .clear)
                clear_flags |= c.D3D12_CLEAR_FLAG_DEPTH;
            if (attach.stencil_load_op == .clear)
                clear_flags |= c.D3D12_CLEAR_FLAG_STENCIL;

            if (clear_flags != 0) {
                command_list.lpVtbl.*.ClearDepthStencilView.?(
                    command_list,
                    dsv_handle,
                    clear_flags,
                    attach.depth_clear_value,
                    @intCast(attach.stencil_clear_value),
                    0,
                    null,
                );
            }
        }

        const viewport = c.D3D12_VIEWPORT{
            .TopLeftX = 0,
            .TopLeftY = 0,
            .Width = @floatFromInt(width),
            .Height = @floatFromInt(height),
            .MinDepth = 0,
            .MaxDepth = 1,
        };

        const scissor_rect = c.D3D12_RECT{
            .left = 0,
            .top = 0,
            .right = @intCast(width),
            .bottom = @intCast(height),
        };

        command_list.lpVtbl.*.RSSetViewports.?(command_list, 1, &viewport);
        command_list.lpVtbl.*.RSSetScissorRects.?(command_list, 1, &scissor_rect);

        // Result
        const encoder = try allocator.create(RenderPassEncoder);
        encoder.* = .{
            .command_list = command_list,
            .color_attachments = color_attachments,
            .depth_attachment = depth_attachment,
            .reference_tracker = cmd_encoder.reference_tracker,
            .state_tracker = &cmd_encoder.state_tracker,
            .vertex_buffer_views = std.mem.zeroes([limits.max_vertex_buffers]c.D3D12_VERTEX_BUFFER_VIEW),
        };
        return encoder;
    }

    pub fn deinit(encoder: *RenderPassEncoder) void {
        allocator.destroy(encoder);
    }

    pub fn draw(
        encoder: *RenderPassEncoder,
        vertex_count: u32,
        instance_count: u32,
        first_vertex: u32,
        first_instance: u32,
    ) !void {
        const command_list = encoder.command_list;

        encoder.applyVertexBuffers();

        command_list.lpVtbl.*.DrawInstanced.?(
            command_list,
            vertex_count,
            instance_count,
            first_vertex,
            first_instance,
        );
    }

    pub fn drawIndexed(
        encoder: *RenderPassEncoder,
        index_count: u32,
        instance_count: u32,
        first_index: u32,
        base_vertex: i32,
        first_instance: u32,
    ) !void {
        const command_list = encoder.command_list;

        encoder.applyVertexBuffers();

        command_list.lpVtbl.*.DrawIndexedInstanced.?(
            command_list,
            index_count,
            instance_count,
            first_index,
            base_vertex,
            first_instance,
        );
    }
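    // end() resolves multisampled color attachments into their resolve
    // targets. The resolve format comes from the destination resource; if it
    // is typeless, the source format is tried, and a fully typeless pair is
    // an error.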
    pub fn end(encoder: *RenderPassEncoder) !void {
        const command_list = encoder.command_list;

        for (encoder.color_attachments.slice()) |attach| {
            const view: *TextureView = @ptrCast(@alignCast(attach.view.?));

            if (attach.resolve_target) |resolve_target_raw| {
                const resolve_target: *TextureView = @ptrCast(@alignCast(resolve_target_raw));

                try encoder.reference_tracker.referenceTexture(resolve_target.texture);
                try encoder.state_tracker.transition(&view.texture.resource, c.D3D12_RESOURCE_STATE_RESOLVE_SOURCE);
                try encoder.state_tracker.transition(&resolve_target.texture.resource, c.D3D12_RESOURCE_STATE_RESOLVE_DEST);
                encoder.state_tracker.flush(command_list);

                // Format
                const resolve_d3d_resource = resolve_target.texture.resource.d3d_resource;
                const view_d3d_resource = view.texture.resource.d3d_resource;

                var d3d_desc: c.D3D12_RESOURCE_DESC = undefined;
                var format: c.DXGI_FORMAT = undefined;
                _ = resolve_d3d_resource.lpVtbl.*.GetDesc.?(resolve_d3d_resource, &d3d_desc);
                format = d3d_desc.Format;
                if (conv.dxgiFormatIsTypeless(format)) {
                    _ = view_d3d_resource.lpVtbl.*.GetDesc.?(view_d3d_resource, &d3d_desc);
                    format = d3d_desc.Format;
                    if (conv.dxgiFormatIsTypeless(format)) {
                        return error.NoTypedFormat;
                    }
                }

                command_list.lpVtbl.*.ResolveSubresource.?(
                    command_list,
                    resolve_target.texture.resource.d3d_resource,
                    resolve_target.base_subresource,
                    view.texture.resource.d3d_resource,
                    view.base_subresource,
                    format,
                );

                try encoder.state_tracker.transition(&resolve_target.texture.resource, resolve_target.texture.resource.read_state);
            }

            try encoder.state_tracker.transition(&view.texture.resource, view.texture.resource.read_state);
        }

        if (encoder.depth_attachment) |attach| {
            const view: *TextureView = @ptrCast(@alignCast(attach.view));
            try encoder.state_tracker.transition(&view.texture.resource, view.texture.resource.read_state);
        }
    }

    pub fn setBindGroup(
        encoder: *RenderPassEncoder,
        group_index: u32,
        group: *BindGroup,
        dynamic_offset_count: usize,
        dynamic_offsets: ?[*]const u32,
    ) !void {
        const command_list = encoder.command_list;

        try encoder.reference_tracker.referenceBindGroup(group);

        var parameter_index = encoder.group_parameter_indices[group_index];

        if (group.general_table) |table| {
            command_list.lpVtbl.*.SetGraphicsRootDescriptorTable.?(
                command_list,
                parameter_index,
                table,
            );
            parameter_index += 1;
        }

        if (group.sampler_table) |table| {
            command_list.lpVtbl.*.SetGraphicsRootDescriptorTable.?(
                command_list,
                parameter_index,
                table,
            );
            parameter_index += 1;
        }

        for (0..dynamic_offset_count) |i| {
            const dynamic_resource = group.dynamic_resources[i];
            const dynamic_offset = dynamic_offsets.?[i];

            switch (dynamic_resource.parameter_type) {
                c.D3D12_ROOT_PARAMETER_TYPE_CBV => command_list.lpVtbl.*.SetGraphicsRootConstantBufferView.?(
                    command_list,
                    parameter_index,
                    dynamic_resource.address + dynamic_offset,
                ),
                c.D3D12_ROOT_PARAMETER_TYPE_SRV => command_list.lpVtbl.*.SetGraphicsRootShaderResourceView.?(
                    command_list,
                    parameter_index,
                    dynamic_resource.address + dynamic_offset,
                ),
                c.D3D12_ROOT_PARAMETER_TYPE_UAV => command_list.lpVtbl.*.SetGraphicsRootUnorderedAccessView.?(
                    command_list,
                    parameter_index,
                    dynamic_resource.address + dynamic_offset,
                ),
                else => {},
            }

            parameter_index += 1;
        }
    }

    pub fn setIndexBuffer(
        encoder: *RenderPassEncoder,
        buffer: *Buffer,
        format: sysgpu.IndexFormat,
        offset: u64,
        size: u64,
    ) !void {
        const command_list = encoder.command_list;
        const d3d_resource = buffer.resource.d3d_resource;

        try encoder.reference_tracker.referenceBuffer(buffer);

        const d3d_size: u32 = @intCast(if (size == sysgpu.whole_size) buffer.size - offset else size);

        command_list.lpVtbl.*.IASetIndexBuffer.?(
            command_list,
            &c.D3D12_INDEX_BUFFER_VIEW{
                .BufferLocation = d3d_resource.lpVtbl.*.GetGPUVirtualAddress.?(d3d_resource) + offset,
                .SizeInBytes = d3d_size,
                .Format = conv.dxgiFormatForIndex(format),
            },
        );
    }
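    // Binding a pipeline caches its vertex strides: the StrideInBytes of each
    // vertex buffer view is only known once a pipeline is set, so
    // IASetVertexBuffers is deferred to applyVertexBuffers() at draw time.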
    pub fn setPipeline(encoder: *RenderPassEncoder, pipeline: *RenderPipeline) !void {
        const command_list = encoder.command_list;

        try encoder.reference_tracker.referenceRenderPipeline(pipeline);

        encoder.group_parameter_indices = pipeline.layout.group_parameter_indices.slice();
        encoder.vertex_strides = pipeline.vertex_strides.slice();

        command_list.lpVtbl.*.SetGraphicsRootSignature.?(
            command_list,
            pipeline.layout.root_signature,
        );

        command_list.lpVtbl.*.SetPipelineState.?(
            command_list,
            pipeline.d3d_pipeline,
        );

        command_list.lpVtbl.*.IASetPrimitiveTopology.?(
            command_list,
            pipeline.topology,
        );
    }

    pub fn setScissorRect(encoder: *RenderPassEncoder, x: u32, y: u32, width: u32, height: u32) !void {
        const command_list = encoder.command_list;

        const scissor_rect = c.D3D12_RECT{
            .left = @intCast(x),
            .top = @intCast(y),
            .right = @intCast(x + width),
            .bottom = @intCast(y + height),
        };

        command_list.lpVtbl.*.RSSetScissorRects.?(command_list, 1, &scissor_rect);
    }

    pub fn setVertexBuffer(encoder: *RenderPassEncoder, slot: u32, buffer: *Buffer, offset: u64, size: u64) !void {
        const d3d_resource = buffer.resource.d3d_resource;

        try encoder.reference_tracker.referenceBuffer(buffer);

        var view = &encoder.vertex_buffer_views[slot];
        view.BufferLocation = d3d_resource.lpVtbl.*.GetGPUVirtualAddress.?(d3d_resource) + offset;
        view.SizeInBytes = @intCast(size);
        // StrideInBytes deferred until draw()

        encoder.vertex_apply_count = @max(encoder.vertex_apply_count, slot + 1);
    }

    pub fn setViewport(
        encoder: *RenderPassEncoder,
        x: f32,
        y: f32,
        width: f32,
        height: f32,
        min_depth: f32,
        max_depth: f32,
    ) !void {
        const command_list = encoder.command_list;

        const viewport = c.D3D12_VIEWPORT{
            .TopLeftX = x,
            .TopLeftY = y,
            .Width = width,
            .Height = height,
            .MinDepth = min_depth,
            .MaxDepth = max_depth,
        };

        command_list.lpVtbl.*.RSSetViewports.?(command_list, 1, &viewport);
    }

    // Private
    fn applyVertexBuffers(encoder: *RenderPassEncoder) void {
        if (encoder.vertex_apply_count > 0) {
            const command_list = encoder.command_list;

            for (0..encoder.vertex_apply_count) |i| {
                var view = &encoder.vertex_buffer_views[i];
                view.StrideInBytes = encoder.vertex_strides[i];
            }

            command_list.lpVtbl.*.IASetVertexBuffers.?(
                command_list,
                0,
                encoder.vertex_apply_count,
                &encoder.vertex_buffer_views,
            );

            encoder.vertex_apply_count = 0;
        }
    }
};
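// Queue wraps the direct command queue plus a fence for CPU/GPU
// synchronization: fence_value increases monotonically per submit, and
// waitUntil() blocks on a Win32 event until the fence reaches that value.
// writeBuffer/writeTexture calls are batched on an internal encoder that is
// flushed by the next submit().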
pub const Queue = struct {
    manager: utils.Manager(Queue) = .{},
    device: *Device,
    d3d_command_queue: *c.ID3D12CommandQueue,
    fence: *c.ID3D12Fence,
    fence_value: u64 = 0,
    fence_event: c.HANDLE,
    command_encoder: ?*CommandEncoder = null,

    pub fn init(device: *Device) !Queue {
        const d3d_device = device.d3d_device;
        var hr: c.HRESULT = undefined;

        // Command Queue
        var d3d_command_queue: *c.ID3D12CommandQueue = undefined;
        hr = d3d_device.lpVtbl.*.CreateCommandQueue.?(
            d3d_device,
            &c.D3D12_COMMAND_QUEUE_DESC{
                .Type = c.D3D12_COMMAND_LIST_TYPE_DIRECT,
                .Priority = c.D3D12_COMMAND_QUEUE_PRIORITY_NORMAL,
                .Flags = c.D3D12_COMMAND_QUEUE_FLAG_NONE,
                .NodeMask = 0,
            },
            &c.IID_ID3D12CommandQueue,
            @ptrCast(&d3d_command_queue),
        );
        if (hr != c.S_OK) {
            return error.CreateCommandQueueFailed;
        }
        errdefer _ = d3d_command_queue.lpVtbl.*.Release.?(d3d_command_queue);

        // Fence
        var fence: *c.ID3D12Fence = undefined;
        hr = d3d_device.lpVtbl.*.CreateFence.?(
            d3d_device,
            0,
            c.D3D12_FENCE_FLAG_NONE,
            &c.IID_ID3D12Fence,
            @ptrCast(&fence),
        );
        if (hr != c.S_OK) {
            return error.CreateFenceFailed;
        }
        errdefer _ = fence.lpVtbl.*.Release.?(fence);

        // Fence Event
        const fence_event = c.CreateEventW(null, c.FALSE, c.FALSE, null);
        if (fence_event == null) {
            return error.CreateEventFailed;
        }
        errdefer _ = c.CloseHandle(fence_event);

        // Result
        return .{
            .device = device,
            .d3d_command_queue = d3d_command_queue,
            .fence = fence,
            .fence_event = fence_event,
        };
    }

    pub fn deinit(queue: *Queue) void {
        const d3d_command_queue = queue.d3d_command_queue;
        const fence = queue.fence;

        queue.waitUntil(queue.fence_value);

        if (queue.command_encoder) |command_encoder| command_encoder.manager.release();
        _ = d3d_command_queue.lpVtbl.*.Release.?(d3d_command_queue);
        _ = fence.lpVtbl.*.Release.?(fence);
        _ = c.CloseHandle(queue.fence_event);
    }

    pub fn submit(queue: *Queue, command_buffers: []const *CommandBuffer) !void {
        var command_manager = &queue.device.command_manager;
        const d3d_command_queue = queue.d3d_command_queue;

        var command_lists = try std.ArrayListUnmanaged(*c.ID3D12GraphicsCommandList).initCapacity(
            allocator,
            command_buffers.len + 1,
        );
        defer command_lists.deinit(allocator);

        queue.fence_value += 1;

        if (queue.command_encoder) |command_encoder| {
            const command_buffer = try command_encoder.finish(&.{});
            command_buffer.manager.reference(); // handled in main.zig
            defer command_buffer.manager.release();

            command_lists.appendAssumeCapacity(command_buffer.command_list);
            try command_buffer.reference_tracker.submit(queue);

            command_encoder.manager.release();
            queue.command_encoder = null;
        }

        for (command_buffers) |command_buffer| {
            command_lists.appendAssumeCapacity(command_buffer.command_list);
            try command_buffer.reference_tracker.submit(queue);
        }

        d3d_command_queue.lpVtbl.*.ExecuteCommandLists.?(
            d3d_command_queue,
            @intCast(command_lists.items.len),
            @ptrCast(command_lists.items.ptr),
        );

        for (command_lists.items) |command_list| {
            command_manager.destroyCommandList(command_list);
        }

        try queue.signal();
    }

    pub fn writeBuffer(queue: *Queue, buffer: *Buffer, offset: u64, data: [*]const u8, size: u64) !void {
        const encoder = try queue.getCommandEncoder();
        try encoder.writeBuffer(buffer, offset, data, size);
    }

    pub fn writeTexture(
        queue: *Queue,
        destination: *const sysgpu.ImageCopyTexture,
        data: [*]const u8,
        data_size: usize,
        data_layout: *const sysgpu.Texture.DataLayout,
        write_size: *const sysgpu.Extent3D,
    ) !void {
        const encoder = try queue.getCommandEncoder();
        try encoder.writeTexture(destination, data, data_size, data_layout, write_size);
    }

    // Internal
    pub fn signal(queue: *Queue) !void {
        const d3d_command_queue = queue.d3d_command_queue;
        var hr: c.HRESULT = undefined;

        hr = d3d_command_queue.lpVtbl.*.Signal.?(
            d3d_command_queue,
            queue.fence,
            queue.fence_value,
        );
        if (hr != c.S_OK) {
            return error.SignalFailed;
        }
    }

    pub fn waitUntil(queue: *Queue, fence_value: u64) void {
        const fence = queue.fence;
        const fence_event = queue.fence_event;
        var hr: c.HRESULT = undefined;

        const completed_value = fence.lpVtbl.*.GetCompletedValue.?(fence);
        if (completed_value >= fence_value)
            return;

        hr = fence.lpVtbl.*.SetEventOnCompletion.?(
            fence,
            fence_value,
            fence_event,
        );
        std.debug.assert(hr == c.S_OK);

        const result = c.WaitForSingleObject(fence_event, c.INFINITE);
        std.debug.assert(result == c.WAIT_OBJECT_0);
    }

    // Private
    fn getCommandEncoder(queue: *Queue) !*CommandEncoder {
        if (queue.command_encoder) |command_encoder| return command_encoder;

        const command_encoder = try CommandEncoder.init(queue.device, &.{});
        queue.command_encoder = command_encoder;
        return command_encoder;
    }
};
0
repos/mach-sysgpu
repos/mach-sysgpu/src/metal.zig
const std = @import("std");
const ca = @import("objc").quartz_core.ca;
const cg = @import("objc").core_graphics.cg;
const mtl = @import("objc").metal.mtl;
const objc = @import("objc").objc;
const ns = @import("objc").foundation.ns;
const sysgpu = @import("sysgpu/main.zig");
const limits = @import("limits.zig");
const utils = @import("utils.zig");
const shader = @import("shader.zig");
const conv = @import("metal/conv.zig");

const log = std.log.scoped(.metal);

const upload_page_size = 64 * 1024 * 1024; // TODO - split writes and/or support large uploads
const max_storage_buffers_per_shader_stage = 8;
const max_uniform_buffers_per_shader_stage = 12;
const max_buffers_per_stage = 20;
const max_vertex_buffers = 8;
const slot_vertex_buffers = 20;
const slot_buffer_lengths = 28;

var allocator: std.mem.Allocator = undefined;

pub const InitOptions = struct {};

pub fn init(alloc: std.mem.Allocator, options: InitOptions) !void {
    _ = options;
    allocator = alloc;
}

fn isDepthFormat(format: mtl.PixelFormat) bool {
    return switch (format) {
        mtl.PixelFormatDepth16Unorm => true,
        mtl.PixelFormatDepth24Unorm_Stencil8 => true,
        mtl.PixelFormatDepth32Float => true,
        mtl.PixelFormatDepth32Float_Stencil8 => true,
        else => false,
    };
}

fn isStencilFormat(format: mtl.PixelFormat) bool {
    return switch (format) {
        mtl.PixelFormatStencil8 => true,
        mtl.PixelFormatDepth24Unorm_Stencil8 => true,
        mtl.PixelFormatDepth32Float_Stencil8 => true,
        else => false,
    };
}

fn entrypointString(name: [*:0]const u8) [*:0]const u8 {
    return if (std.mem.eql(u8, std.mem.span(name), "main")) "main_" else name;
}

fn entrypointSlice(name: []const u8) []const u8 {
    return if (std.mem.eql(u8, name, "main")) "main_" else name;
}

const MapCallback = struct {
    buffer: *Buffer,
    callback: sysgpu.Buffer.MapCallback,
    userdata: ?*anyopaque,
};

const BindingPoint = shader.CodeGen.BindingPoint;
const BindingTable = shader.CodeGen.BindingTable;

pub const Instance = struct {
    manager: utils.Manager(Instance) = .{},

    pub fn init(desc: *const sysgpu.Instance.Descriptor) !*Instance {
        // TODO
        _ = desc;

        ns.init();
        ca.init();
        mtl.init();

        const instance = try allocator.create(Instance);
        instance.* = .{};
        return instance;
    }

    pub fn deinit(instance: *Instance) void {
        allocator.destroy(instance);
    }

    pub fn createSurface(instance: *Instance, desc: *const sysgpu.Surface.Descriptor) !*Surface {
        return Surface.init(instance, desc);
    }
};

pub const Adapter = struct {
    manager: utils.Manager(Adapter) = .{},
    mtl_device: *mtl.Device,

    pub fn init(instance: *Instance, options: *const sysgpu.RequestAdapterOptions) !*Adapter {
        _ = instance;
        _ = options;

        // TODO - choose appropriate device from options
        const mtl_device = mtl.createSystemDefaultDevice() orelse {
            return error.NoAdapterFound;
        };
        errdefer mtl_device.release();

        const adapter = try allocator.create(Adapter);
        adapter.* = .{ .mtl_device = mtl_device };
        return adapter;
    }

    pub fn deinit(adapter: *Adapter) void {
        adapter.mtl_device.release();
        allocator.destroy(adapter);
    }

    pub fn createDevice(adapter: *Adapter, desc: ?*const sysgpu.Device.Descriptor) !*Device {
        return Device.init(adapter, desc);
    }

    pub fn getProperties(adapter: *Adapter) sysgpu.Adapter.Properties {
        const mtl_device = adapter.mtl_device;

        return .{
            .vendor_id = 0, // TODO
            .vendor_name = "", // TODO
            .architecture = "", // TODO
            .device_id = 0, // TODO
            .name = mtl_device.name().UTF8String(),
            .driver_description = "", // TODO
            .adapter_type = if (mtl_device.isLowPower()) .integrated_gpu else .discrete_gpu,
            .backend_type = .metal,
            .compatibility_mode = .false,
        };
    }
};
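// Surface wraps an externally created CAMetalLayer, passed in through the
// DescriptorFromMetalLayer chained struct; SwapChain.init() configures it.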
pub const Surface = struct {
    manager: utils.Manager(Surface) = .{},
    layer: *ca.MetalLayer,

    pub fn init(instance: *Instance, desc: *const sysgpu.Surface.Descriptor) !*Surface {
        _ = instance;

        if (utils.findChained(sysgpu.Surface.DescriptorFromMetalLayer, desc.next_in_chain.generic)) |mtl_desc| {
            const surface = try allocator.create(Surface);
            surface.* = .{ .layer = @ptrCast(mtl_desc.layer) };
            return surface;
        } else {
            return error.InvalidDescriptor;
        }
    }

    pub fn deinit(surface: *Surface) void {
        allocator.destroy(surface);
    }
};

pub const Device = struct {
    manager: utils.Manager(Device) = .{},
    mtl_device: *mtl.Device,
    queue: ?*Queue = null,
    lost_cb: ?sysgpu.Device.LostCallback = null,
    lost_cb_userdata: ?*anyopaque = null,
    log_cb: ?sysgpu.LoggingCallback = null,
    log_cb_userdata: ?*anyopaque = null,
    err_cb: ?sysgpu.ErrorCallback = null,
    err_cb_userdata: ?*anyopaque = null,
    streaming_manager: StreamingManager = undefined,
    reference_trackers: std.ArrayListUnmanaged(*ReferenceTracker) = .{},
    map_callbacks: std.ArrayListUnmanaged(MapCallback) = .{},
    free_lengths_buffers: std.ArrayListUnmanaged(*mtl.Buffer) = .{},

    pub fn init(adapter: *Adapter, desc: ?*const sysgpu.Device.Descriptor) !*Device {
        // TODO
        _ = desc;

        const mtl_device = adapter.mtl_device;

        var device = try allocator.create(Device);
        device.* = .{
            .mtl_device = mtl_device,
        };
        device.streaming_manager = try StreamingManager.init(device);
        errdefer device.streaming_manager.deinit();

        return device;
    }

    pub fn deinit(device: *Device) void {
        if (device.lost_cb) |lost_cb| {
            lost_cb(.destroyed, "Device was destroyed.", device.lost_cb_userdata);
        }

        if (device.queue) |queue| queue.waitUntil(queue.fence_value);
        device.processQueuedOperations();

        for (device.free_lengths_buffers.items) |mtl_buffer| mtl_buffer.release();
        device.free_lengths_buffers.deinit(allocator);
        device.map_callbacks.deinit(allocator);
        device.reference_trackers.deinit(allocator);
        device.streaming_manager.deinit();
        if (device.queue) |queue| queue.manager.release();
        allocator.destroy(device);
    }

    pub fn createBindGroup(device: *Device, desc: *const sysgpu.BindGroup.Descriptor) !*BindGroup {
        return BindGroup.init(device, desc);
    }

    pub fn createBindGroupLayout(device: *Device, desc: *const sysgpu.BindGroupLayout.Descriptor) !*BindGroupLayout {
        return BindGroupLayout.init(device, desc);
    }

    pub fn createBuffer(device: *Device, desc: *const sysgpu.Buffer.Descriptor) !*Buffer {
        return Buffer.init(device, desc);
    }

    pub fn createCommandEncoder(device: *Device, desc: *const sysgpu.CommandEncoder.Descriptor) !*CommandEncoder {
        return CommandEncoder.init(device, desc);
    }

    pub fn createComputePipeline(device: *Device, desc: *const sysgpu.ComputePipeline.Descriptor) !*ComputePipeline {
        return ComputePipeline.init(device, desc);
    }

    pub fn createPipelineLayout(device: *Device, desc: *const sysgpu.PipelineLayout.Descriptor) !*PipelineLayout {
        return PipelineLayout.init(device, desc);
    }

    pub fn createRenderPipeline(device: *Device, desc: *const sysgpu.RenderPipeline.Descriptor) !*RenderPipeline {
        return RenderPipeline.init(device, desc);
    }

    pub fn createSampler(device: *Device, desc: *const sysgpu.Sampler.Descriptor) !*Sampler {
        return Sampler.init(device, desc);
    }

    pub fn createShaderModuleAir(device: *Device, air: *shader.Air, label: [*:0]const u8) !*ShaderModule {
        return ShaderModule.initAir(device, air, label);
    }

    pub fn createShaderModuleSpirv(device: *Device, code: [*]const u32, code_size: u32) !*ShaderModule {
        _ = code;
        _ = code_size;
        _ = device;
        return error.Unsupported;
    }

    pub fn createShaderModuleHLSL(device: *Device, code: []const u8) !*ShaderModule {
        _ = code;
        _ = device;
        return error.Unsupported;
    }

    pub fn createShaderModuleMSL(
        device: *Device,
        label: [*:0]const u8,
        code: []const u8,
        workgroup_size: sysgpu.ShaderModule.WorkgroupSize,
    ) !*ShaderModule {
        const module = try allocator.create(ShaderModule);
        module.* = .{
            .device = device,
            .label = label,
            .code = .{ .metal = .{ .code = code, .workgroup_size = workgroup_size } },
        };
        return module;
    }

    pub fn createSwapChain(device: *Device, surface: *Surface, desc: *const sysgpu.SwapChain.Descriptor) !*SwapChain {
        return SwapChain.init(device, surface, desc);
    }

    pub fn createTexture(device: *Device, desc: *const sysgpu.Texture.Descriptor) !*Texture {
        return Texture.init(device, desc);
    }

    pub fn getQueue(device: *Device) !*Queue {
        if (device.queue == null) {
            device.queue = try Queue.init(device);
        }
        return device.queue.?;
    }

    pub fn tick(device: *Device) !void {
        device.processQueuedOperations();
    }

    // Internal
    pub fn processQueuedOperations(device: *Device) void {
        // Reference trackers
        if (device.queue) |queue| {
            const completed_value = queue.completed_value.load(.Acquire);

            var i: usize = 0;
            while (i < device.reference_trackers.items.len) {
                const reference_tracker = device.reference_trackers.items[i];

                if (reference_tracker.fence_value <= completed_value) {
                    reference_tracker.deinit();
                    _ = device.reference_trackers.swapRemove(i);
                } else {
                    i += 1;
                }
            }
        }

        // MapAsync
        {
            var i: usize = 0;
            while (i < device.map_callbacks.items.len) {
                const map_callback = device.map_callbacks.items[i];

                if (map_callback.buffer.gpu_count == 0) {
                    map_callback.buffer.executeMapAsync(map_callback);
                    _ = device.map_callbacks.swapRemove(i);
                } else {
                    i += 1;
                }
            }
        }
    }
};
pub const StreamingManager = struct {
    device: *Device,
    free_buffers: std.ArrayListUnmanaged(*mtl.Buffer) = .{},

    pub fn init(device: *Device) !StreamingManager {
        return .{
            .device = device,
        };
    }

    pub fn deinit(manager: *StreamingManager) void {
        for (manager.free_buffers.items) |mtl_buffer| mtl_buffer.release();
        manager.free_buffers.deinit(allocator);
    }

    pub fn acquire(manager: *StreamingManager) !*mtl.Buffer {
        const device = manager.device;

        // Recycle finished buffers
        if (manager.free_buffers.items.len == 0) {
            device.processQueuedOperations();
        }

        // Create new buffer
        if (manager.free_buffers.items.len == 0) {
            const pool = objc.autoreleasePoolPush();
            defer objc.autoreleasePoolPop(pool);

            const mtl_device = manager.device.mtl_device;

            const mtl_buffer = mtl_device.newBufferWithLength_options(upload_page_size, mtl.ResourceCPUCacheModeWriteCombined) orelse {
                return error.NewBufferFailed;
            };
            mtl_buffer.setLabel(ns.String.stringWithUTF8String("upload"));
            try manager.free_buffers.append(allocator, mtl_buffer);
        }

        // Result
        return manager.free_buffers.pop();
    }

    pub fn release(manager: *StreamingManager, mtl_buffer: *mtl.Buffer) void {
        manager.free_buffers.append(allocator, mtl_buffer) catch {
            std.debug.panic("OutOfMemory", .{});
        };
    }
};
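// LengthsBuffer keeps a small table of buffer sizes that is pushed to the
// shader via setBytes at slot_buffer_lengths, presumably so generated MSL can
// answer runtime array-length queries; only dirtied slots trigger a re-upload.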
pub const LengthsBuffer = struct {
    device: *Device,
    mtl_buffer: *mtl.Buffer,
    data: [max_buffers_per_stage]u32,
    apply_count: u32 = 0,

    pub fn init(device: *Device) !LengthsBuffer {
        const pool = objc.autoreleasePoolPush();
        defer objc.autoreleasePoolPop(pool);

        const mtl_device = device.mtl_device;

        var mtl_buffer: *mtl.Buffer = undefined;
        if (device.free_lengths_buffers.items.len > 0) {
            mtl_buffer = device.free_lengths_buffers.pop();
        } else {
            mtl_buffer = mtl_device.newBufferWithLength_options(max_buffers_per_stage * @sizeOf(u32), 0) orelse {
                return error.NewBufferFailed;
            };
            mtl_buffer.setLabel(ns.String.stringWithUTF8String("buffer lengths"));
        }

        return .{
            .device = device,
            .mtl_buffer = mtl_buffer,
            .data = std.mem.zeroes([max_buffers_per_stage]u32),
        };
    }

    pub fn deinit(lengths_buffer: *LengthsBuffer) void {
        const device = lengths_buffer.device;
        device.free_lengths_buffers.append(allocator, lengths_buffer.mtl_buffer) catch std.debug.panic("OutOfMemory", .{});
    }

    pub fn set(lengths_buffer: *LengthsBuffer, slot: u32, size: u32) void {
        if (lengths_buffer.data[slot] != size) {
            lengths_buffer.data[slot] = size;
            lengths_buffer.apply_count = @max(lengths_buffer.apply_count, slot + 1);
        }
    }

    pub fn apply_compute(lengths_buffer: *LengthsBuffer, mtl_encoder: *mtl.ComputeCommandEncoder) void {
        if (lengths_buffer.apply_count > 0) {
            mtl_encoder.setBytes_length_atIndex(&lengths_buffer.data, lengths_buffer.apply_count * @sizeOf(u32), slot_buffer_lengths);
            lengths_buffer.apply_count = 0;
        }
    }

    pub fn apply_vertex(lengths_buffer: *LengthsBuffer, mtl_encoder: *mtl.RenderCommandEncoder) void {
        if (lengths_buffer.apply_count > 0) {
            mtl_encoder.setVertexBytes_length_atIndex(&lengths_buffer.data, lengths_buffer.apply_count * @sizeOf(u32), slot_buffer_lengths);
            lengths_buffer.apply_count = 0;
        }
    }

    pub fn apply_fragment(lengths_buffer: *LengthsBuffer, mtl_encoder: *mtl.RenderCommandEncoder) void {
        if (lengths_buffer.apply_count > 0) {
            mtl_encoder.setFragmentBytes_length_atIndex(&lengths_buffer.data, lengths_buffer.apply_count * @sizeOf(u32), slot_buffer_lengths);
            lengths_buffer.apply_count = 0;
        }
    }
};
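// SwapChain configures the CAMetalLayer directly: three drawables for mailbox
// present mode (otherwise two), display sync disabled for immediate present,
// and framebufferOnly unless storage or render-attachment usage is requested.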
pub const SwapChain = struct {
    manager: utils.Manager(SwapChain) = .{},
    device: *Device,
    surface: *Surface,
    current_drawable: ?*ca.MetalDrawable = null,

    pub fn init(device: *Device, surface: *Surface, desc: *const sysgpu.SwapChain.Descriptor) !*SwapChain {
        const layer = surface.layer;
        const size = cg.Size{ .width = @floatFromInt(desc.width), .height = @floatFromInt(desc.height) };

        layer.setDevice(device.mtl_device);
        layer.setPixelFormat(conv.metalPixelFormat(desc.format));
        layer.setFramebufferOnly(!(desc.usage.storage_binding or desc.usage.render_attachment));
        layer.setDrawableSize(size);
        layer.setMaximumDrawableCount(if (desc.present_mode == .mailbox) 3 else 2);
        layer.setDisplaySyncEnabled(desc.present_mode != .immediate);

        const swapchain = try allocator.create(SwapChain);
        swapchain.* = .{ .device = device, .surface = surface };
        return swapchain;
    }

    pub fn deinit(swapchain: *SwapChain) void {
        if (swapchain.current_drawable) |drawable| drawable.release();
        allocator.destroy(swapchain);
    }

    pub fn getCurrentTextureView(swapchain: *SwapChain) !*TextureView {
        const pool = objc.autoreleasePoolPush();
        defer objc.autoreleasePoolPop(pool);

        if (swapchain.current_drawable) |drawable| drawable.release();

        swapchain.current_drawable = swapchain.surface.layer.nextDrawable();
        swapchain.device.processQueuedOperations();

        if (swapchain.current_drawable) |drawable| {
            _ = drawable.retain();
            return TextureView.initFromMtlTexture(drawable.texture());
        } else {
            std.debug.panic("getCurrentTextureView no drawable", .{});
        }
    }

    pub fn present(swapchain: *SwapChain) !void {
        const pool = objc.autoreleasePoolPush();
        defer objc.autoreleasePoolPop(pool);

        if (swapchain.current_drawable) |_| {
            const queue = try swapchain.device.getQueue();
            const command_buffer = queue.command_queue.commandBuffer() orelse {
                return error.NewCommandBufferFailed;
            };
            command_buffer.presentDrawable(@ptrCast(swapchain.current_drawable)); // TODO - objc casting?
            command_buffer.commit();
        }
    }
};
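// Metal buffers here are always host-accessible: getMappedRange returns a
// pointer into contents(), and mapAsync completes immediately unless the
// buffer is still referenced by in-flight GPU work (gpu_count > 0).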
pub const Buffer = struct {
    manager: utils.Manager(Buffer) = .{},
    device: *Device,
    mtl_buffer: *mtl.Buffer,
    gpu_count: u32 = 0,
    // TODO - packed buffer descriptor struct
    size: u64,
    usage: sysgpu.Buffer.UsageFlags,

    pub fn init(device: *Device, desc: *const sysgpu.Buffer.Descriptor) !*Buffer {
        const pool = objc.autoreleasePoolPush();
        defer objc.autoreleasePoolPop(pool);

        const mtl_device = device.mtl_device;

        const mtl_buffer = mtl_device.newBufferWithLength_options(
            desc.size,
            conv.metalResourceOptionsForBuffer(desc.usage),
        ) orelse {
            return error.NewBufferFailed;
        };
        errdefer mtl_buffer.release();

        if (desc.label) |label| {
            mtl_buffer.setLabel(ns.String.stringWithUTF8String(label));
        }

        const buffer = try allocator.create(Buffer);
        buffer.* = .{
            .device = device,
            .mtl_buffer = mtl_buffer,
            .size = desc.size,
            .usage = desc.usage,
        };
        return buffer;
    }

    pub fn deinit(buffer: *Buffer) void {
        buffer.mtl_buffer.release();
        allocator.destroy(buffer);
    }

    pub fn getMappedRange(buffer: *Buffer, offset: usize, size: usize) !?*anyopaque {
        _ = size;
        const mtl_buffer = buffer.mtl_buffer;
        const base: [*]const u8 = @ptrCast(mtl_buffer.contents());
        return @constCast(base + offset);
    }

    pub fn getSize(buffer: *Buffer) u64 {
        return buffer.size;
    }

    pub fn getUsage(buffer: *Buffer) sysgpu.Buffer.UsageFlags {
        return buffer.usage;
    }

    pub fn mapAsync(
        buffer: *Buffer,
        mode: sysgpu.MapModeFlags,
        offset: usize,
        size: usize,
        callback: sysgpu.Buffer.MapCallback,
        userdata: ?*anyopaque,
    ) !void {
        _ = size;
        _ = offset;
        _ = mode;

        const map_callback = MapCallback{ .buffer = buffer, .callback = callback, .userdata = userdata };
        if (buffer.gpu_count == 0) {
            buffer.executeMapAsync(map_callback);
        } else {
            try buffer.device.map_callbacks.append(allocator, map_callback);
        }
    }

    pub fn setLabel(buffer: *Buffer, label: [*:0]const u8) void {
        const pool = objc.autoreleasePoolPush();
        defer objc.autoreleasePoolPop(pool);

        const mtl_buffer = buffer.mtl_buffer;
        mtl_buffer.setLabel(ns.String.stringWithUTF8String(label));
    }

    pub fn unmap(buffer: *Buffer) !void {
        _ = buffer;
    }

    // Internal
    pub fn executeMapAsync(buffer: *Buffer, map_callback: MapCallback) void {
        _ = buffer;
        map_callback.callback(.success, map_callback.userdata);
    }
};

pub const Texture = struct {
    manager: utils.Manager(Texture) = .{},
    mtl_texture: *mtl.Texture,

    pub fn init(device: *Device, desc: *const sysgpu.Texture.Descriptor) !*Texture {
        const pool = objc.autoreleasePoolPush();
        defer objc.autoreleasePoolPop(pool);

        const mtl_device = device.mtl_device;

        var mtl_desc = mtl.TextureDescriptor.alloc().init();
        defer mtl_desc.release();

        mtl_desc.setTextureType(conv.metalTextureType(desc.dimension, desc.size, desc.sample_count));
        mtl_desc.setPixelFormat(conv.metalPixelFormat(desc.format));
        mtl_desc.setWidth(desc.size.width);
        mtl_desc.setHeight(desc.size.height);
        mtl_desc.setDepth(if (desc.dimension == .dimension_3d) desc.size.depth_or_array_layers else 1);
        mtl_desc.setMipmapLevelCount(desc.mip_level_count);
        mtl_desc.setSampleCount(desc.sample_count);
        mtl_desc.setArrayLength(if (desc.dimension == .dimension_3d) 1 else desc.size.depth_or_array_layers);
        mtl_desc.setStorageMode(conv.metalStorageModeForTexture(desc.usage));
        mtl_desc.setUsage(conv.metalTextureUsage(desc.usage, desc.view_format_count));

        const mtl_texture = mtl_device.newTextureWithDescriptor(mtl_desc) orelse {
            return error.NewTextureFailed;
        };
        errdefer mtl_texture.release();

        if (desc.label) |label| {
            mtl_texture.setLabel(ns.String.stringWithUTF8String(label));
        }

        const texture = try allocator.create(Texture);
        texture.* = .{
            .mtl_texture = mtl_texture,
        };
        return texture;
    }

    pub fn deinit(texture: *Texture) void {
        texture.mtl_texture.release();
        allocator.destroy(texture);
    }

    pub fn createView(texture: *Texture, desc: *const sysgpu.TextureView.Descriptor) !*TextureView {
        return TextureView.init(texture, desc);
    }
};
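// TextureView only creates a distinct MTLTexture view when the requested
// format, type, or mip/array range differs from the parent texture; otherwise
// the parent texture is retained as-is.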
pub const TextureView = struct {
    manager: utils.Manager(TextureView) = .{},
    mtl_texture: *mtl.Texture,

    pub fn init(texture: *Texture, desc: *const sysgpu.TextureView.Descriptor) !*TextureView {
        const pool = objc.autoreleasePoolPush();
        defer objc.autoreleasePoolPop(pool);

        var mtl_texture = texture.mtl_texture;

        const texture_format = mtl_texture.pixelFormat();
        const texture_type = mtl_texture.textureType();
        const texture_mip_level_count = mtl_texture.mipmapLevelCount();
        const texture_array_layer_count = mtl_texture.arrayLength();

        const view_format = if (desc.format != .undefined) conv.metalPixelFormatForView(desc.format, texture_format, desc.aspect) else texture_format;
        const view_type = if (desc.dimension != .dimension_undefined) conv.metalTextureTypeForView(desc.dimension) else texture_type;
        const view_base_mip_level = desc.base_mip_level;
        const view_mip_level_count = if (desc.mip_level_count == sysgpu.mip_level_count_undefined) texture_mip_level_count - desc.base_mip_level else desc.mip_level_count;
        const view_base_array_layer = desc.base_array_layer;
        const view_array_layer_count = if (desc.array_layer_count == sysgpu.array_layer_count_undefined) texture_array_layer_count - desc.base_array_layer else desc.array_layer_count;

        if (view_format != texture_format or
            view_type != texture_type or
            view_base_mip_level != 0 or
            view_mip_level_count != texture_mip_level_count or
            view_base_array_layer != 0 or
            view_array_layer_count != texture_array_layer_count)
        {
            mtl_texture = mtl_texture.newTextureViewWithPixelFormat_textureType_levels_slices(
                view_format,
                view_type,
                ns.Range.init(view_base_mip_level, view_mip_level_count),
                ns.Range.init(view_base_array_layer, view_array_layer_count),
            ) orelse {
                return error.NewTextureViewFailed;
            };

            if (desc.label) |label| {
                mtl_texture.setLabel(ns.String.stringWithUTF8String(label));
            }
        } else {
            _ = mtl_texture.retain();
        }

        const view = try allocator.create(TextureView);
        view.* = .{
            .mtl_texture = mtl_texture,
        };
        return view;
    }

    pub fn initFromMtlTexture(mtl_texture: *mtl.Texture) !*TextureView {
        const view = try allocator.create(TextureView);
        view.* = .{
            .mtl_texture = mtl_texture.retain(),
        };
        return view;
    }

    pub fn deinit(view: *TextureView) void {
        view.mtl_texture.release();
        allocator.destroy(view);
    }
};

pub const Sampler = struct {
    manager: utils.Manager(TextureView) = .{},
    mtl_sampler: *mtl.SamplerState,

    pub fn init(device: *Device, desc: *const sysgpu.Sampler.Descriptor) !*Sampler {
        const pool = objc.autoreleasePoolPush();
        defer objc.autoreleasePoolPop(pool);

        const mtl_device = device.mtl_device;

        var mtl_desc = mtl.SamplerDescriptor.alloc().init();
        defer mtl_desc.release();

        mtl_desc.setMinFilter(conv.metalSamplerMinMagFilter(desc.min_filter));
        mtl_desc.setMagFilter(conv.metalSamplerMinMagFilter(desc.mag_filter));
        mtl_desc.setMipFilter(conv.metalSamplerMipFilter(desc.mipmap_filter));
        mtl_desc.setMaxAnisotropy(desc.max_anisotropy);
        mtl_desc.setSAddressMode(conv.metalSamplerAddressMode(desc.address_mode_u));
        mtl_desc.setTAddressMode(conv.metalSamplerAddressMode(desc.address_mode_v));
        mtl_desc.setRAddressMode(conv.metalSamplerAddressMode(desc.address_mode_w));
        mtl_desc.setLodMinClamp(desc.lod_min_clamp);
        mtl_desc.setLodMaxClamp(desc.lod_max_clamp);
        if (desc.compare != .undefined) mtl_desc.setCompareFunction(conv.metalCompareFunction(desc.compare));
        if (desc.label) |label| mtl_desc.setLabel(ns.String.stringWithUTF8String(label));

        const mtl_sampler = mtl_device.newSamplerStateWithDescriptor(mtl_desc) orelse {
            return error.NewSamplerFailed;
        };
        errdefer mtl_sampler.release();

        const sampler = try allocator.create(Sampler);
        sampler.* = .{
            .mtl_sampler = mtl_sampler,
        };
        return sampler;
    }

    pub fn deinit(sampler: *Sampler) void {
        sampler.mtl_sampler.release();
        allocator.destroy(sampler);
    }
};

pub const BindGroupLayout = struct {
    manager: utils.Manager(BindGroupLayout) = .{},
    entries: []const sysgpu.BindGroupLayout.Entry,

    pub fn init(device: *Device, desc: *const sysgpu.BindGroupLayout.Descriptor) !*BindGroupLayout {
        _ = device;

        var entries: []const sysgpu.BindGroupLayout.Entry = undefined;
        if (desc.entry_count > 0) {
            entries = try allocator.dupe(sysgpu.BindGroupLayout.Entry, desc.entries.?[0..desc.entry_count]);
        } else {
            entries = &[_]sysgpu.BindGroupLayout.Entry{};
        }

        const layout = try allocator.create(BindGroupLayout);
        layout.* = .{
            .entries = entries,
        };
        return layout;
    }

    pub fn deinit(layout: *BindGroupLayout) void {
        if (layout.entries.len > 0) allocator.free(layout.entries);
        allocator.destroy(layout);
    }

    // Internal
    pub fn getEntry(layout: *BindGroupLayout, binding: u32) ?*const sysgpu.BindGroupLayout.Entry {
        for (layout.entries) |*entry| {
            if (entry.binding == binding) return entry;
        }
        return null;
    }

    pub fn getDynamicIndex(layout: *BindGroupLayout, binding: u32) ?u32 {
        var index: u32 = 0;
        for (layout.entries) |entry| {
            if (entry.buffer.has_dynamic_offset == .false) continue;
            if (entry.binding == binding) return index;
            index += 1;
        }
        return null;
    }
};
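// BindGroup resolves each entry at creation time to the retained Metal object
// (buffer, sampler state, or texture) plus its visibility and, for dynamic
// buffers, the index used when applying dynamic offsets at bind time.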
pub const BindGroup = struct {
    const Kind = enum {
        buffer,
        sampler,
        texture,
    };

    const Entry = struct {
        kind: Kind = undefined,
        binding: u32,
        visibility: sysgpu.ShaderStageFlags,
        dynamic_index: ?u32,
        buffer: ?*Buffer = null,
        offset: u32 = 0,
        size: u32 = 0,
        sampler: ?*mtl.SamplerState = null,
        texture: ?*mtl.Texture = null,
    };

    manager: utils.Manager(BindGroup) = .{},
    entries: []const Entry,

    pub fn init(device: *Device, desc: *const sysgpu.BindGroup.Descriptor) !*BindGroup {
        _ = device;

        const layout: *BindGroupLayout = @ptrCast(@alignCast(desc.layout));

        var mtl_entries = try allocator.alloc(Entry, desc.entry_count);
        errdefer allocator.free(mtl_entries);

        for (desc.entries.?[0..desc.entry_count], 0..) |entry, i| {
            var mtl_entry = &mtl_entries[i];

            const bind_group_entry = layout.getEntry(entry.binding) orelse return error.UnknownBinding;

            mtl_entry.* = .{
                .binding = entry.binding,
                .visibility = bind_group_entry.visibility,
                .dynamic_index = layout.getDynamicIndex(entry.binding),
            };
            if (entry.buffer) |buffer_raw| {
                const buffer: *Buffer = @ptrCast(@alignCast(buffer_raw));
                buffer.manager.reference();
                mtl_entry.kind = .buffer;
                mtl_entry.buffer = buffer;
                mtl_entry.offset = @intCast(entry.offset);
                mtl_entry.size = @intCast(entry.size);
            } else if (entry.sampler) |sampler_raw| {
                const sampler: *Sampler = @ptrCast(@alignCast(sampler_raw));
                mtl_entry.kind = .sampler;
                mtl_entry.sampler = sampler.mtl_sampler.retain();
            } else if (entry.texture_view) |texture_view_raw| {
                const texture_view: *TextureView = @ptrCast(@alignCast(texture_view_raw));
                mtl_entry.kind = .texture;
                mtl_entry.texture = texture_view.mtl_texture.retain();
            }
        }

        const group = try allocator.create(BindGroup);
        group.* = .{ .entries = mtl_entries };
        return group;
    }

    pub fn deinit(group: *BindGroup) void {
        for (group.entries) |entry| {
            if (entry.buffer) |buffer| buffer.manager.release();
            if (entry.sampler) |sampler| sampler.release();
            if (entry.texture) |texture| texture.release();
        }
        allocator.free(group.entries);
        allocator.destroy(group);
    }
};
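// PipelineLayout flattens (group, binding) pairs into linear per-stage Metal
// argument indices, with separate counters for buffers, samplers, and
// textures; the resulting tables are handed to shader codegen and used when
// binding resources.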
pub const PipelineLayout = struct {
    manager: utils.Manager(PipelineLayout) = .{},
    group_layouts: []*BindGroupLayout,
    vertex_bindings: BindingTable,
    fragment_bindings: BindingTable,
    compute_bindings: BindingTable,

    pub fn init(device: *Device, desc: *const sysgpu.PipelineLayout.Descriptor) !*PipelineLayout {
        _ = device;

        var group_layouts = try allocator.alloc(*BindGroupLayout, desc.bind_group_layout_count);
        errdefer allocator.free(group_layouts);

        for (0..desc.bind_group_layout_count) |i| {
            const layout: *BindGroupLayout = @ptrCast(@alignCast(desc.bind_group_layouts.?[i]));
            layout.manager.reference();
            group_layouts[i] = layout;
        }

        var vertex_bindings = try buildBindings(group_layouts, .{ .vertex = true });
        errdefer vertex_bindings.deinit(allocator);

        var fragment_bindings = try buildBindings(group_layouts, .{ .fragment = true });
        errdefer fragment_bindings.deinit(allocator);

        var compute_bindings = try buildBindings(group_layouts, .{ .compute = true });
        errdefer compute_bindings.deinit(allocator);

        const layout = try allocator.create(PipelineLayout);
        layout.* = .{
            .group_layouts = group_layouts,
            .vertex_bindings = vertex_bindings,
            .fragment_bindings = fragment_bindings,
            .compute_bindings = compute_bindings,
        };
        return layout;
    }

    pub fn buildBindings(
        group_layouts: []*BindGroupLayout,
        visibility: sysgpu.ShaderStageFlags,
    ) !BindingTable {
        var bindings: BindingTable = .{};
        var buffer_index: u32 = 0;
        var sampler_index: u32 = 0;
        var texture_index: u32 = 0;

        for (group_layouts, 0..) |group_layout, group| {
            for (group_layout.entries) |entry| {
                const key = BindingPoint{ .group = @intCast(group), .binding = entry.binding };

                if ((visibility.vertex and entry.visibility.vertex) or
                    (visibility.fragment and entry.visibility.fragment) or
                    (visibility.compute and entry.visibility.compute))
                {
                    if (entry.buffer.type != .undefined) {
                        try bindings.put(allocator, key, buffer_index);
                        buffer_index += 1;
                    } else if (entry.sampler.type != .undefined) {
                        try bindings.put(allocator, key, sampler_index);
                        sampler_index += 1;
                    } else if (entry.texture.sample_type != .undefined or entry.storage_texture.format != .undefined) {
                        try bindings.put(allocator, key, texture_index);
                        texture_index += 1;
                    }
                }
            }
        }

        return bindings;
    }

    pub fn initDefault(device: *Device, default_pipeline_layout: utils.DefaultPipelineLayoutDescriptor) !*PipelineLayout {
        const groups = default_pipeline_layout.groups;
        var bind_group_layouts = std.BoundedArray(*sysgpu.BindGroupLayout, limits.max_bind_groups){};
        defer {
            for (bind_group_layouts.slice()) |bind_group_layout_raw| {
                const bind_group_layout: *BindGroupLayout = @ptrCast(@alignCast(bind_group_layout_raw));
                bind_group_layout.manager.release();
            }
        }

        for (groups.slice()) |entries| {
            const bind_group_layout = try device.createBindGroupLayout(
                &sysgpu.BindGroupLayout.Descriptor.init(.{ .entries = entries.items }),
            );
            bind_group_layouts.appendAssumeCapacity(@ptrCast(bind_group_layout));
        }

        return device.createPipelineLayout(
            &sysgpu.PipelineLayout.Descriptor.init(.{ .bind_group_layouts = bind_group_layouts.slice() }),
        );
    }

    pub fn deinit(layout: *PipelineLayout) void {
        for (layout.group_layouts) |group_layout| group_layout.manager.release();

        layout.vertex_bindings.deinit(allocator);
        layout.fragment_bindings.deinit(allocator);
        layout.compute_bindings.deinit(allocator);
        allocator.free(layout.group_layouts);
        allocator.destroy(layout);
    }
};
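// ShaderModule either wraps pregenerated MSL or generates MSL from AIR at
// pipeline creation. Entry points named "main" are renamed to "main_",
// presumably because "main" is reserved in MSL.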
pub const ShaderModule = struct {
    manager: utils.Manager(ShaderModule) = .{},
    device: *Device,
    label: [*:0]const u8,
    code: union(enum) {
        metal: struct {
            code: []const u8,
            workgroup_size: sysgpu.ShaderModule.WorkgroupSize,
        },
        air: *shader.Air,
    },

    pub fn initAir(
        device: *Device,
        air: *shader.Air,
        label: [*:0]const u8,
    ) !*ShaderModule {
        const module = try allocator.create(ShaderModule);
        module.* = .{
            .device = device,
            .code = .{ .air = air },
            .label = label,
        };
        return module;
    }

    pub fn deinit(module: *ShaderModule) void {
        if (module.code == .air) {
            module.code.air.deinit(allocator);
            allocator.destroy(module.code.air);
        }
        allocator.destroy(module);
    }

    // Internal
    pub fn compile(
        module: *ShaderModule,
        entrypoint: [*:0]const u8,
        stage: shader.CodeGen.Stage,
        bindings: *const BindingTable,
    ) !*mtl.Function {
        const pool = objc.autoreleasePoolPush();
        defer objc.autoreleasePoolPop(pool);

        const mtl_device = module.device.mtl_device;

        const code = switch (module.code) {
            .air => |air| try shader.CodeGen.generate(
                allocator,
                air,
                .msl,
                false,
                .{ .emit_source_file = "" },
                .{ .name = entrypoint, .stage = stage },
                bindings,
                module.label,
            ),
            .metal => |metal| metal.code,
        };
        defer if (module.code == .air) allocator.free(code);

        const ns_code = ns.String.alloc().initWithBytesNoCopy_length_encoding_freeWhenDone(
            @constCast(code.ptr),
            code.len,
            ns.UTF8StringEncoding,
            false,
        );
        defer ns_code.release();

        var err: ?*ns.Error = undefined;
        const library = mtl_device.newLibraryWithSource_options_error(ns_code, null, &err) orelse {
            std.log.err("{s}", .{err.?.localizedDescription().UTF8String()});
            return error.NewLibraryFailed;
        };
        defer library.release();

        const mtl_entrypoint = entrypointString(entrypoint);
        return library.newFunctionWithName(ns.String.stringWithUTF8String(mtl_entrypoint)) orelse {
            return error.NewFunctionFailed;
        };
    }

    pub fn getThreadgroupSize(shader_module: *ShaderModule, entry_point: [*:0]const u8) !mtl.Size {
        switch (shader_module.code) {
            .metal => |metal| return mtl.Size.init(
                metal.workgroup_size.x,
                metal.workgroup_size.y,
                metal.workgroup_size.z,
            ),
            .air => |air| {
                if (air.findFunction(std.mem.span(entry_point))) |fn_inst| {
                    switch (fn_inst.stage) {
                        .compute => |workgroup_size| {
                            return mtl.Size.init(
                                @intCast(air.resolveInt(workgroup_size.x) orelse 1),
                                @intCast(air.resolveInt(workgroup_size.y) orelse 1),
                                @intCast(air.resolveInt(workgroup_size.z) orelse 1),
                            );
                        },
                        else => {},
                    }
                }
                return error.UnknownThreadgroupSize;
            },
        }
    }
};
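// ComputePipeline compiles its kernel through ShaderModule.compile() and, when
// no layout is given, infers a default pipeline layout from the AIR of the
// compute entry point.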
pub const ComputePipeline = struct {
    manager: utils.Manager(ComputePipeline) = .{},
    mtl_pipeline: *mtl.ComputePipelineState,
    layout: *PipelineLayout,
    threadgroup_size: mtl.Size,

    pub fn init(device: *Device, desc: *const sysgpu.ComputePipeline.Descriptor) !*ComputePipeline {
        const pool = objc.autoreleasePoolPush();
        defer objc.autoreleasePoolPop(pool);

        const mtl_device = device.mtl_device;

        var mtl_desc = mtl.ComputePipelineDescriptor.alloc().init();
        defer mtl_desc.release();

        if (desc.label) |label| {
            mtl_desc.setLabel(ns.String.stringWithUTF8String(label));
        }

        const compute_module: *ShaderModule = @ptrCast(@alignCast(desc.compute.module));

        // Pipeline Layout
        var layout: *PipelineLayout = undefined;
        if (desc.layout) |layout_raw| {
            layout = @ptrCast(@alignCast(layout_raw));
            layout.manager.reference();
        } else if (compute_module.code == .air) {
            var layout_desc = utils.DefaultPipelineLayoutDescriptor.init(allocator);
            defer layout_desc.deinit();

            try layout_desc.addFunction(compute_module.code.air, .{ .compute = true }, desc.compute.entry_point);
            layout = try PipelineLayout.initDefault(device, layout_desc);
        } else {
            @panic(
                \\Cannot create pipeline descriptor automatically.
                \\Please provide it yourself or write the shader in WGSL.
            );
        }
        errdefer layout.manager.release();

        // Shaders
        const compute_fn = try compute_module.compile(desc.compute.entry_point, .compute, &layout.compute_bindings);
        defer compute_fn.release();
        mtl_desc.setComputeFunction(compute_fn);

        const threadgroup_size = try compute_module.getThreadgroupSize(desc.compute.entry_point);

        // PSO
        var err: ?*ns.Error = undefined;
        const mtl_pipeline = mtl_device.newComputePipelineStateWithDescriptor_options_reflection_error(
            mtl_desc,
            mtl.PipelineOptionNone,
            null,
            &err,
        ) orelse {
            // TODO
            std.log.err("{s}", .{err.?.localizedDescription().UTF8String()});
            return error.NewComputePipelineStateFailed;
        };
        errdefer mtl_pipeline.release();

        // Result
        const pipeline = try allocator.create(ComputePipeline);
        pipeline.* = .{
            .mtl_pipeline = mtl_pipeline,
            .layout = layout,
            .threadgroup_size = threadgroup_size,
        };
        return pipeline;
    }

    pub fn deinit(pipeline: *ComputePipeline) void {
        pipeline.mtl_pipeline.release();
        pipeline.layout.manager.release();
        allocator.destroy(pipeline);
    }

    pub fn getBindGroupLayout(pipeline: *ComputePipeline, group_index: u32) *BindGroupLayout {
        return @ptrCast(pipeline.layout.group_layouts[group_index]);
    }
};
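// RenderPipeline translates the descriptor into a Metal pipeline state:
// vertex buffers are placed at slot_vertex_buffers and up, and depth/stencil
// state is built separately as an MTLDepthStencilState plus depth-bias values
// stored for use when encoding.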
pub const RenderPipeline = struct {
    manager: utils.Manager(RenderPipeline) = .{},
    mtl_pipeline: *mtl.RenderPipelineState,
    layout: *PipelineLayout,
    primitive_type: mtl.PrimitiveType,
    winding: mtl.Winding,
    cull_mode: mtl.CullMode,
    depth_stencil_state: ?*mtl.DepthStencilState,
    depth_bias: f32,
    depth_bias_slope_scale: f32,
    depth_bias_clamp: f32,

    pub fn init(device: *Device, desc: *const sysgpu.RenderPipeline.Descriptor) !*RenderPipeline {
        const pool = objc.autoreleasePoolPush();
        defer objc.autoreleasePoolPop(pool);

        const mtl_device = device.mtl_device;

        var mtl_desc = mtl.RenderPipelineDescriptor.alloc().init();
        defer mtl_desc.release();

        if (desc.label) |label| {
            mtl_desc.setLabel(ns.String.stringWithUTF8String(label));
        }

        const vertex_module: *ShaderModule = @ptrCast(@alignCast(desc.vertex.module));

        // Pipeline Layout
        var layout: *PipelineLayout = undefined;
        if (desc.layout) |layout_raw| {
            layout = @ptrCast(@alignCast(layout_raw));
            layout.manager.reference();
        } else if (vertex_module.code == .air) {
            var layout_desc = utils.DefaultPipelineLayoutDescriptor.init(allocator);
            defer layout_desc.deinit();

            try layout_desc.addFunction(vertex_module.code.air, .{ .vertex = true }, desc.vertex.entry_point);
            if (desc.fragment) |frag| {
                const frag_module: *ShaderModule = @ptrCast(@alignCast(frag.module));
                if (frag_module.code == .air) {
                    try layout_desc.addFunction(frag_module.code.air, .{ .fragment = true }, frag.entry_point);
                } else {
                    @panic(
                        \\Cannot create pipeline descriptor automatically.
                        \\Please provide it yourself or write the shader in WGSL.
                    );
                }
            }
            layout = try PipelineLayout.initDefault(device, layout_desc);
        } else {
            @panic(
                \\Cannot create pipeline descriptor automatically.
                \\Please provide it yourself or write the shader in WGSL.
            );
        }
        errdefer layout.manager.release();

        // vertex
        const vertex_fn = try vertex_module.compile(desc.vertex.entry_point, .vertex, &layout.vertex_bindings);
        defer vertex_fn.release();
        mtl_desc.setVertexFunction(vertex_fn);

        // vertex constants - TODO

        if (desc.vertex.buffer_count > 0) {
            const mtl_vertex_descriptor = mtl.VertexDescriptor.vertexDescriptor();
            const mtl_layouts = mtl_vertex_descriptor.layouts();
            const mtl_attributes = mtl_vertex_descriptor.attributes();

            for (desc.vertex.buffers.?[0..desc.vertex.buffer_count], 0..) |buffer, i| {
                const buffer_index = slot_vertex_buffers + i;
                const mtl_layout = mtl_layouts.objectAtIndexedSubscript(buffer_index);
                mtl_layout.setStride(buffer.array_stride);
                mtl_layout.setStepFunction(conv.metalVertexStepFunction(buffer.step_mode));
                mtl_layout.setStepRate(1);
                for (buffer.attributes.?[0..buffer.attribute_count]) |attr| {
                    const mtl_attribute = mtl_attributes.objectAtIndexedSubscript(attr.shader_location);

                    mtl_attribute.setFormat(conv.metalVertexFormat(attr.format));
                    mtl_attribute.setOffset(attr.offset);
                    mtl_attribute.setBufferIndex(buffer_index);
                }
            }

            mtl_desc.setVertexDescriptor(mtl_vertex_descriptor);
        }

        // primitive
        const primitive_type = conv.metalPrimitiveType(desc.primitive.topology);
        mtl_desc.setInputPrimitiveTopology(conv.metalPrimitiveTopologyClass(desc.primitive.topology));
        // strip_index_format
        const winding = conv.metalWinding(desc.primitive.front_face);
        const cull_mode = conv.metalCullMode(desc.primitive.cull_mode);

        // depth-stencil
        const depth_stencil_state = blk: {
            if (desc.depth_stencil) |ds| {
                var front_desc = mtl.StencilDescriptor.alloc().init();
                defer front_desc.release();

                front_desc.setStencilCompareFunction(conv.metalCompareFunction(ds.stencil_front.compare));
                front_desc.setStencilFailureOperation(conv.metalStencilOperation(ds.stencil_front.fail_op));
                front_desc.setDepthFailureOperation(conv.metalStencilOperation(ds.stencil_front.depth_fail_op));
                front_desc.setDepthStencilPassOperation(conv.metalStencilOperation(ds.stencil_front.pass_op));
                front_desc.setReadMask(ds.stencil_read_mask);
                front_desc.setWriteMask(ds.stencil_write_mask);

                var back_desc = mtl.StencilDescriptor.alloc().init();
                defer back_desc.release();

                back_desc.setStencilCompareFunction(conv.metalCompareFunction(ds.stencil_back.compare));
                back_desc.setStencilFailureOperation(conv.metalStencilOperation(ds.stencil_back.fail_op));
                back_desc.setDepthFailureOperation(conv.metalStencilOperation(ds.stencil_back.depth_fail_op));
                back_desc.setDepthStencilPassOperation(conv.metalStencilOperation(ds.stencil_back.pass_op));
                back_desc.setReadMask(ds.stencil_read_mask);
                back_desc.setWriteMask(ds.stencil_write_mask);

                var depth_stencil_desc = mtl.DepthStencilDescriptor.alloc().init();
                defer depth_stencil_desc.release();

                depth_stencil_desc.setDepthCompareFunction(conv.metalCompareFunction(ds.depth_compare));
                depth_stencil_desc.setDepthWriteEnabled(ds.depth_write_enabled == .true);
                depth_stencil_desc.setFrontFaceStencil(front_desc);
                depth_stencil_desc.setBackFaceStencil(back_desc);
                if (desc.label) |label| {
                    depth_stencil_desc.setLabel(ns.String.stringWithUTF8String(label));
                }

                break :blk mtl_device.newDepthStencilStateWithDescriptor(depth_stencil_desc);
            } else {
                break :blk null;
            }
        };
        errdefer if (depth_stencil_state) |ds| ds.release();

        const depth_bias = if (desc.depth_stencil != null) @as(f32, @floatFromInt(desc.depth_stencil.?.depth_bias)) else 0.0; // TODO - int to float conversion
        const depth_bias_slope_scale = if (desc.depth_stencil != null) desc.depth_stencil.?.depth_bias_slope_scale else 0.0;
        const depth_bias_clamp = if (desc.depth_stencil != null) desc.depth_stencil.?.depth_bias_clamp else 0.0;

        // multisample
        mtl_desc.setSampleCount(desc.multisample.count);
        // mask - TODO
        mtl_desc.setAlphaToCoverageEnabled(desc.multisample.alpha_to_coverage_enabled == .true);

        // fragment
        if (desc.fragment) |frag| {
            const frag_module: *ShaderModule = @ptrCast(@alignCast(frag.module));

            const frag_fn = try frag_module.compile(frag.entry_point, .fragment, &layout.fragment_bindings);
            defer frag_fn.release();
            mtl_desc.setFragmentFunction(frag_fn);
        }

        // attachments
        if (desc.fragment) |frag| {
            for (frag.targets.?[0..frag.target_count], 0..) |target, i| {
                var attach = mtl_desc.colorAttachments().objectAtIndexedSubscript(i);
                attach.setPixelFormat(conv.metalPixelFormat(target.format));
                attach.setWriteMask(conv.metalColorWriteMask(target.write_mask));
                if (target.blend) |blend| {
                    attach.setBlendingEnabled(true);
                    attach.setSourceRGBBlendFactor(conv.metalBlendFactor(blend.color.src_factor, true));
                    attach.setDestinationRGBBlendFactor(conv.metalBlendFactor(blend.color.dst_factor, true));
                    attach.setRgbBlendOperation(conv.metalBlendOperation(blend.color.operation));
                    attach.setSourceAlphaBlendFactor(conv.metalBlendFactor(blend.alpha.src_factor, false));
                    attach.setDestinationAlphaBlendFactor(conv.metalBlendFactor(blend.alpha.dst_factor, false));
                    attach.setAlphaBlendOperation(conv.metalBlendOperation(blend.alpha.operation));
                }
            }
        }
        if (desc.depth_stencil) |ds| {
            const format = conv.metalPixelFormat(ds.format);
            if (isDepthFormat(format))
                mtl_desc.setDepthAttachmentPixelFormat(format);
            if (isStencilFormat(format))
                mtl_desc.setStencilAttachmentPixelFormat(format);
        }

        // PSO
        var err: ?*ns.Error = undefined;
        const mtl_pipeline = mtl_device.newRenderPipelineStateWithDescriptor_error(mtl_desc, &err) orelse {
            // TODO
            std.log.err("{s}", .{err.?.localizedDescription().UTF8String()});
            return error.NewRenderPipelineStateFailed;
        };
        errdefer mtl_pipeline.release();

        // Result
        const pipeline = try allocator.create(RenderPipeline);
        pipeline.* = .{
            .mtl_pipeline = mtl_pipeline,
            .layout = layout,
            .primitive_type = primitive_type,
            .winding = winding,
            .cull_mode = cull_mode,
            .depth_stencil_state = depth_stencil_state,
            .depth_bias = depth_bias,
            .depth_bias_slope_scale = depth_bias_slope_scale,
            .depth_bias_clamp = depth_bias_clamp,
        };
        return pipeline;
    }

    pub fn deinit(pipeline: *RenderPipeline) void {
        pipeline.mtl_pipeline.release();
        if (pipeline.depth_stencil_state) |ds| ds.release();
        pipeline.layout.manager.release();
        allocator.destroy(pipeline);
    }

    pub fn getBindGroupLayout(pipeline: *RenderPipeline, group_index: u32) *BindGroupLayout {
        return @ptrCast(pipeline.layout.group_layouts[group_index]);
    }
};
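// CommandBuffer pairs an MTLCommandBuffer with a ReferenceTracker that keeps
// referenced resources alive until the queue's fence passes the submit value,
// and owns the current upload page used for staged writes.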
mtl_desc.setFragmentFunction(frag_fn); } // attachments if (desc.fragment) |frag| { for (frag.targets.?[0..frag.target_count], 0..) |target, i| { var attach = mtl_desc.colorAttachments().objectAtIndexedSubscript(i); attach.setPixelFormat(conv.metalPixelFormat(target.format)); attach.setWriteMask(conv.metalColorWriteMask(target.write_mask)); if (target.blend) |blend| { attach.setBlendingEnabled(true); attach.setSourceRGBBlendFactor(conv.metalBlendFactor(blend.color.src_factor, true)); attach.setDestinationRGBBlendFactor(conv.metalBlendFactor(blend.color.dst_factor, true)); attach.setRgbBlendOperation(conv.metalBlendOperation(blend.color.operation)); attach.setSourceAlphaBlendFactor(conv.metalBlendFactor(blend.alpha.src_factor, false)); attach.setDestinationAlphaBlendFactor(conv.metalBlendFactor(blend.alpha.dst_factor, false)); attach.setAlphaBlendOperation(conv.metalBlendOperation(blend.alpha.operation)); } } } if (desc.depth_stencil) |ds| { const format = conv.metalPixelFormat(ds.format); if (isDepthFormat(format)) mtl_desc.setDepthAttachmentPixelFormat(format); if (isStencilFormat(format)) mtl_desc.setStencilAttachmentPixelFormat(format); } // PSO var err: ?*ns.Error = undefined; const mtl_pipeline = mtl_device.newRenderPipelineStateWithDescriptor_error(mtl_desc, &err) orelse { // TODO std.log.err("{s}", .{err.?.localizedDescription().UTF8String()}); return error.NewRenderPipelineStateFailed; }; errdefer mtl_pipeline.release(); // Result const pipeline = try allocator.create(RenderPipeline); pipeline.* = .{ .mtl_pipeline = mtl_pipeline, .layout = layout, .primitive_type = primitive_type, .winding = winding, .cull_mode = cull_mode, .depth_stencil_state = depth_stencil_state, .depth_bias = depth_bias, .depth_bias_slope_scale = depth_bias_slope_scale, .depth_bias_clamp = depth_bias_clamp, }; return pipeline; } pub fn deinit(pipeline: *RenderPipeline) void { pipeline.mtl_pipeline.release(); if (pipeline.depth_stencil_state) |ds| ds.release(); pipeline.layout.manager.release(); allocator.destroy(pipeline); } pub fn getBindGroupLayout(pipeline: *RenderPipeline, group_index: u32) *BindGroupLayout { return @ptrCast(pipeline.layout.group_layouts[group_index]); } }; pub const CommandBuffer = struct { pub const StreamingResult = struct { buffer: *mtl.Buffer, map: [*]u8, offset: u32, }; manager: utils.Manager(CommandBuffer) = .{}, device: *Device, mtl_command_buffer: *mtl.CommandBuffer, reference_tracker: *ReferenceTracker, upload_buffer: ?*mtl.Buffer = null, upload_map: ?[*]u8 = null, next_offset: u32 = upload_page_size, pub fn init(device: *Device) !*CommandBuffer { const pool = objc.autoreleasePoolPush(); defer objc.autoreleasePoolPop(pool); const queue = try device.getQueue(); const mtl_command_buffer = queue.command_queue.commandBuffer() orelse { return error.NewCommandBufferFailed; }; errdefer mtl_command_buffer.release(); const reference_tracker = try ReferenceTracker.init(device); errdefer reference_tracker.deinit(); const command_buffer = try allocator.create(CommandBuffer); command_buffer.* = .{ .device = device, .mtl_command_buffer = mtl_command_buffer.retain(), .reference_tracker = reference_tracker, }; return command_buffer; } pub fn deinit(command_buffer: *CommandBuffer) void { // reference_tracker lifetime is managed externally command_buffer.mtl_command_buffer.release(); allocator.destroy(command_buffer); } pub fn upload(command_buffer: *CommandBuffer, size: u64) !StreamingResult { if (command_buffer.next_offset + size > upload_page_size) { const streaming_manager = 
&command_buffer.device.streaming_manager;
        std.debug.assert(size <= upload_page_size); // TODO - support large uploads

        const mtl_buffer = try streaming_manager.acquire();
        try command_buffer.reference_tracker.referenceUploadPage(mtl_buffer);
        command_buffer.upload_buffer = mtl_buffer;
        command_buffer.upload_map = @ptrCast(mtl_buffer.contents());
        command_buffer.next_offset = 0;
    }

    const offset = command_buffer.next_offset;
    command_buffer.next_offset = @intCast((offset + size + 255) / 256 * 256);
    return StreamingResult{
        .buffer = command_buffer.upload_buffer.?,
        .map = command_buffer.upload_map.? + offset,
        .offset = offset,
    };
} };

pub const ReferenceTracker = struct {
    device: *Device,
    fence_value: u64 = 0,
    buffers: std.ArrayListUnmanaged(*Buffer) = .{},
    bind_groups: std.ArrayListUnmanaged(*BindGroup) = .{},
    upload_pages: std.ArrayListUnmanaged(*mtl.Buffer) = .{},

    pub fn init(device: *Device) !*ReferenceTracker {
        const tracker = try allocator.create(ReferenceTracker);
        tracker.* = .{
            .device = device,
        };
        return tracker;
    }

    pub fn deinit(tracker: *ReferenceTracker) void {
        const device = tracker.device;

        for (tracker.buffers.items) |buffer| {
            buffer.gpu_count -= 1;
            buffer.manager.release();
        }

        for (tracker.bind_groups.items) |group| {
            for (group.entries) |entry| {
                switch (entry.kind) {
                    .buffer => entry.buffer.?.gpu_count -= 1,
                    else => {},
                }
            }
            group.manager.release();
        }

        for (tracker.upload_pages.items) |buffer| {
            device.streaming_manager.release(buffer);
        }

        tracker.buffers.deinit(allocator);
        tracker.bind_groups.deinit(allocator);
        tracker.upload_pages.deinit(allocator);
        allocator.destroy(tracker);
    }

    pub fn referenceBuffer(tracker: *ReferenceTracker, buffer: *Buffer) !void {
        buffer.manager.reference();
        try tracker.buffers.append(allocator, buffer);
    }

    pub fn referenceBindGroup(tracker: *ReferenceTracker, group: *BindGroup) !void {
        group.manager.reference();
        try tracker.bind_groups.append(allocator, group);
    }

    pub fn referenceUploadPage(tracker: *ReferenceTracker, upload_page: *mtl.Buffer) !void {
        try tracker.upload_pages.append(allocator, upload_page);
    }

    pub fn submit(tracker: *ReferenceTracker, queue: *Queue) !void {
        tracker.fence_value = queue.fence_value;

        for (tracker.buffers.items) |buffer| {
            buffer.gpu_count += 1;
        }

        for (tracker.bind_groups.items) |group| {
            for (group.entries) |entry| {
                switch (entry.kind) {
                    .buffer => entry.buffer.?.gpu_count += 1,
                    else => {},
                }
            }
        }

        try tracker.device.reference_trackers.append(allocator, tracker);
    }
};

pub const CommandEncoder = struct {
    manager: utils.Manager(CommandEncoder) = .{},
    device: *Device,
    command_buffer: *CommandBuffer,
    reference_tracker: *ReferenceTracker,
    mtl_encoder: ?*mtl.BlitCommandEncoder = null,

    pub fn init(device: *Device, desc: ?*const sysgpu.CommandEncoder.Descriptor) !*CommandEncoder {
        // TODO
        _ = desc;

        const command_buffer = try CommandBuffer.init(device);

        const encoder = try allocator.create(CommandEncoder);
        encoder.* = .{
            .device = device,
            .command_buffer = command_buffer,
            .reference_tracker = command_buffer.reference_tracker,
        };
        return encoder;
    }

    pub fn deinit(encoder: *CommandEncoder) void {
        // Release the blit encoder if one is still open; the remaining
        // cleanup below must always run as well.
        if (encoder.mtl_encoder) |mtl_encoder| mtl_encoder.release();
        encoder.command_buffer.manager.release();
        allocator.destroy(encoder);
    }

    pub fn beginComputePass(encoder: *CommandEncoder, desc: *const sysgpu.ComputePassDescriptor) !*ComputePassEncoder {
        encoder.endBlitEncoder();
        return ComputePassEncoder.init(encoder, desc);
    }

    pub fn beginRenderPass(encoder: *CommandEncoder, desc: *const sysgpu.RenderPassDescriptor) !*RenderPassEncoder {
encoder.endBlitEncoder(); return RenderPassEncoder.init(encoder, desc); } pub fn copyBufferToBuffer( encoder: *CommandEncoder, source: *Buffer, source_offset: u64, destination: *Buffer, destination_offset: u64, size: u64, ) !void { const mtl_encoder = try encoder.getBlitEncoder(); try encoder.reference_tracker.referenceBuffer(source); try encoder.reference_tracker.referenceBuffer(destination); mtl_encoder.copyFromBuffer_sourceOffset_toBuffer_destinationOffset_size( source.mtl_buffer, source_offset, destination.mtl_buffer, destination_offset, size, ); } pub fn copyBufferToTexture( encoder: *CommandEncoder, source: *const sysgpu.ImageCopyBuffer, destination: *const sysgpu.ImageCopyTexture, copy_size: *const sysgpu.Extent3D, ) !void { const mtl_encoder = try encoder.getBlitEncoder(); const source_buffer: *Buffer = @ptrCast(@alignCast(source.buffer)); const destination_texture: *Texture = @ptrCast(@alignCast(destination.texture)); try encoder.reference_tracker.referenceBuffer(source_buffer); // TODO - test 3D/array issues mtl_encoder.copyFromBuffer_sourceOffset_sourceBytesPerRow_sourceBytesPerImage_sourceSize_toTexture_destinationSlice_destinationLevel_destinationOrigin( source_buffer.mtl_buffer, source.layout.offset, source.layout.bytes_per_row, source.layout.bytes_per_row * source.layout.rows_per_image, mtl.Size.init(copy_size.width, copy_size.height, copy_size.depth_or_array_layers), destination_texture.mtl_texture, destination.origin.z, destination.mip_level, mtl.Origin.init(destination.origin.x, destination.origin.y, destination.origin.z), ); } pub fn copyTextureToTexture( encoder: *CommandEncoder, source: *const sysgpu.ImageCopyTexture, destination: *const sysgpu.ImageCopyTexture, copy_size: *const sysgpu.Extent3D, ) !void { const mtl_encoder = try encoder.getBlitEncoder(); const source_texture: *Texture = @ptrCast(@alignCast(source.texture)); const destination_texture: *Texture = @ptrCast(@alignCast(destination.texture)); // TODO - test 3D/array issues mtl_encoder.copyFromTexture_sourceSlice_sourceLevel_sourceOrigin_sourceSize_toTexture_destinationSlice_destinationLevel_destinationOrigin( source_texture.mtl_texture, source.origin.z, source.mip_level, mtl.Origin.init(source.origin.x, source.origin.y, source.origin.z), mtl.Size.init(copy_size.width, copy_size.height, copy_size.depth_or_array_layers), destination_texture.mtl_texture, destination.origin.z, destination.mip_level, mtl.Origin.init(destination.origin.x, destination.origin.y, destination.origin.z), ); } pub fn finish(encoder: *CommandEncoder, desc: *const sysgpu.CommandBuffer.Descriptor) !*CommandBuffer { const pool = objc.autoreleasePoolPush(); defer objc.autoreleasePoolPop(pool); const command_buffer = encoder.command_buffer; const mtl_command_buffer = command_buffer.mtl_command_buffer; encoder.endBlitEncoder(); if (desc.label) |label| { mtl_command_buffer.setLabel(ns.String.stringWithUTF8String(label)); } return command_buffer; } pub fn writeBuffer(encoder: *CommandEncoder, buffer: *Buffer, offset: u64, data: [*]const u8, size: u64) !void { const mtl_encoder = try encoder.getBlitEncoder(); const stream = try encoder.command_buffer.upload(size); @memcpy(stream.map[0..size], data[0..size]); try encoder.reference_tracker.referenceBuffer(buffer); mtl_encoder.copyFromBuffer_sourceOffset_toBuffer_destinationOffset_size( stream.buffer, stream.offset, buffer.mtl_buffer, offset, size, ); } // Internal pub fn writeTexture( encoder: *CommandEncoder, destination: *const sysgpu.ImageCopyTexture, data: [*]const u8, data_size: usize, 
data_layout: *const sysgpu.Texture.DataLayout, write_size: *const sysgpu.Extent3D, ) !void { const mtl_encoder = try encoder.getBlitEncoder(); const texture: *Texture = @ptrCast(@alignCast(destination.texture)); const stream = try encoder.command_buffer.upload(data_size); @memcpy(stream.map[0..data_size], data[0..data_size]); mtl_encoder.copyFromBuffer_sourceOffset_sourceBytesPerRow_sourceBytesPerImage_sourceSize_toTexture_destinationSlice_destinationLevel_destinationOrigin( stream.buffer, stream.offset + data_layout.offset, data_layout.bytes_per_row, data_layout.bytes_per_row * data_layout.rows_per_image, mtl.Size.init(write_size.width, write_size.height, write_size.depth_or_array_layers), texture.mtl_texture, destination.origin.z, destination.mip_level, mtl.Origin.init(destination.origin.x, destination.origin.y, destination.origin.z), ); } // Internal fn getBlitEncoder(encoder: *CommandEncoder) !*mtl.BlitCommandEncoder { if (encoder.mtl_encoder) |mtl_encoder| return mtl_encoder; const pool = objc.autoreleasePoolPush(); defer objc.autoreleasePoolPop(pool); const mtl_command_buffer = encoder.command_buffer.mtl_command_buffer; const mtl_desc = mtl.BlitPassDescriptor.new(); defer mtl_desc.release(); const mtl_encoder = mtl_command_buffer.blitCommandEncoderWithDescriptor(mtl_desc) orelse { return error.BlitCommandEncoderFailed; }; encoder.mtl_encoder = mtl_encoder.retain(); return mtl_encoder; } fn endBlitEncoder(encoder: *CommandEncoder) void { if (encoder.mtl_encoder) |mtl_encoder| { mtl_encoder.endEncoding(); mtl_encoder.release(); encoder.mtl_encoder = null; } } }; pub const ComputePassEncoder = struct { manager: utils.Manager(ComputePassEncoder) = .{}, mtl_encoder: *mtl.ComputeCommandEncoder, lengths_buffer: LengthsBuffer, reference_tracker: *ReferenceTracker, pipeline: ?*ComputePipeline = null, threadgroup_size: mtl.Size = undefined, bindings: *BindingTable = undefined, pub fn init(cmd_encoder: *CommandEncoder, desc: *const sysgpu.ComputePassDescriptor) !*ComputePassEncoder { const pool = objc.autoreleasePoolPush(); defer objc.autoreleasePoolPop(pool); const mtl_command_buffer = cmd_encoder.command_buffer.mtl_command_buffer; var mtl_desc = mtl.ComputePassDescriptor.new(); defer mtl_desc.release(); const mtl_encoder = mtl_command_buffer.computeCommandEncoderWithDescriptor(mtl_desc) orelse { return error.ComputeCommandEncoderFailed; }; errdefer mtl_encoder.release(); if (desc.label) |label| { mtl_encoder.setLabel(ns.String.stringWithUTF8String(label)); } const lengths_buffer = try LengthsBuffer.init(cmd_encoder.device); mtl_encoder.setBuffer_offset_atIndex(lengths_buffer.mtl_buffer, 0, slot_buffer_lengths); const encoder = try allocator.create(ComputePassEncoder); encoder.* = .{ .mtl_encoder = mtl_encoder.retain(), .lengths_buffer = lengths_buffer, .reference_tracker = cmd_encoder.reference_tracker, }; return encoder; } pub fn deinit(encoder: *ComputePassEncoder) void { if (encoder.pipeline) |pipeline| pipeline.manager.release(); encoder.lengths_buffer.deinit(); encoder.mtl_encoder.release(); allocator.destroy(encoder); } pub fn dispatchWorkgroups(encoder: *ComputePassEncoder, workgroup_count_x: u32, workgroup_count_y: u32, workgroup_count_z: u32) !void { const mtl_encoder = encoder.mtl_encoder; mtl_encoder.dispatchThreadgroups_threadsPerThreadgroup( mtl.Size.init(workgroup_count_x, workgroup_count_y, workgroup_count_z), encoder.threadgroup_size, ); } pub fn end(encoder: *ComputePassEncoder) void { const mtl_encoder = encoder.mtl_encoder; mtl_encoder.endEncoding(); } pub fn 
setBindGroup(encoder: *ComputePassEncoder, group_index: u32, group: *BindGroup, dynamic_offset_count: usize, dynamic_offsets: ?[*]const u32) !void { _ = dynamic_offset_count; const mtl_encoder = encoder.mtl_encoder; try encoder.reference_tracker.referenceBindGroup(group); for (group.entries) |entry| { const key = BindingPoint{ .group = group_index, .binding = entry.binding }; if (encoder.bindings.get(key)) |slot| { switch (entry.kind) { .buffer => { encoder.lengths_buffer.set(entry.binding, entry.size); const offset = entry.offset + if (entry.dynamic_index) |i| dynamic_offsets.?[i] else 0; mtl_encoder.setBuffer_offset_atIndex(entry.buffer.?.mtl_buffer, offset, slot); }, .sampler => mtl_encoder.setSamplerState_atIndex(entry.sampler, slot), .texture => mtl_encoder.setTexture_atIndex(entry.texture, slot), } } } encoder.lengths_buffer.apply_compute(mtl_encoder); } pub fn setPipeline(encoder: *ComputePassEncoder, pipeline: *ComputePipeline) !void { const mtl_encoder = encoder.mtl_encoder; mtl_encoder.setComputePipelineState(pipeline.mtl_pipeline); if (encoder.pipeline) |old_pipeline| old_pipeline.manager.release(); encoder.pipeline = pipeline; encoder.bindings = &pipeline.layout.compute_bindings; encoder.threadgroup_size = pipeline.threadgroup_size; pipeline.manager.reference(); } }; pub const RenderPassEncoder = struct { manager: utils.Manager(RenderPassEncoder) = .{}, mtl_encoder: *mtl.RenderCommandEncoder, vertex_lengths_buffer: LengthsBuffer, fragment_lengths_buffer: LengthsBuffer, reference_tracker: *ReferenceTracker, pipeline: ?*RenderPipeline = null, vertex_bindings: *BindingTable = undefined, fragment_bindings: *BindingTable = undefined, primitive_type: mtl.PrimitiveType = undefined, index_type: mtl.IndexType = undefined, index_element_size: usize = undefined, index_buffer: *mtl.Buffer = undefined, index_buffer_offset: ns.UInteger = undefined, pub fn init(cmd_encoder: *CommandEncoder, desc: *const sysgpu.RenderPassDescriptor) !*RenderPassEncoder { const pool = objc.autoreleasePoolPush(); defer objc.autoreleasePoolPop(pool); const mtl_command_buffer = cmd_encoder.command_buffer.mtl_command_buffer; var mtl_desc = mtl.RenderPassDescriptor.new(); defer mtl_desc.release(); // color for (desc.color_attachments.?[0..desc.color_attachment_count], 0..) 
|attach, i| { var mtl_attach = mtl_desc.colorAttachments().objectAtIndexedSubscript(i); if (attach.view) |view| { const mtl_view: *TextureView = @ptrCast(@alignCast(view)); mtl_attach.setTexture(mtl_view.mtl_texture); } if (attach.resolve_target) |view| { const mtl_view: *TextureView = @ptrCast(@alignCast(view)); mtl_attach.setResolveTexture(mtl_view.mtl_texture); } mtl_attach.setLoadAction(conv.metalLoadAction(attach.load_op)); mtl_attach.setStoreAction(conv.metalStoreAction(attach.store_op, attach.resolve_target != null)); if (attach.load_op == .clear) { mtl_attach.setClearColor(mtl.ClearColor.init( @floatCast(attach.clear_value.r), @floatCast(attach.clear_value.g), @floatCast(attach.clear_value.b), @floatCast(attach.clear_value.a), )); } } // depth-stencil if (desc.depth_stencil_attachment) |attach| { const mtl_view: *TextureView = @ptrCast(@alignCast(attach.view)); const format = mtl_view.mtl_texture.pixelFormat(); if (isDepthFormat(format)) { var mtl_attach = mtl_desc.depthAttachment(); mtl_attach.setTexture(mtl_view.mtl_texture); mtl_attach.setLoadAction(conv.metalLoadAction(attach.depth_load_op)); mtl_attach.setStoreAction(conv.metalStoreAction(attach.depth_store_op, false)); if (attach.depth_load_op == .clear) { mtl_attach.setClearDepth(attach.depth_clear_value); } } if (isStencilFormat(format)) { var mtl_attach = mtl_desc.stencilAttachment(); mtl_attach.setTexture(mtl_view.mtl_texture); mtl_attach.setLoadAction(conv.metalLoadAction(attach.stencil_load_op)); mtl_attach.setStoreAction(conv.metalStoreAction(attach.stencil_store_op, false)); if (attach.stencil_load_op == .clear) { mtl_attach.setClearStencil(attach.stencil_clear_value); } } } // occlusion_query - TODO // timestamps - TODO const mtl_encoder = mtl_command_buffer.renderCommandEncoderWithDescriptor(mtl_desc) orelse { return error.RenderCommandEncoderFailed; }; errdefer mtl_encoder.release(); if (desc.label) |label| { mtl_encoder.setLabel(ns.String.stringWithUTF8String(label)); } const vertex_lengths_buffer = try LengthsBuffer.init(cmd_encoder.device); const fragment_lengths_buffer = try LengthsBuffer.init(cmd_encoder.device); mtl_encoder.setVertexBuffer_offset_atIndex(vertex_lengths_buffer.mtl_buffer, 0, slot_buffer_lengths); mtl_encoder.setFragmentBuffer_offset_atIndex(fragment_lengths_buffer.mtl_buffer, 0, slot_buffer_lengths); const encoder = try allocator.create(RenderPassEncoder); encoder.* = .{ .mtl_encoder = mtl_encoder.retain(), .vertex_lengths_buffer = vertex_lengths_buffer, .fragment_lengths_buffer = fragment_lengths_buffer, .reference_tracker = cmd_encoder.reference_tracker, }; return encoder; } pub fn deinit(encoder: *RenderPassEncoder) void { if (encoder.pipeline) |pipeline| pipeline.manager.release(); encoder.vertex_lengths_buffer.deinit(); encoder.fragment_lengths_buffer.deinit(); encoder.mtl_encoder.release(); allocator.destroy(encoder); } pub fn draw( encoder: *RenderPassEncoder, vertex_count: u32, instance_count: u32, first_vertex: u32, first_instance: u32, ) !void { const mtl_encoder = encoder.mtl_encoder; mtl_encoder.drawPrimitives_vertexStart_vertexCount_instanceCount_baseInstance( encoder.primitive_type, first_vertex, vertex_count, instance_count, first_instance, ); } pub fn drawIndexed( encoder: *RenderPassEncoder, index_count: u32, instance_count: u32, first_index: u32, base_vertex: i32, first_instance: u32, ) !void { const mtl_encoder = encoder.mtl_encoder; mtl_encoder.drawIndexedPrimitives_indexCount_indexType_indexBuffer_indexBufferOffset_instanceCount_baseVertex_baseInstance( 
encoder.primitive_type,
            index_count,
            encoder.index_type,
            encoder.index_buffer,
            encoder.index_buffer_offset + first_index * encoder.index_element_size,
            instance_count,
            base_vertex,
            first_instance,
        );
    }

    pub fn end(encoder: *RenderPassEncoder) !void {
        const mtl_encoder = encoder.mtl_encoder;
        mtl_encoder.endEncoding();
    }

    pub fn setBindGroup(encoder: *RenderPassEncoder, group_index: u32, group: *BindGroup, dynamic_offset_count: usize, dynamic_offsets: ?[*]const u32) !void {
        _ = dynamic_offset_count;
        const mtl_encoder = encoder.mtl_encoder;

        try encoder.reference_tracker.referenceBindGroup(group);

        for (group.entries) |entry| {
            const key = BindingPoint{ .group = group_index, .binding = entry.binding };

            switch (entry.kind) {
                .buffer => {
                    const offset = entry.offset + if (entry.dynamic_index) |i| dynamic_offsets.?[i] else 0;
                    if (entry.visibility.vertex) {
                        if (encoder.vertex_bindings.get(key)) |slot| {
                            encoder.vertex_lengths_buffer.set(entry.binding, entry.size);
                            mtl_encoder.setVertexBuffer_offset_atIndex(entry.buffer.?.mtl_buffer, offset, slot);
                        }
                    }
                    if (entry.visibility.fragment) {
                        if (encoder.fragment_bindings.get(key)) |slot| {
                            // Record the length only for bindings the fragment
                            // stage actually uses, mirroring the vertex path.
                            encoder.fragment_lengths_buffer.set(entry.binding, entry.size);
                            mtl_encoder.setFragmentBuffer_offset_atIndex(entry.buffer.?.mtl_buffer, offset, slot);
                        }
                    }
                },
                .sampler => {
                    if (entry.visibility.vertex) {
                        if (encoder.vertex_bindings.get(key)) |slot| {
                            mtl_encoder.setVertexSamplerState_atIndex(entry.sampler, slot);
                        }
                    }
                    if (entry.visibility.fragment) {
                        if (encoder.fragment_bindings.get(key)) |slot| {
                            mtl_encoder.setFragmentSamplerState_atIndex(entry.sampler, slot);
                        }
                    }
                },
                .texture => {
                    if (entry.visibility.vertex) {
                        if (encoder.vertex_bindings.get(key)) |slot| {
                            mtl_encoder.setVertexTexture_atIndex(entry.texture, slot);
                        }
                    }
                    if (entry.visibility.fragment) {
                        if (encoder.fragment_bindings.get(key)) |slot| {
                            mtl_encoder.setFragmentTexture_atIndex(entry.texture, slot);
                        }
                    }
                },
            }
        }

        encoder.vertex_lengths_buffer.apply_vertex(mtl_encoder);
        encoder.fragment_lengths_buffer.apply_fragment(mtl_encoder);
    }

    pub fn setIndexBuffer(encoder: *RenderPassEncoder, buffer: *Buffer, format: sysgpu.IndexFormat, offset: u64, size: u64) !void {
        try encoder.reference_tracker.referenceBuffer(buffer);
        _ = size;

        encoder.index_type = conv.metalIndexType(format);
        encoder.index_element_size = conv.metalIndexElementSize(format);
        encoder.index_buffer = buffer.mtl_buffer;
        encoder.index_buffer_offset = offset;
    }

    pub fn setPipeline(encoder: *RenderPassEncoder, pipeline: *RenderPipeline) !void {
        const mtl_encoder = encoder.mtl_encoder;

        mtl_encoder.setRenderPipelineState(pipeline.mtl_pipeline);
        mtl_encoder.setFrontFacingWinding(pipeline.winding);
        mtl_encoder.setCullMode(pipeline.cull_mode);
        if (pipeline.depth_stencil_state) |state| {
            mtl_encoder.setDepthStencilState(state);
            mtl_encoder.setDepthBias_slopeScale_clamp(
                pipeline.depth_bias,
                pipeline.depth_bias_slope_scale,
                pipeline.depth_bias_clamp,
            );
        }

        if (encoder.pipeline) |old_pipeline| old_pipeline.manager.release();
        encoder.pipeline = pipeline;
        encoder.vertex_bindings = &pipeline.layout.vertex_bindings;
        encoder.fragment_bindings = &pipeline.layout.fragment_bindings;
        encoder.primitive_type = pipeline.primitive_type;
        pipeline.manager.reference();
    }

    pub fn setScissorRect(encoder: *RenderPassEncoder, x: u32, y: u32, width: u32, height: u32) !void {
        const mtl_encoder = encoder.mtl_encoder;
        const scissor_rect = mtl.ScissorRect.init(x, y, width, height);
        mtl_encoder.setScissorRect(scissor_rect);
    }

    pub fn setVertexBuffer(encoder: *RenderPassEncoder, slot: u32, buffer:
*Buffer, offset: u64, size: u64) !void { _ = size; const mtl_encoder = encoder.mtl_encoder; try encoder.reference_tracker.referenceBuffer(buffer); mtl_encoder.setVertexBuffer_offset_atIndex(buffer.mtl_buffer, offset, slot_vertex_buffers + slot); } pub fn setViewport( encoder: *RenderPassEncoder, x: f32, y: f32, width: f32, height: f32, min_depth: f32, max_depth: f32, ) !void { const mtl_encoder = encoder.mtl_encoder; const viewport = mtl.Viewport.init(x, y, width, height, min_depth, max_depth); mtl_encoder.setViewport(viewport); } }; pub const Queue = struct { const CompletedContext = extern struct { queue: *Queue, fence_value: u64, }; manager: utils.Manager(Queue) = .{}, device: *Device, command_queue: *mtl.CommandQueue, fence_value: u64 = 0, completed_value: std.atomic.Value(u64) = std.atomic.Value(u64).init(0), command_encoder: ?*CommandEncoder = null, pub fn init(device: *Device) !*Queue { const mtl_device = device.mtl_device; const command_queue = mtl_device.newCommandQueue() orelse { return error.NewCommandQueueFailed; }; errdefer command_queue.release(); const queue = try allocator.create(Queue); queue.* = .{ .device = device, .command_queue = command_queue, }; return queue; } pub fn deinit(queue: *Queue) void { if (queue.command_encoder) |command_encoder| command_encoder.manager.release(); queue.command_queue.release(); allocator.destroy(queue); } pub fn submit(queue: *Queue, commands: []const *CommandBuffer) !void { if (queue.command_encoder) |command_encoder| { const command_buffer = try command_encoder.finish(&.{}); command_buffer.manager.reference(); // handled in main.zig defer command_buffer.manager.release(); try queue.submitCommandBuffer(command_buffer); command_encoder.manager.release(); queue.command_encoder = null; } for (commands) |command_buffer| { try queue.submitCommandBuffer(command_buffer); } } pub fn writeBuffer(queue: *Queue, buffer: *Buffer, offset: u64, data: [*]const u8, size: u64) !void { const encoder = try queue.getCommandEncoder(); try encoder.writeBuffer(buffer, offset, data, size); } pub fn writeTexture( queue: *Queue, destination: *const sysgpu.ImageCopyTexture, data: [*]const u8, data_size: usize, data_layout: *const sysgpu.Texture.DataLayout, write_size: *const sysgpu.Extent3D, ) !void { const encoder = try queue.getCommandEncoder(); try encoder.writeTexture(destination, data, data_size, data_layout, write_size); } // Internal pub fn waitUntil(queue: *Queue, fence_value: u64) void { // TODO - avoid spin loop while (queue.completed_value.load(.Acquire) < fence_value) {} } // Private fn getCommandEncoder(queue: *Queue) !*CommandEncoder { if (queue.command_encoder) |command_encoder| return command_encoder; const command_encoder = try CommandEncoder.init(queue.device, &.{}); queue.command_encoder = command_encoder; return command_encoder; } fn submitCommandBuffer(queue: *Queue, command_buffer: *CommandBuffer) !void { const mtl_command_buffer = command_buffer.mtl_command_buffer; queue.fence_value += 1; try command_buffer.reference_tracker.submit(queue); const ctx = CompletedContext{ .queue = queue, .fence_value = queue.fence_value, }; mtl_command_buffer.addCompletedHandler(ctx, completedHandler); mtl_command_buffer.commit(); } fn completedHandler(ctx: CompletedContext, mtl_command_buffer: *mtl.CommandBuffer) void { _ = mtl_command_buffer; ctx.queue.completed_value.store(ctx.fence_value, .Release); } }; test "reference declarations" { std.testing.refAllDeclsRecursive(@This()); }
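// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original file): `CommandBuffer.upload`
// above is a bump allocator over streaming pages that rounds each
// suballocation up to a 256-byte boundary, so successive writes always start
// at an aligned offset and a fresh page is acquired when a request would not
// fit. The standalone model below mirrors that arithmetic; `demo_page_size`
// and `bumpAlloc` are hypothetical names used only for this demonstration.
const upload_alignment_demo = struct {
    const demo_page_size: u32 = 128 * 1024;

    fn bumpAlloc(cursor: *u32, size: u32) u32 {
        // A fresh page is acquired when the request would overflow,
        // as in `CommandBuffer.upload`; requests must fit in one page.
        if (cursor.* + size > demo_page_size) cursor.* = 0;
        const offset = cursor.*;
        // Identical rounding to `(offset + size + 255) / 256 * 256` above.
        cursor.* = (offset + size + 255) / 256 * 256;
        return offset;
    }

    test "uploads are packed at 256-byte granularity" {
        // Starts exhausted, like `next_offset = upload_page_size` above.
        var cursor: u32 = demo_page_size;
        try std.testing.expectEqual(@as(u32, 0), bumpAlloc(&cursor, 100));
        try std.testing.expectEqual(@as(u32, 256), bumpAlloc(&cursor, 1));
        try std.testing.expectEqual(@as(u32, 512), cursor);
    }
};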
0
repos/mach-sysgpu
repos/mach-sysgpu/src/gpu_allocator.zig
const std = @import("std");

pub const Error = error{
    OutOfMemory,
    FreeingZeroSizeAllocation,
    InvalidAllocation,
    NoCompatibleMemoryFound,
    Other,
};

pub const Allocation = struct {
    offset: u64,
    chunk: u64,
};

pub const Allocator = union(enum) {
    offset_allocator: OffsetAllocator,
    dedicated_block_allocator: DedicatedBlockAllocator,

    pub fn initOffsetAllocator(
        allocator: std.mem.Allocator,
        size: u32,
        max_allocations: ?u32,
    ) std.mem.Allocator.Error!Allocator {
        return .{
            .offset_allocator = try OffsetAllocator.init(
                allocator,
                size,
                max_allocations,
            ),
        };
    }

    pub fn initDedicatedBlockAllocator(
        size: u64,
    ) std.mem.Allocator.Error!Allocator {
        return .{
            .dedicated_block_allocator = try DedicatedBlockAllocator.init(
                size,
            ),
        };
    }

    pub fn deinit(self: *Allocator) void {
        switch (self.*) {
            inline else => |*allocator| allocator.deinit(),
        }
    }

    pub fn reset(self: *Allocator) std.mem.Allocator.Error!void {
        switch (self.*) {
            inline else => |*allocator| try allocator.reset(),
        }
    }

    pub fn allocate(
        self: *Allocator,
        size: u32,
    ) Error!Allocation {
        return switch (self.*) {
            inline else => |*allocator| allocator.allocate(size),
        };
    }

    pub fn free(self: *Allocator, allocation: Allocation) Error!void {
        return switch (self.*) {
            inline else => |*allocator| allocator.free(allocation),
        };
    }

    pub fn getSize(self: *const Allocator) u64 {
        return switch (self.*) {
            inline else => |allocator| allocator.getSize(),
        };
    }

    pub fn getAllocated(self: *const Allocator) u64 {
        return switch (self.*) {
            inline else => |allocator| allocator.getAllocated(),
        };
    }

    pub fn availableMemory(self: *const Allocator) u64 {
        return self.getSize() - self.getAllocated();
    }

    pub fn isEmpty(self: *const Allocator) bool {
        return self.getAllocated() == 0;
    }
};

pub const DedicatedBlockAllocator = struct {
    size: u64,
    allocated: u64,

    pub fn init(
        size: u64,
    ) std.mem.Allocator.Error!DedicatedBlockAllocator {
        return .{
            .size = size,
            .allocated = 0,
        };
    }

    pub fn deinit(self: *DedicatedBlockAllocator) void {
        _ = self;
    }

    pub fn reset(self: *DedicatedBlockAllocator) std.mem.Allocator.Error!void {
        // The whole block becomes available again; required so that
        // Allocator.reset's inline else covers this variant.
        self.allocated = 0;
    }

    pub fn allocate(
        self: *DedicatedBlockAllocator,
        size: u32,
    ) Error!Allocation {
        if (self.allocated != 0) {
            return Error.OutOfMemory;
        }
        if (self.size != size) {
            return Error.OutOfMemory;
        }

        self.allocated = size;
        return .{
            .offset = 0,
            .chunk = 1,
        };
    }

    pub fn free(self: *DedicatedBlockAllocator, allocation: Allocation) Error!void {
        _ = allocation;
        self.allocated = 0;
    }

    pub fn getSize(self: *const DedicatedBlockAllocator) u64 {
        return self.size;
    }

    pub fn getAllocated(self: *const DedicatedBlockAllocator) u64 {
        return self.allocated;
    }
};

// OffsetAllocator from https://github.com/sebbbi/OffsetAllocator
// rewritten in zig
pub const OffsetAllocator = struct {
    const NodeIndex = u32;

    const Node = struct {
        data_offset: u32 = 0,
        data_size: u32 = 0,
        bin_list_prev: ?NodeIndex = null,
        bin_list_next: ?NodeIndex = null,
        neighbour_prev: ?NodeIndex = null,
        neighbour_next: ?NodeIndex = null,
        used: bool = false,
    };

    const num_top_bins: u32 = 32;
    const bins_per_leaf: u32 = 8;
    const top_bins_index_shift: u32 = 3;
    const lead_bins_index_mask: u32 = 0x7;
    const num_leaf_bins: u32 = num_top_bins * bins_per_leaf;

    allocator: std.mem.Allocator,
    size: u32,
    max_allocations: u32,
    free_storage: u32 = 0,
    used_bins_top: u32 = 0,
    used_bins: [num_top_bins]u8 = undefined,
    bin_indices: [num_leaf_bins]?NodeIndex = undefined,
    nodes: ?[]Node,
    free_nodes: ?[]NodeIndex,
    free_offset: u32 = 0,

    const SmallFloat = struct {
        const mantissa_bits: u32 = 3;
        const mantissa_value: u32 = 1 << mantissa_bits;
        const mantissa_mask: u32 = mantissa_value - 1;

        pub fn toFloatRoundUp(size: u32) u32 {
            var exp: u32 = 0;
var mantissa: u32 = 0; if (size < mantissa_value) { mantissa = size; } else { const leading_zeros = @clz(size); const highestSetBit = 31 - leading_zeros; const mantissa_start_bit = highestSetBit - mantissa_bits; exp = mantissa_start_bit + 1; mantissa = (size >> @as(u5, @truncate(mantissa_start_bit))) & mantissa_mask; const low_bits_mask = (@as(u32, 1) << @as(u5, @truncate(mantissa_start_bit))) - 1; if ((size & low_bits_mask) != 0) { mantissa += 1; } } return (exp << mantissa_bits) + mantissa; } pub fn toFloatRoundDown(size: u32) u32 { var exp: u32 = 0; var mantissa: u32 = 0; if (size < mantissa_value) { mantissa = size; } else { const leading_zeros = @clz(size); const highestSetBit = 31 - leading_zeros; const mantissa_start_bit = highestSetBit - mantissa_bits; exp = mantissa_start_bit + 1; mantissa = (size >> @as(u5, @truncate(mantissa_start_bit))) & mantissa_mask; } return (exp << mantissa_bits) | mantissa; } }; fn findLowestSetBitAfter(v: u32, start_idx: u32) ?u32 { const mask_before_start_index: u32 = (@as(u32, 1) << @as(u5, @truncate(start_idx))) - 1; const mask_after_start_index: u32 = ~mask_before_start_index; const bits_after: u32 = v & mask_after_start_index; if (bits_after == 0) return null; return @ctz(bits_after); } pub fn init(allocator: std.mem.Allocator, size: u32, max_allocations: ?u32) std.mem.Allocator.Error!OffsetAllocator { var self = OffsetAllocator{ .allocator = allocator, .size = size, .max_allocations = max_allocations orelse 128 * 1024, .nodes = null, .free_nodes = null, }; try self.reset(); return self; } pub fn reset(self: *OffsetAllocator) std.mem.Allocator.Error!void { self.free_storage = 0; self.used_bins_top = 0; self.free_offset = self.max_allocations - 1; for (0..num_top_bins) |i| { self.used_bins[i] = 0; } for (0..num_leaf_bins) |i| { self.bin_indices[i] = null; } if (self.nodes) |nodes| { self.allocator.free(nodes); self.nodes = null; } if (self.free_nodes) |free_nodes| { self.allocator.free(free_nodes); self.free_nodes = null; } self.nodes = try self.allocator.alloc(Node, self.max_allocations); self.free_nodes = try self.allocator.alloc(NodeIndex, self.max_allocations); for (0..self.max_allocations) |i| { self.free_nodes.?[i] = self.max_allocations - @as(u32, @truncate(i)) - 1; } _ = self.insertNodeIntoBin(self.size, 0); } pub fn deinit(self: *OffsetAllocator) void { if (self.nodes) |nodes| { self.allocator.free(nodes); self.nodes = null; } if (self.free_nodes) |free_nodes| { self.allocator.free(free_nodes); self.free_nodes = null; } } pub fn allocate( self: *OffsetAllocator, size: u32, ) Error!Allocation { if (self.free_offset == 0) { return Error.OutOfMemory; } const min_bin_index = SmallFloat.toFloatRoundUp(@intCast(size)); const min_top_bin_index: u32 = min_bin_index >> top_bins_index_shift; const min_leaf_bin_index: u32 = min_bin_index & lead_bins_index_mask; var top_bin_index = min_top_bin_index; var leaf_bin_index: ?u32 = null; if ((self.used_bins_top & (@as(u32, 1) << @as(u5, @truncate(top_bin_index)))) != 0) { leaf_bin_index = findLowestSetBitAfter(self.used_bins[top_bin_index], min_leaf_bin_index); } if (leaf_bin_index == null) { const found_top_bin_index = findLowestSetBitAfter(self.used_bins_top, min_top_bin_index + 1); if (found_top_bin_index == null) { return Error.OutOfMemory; } top_bin_index = found_top_bin_index.?; leaf_bin_index = @ctz(self.used_bins[top_bin_index]); } const bin_index = (top_bin_index << top_bins_index_shift) | leaf_bin_index.?; const node_index = self.bin_indices[bin_index].?; const node = &self.nodes.?[node_index]; 
const node_total_size = node.data_size; node.data_size = @intCast(size); node.used = true; self.bin_indices[bin_index] = node.bin_list_next; if (node.bin_list_next) |bln| self.nodes.?[bln].bin_list_prev = null; self.free_storage -= node_total_size; // debug // std.debug.print("free storage: {} ({}) (allocate)\n", .{ self.free_storage, node_total_size }); if (self.bin_indices[bin_index] == null) { self.used_bins[top_bin_index] &= @as(u8, @truncate(~(@as(u32, 1) << @as(u5, @truncate(leaf_bin_index.?))))); if (self.used_bins[top_bin_index] == 0) { self.used_bins_top &= ~(@as(u32, 1) << @as(u5, @truncate(top_bin_index))); } } const remainder_size = node_total_size - size; if (remainder_size > 0) { const new_node_index = self.insertNodeIntoBin(@intCast(remainder_size), @intCast(node.data_offset + size)); if (node.neighbour_next) |nnn| self.nodes.?[nnn].neighbour_prev = new_node_index; self.nodes.?[new_node_index].neighbour_prev = node_index; self.nodes.?[new_node_index].neighbour_next = node.neighbour_next; node.neighbour_next = new_node_index; } return .{ .offset = node.data_offset, .chunk = node_index, }; } pub fn free(self: *OffsetAllocator, allocation: Allocation) Error!void { if (self.nodes == null) { return Error.InvalidAllocation; } const node_index = allocation.chunk; const node = &self.nodes.?[node_index]; if (!node.used) { return Error.InvalidAllocation; } var offset = node.data_offset; var size = node.data_size; if (node.neighbour_prev != null and self.nodes.?[node.neighbour_prev.?].used == false) { const prev_node = &self.nodes.?[node.neighbour_prev.?]; offset = prev_node.data_offset; size += prev_node.data_size; self.removeNodeFromBin(node.neighbour_prev.?); std.debug.assert(prev_node.neighbour_next == @as(u32, @truncate(node_index))); node.neighbour_prev = prev_node.neighbour_prev; } if (node.neighbour_next != null and self.nodes.?[node.neighbour_next.?].used == false) { const next_node = &self.nodes.?[node.neighbour_next.?]; size += next_node.data_size; self.removeNodeFromBin(node.neighbour_next.?); std.debug.assert(next_node.neighbour_prev == @as(u32, @truncate(node_index))); node.neighbour_next = next_node.neighbour_next; } const neighbour_prev = node.neighbour_prev; const neighbour_next = node.neighbour_next; // debug // std.debug.print("putting node {} into freelist[{}] (free)\n", .{ node_index, self.free_offset + 1 }); self.free_offset += 1; self.free_nodes.?[self.free_offset] = @intCast(node_index); const combined_node_index = self.insertNodeIntoBin(size, offset); if (neighbour_next) |nn| { self.nodes.?[combined_node_index].neighbour_next = neighbour_next; self.nodes.?[nn].neighbour_prev = combined_node_index; } if (neighbour_prev) |np| { self.nodes.?[combined_node_index].neighbour_prev = neighbour_prev; self.nodes.?[np].neighbour_next = combined_node_index; } } pub fn insertNodeIntoBin(self: *OffsetAllocator, size: u32, data_offset: u32) u32 { const bin_index = SmallFloat.toFloatRoundDown(size); const top_bin_index: u32 = bin_index >> top_bins_index_shift; const leaf_bin_index: u32 = bin_index & lead_bins_index_mask; if (self.bin_indices[bin_index] == null) { self.used_bins[top_bin_index] |= @as(u8, @truncate(@as(u32, 1) << @as(u5, @truncate(leaf_bin_index)))); self.used_bins_top |= @as(u32, 1) << @as(u5, @truncate(top_bin_index)); } const top_node_index = self.bin_indices[bin_index]; const node_index = self.free_nodes.?[self.free_offset]; self.free_offset -= 1; // debug // std.debug.print("getting node {} from freelist[{}]\n", .{ node_index, self.free_offset + 1 }); 
self.nodes.?[node_index] = .{ .data_offset = data_offset, .data_size = size, .bin_list_next = top_node_index, }; if (top_node_index) |tni| self.nodes.?[tni].bin_list_prev = node_index; self.bin_indices[bin_index] = node_index; self.free_storage += size; // debug // std.debug.print("free storage: {} ({}) (insertNodeIntoBin)\n", .{ self.free_storage, size }); return node_index; } pub fn removeNodeFromBin(self: *OffsetAllocator, node_index: NodeIndex) void { const node = &self.nodes.?[node_index]; if (node.bin_list_prev) |blp| { self.nodes.?[blp].bin_list_next = node.bin_list_next; if (node.bin_list_next) |bln| self.nodes.?[bln].bin_list_prev = node.bin_list_prev; } else { const bin_index = SmallFloat.toFloatRoundDown(node.data_size); const top_bin_index: u32 = bin_index >> top_bins_index_shift; const leaf_bin_index: u32 = bin_index & lead_bins_index_mask; self.bin_indices[bin_index] = node.bin_list_next; if (node.bin_list_next) |bln| self.nodes.?[bln].bin_list_prev = null; if (self.bin_indices[bin_index] == null) { self.used_bins[top_bin_index] &= @as(u8, @truncate(~(@as(u32, 1) << @as(u5, @truncate(leaf_bin_index))))); if (self.used_bins[top_bin_index] == 0) { self.used_bins_top &= ~(@as(u32, 1) << @as(u5, @truncate(top_bin_index))); } } } // debug // std.debug.print("putting node {} into freelist[{}] (removeNodeFromBin)\n", .{ node_index, self.free_offset + 1 }); self.free_offset += 1; self.free_nodes.?[self.free_offset] = node_index; self.free_storage -= node.data_size; // debug // std.debug.print("free storage: {} ({}) (removeNodeFromBin)\n", .{ self.free_storage, node.data_size }); } pub fn getSize(self: *const OffsetAllocator) u64 { return self.size; } pub fn getAllocated(self: *const OffsetAllocator) u64 { return self.size - self.free_storage; } }; test "basic" { var allocator = try Allocator.initOffsetAllocator( std.testing.allocator, 1024 * 1024 * 256, null, ); defer allocator.deinit(); const a = try allocator.allocate(1337); const offset = a.offset; try std.testing.expectEqual(@as(u64, 0), offset); try allocator.free(a); } test "allocate" { var allocator = try Allocator.initOffsetAllocator( std.testing.allocator, 1024 * 1024 * 256, null, ); defer allocator.deinit(); { const a = try allocator.allocate(0); try std.testing.expectEqual(@as(u64, 0), a.offset); const b = try allocator.allocate(1); try std.testing.expectEqual(@as(u64, 0), b.offset); const c = try allocator.allocate(123); try std.testing.expectEqual(@as(u64, 1), c.offset); const d = try allocator.allocate(1234); try std.testing.expectEqual(@as(u64, 124), d.offset); try allocator.free(a); try allocator.free(b); try allocator.free(c); try allocator.free(d); const validate = try allocator.allocate(1024 * 1024 * 256); try std.testing.expectEqual(@as(u64, 0), validate.offset); try allocator.free(validate); } { const a = try allocator.allocate(1024); try std.testing.expectEqual(@as(u64, 0), a.offset); const b = try allocator.allocate(3456); try std.testing.expectEqual(@as(u64, 1024), b.offset); try allocator.free(a); const c = try allocator.allocate(1024); try std.testing.expectEqual(@as(u64, 0), c.offset); try allocator.free(b); try allocator.free(c); const validate = try allocator.allocate(1024 * 1024 * 256); try std.testing.expectEqual(@as(u64, 0), validate.offset); try allocator.free(validate); } { const a = try allocator.allocate(1024); try std.testing.expectEqual(@as(u64, 0), a.offset); const b = try allocator.allocate(3456); try std.testing.expectEqual(@as(u64, 1024), b.offset); try allocator.free(a); const c = try 
allocator.allocate(2345); try std.testing.expectEqual(@as(u64, 1024 + 3456), c.offset); const d = try allocator.allocate(456); try std.testing.expectEqual(@as(u64, 0), d.offset); const e = try allocator.allocate(512); try std.testing.expectEqual(@as(u64, 456), e.offset); try allocator.free(b); try allocator.free(c); try allocator.free(d); try allocator.free(e); const validate = try allocator.allocate(1024 * 1024 * 256); try std.testing.expectEqual(@as(u64, 0), validate.offset); try allocator.free(validate); } }
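// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original file): a worked example of the
// SmallFloat bin encoding above. With mantissa_bits = 3, every size below 8
// gets its own bin; larger sizes snap to "small float" buckets, so
// toFloatRoundDown/toFloatRoundUp bracket sizes that fall between buckets.
// Only declarations from this file are used.
test "SmallFloat bin rounding example" {
    const SF = OffsetAllocator.SmallFloat;

    // Sizes below mantissa_value (8) map to a bin index equal to the size.
    try std.testing.expectEqual(@as(u32, 5), SF.toFloatRoundUp(5));
    try std.testing.expectEqual(@as(u32, 5), SF.toFloatRoundDown(5));

    // 16 is exactly representable, so both roundings agree.
    try std.testing.expectEqual(SF.toFloatRoundDown(16), SF.toFloatRoundUp(16));

    // 17 is not representable: round-down keeps bin 16 (value 16), while
    // round-up moves to bin 17 (value 18), guaranteeing a fit on allocate.
    try std.testing.expectEqual(@as(u32, 16), SF.toFloatRoundDown(17));
    try std.testing.expectEqual(@as(u32, 17), SF.toFloatRoundUp(17));
}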
0
repos/mach-sysgpu
repos/mach-sysgpu/src/main.zig
const std = @import("std"); const builtin = @import("builtin"); const build_options = @import("build-options"); pub const sysgpu = @import("sysgpu/main.zig"); pub const shader = @import("shader.zig"); const utils = @import("utils.zig"); const backend_type: sysgpu.BackendType = if (build_options.backend != .default) build_options.backend else switch (builtin.target.os.tag) { .linux => .vulkan, .macos, .ios => .metal, .windows => .d3d12, else => @compileError("unsupported platform"), }; const impl = switch (backend_type) { .d3d12 => @import("d3d12.zig"), .metal => @import("metal.zig"), .opengl => @import("opengl.zig"), .vulkan => @import("vulkan.zig"), else => @compileError("unsupported backend"), }; var inited = false; var allocator: std.mem.Allocator = undefined; pub const Impl = sysgpu.Interface(struct { pub fn init(alloc: std.mem.Allocator, options: impl.InitOptions) !void { inited = true; allocator = alloc; try impl.init(alloc, options); } pub inline fn createInstance(descriptor: ?*const sysgpu.Instance.Descriptor) ?*sysgpu.Instance { if (builtin.mode == .Debug and !inited) { std.log.err("sysgpu not initialized; did you forget to call sysgpu.Impl.init()?", .{}); } const instance = impl.Instance.init(descriptor orelse &sysgpu.Instance.Descriptor{}) catch @panic("api error"); return @as(*sysgpu.Instance, @ptrCast(instance)); } pub inline fn getProcAddress(device: *sysgpu.Device, proc_name: [*:0]const u8) ?sysgpu.Proc { _ = device; _ = proc_name; @panic("unimplemented"); } pub inline fn adapterCreateDevice(adapter_raw: *sysgpu.Adapter, descriptor: ?*const sysgpu.Device.Descriptor) ?*sysgpu.Device { const adapter: *impl.Adapter = @ptrCast(@alignCast(adapter_raw)); const device = adapter.createDevice(descriptor) catch return null; if (descriptor) |desc| { device.lost_cb = desc.device_lost_callback; device.lost_cb_userdata = desc.device_lost_userdata; } return @as(*sysgpu.Device, @ptrCast(device)); } pub inline fn adapterEnumerateFeatures(adapter: *sysgpu.Adapter, features: ?[*]sysgpu.FeatureName) usize { _ = adapter; _ = features; @panic("unimplemented"); } pub inline fn adapterGetLimits(adapter: *sysgpu.Adapter, limits: *sysgpu.SupportedLimits) u32 { _ = adapter; _ = limits; @panic("unimplemented"); } pub inline fn adapterGetInstance(adapter: *sysgpu.Adapter) *sysgpu.Instance { _ = adapter; @panic("unimplemented"); } pub inline fn adapterGetProperties(adapter_raw: *sysgpu.Adapter, properties: *sysgpu.Adapter.Properties) void { const adapter: *impl.Adapter = @ptrCast(@alignCast(adapter_raw)); properties.* = adapter.getProperties(); } pub inline fn adapterHasFeature(adapter: *sysgpu.Adapter, feature: sysgpu.FeatureName) u32 { _ = adapter; _ = feature; @panic("unimplemented"); } pub inline fn adapterPropertiesFreeMembers(value: sysgpu.Adapter.Properties) void { _ = value; @panic("unimplemented"); } pub inline fn adapterRequestDevice(adapter: *sysgpu.Adapter, descriptor: ?*const sysgpu.Device.Descriptor, callback: sysgpu.RequestDeviceCallback, userdata: ?*anyopaque) void { _ = adapter; _ = descriptor; _ = callback; _ = userdata; @panic("unimplemented"); } pub inline fn adapterReference(adapter_raw: *sysgpu.Adapter) void { const adapter: *impl.Adapter = @ptrCast(@alignCast(adapter_raw)); adapter.manager.reference(); } pub inline fn adapterRelease(adapter_raw: *sysgpu.Adapter) void { const adapter: *impl.Adapter = @ptrCast(@alignCast(adapter_raw)); adapter.manager.release(); } pub inline fn bindGroupSetLabel(bind_group: *sysgpu.BindGroup, label: [*:0]const u8) void { _ = bind_group; _ = label; 
@panic("unimplemented"); } pub inline fn bindGroupReference(bind_group_raw: *sysgpu.BindGroup) void { const bind_group: *impl.BindGroup = @ptrCast(@alignCast(bind_group_raw)); bind_group.manager.reference(); } pub inline fn bindGroupRelease(bind_group_raw: *sysgpu.BindGroup) void { const bind_group: *impl.BindGroup = @ptrCast(@alignCast(bind_group_raw)); bind_group.manager.release(); } pub inline fn bindGroupLayoutSetLabel(bind_group_layout: *sysgpu.BindGroupLayout, label: [*:0]const u8) void { _ = bind_group_layout; _ = label; @panic("unimplemented"); } pub inline fn bindGroupLayoutReference(bind_group_layout_raw: *sysgpu.BindGroupLayout) void { const bind_group_layout: *impl.BindGroupLayout = @ptrCast(@alignCast(bind_group_layout_raw)); bind_group_layout.manager.reference(); } pub inline fn bindGroupLayoutRelease(bind_group_layout_raw: *sysgpu.BindGroupLayout) void { const bind_group_layout: *impl.BindGroupLayout = @ptrCast(@alignCast(bind_group_layout_raw)); bind_group_layout.manager.release(); } pub inline fn bufferDestroy(buffer: *sysgpu.Buffer) void { _ = buffer; @panic("unimplemented"); } pub inline fn bufferGetConstMappedRange(buffer_raw: *sysgpu.Buffer, offset: usize, size: usize) ?*const anyopaque { const buffer: *impl.Buffer = @ptrCast(@alignCast(buffer_raw)); return buffer.getMappedRange(offset, size) catch @panic("api error"); } pub inline fn bufferGetMappedRange(buffer_raw: *sysgpu.Buffer, offset: usize, size: usize) ?*anyopaque { const buffer: *impl.Buffer = @ptrCast(@alignCast(buffer_raw)); return buffer.getMappedRange(offset, size) catch @panic("api error"); } pub inline fn bufferGetSize(buffer_raw: *sysgpu.Buffer) u64 { const buffer: *impl.Buffer = @ptrCast(@alignCast(buffer_raw)); return buffer.getSize(); } pub inline fn bufferGetUsage(buffer_raw: *sysgpu.Buffer) sysgpu.Buffer.UsageFlags { const buffer: *impl.Buffer = @ptrCast(@alignCast(buffer_raw)); return buffer.getUsage(); } pub inline fn bufferMapAsync(buffer_raw: *sysgpu.Buffer, mode: sysgpu.MapModeFlags, offset: usize, size: usize, callback: sysgpu.Buffer.MapCallback, userdata: ?*anyopaque) void { const buffer: *impl.Buffer = @ptrCast(@alignCast(buffer_raw)); buffer.mapAsync(mode, offset, size, callback, userdata) catch @panic("api error"); } pub inline fn bufferSetLabel(buffer_raw: *sysgpu.Buffer, label: [*:0]const u8) void { const buffer: *impl.Buffer = @ptrCast(@alignCast(buffer_raw)); buffer.setLabel(label); } pub inline fn bufferUnmap(buffer_raw: *sysgpu.Buffer) void { const buffer: *impl.Buffer = @ptrCast(@alignCast(buffer_raw)); buffer.unmap() catch @panic("api error"); } pub inline fn bufferReference(buffer_raw: *sysgpu.Buffer) void { const buffer: *impl.Buffer = @ptrCast(@alignCast(buffer_raw)); buffer.manager.reference(); } pub inline fn bufferRelease(buffer_raw: *sysgpu.Buffer) void { const buffer: *impl.Buffer = @ptrCast(@alignCast(buffer_raw)); buffer.manager.release(); } pub inline fn commandBufferSetLabel(command_buffer: *sysgpu.CommandBuffer, label: [*:0]const u8) void { _ = command_buffer; _ = label; @panic("unimplemented"); } pub inline fn commandBufferReference(command_buffer_raw: *sysgpu.CommandBuffer) void { const command_buffer: *impl.CommandBuffer = @ptrCast(@alignCast(command_buffer_raw)); command_buffer.manager.reference(); } pub inline fn commandBufferRelease(command_buffer_raw: *sysgpu.CommandBuffer) void { const command_buffer: *impl.CommandBuffer = @ptrCast(@alignCast(command_buffer_raw)); command_buffer.manager.release(); } pub inline fn 
commandEncoderBeginComputePass(command_encoder_raw: *sysgpu.CommandEncoder, descriptor: ?*const sysgpu.ComputePassDescriptor) *sysgpu.ComputePassEncoder { const command_encoder: *impl.CommandEncoder = @ptrCast(@alignCast(command_encoder_raw)); const compute_pass = command_encoder.beginComputePass(descriptor orelse &.{}) catch @panic("api error"); return @ptrCast(compute_pass); } pub inline fn commandEncoderBeginRenderPass(command_encoder_raw: *sysgpu.CommandEncoder, descriptor: *const sysgpu.RenderPassDescriptor) *sysgpu.RenderPassEncoder { const command_encoder: *impl.CommandEncoder = @ptrCast(@alignCast(command_encoder_raw)); const render_pass = command_encoder.beginRenderPass(descriptor) catch @panic("api error"); return @ptrCast(render_pass); } pub inline fn commandEncoderClearBuffer(command_encoder: *sysgpu.CommandEncoder, buffer: *sysgpu.Buffer, offset: u64, size: u64) void { _ = command_encoder; _ = buffer; _ = offset; _ = size; @panic("unimplemented"); } pub inline fn commandEncoderCopyBufferToBuffer(command_encoder_raw: *sysgpu.CommandEncoder, source_raw: *sysgpu.Buffer, source_offset: u64, destination_raw: *sysgpu.Buffer, destination_offset: u64, size: u64) void { const command_encoder: *impl.CommandEncoder = @ptrCast(@alignCast(command_encoder_raw)); const source: *impl.Buffer = @ptrCast(@alignCast(source_raw)); const destination: *impl.Buffer = @ptrCast(@alignCast(destination_raw)); command_encoder.copyBufferToBuffer(source, source_offset, destination, destination_offset, size) catch @panic("api error"); } pub inline fn commandEncoderCopyBufferToTexture(command_encoder_raw: *sysgpu.CommandEncoder, source: *const sysgpu.ImageCopyBuffer, destination: *const sysgpu.ImageCopyTexture, copy_size: *const sysgpu.Extent3D) void { const command_encoder: *impl.CommandEncoder = @ptrCast(@alignCast(command_encoder_raw)); command_encoder.copyBufferToTexture(source, destination, copy_size) catch @panic("api error"); } pub inline fn commandEncoderCopyTextureToBuffer(command_encoder: *sysgpu.CommandEncoder, source: *const sysgpu.ImageCopyTexture, destination: *const sysgpu.ImageCopyBuffer, copy_size: *const sysgpu.Extent3D) void { _ = command_encoder; _ = source; _ = destination; _ = copy_size; @panic("unimplemented"); } pub inline fn commandEncoderCopyTextureToTexture(command_encoder_raw: *sysgpu.CommandEncoder, source: *const sysgpu.ImageCopyTexture, destination: *const sysgpu.ImageCopyTexture, copy_size: *const sysgpu.Extent3D) void { const command_encoder: *impl.CommandEncoder = @ptrCast(@alignCast(command_encoder_raw)); command_encoder.copyTextureToTexture(source, destination, copy_size) catch @panic("api error"); } pub inline fn commandEncoderCopyTextureToTextureInternal(command_encoder: *sysgpu.CommandEncoder, source: *const sysgpu.ImageCopyTexture, destination: *const sysgpu.ImageCopyTexture, copy_size: *const sysgpu.Extent3D) void { _ = command_encoder; _ = source; _ = destination; _ = copy_size; @panic("unimplemented"); } pub inline fn commandEncoderFinish(command_encoder_raw: *sysgpu.CommandEncoder, descriptor: ?*const sysgpu.CommandBuffer.Descriptor) *sysgpu.CommandBuffer { const command_encoder: *impl.CommandEncoder = @ptrCast(@alignCast(command_encoder_raw)); const command_buffer = command_encoder.finish(descriptor orelse &.{}) catch @panic("api error"); command_buffer.manager.reference(); return @ptrCast(command_buffer); } pub inline fn commandEncoderInjectValidationError(command_encoder: *sysgpu.CommandEncoder, message: [*:0]const u8) void { _ = command_encoder; _ = message; 
@panic("unimplemented"); } pub inline fn commandEncoderInsertDebugMarker(command_encoder: *sysgpu.CommandEncoder, marker_label: [*:0]const u8) void { _ = command_encoder; _ = marker_label; @panic("unimplemented"); } pub inline fn commandEncoderPopDebugGroup(command_encoder: *sysgpu.CommandEncoder) void { _ = command_encoder; @panic("unimplemented"); } pub inline fn commandEncoderPushDebugGroup(command_encoder: *sysgpu.CommandEncoder, group_label: [*:0]const u8) void { _ = command_encoder; _ = group_label; @panic("unimplemented"); } pub inline fn commandEncoderResolveQuerySet(command_encoder: *sysgpu.CommandEncoder, query_set: *sysgpu.QuerySet, first_query: u32, query_count: u32, destination: *sysgpu.Buffer, destination_offset: u64) void { _ = command_encoder; _ = query_set; _ = first_query; _ = query_count; _ = destination; _ = destination_offset; @panic("unimplemented"); } pub inline fn commandEncoderSetLabel(command_encoder: *sysgpu.CommandEncoder, label: [*:0]const u8) void { _ = command_encoder; _ = label; @panic("unimplemented"); } pub inline fn commandEncoderWriteBuffer(command_encoder_raw: *sysgpu.CommandEncoder, buffer_raw: *sysgpu.Buffer, buffer_offset: u64, data: [*]const u8, size: u64) void { const command_encoder: *impl.CommandEncoder = @ptrCast(@alignCast(command_encoder_raw)); const buffer: *impl.Buffer = @ptrCast(@alignCast(buffer_raw)); command_encoder.writeBuffer(buffer, buffer_offset, @ptrCast(data), size) catch @panic("api error"); } pub inline fn commandEncoderWriteTimestamp(command_encoder: *sysgpu.CommandEncoder, query_set: *sysgpu.QuerySet, query_index: u32) void { _ = command_encoder; _ = query_set; _ = query_index; @panic("unimplemented"); } pub inline fn commandEncoderReference(command_encoder_raw: *sysgpu.CommandEncoder) void { const command_encoder: *impl.CommandEncoder = @ptrCast(@alignCast(command_encoder_raw)); command_encoder.manager.reference(); } pub inline fn commandEncoderRelease(command_encoder_raw: *sysgpu.CommandEncoder) void { const command_encoder: *impl.CommandEncoder = @ptrCast(@alignCast(command_encoder_raw)); command_encoder.manager.release(); } pub inline fn computePassEncoderDispatchWorkgroups(compute_pass_encoder_raw: *sysgpu.ComputePassEncoder, workgroup_count_x: u32, workgroup_count_y: u32, workgroup_count_z: u32) void { const compute_pass_encoder: *impl.ComputePassEncoder = @ptrCast(@alignCast(compute_pass_encoder_raw)); compute_pass_encoder.dispatchWorkgroups(workgroup_count_x, workgroup_count_y, workgroup_count_z) catch @panic("api error"); } pub inline fn computePassEncoderDispatchWorkgroupsIndirect(compute_pass_encoder: *sysgpu.ComputePassEncoder, indirect_buffer: *sysgpu.Buffer, indirect_offset: u64) void { _ = compute_pass_encoder; _ = indirect_buffer; _ = indirect_offset; @panic("unimplemented"); } pub inline fn computePassEncoderEnd(compute_pass_encoder_raw: *sysgpu.ComputePassEncoder) void { const compute_pass_encoder: *impl.ComputePassEncoder = @ptrCast(@alignCast(compute_pass_encoder_raw)); compute_pass_encoder.end(); } pub inline fn computePassEncoderInsertDebugMarker(compute_pass_encoder: *sysgpu.ComputePassEncoder, marker_label: [*:0]const u8) void { _ = compute_pass_encoder; _ = marker_label; @panic("unimplemented"); } pub inline fn computePassEncoderPopDebugGroup(compute_pass_encoder: *sysgpu.ComputePassEncoder) void { _ = compute_pass_encoder; @panic("unimplemented"); } pub inline fn computePassEncoderPushDebugGroup(compute_pass_encoder: *sysgpu.ComputePassEncoder, group_label: [*:0]const u8) void { _ = 
compute_pass_encoder; _ = group_label; @panic("unimplemented"); } pub inline fn computePassEncoderSetBindGroup(compute_pass_encoder_raw: *sysgpu.ComputePassEncoder, group_index: u32, group_raw: *sysgpu.BindGroup, dynamic_offset_count: usize, dynamic_offsets: ?[*]const u32) void { const compute_pass_encoder: *impl.ComputePassEncoder = @ptrCast(@alignCast(compute_pass_encoder_raw)); const group: *impl.BindGroup = @ptrCast(@alignCast(group_raw)); compute_pass_encoder.setBindGroup(group_index, group, dynamic_offset_count, dynamic_offsets) catch @panic("api error"); } pub inline fn computePassEncoderSetLabel(compute_pass_encoder: *sysgpu.ComputePassEncoder, label: [*:0]const u8) void { _ = compute_pass_encoder; _ = label; @panic("unimplemented"); } pub inline fn computePassEncoderSetPipeline(compute_pass_encoder_raw: *sysgpu.ComputePassEncoder, pipeline_raw: *sysgpu.ComputePipeline) void { const compute_pass_encoder: *impl.ComputePassEncoder = @ptrCast(@alignCast(compute_pass_encoder_raw)); const pipeline: *impl.ComputePipeline = @ptrCast(@alignCast(pipeline_raw)); compute_pass_encoder.setPipeline(pipeline) catch @panic("api error"); } pub inline fn computePassEncoderWriteTimestamp(compute_pass_encoder: *sysgpu.ComputePassEncoder, query_set: *sysgpu.QuerySet, query_index: u32) void { _ = compute_pass_encoder; _ = query_set; _ = query_index; @panic("unimplemented"); } pub inline fn computePassEncoderReference(compute_pass_encoder_raw: *sysgpu.ComputePassEncoder) void { const compute_pass_encoder: *impl.ComputePassEncoder = @ptrCast(@alignCast(compute_pass_encoder_raw)); compute_pass_encoder.manager.reference(); } pub inline fn computePassEncoderRelease(compute_pass_encoder_raw: *sysgpu.ComputePassEncoder) void { const compute_pass_encoder: *impl.ComputePassEncoder = @ptrCast(@alignCast(compute_pass_encoder_raw)); compute_pass_encoder.manager.release(); } pub inline fn computePipelineGetBindGroupLayout(compute_pipeline_raw: *sysgpu.ComputePipeline, group_index: u32) *sysgpu.BindGroupLayout { const compute_pipeline: *impl.ComputePipeline = @ptrCast(@alignCast(compute_pipeline_raw)); const layout = compute_pipeline.getBindGroupLayout(group_index); layout.manager.reference(); return @ptrCast(layout); } pub inline fn computePipelineSetLabel(compute_pipeline: *sysgpu.ComputePipeline, label: [*:0]const u8) void { _ = compute_pipeline; _ = label; @panic("unimplemented"); } pub inline fn computePipelineReference(compute_pipeline_raw: *sysgpu.ComputePipeline) void { const compute_pipeline: *impl.ComputePipeline = @ptrCast(@alignCast(compute_pipeline_raw)); compute_pipeline.manager.reference(); } pub inline fn computePipelineRelease(compute_pipeline_raw: *sysgpu.ComputePipeline) void { const compute_pipeline: *impl.ComputePipeline = @ptrCast(@alignCast(compute_pipeline_raw)); compute_pipeline.manager.release(); } pub inline fn deviceCreateBindGroup(device_raw: *sysgpu.Device, descriptor: *const sysgpu.BindGroup.Descriptor) *sysgpu.BindGroup { const device: *impl.Device = @ptrCast(@alignCast(device_raw)); const group = device.createBindGroup(descriptor) catch @panic("api error"); return @ptrCast(group); } pub inline fn deviceCreateBindGroupLayout(device_raw: *sysgpu.Device, descriptor: *const sysgpu.BindGroupLayout.Descriptor) *sysgpu.BindGroupLayout { const device: *impl.Device = @ptrCast(@alignCast(device_raw)); const layout = device.createBindGroupLayout(descriptor) catch @panic("api error"); return @ptrCast(layout); } pub inline fn deviceCreateBuffer(device_raw: *sysgpu.Device, descriptor: *const 
sysgpu.Buffer.Descriptor) *sysgpu.Buffer { const device: *impl.Device = @ptrCast(@alignCast(device_raw)); const buffer = device.createBuffer(descriptor) catch @panic("api error"); return @ptrCast(buffer); } pub inline fn deviceCreateCommandEncoder(device_raw: *sysgpu.Device, descriptor: ?*const sysgpu.CommandEncoder.Descriptor) *sysgpu.CommandEncoder { const device: *impl.Device = @ptrCast(@alignCast(device_raw)); const command_encoder = device.createCommandEncoder(descriptor orelse &.{}) catch @panic("api error"); return @ptrCast(command_encoder); } pub inline fn deviceCreateComputePipeline(device_raw: *sysgpu.Device, descriptor: *const sysgpu.ComputePipeline.Descriptor) *sysgpu.ComputePipeline { const device: *impl.Device = @ptrCast(@alignCast(device_raw)); const pipeline = device.createComputePipeline(descriptor) catch @panic("api error"); return @ptrCast(pipeline); } pub inline fn deviceCreateComputePipelineAsync(device: *sysgpu.Device, descriptor: *const sysgpu.ComputePipeline.Descriptor, callback: sysgpu.CreateComputePipelineAsyncCallback, userdata: ?*anyopaque) void { _ = device; _ = descriptor; _ = callback; _ = userdata; @panic("unimplemented"); } pub inline fn deviceCreateErrorBuffer(device: *sysgpu.Device, descriptor: *const sysgpu.Buffer.Descriptor) *sysgpu.Buffer { _ = device; _ = descriptor; @panic("unimplemented"); } pub inline fn deviceCreateErrorExternalTexture(device: *sysgpu.Device) *sysgpu.ExternalTexture { _ = device; @panic("unimplemented"); } pub inline fn deviceCreateErrorTexture(device: *sysgpu.Device, descriptor: *const sysgpu.Texture.Descriptor) *sysgpu.Texture { _ = device; _ = descriptor; @panic("unimplemented"); } pub inline fn deviceCreateExternalTexture(device: *sysgpu.Device, external_texture_descriptor: *const sysgpu.ExternalTexture.Descriptor) *sysgpu.ExternalTexture { _ = device; _ = external_texture_descriptor; @panic("unimplemented"); } pub inline fn deviceCreatePipelineLayout(device_raw: *sysgpu.Device, pipeline_layout_descriptor: *const sysgpu.PipelineLayout.Descriptor) *sysgpu.PipelineLayout { const device: *impl.Device = @ptrCast(@alignCast(device_raw)); const layout = device.createPipelineLayout(pipeline_layout_descriptor) catch @panic("api error"); return @ptrCast(layout); } pub inline fn deviceCreateQuerySet(device: *sysgpu.Device, descriptor: *const sysgpu.QuerySet.Descriptor) *sysgpu.QuerySet { _ = device; _ = descriptor; @panic("unimplemented"); } pub inline fn deviceCreateRenderBundleEncoder(device: *sysgpu.Device, descriptor: *const sysgpu.RenderBundleEncoder.Descriptor) *sysgpu.RenderBundleEncoder { _ = device; _ = descriptor; @panic("unimplemented"); } pub inline fn deviceCreateRenderPipeline(device_raw: *sysgpu.Device, descriptor: *const sysgpu.RenderPipeline.Descriptor) *sysgpu.RenderPipeline { const device: *impl.Device = @ptrCast(@alignCast(device_raw)); const render_pipeline = device.createRenderPipeline(descriptor) catch @panic("api error"); return @ptrCast(render_pipeline); } pub inline fn deviceCreateRenderPipelineAsync(device: *sysgpu.Device, descriptor: *const sysgpu.RenderPipeline.Descriptor, callback: sysgpu.CreateRenderPipelineAsyncCallback, userdata: ?*anyopaque) void { _ = device; _ = descriptor; _ = callback; _ = userdata; @panic("unimplemented"); } pub inline fn deviceCreateSampler(device_raw: *sysgpu.Device, descriptor: ?*const sysgpu.Sampler.Descriptor) *sysgpu.Sampler { const device: *impl.Device = @ptrCast(@alignCast(device_raw)); const sampler = device.createSampler(descriptor orelse &sysgpu.Sampler.Descriptor{}) 
catch @panic("api error"); return @ptrCast(sampler); } pub inline fn deviceCreateShaderModule(device_raw: *sysgpu.Device, descriptor: *const sysgpu.ShaderModule.Descriptor) *sysgpu.ShaderModule { const device: *impl.Device = @ptrCast(@alignCast(device_raw)); var errors = try shader.ErrorList.init(allocator); defer errors.deinit(); if (utils.findChained(sysgpu.ShaderModule.WGSLDescriptor, descriptor.next_in_chain.generic)) |wgsl_descriptor| { const source = std.mem.span(wgsl_descriptor.code); var ast = shader.Ast.parse(allocator, &errors, source) catch |err| switch (err) { error.Parsing => { errors.print(source, null) catch @panic("api error"); std.process.exit(1); }, else => @panic("api error"), }; defer ast.deinit(allocator); const air = allocator.create(shader.Air) catch @panic("api error"); air.* = shader.Air.generate(allocator, &ast, &errors, null) catch |err| switch (err) { error.AnalysisFail => { errors.print(source, null) catch @panic("api error"); std.process.exit(1); }, else => @panic("api error"), }; const shader_module = device.createShaderModuleAir(air, descriptor.label orelse "<ShaderModule label not specified>") catch @panic("api error"); return @ptrCast(shader_module); } else if (utils.findChained(sysgpu.ShaderModule.SPIRVDescriptor, descriptor.next_in_chain.generic)) |spirv_descriptor| { const shader_module = device.createShaderModuleSpirv(spirv_descriptor.code, spirv_descriptor.code_size) catch @panic("api error"); return @ptrCast(shader_module); } else if (utils.findChained(sysgpu.ShaderModule.HLSLDescriptor, descriptor.next_in_chain.generic)) |hlsl_descriptor| { const shader_module = device.createShaderModuleHLSL(hlsl_descriptor.code[0..hlsl_descriptor.code_size]) catch @panic("api error"); return @ptrCast(shader_module); } else if (utils.findChained(sysgpu.ShaderModule.MSLDescriptor, descriptor.next_in_chain.generic)) |msl_descriptor| { const shader_module = device.createShaderModuleMSL( descriptor.label orelse "<ShaderModule label not specified>", msl_descriptor.code[0..msl_descriptor.code_size], msl_descriptor.workgroup_size, ) catch @panic("api error"); return @ptrCast(shader_module); } @panic("unimplemented"); } pub inline fn deviceCreateSwapChain(device_raw: *sysgpu.Device, surface_raw: ?*sysgpu.Surface, descriptor: *const sysgpu.SwapChain.Descriptor) *sysgpu.SwapChain { const device: *impl.Device = @ptrCast(@alignCast(device_raw)); const surface: *impl.Surface = @ptrCast(@alignCast(surface_raw.?)); const swapchain = device.createSwapChain(surface, descriptor) catch @panic("api error"); return @ptrCast(swapchain); } pub inline fn deviceCreateTexture(device_raw: *sysgpu.Device, descriptor: *const sysgpu.Texture.Descriptor) *sysgpu.Texture { const device: *impl.Device = @ptrCast(@alignCast(device_raw)); const texture = device.createTexture(descriptor) catch @panic("api error"); return @ptrCast(texture); } pub inline fn deviceDestroy(device: *sysgpu.Device) void { _ = device; @panic("unimplemented"); } pub inline fn deviceEnumerateFeatures(device: *sysgpu.Device, features: ?[*]sysgpu.FeatureName) usize { _ = device; _ = features; @panic("unimplemented"); } pub inline fn deviceGetLimits(device: *sysgpu.Device, limits: *sysgpu.SupportedLimits) u32 { _ = device; _ = limits; @panic("unimplemented"); } pub inline fn deviceGetQueue(device_raw: *sysgpu.Device) *sysgpu.Queue { const device: *impl.Device = @ptrCast(@alignCast(device_raw)); const queue = device.getQueue() catch @panic("api error"); queue.manager.reference(); return @ptrCast(queue); } pub inline fn 
deviceHasFeature(device: *sysgpu.Device, feature: sysgpu.FeatureName) u32 { _ = device; _ = feature; @panic("unimplemented"); } pub inline fn deviceImportSharedFence(device: *sysgpu.Device, descriptor: *const sysgpu.SharedFence.Descriptor) *sysgpu.SharedFence { _ = device; _ = descriptor; @panic("unimplemented"); } pub inline fn deviceImportSharedTextureMemory(device: *sysgpu.Device, descriptor: *const sysgpu.SharedTextureMemory.Descriptor) *sysgpu.SharedTextureMemory { _ = device; _ = descriptor; @panic("unimplemented"); } pub inline fn deviceInjectError(device: *sysgpu.Device, typ: sysgpu.ErrorType, message: [*:0]const u8) void { _ = device; _ = typ; _ = message; @panic("unimplemented"); } pub inline fn deviceLoseForTesting(device: *sysgpu.Device) void { _ = device; @panic("unimplemented"); } pub inline fn devicePopErrorScope(device: *sysgpu.Device, callback: sysgpu.ErrorCallback, userdata: ?*anyopaque) void { _ = device; _ = callback; _ = userdata; @panic("unimplemented"); } pub inline fn devicePushErrorScope(device: *sysgpu.Device, filter: sysgpu.ErrorFilter) void { _ = device; _ = filter; @panic("unimplemented"); } pub inline fn deviceSetDeviceLostCallback(device_raw: *sysgpu.Device, callback: ?sysgpu.Device.LostCallback, userdata: ?*anyopaque) void { const device: *impl.Device = @ptrCast(@alignCast(device_raw)); device.lost_cb = callback; device.lost_cb_userdata = userdata; } pub inline fn deviceSetLabel(device: *sysgpu.Device, label: [*:0]const u8) void { _ = device; _ = label; @panic("unimplemented"); } pub inline fn deviceSetLoggingCallback(device_raw: *sysgpu.Device, callback: ?sysgpu.LoggingCallback, userdata: ?*anyopaque) void { const device: *impl.Device = @ptrCast(@alignCast(device_raw)); device.log_cb = callback; device.log_cb_userdata = userdata; } pub inline fn deviceSetUncapturedErrorCallback(device_raw: *sysgpu.Device, callback: ?sysgpu.ErrorCallback, userdata: ?*anyopaque) void { const device: *impl.Device = @ptrCast(@alignCast(device_raw)); device.err_cb = callback; device.err_cb_userdata = userdata; } pub inline fn deviceTick(device_raw: *sysgpu.Device) void { const device: *impl.Device = @ptrCast(@alignCast(device_raw)); device.tick() catch @panic("api error"); } pub inline fn machDeviceWaitForCommandsToBeScheduled(device: *sysgpu.Device) void { _ = device; } pub inline fn deviceReference(device_raw: *sysgpu.Device) void { const device: *impl.Device = @ptrCast(@alignCast(device_raw)); device.manager.reference(); } pub inline fn deviceRelease(device_raw: *sysgpu.Device) void { const device: *impl.Device = @ptrCast(@alignCast(device_raw)); device.manager.release(); } pub inline fn externalTextureDestroy(external_texture: *sysgpu.ExternalTexture) void { _ = external_texture; @panic("unimplemented"); } pub inline fn externalTextureSetLabel(external_texture: *sysgpu.ExternalTexture, label: [*:0]const u8) void { _ = external_texture; _ = label; @panic("unimplemented"); } pub inline fn externalTextureReference(external_texture: *sysgpu.ExternalTexture) void { _ = external_texture; @panic("unimplemented"); } pub inline fn externalTextureRelease(external_texture: *sysgpu.ExternalTexture) void { _ = external_texture; @panic("unimplemented"); } pub inline fn instanceCreateSurface(instance_raw: *sysgpu.Instance, descriptor: *const sysgpu.Surface.Descriptor) *sysgpu.Surface { const instance: *impl.Instance = @ptrCast(@alignCast(instance_raw)); const surface = instance.createSurface(descriptor) catch @panic("api error"); return @ptrCast(surface); } pub inline fn 
instanceProcessEvents(instance: *sysgpu.Instance) void { _ = instance; @panic("unimplemented"); } pub inline fn instanceRequestAdapter( instance_raw: *sysgpu.Instance, options: ?*const sysgpu.RequestAdapterOptions, callback: sysgpu.RequestAdapterCallback, userdata: ?*anyopaque, ) void { const instance: *impl.Instance = @ptrCast(@alignCast(instance_raw)); const adapter = impl.Adapter.init(instance, options orelse &sysgpu.RequestAdapterOptions{}) catch |err| { return callback(.err, undefined, @errorName(err), userdata); }; callback(.success, @as(*sysgpu.Adapter, @ptrCast(adapter)), null, userdata); } pub inline fn instanceReference(instance_raw: *sysgpu.Instance) void { const instance: *impl.Instance = @ptrCast(@alignCast(instance_raw)); instance.manager.reference(); } pub inline fn instanceRelease(instance_raw: *sysgpu.Instance) void { const instance: *impl.Instance = @ptrCast(@alignCast(instance_raw)); instance.manager.release(); } pub inline fn pipelineLayoutSetLabel(pipeline_layout: *sysgpu.PipelineLayout, label: [*:0]const u8) void { _ = pipeline_layout; _ = label; @panic("unimplemented"); } pub inline fn pipelineLayoutReference(pipeline_layout_raw: *sysgpu.PipelineLayout) void { const pipeline_layout: *impl.PipelineLayout = @ptrCast(@alignCast(pipeline_layout_raw)); pipeline_layout.manager.reference(); } pub inline fn pipelineLayoutRelease(pipeline_layout_raw: *sysgpu.PipelineLayout) void { const pipeline_layout: *impl.PipelineLayout = @ptrCast(@alignCast(pipeline_layout_raw)); pipeline_layout.manager.release(); } pub inline fn querySetDestroy(query_set: *sysgpu.QuerySet) void { _ = query_set; @panic("unimplemented"); } pub inline fn querySetGetCount(query_set: *sysgpu.QuerySet) u32 { _ = query_set; @panic("unimplemented"); } pub inline fn querySetGetType(query_set: *sysgpu.QuerySet) sysgpu.QueryType { _ = query_set; @panic("unimplemented"); } pub inline fn querySetSetLabel(query_set: *sysgpu.QuerySet, label: [*:0]const u8) void { _ = query_set; _ = label; @panic("unimplemented"); } pub inline fn querySetReference(query_set: *sysgpu.QuerySet) void { _ = query_set; @panic("unimplemented"); } pub inline fn querySetRelease(query_set: *sysgpu.QuerySet) void { _ = query_set; @panic("unimplemented"); } pub inline fn queueCopyTextureForBrowser(queue: *sysgpu.Queue, source: *const sysgpu.ImageCopyTexture, destination: *const sysgpu.ImageCopyTexture, copy_size: *const sysgpu.Extent3D, options: *const sysgpu.CopyTextureForBrowserOptions) void { _ = queue; _ = source; _ = destination; _ = copy_size; _ = options; @panic("unimplemented"); } pub inline fn queueOnSubmittedWorkDone(queue: *sysgpu.Queue, signal_value: u64, callback: sysgpu.Queue.WorkDoneCallback, userdata: ?*anyopaque) void { _ = queue; _ = signal_value; _ = callback; _ = userdata; @panic("unimplemented"); } pub inline fn queueSetLabel(queue: *sysgpu.Queue, label: [*:0]const u8) void { _ = queue; _ = label; @panic("unimplemented"); } pub inline fn queueSubmit(queue_raw: *sysgpu.Queue, command_count: usize, commands_raw: [*]const *const sysgpu.CommandBuffer) void { const queue: *impl.Queue = @ptrCast(@alignCast(queue_raw)); const commands: []const *impl.CommandBuffer = @ptrCast(commands_raw[0..command_count]); queue.submit(commands) catch @panic("api error"); } pub inline fn queueWriteBuffer(queue_raw: *sysgpu.Queue, buffer_raw: *sysgpu.Buffer, buffer_offset: u64, data: *const anyopaque, size: usize) void { const queue: *impl.Queue = @ptrCast(@alignCast(queue_raw)); const buffer: *impl.Buffer = @ptrCast(@alignCast(buffer_raw)); 
queue.writeBuffer(buffer, buffer_offset, @ptrCast(data), size) catch @panic("api error"); } pub inline fn queueWriteTexture(queue_raw: *sysgpu.Queue, destination: *const sysgpu.ImageCopyTexture, data: *const anyopaque, data_size: usize, data_layout: *const sysgpu.Texture.DataLayout, write_size: *const sysgpu.Extent3D) void { const queue: *impl.Queue = @ptrCast(@alignCast(queue_raw)); queue.writeTexture(destination, @ptrCast(data), data_size, data_layout, write_size) catch @panic("api error"); } pub inline fn queueReference(queue_raw: *sysgpu.Queue) void { const queue: *impl.Queue = @ptrCast(@alignCast(queue_raw)); queue.manager.reference(); } pub inline fn queueRelease(queue_raw: *sysgpu.Queue) void { const queue: *impl.Queue = @ptrCast(@alignCast(queue_raw)); queue.manager.release(); } pub inline fn renderBundleReference(render_bundle: *sysgpu.RenderBundle) void { _ = render_bundle; @panic("unimplemented"); } pub inline fn renderBundleRelease(render_bundle: *sysgpu.RenderBundle) void { _ = render_bundle; @panic("unimplemented"); } pub inline fn renderBundleSetLabel(render_bundle: *sysgpu.RenderBundle, name: [*:0]const u8) void { _ = name; _ = render_bundle; @panic("unimplemented"); } pub inline fn renderBundleEncoderDraw(render_bundle_encoder: *sysgpu.RenderBundleEncoder, vertex_count: u32, instance_count: u32, first_vertex: u32, first_instance: u32) void { _ = render_bundle_encoder; _ = vertex_count; _ = instance_count; _ = first_vertex; _ = first_instance; @panic("unimplemented"); } pub inline fn renderBundleEncoderDrawIndexed(render_bundle_encoder: *sysgpu.RenderBundleEncoder, index_count: u32, instance_count: u32, first_index: u32, base_vertex: i32, first_instance: u32) void { _ = render_bundle_encoder; _ = index_count; _ = instance_count; _ = first_index; _ = base_vertex; _ = first_instance; @panic("unimplemented"); } pub inline fn renderBundleEncoderDrawIndexedIndirect(render_bundle_encoder: *sysgpu.RenderBundleEncoder, indirect_buffer: *sysgpu.Buffer, indirect_offset: u64) void { _ = render_bundle_encoder; _ = indirect_buffer; _ = indirect_offset; @panic("unimplemented"); } pub inline fn renderBundleEncoderDrawIndirect(render_bundle_encoder: *sysgpu.RenderBundleEncoder, indirect_buffer: *sysgpu.Buffer, indirect_offset: u64) void { _ = render_bundle_encoder; _ = indirect_buffer; _ = indirect_offset; @panic("unimplemented"); } pub inline fn renderBundleEncoderFinish(render_bundle_encoder: *sysgpu.RenderBundleEncoder, descriptor: ?*const sysgpu.RenderBundle.Descriptor) *sysgpu.RenderBundle { _ = render_bundle_encoder; _ = descriptor; @panic("unimplemented"); } pub inline fn renderBundleEncoderInsertDebugMarker(render_bundle_encoder: *sysgpu.RenderBundleEncoder, marker_label: [*:0]const u8) void { _ = render_bundle_encoder; _ = marker_label; @panic("unimplemented"); } pub inline fn renderBundleEncoderPopDebugGroup(render_bundle_encoder: *sysgpu.RenderBundleEncoder) void { _ = render_bundle_encoder; @panic("unimplemented"); } pub inline fn renderBundleEncoderPushDebugGroup(render_bundle_encoder: *sysgpu.RenderBundleEncoder, group_label: [*:0]const u8) void { _ = render_bundle_encoder; _ = group_label; @panic("unimplemented"); } pub inline fn renderBundleEncoderSetBindGroup(render_bundle_encoder: *sysgpu.RenderBundleEncoder, group_index: u32, group: *sysgpu.BindGroup, dynamic_offset_count: usize, dynamic_offsets: ?[*]const u32) void { _ = render_bundle_encoder; _ = group_index; _ = group; _ = dynamic_offset_count; _ = dynamic_offsets; @panic("unimplemented"); } pub inline fn 
renderBundleEncoderSetIndexBuffer(render_bundle_encoder: *sysgpu.RenderBundleEncoder, buffer: *sysgpu.Buffer, format: sysgpu.IndexFormat, offset: u64, size: u64) void { _ = render_bundle_encoder; _ = buffer; _ = format; _ = offset; _ = size; @panic("unimplemented"); } pub inline fn renderBundleEncoderSetLabel(render_bundle_encoder: *sysgpu.RenderBundleEncoder, label: [*:0]const u8) void { _ = render_bundle_encoder; _ = label; @panic("unimplemented"); } pub inline fn renderBundleEncoderSetPipeline(render_bundle_encoder: *sysgpu.RenderBundleEncoder, pipeline: *sysgpu.RenderPipeline) void { _ = render_bundle_encoder; _ = pipeline; @panic("unimplemented"); } pub inline fn renderBundleEncoderSetVertexBuffer(render_bundle_encoder: *sysgpu.RenderBundleEncoder, slot: u32, buffer: *sysgpu.Buffer, offset: u64, size: u64) void { _ = render_bundle_encoder; _ = slot; _ = buffer; _ = offset; _ = size; @panic("unimplemented"); } pub inline fn renderBundleEncoderReference(render_bundle_encoder: *sysgpu.RenderBundleEncoder) void { _ = render_bundle_encoder; @panic("unimplemented"); } pub inline fn renderBundleEncoderRelease(render_bundle_encoder: *sysgpu.RenderBundleEncoder) void { _ = render_bundle_encoder; @panic("unimplemented"); } pub inline fn renderPassEncoderBeginOcclusionQuery(render_pass_encoder: *sysgpu.RenderPassEncoder, query_index: u32) void { _ = render_pass_encoder; _ = query_index; @panic("unimplemented"); } pub inline fn renderPassEncoderDraw(render_pass_encoder_raw: *sysgpu.RenderPassEncoder, vertex_count: u32, instance_count: u32, first_vertex: u32, first_instance: u32) void { const render_pass_encoder: *impl.RenderPassEncoder = @ptrCast(@alignCast(render_pass_encoder_raw)); render_pass_encoder.draw(vertex_count, instance_count, first_vertex, first_instance) catch @panic("api error"); } pub inline fn renderPassEncoderDrawIndexed(render_pass_encoder_raw: *sysgpu.RenderPassEncoder, index_count: u32, instance_count: u32, first_index: u32, base_vertex: i32, first_instance: u32) void { const render_pass_encoder: *impl.RenderPassEncoder = @ptrCast(@alignCast(render_pass_encoder_raw)); render_pass_encoder.drawIndexed(index_count, instance_count, first_index, base_vertex, first_instance) catch @panic("api error"); } pub inline fn renderPassEncoderDrawIndexedIndirect(render_pass_encoder: *sysgpu.RenderPassEncoder, indirect_buffer: *sysgpu.Buffer, indirect_offset: u64) void { _ = render_pass_encoder; _ = indirect_buffer; _ = indirect_offset; @panic("unimplemented"); } pub inline fn renderPassEncoderDrawIndirect(render_pass_encoder: *sysgpu.RenderPassEncoder, indirect_buffer: *sysgpu.Buffer, indirect_offset: u64) void { _ = render_pass_encoder; _ = indirect_buffer; _ = indirect_offset; @panic("unimplemented"); } pub inline fn renderPassEncoderEnd(render_pass_encoder_raw: *sysgpu.RenderPassEncoder) void { const render_pass_encoder: *impl.RenderPassEncoder = @ptrCast(@alignCast(render_pass_encoder_raw)); render_pass_encoder.end() catch @panic("api error"); } pub inline fn renderPassEncoderEndOcclusionQuery(render_pass_encoder: *sysgpu.RenderPassEncoder) void { _ = render_pass_encoder; @panic("unimplemented"); } pub inline fn renderPassEncoderExecuteBundles(render_pass_encoder: *sysgpu.RenderPassEncoder, bundles_count: usize, bundles: [*]const *const sysgpu.RenderBundle) void { _ = render_pass_encoder; _ = bundles_count; _ = bundles; @panic("unimplemented"); } pub inline fn renderPassEncoderInsertDebugMarker(render_pass_encoder: *sysgpu.RenderPassEncoder, marker_label: [*:0]const u8) void { _ = 
render_pass_encoder; _ = marker_label; @panic("unimplemented"); } pub inline fn renderPassEncoderPopDebugGroup(render_pass_encoder: *sysgpu.RenderPassEncoder) void { _ = render_pass_encoder; @panic("unimplemented"); } pub inline fn renderPassEncoderPushDebugGroup(render_pass_encoder: *sysgpu.RenderPassEncoder, group_label: [*:0]const u8) void { _ = render_pass_encoder; _ = group_label; @panic("unimplemented"); } pub inline fn renderPassEncoderSetBindGroup( render_pass_encoder_raw: *sysgpu.RenderPassEncoder, group_index: u32, group_raw: *sysgpu.BindGroup, dynamic_offset_count: usize, dynamic_offsets: ?[*]const u32, ) void { const render_pass_encoder: *impl.RenderPassEncoder = @ptrCast(@alignCast(render_pass_encoder_raw)); const group: *impl.BindGroup = @ptrCast(@alignCast(group_raw)); render_pass_encoder.setBindGroup(group_index, group, dynamic_offset_count, dynamic_offsets) catch @panic("api error"); } pub inline fn renderPassEncoderSetBlendConstant(render_pass_encoder: *sysgpu.RenderPassEncoder, color: *const sysgpu.Color) void { _ = render_pass_encoder; _ = color; @panic("unimplemented"); } pub inline fn renderPassEncoderSetIndexBuffer(render_pass_encoder_raw: *sysgpu.RenderPassEncoder, buffer_raw: *sysgpu.Buffer, format: sysgpu.IndexFormat, offset: u64, size: u64) void { const render_pass_encoder: *impl.RenderPassEncoder = @ptrCast(@alignCast(render_pass_encoder_raw)); const buffer: *impl.Buffer = @ptrCast(@alignCast(buffer_raw)); render_pass_encoder.setIndexBuffer(buffer, format, offset, size) catch @panic("api error"); } pub inline fn renderPassEncoderSetLabel(render_pass_encoder: *sysgpu.RenderPassEncoder, label: [*:0]const u8) void { _ = render_pass_encoder; _ = label; @panic("unimplemented"); } pub inline fn renderPassEncoderSetPipeline(render_pass_encoder_raw: *sysgpu.RenderPassEncoder, pipeline_raw: *sysgpu.RenderPipeline) void { const render_pass_encoder: *impl.RenderPassEncoder = @ptrCast(@alignCast(render_pass_encoder_raw)); const pipeline: *impl.RenderPipeline = @ptrCast(@alignCast(pipeline_raw)); render_pass_encoder.setPipeline(pipeline) catch @panic("api error"); } pub inline fn renderPassEncoderSetScissorRect(render_pass_encoder_raw: *sysgpu.RenderPassEncoder, x: u32, y: u32, width: u32, height: u32) void { const render_pass_encoder: *impl.RenderPassEncoder = @ptrCast(@alignCast(render_pass_encoder_raw)); render_pass_encoder.setScissorRect(x, y, width, height) catch @panic("api error"); } pub inline fn renderPassEncoderSetStencilReference(render_pass_encoder: *sysgpu.RenderPassEncoder, reference: u32) void { _ = render_pass_encoder; _ = reference; @panic("unimplemented"); } pub inline fn renderPassEncoderSetVertexBuffer(render_pass_encoder_raw: *sysgpu.RenderPassEncoder, slot: u32, buffer_raw: *sysgpu.Buffer, offset: u64, size: u64) void { const render_pass_encoder: *impl.RenderPassEncoder = @ptrCast(@alignCast(render_pass_encoder_raw)); const buffer: *impl.Buffer = @ptrCast(@alignCast(buffer_raw)); render_pass_encoder.setVertexBuffer(slot, buffer, offset, size) catch @panic("api error"); } pub inline fn renderPassEncoderSetViewport(render_pass_encoder_raw: *sysgpu.RenderPassEncoder, x: f32, y: f32, width: f32, height: f32, min_depth: f32, max_depth: f32) void { const render_pass_encoder: *impl.RenderPassEncoder = @ptrCast(@alignCast(render_pass_encoder_raw)); render_pass_encoder.setViewport(x, y, width, height, min_depth, max_depth) catch @panic("api error"); } pub inline fn renderPassEncoderWriteTimestamp(render_pass_encoder: *sysgpu.RenderPassEncoder, query_set: 
*sysgpu.QuerySet, query_index: u32) void { _ = render_pass_encoder; _ = query_set; _ = query_index; @panic("unimplemented"); } pub inline fn renderPassEncoderReference(render_pass_encoder_raw: *sysgpu.RenderPassEncoder) void { const render_pass_encoder: *impl.RenderPassEncoder = @ptrCast(@alignCast(render_pass_encoder_raw)); render_pass_encoder.manager.reference(); } pub inline fn renderPassEncoderRelease(render_pass_encoder_raw: *sysgpu.RenderPassEncoder) void { const render_pass_encoder: *impl.RenderPassEncoder = @ptrCast(@alignCast(render_pass_encoder_raw)); render_pass_encoder.manager.release(); } pub inline fn renderPipelineGetBindGroupLayout(render_pipeline_raw: *sysgpu.RenderPipeline, group_index: u32) *sysgpu.BindGroupLayout { const render_pipeline: *impl.RenderPipeline = @ptrCast(@alignCast(render_pipeline_raw)); const layout: *impl.BindGroupLayout = render_pipeline.getBindGroupLayout(group_index); layout.manager.reference(); return @ptrCast(layout); } pub inline fn renderPipelineSetLabel(render_pipeline: *sysgpu.RenderPipeline, label: [*:0]const u8) void { _ = render_pipeline; _ = label; @panic("unimplemented"); } pub inline fn renderPipelineReference(render_pipeline_raw: *sysgpu.RenderPipeline) void { const render_pipeline: *impl.RenderPipeline = @ptrCast(@alignCast(render_pipeline_raw)); render_pipeline.manager.reference(); } pub inline fn renderPipelineRelease(render_pipeline_raw: *sysgpu.RenderPipeline) void { const render_pipeline: *impl.RenderPipeline = @ptrCast(@alignCast(render_pipeline_raw)); render_pipeline.manager.release(); } pub inline fn samplerSetLabel(sampler: *sysgpu.Sampler, label: [*:0]const u8) void { _ = sampler; _ = label; @panic("unimplemented"); } pub inline fn samplerReference(sampler_raw: *sysgpu.Sampler) void { const sampler: *impl.Sampler = @ptrCast(@alignCast(sampler_raw)); sampler.manager.reference(); } pub inline fn samplerRelease(sampler_raw: *sysgpu.Sampler) void { const sampler: *impl.Sampler = @ptrCast(@alignCast(sampler_raw)); sampler.manager.release(); } pub inline fn shaderModuleGetCompilationInfo(shader_module: *sysgpu.ShaderModule, callback: sysgpu.CompilationInfoCallback, userdata: ?*anyopaque) void { _ = shader_module; _ = callback; _ = userdata; @panic("unimplemented"); } pub inline fn shaderModuleSetLabel(shader_module: *sysgpu.ShaderModule, label: [*:0]const u8) void { _ = shader_module; _ = label; @panic("unimplemented"); } pub inline fn shaderModuleReference(shader_module_raw: *sysgpu.ShaderModule) void { const shader_module: *impl.ShaderModule = @ptrCast(@alignCast(shader_module_raw)); shader_module.manager.reference(); } pub inline fn shaderModuleRelease(shader_module_raw: *sysgpu.ShaderModule) void { const shader_module: *impl.ShaderModule = @ptrCast(@alignCast(shader_module_raw)); shader_module.manager.release(); } pub inline fn sharedFenceExportInfo(shared_fence: *sysgpu.SharedFence, info: *sysgpu.SharedFence.ExportInfo) void { _ = shared_fence; _ = info; @panic("unimplemented"); } pub inline fn sharedFenceReference(shared_fence: *sysgpu.SharedFence) void { _ = shared_fence; @panic("unimplemented"); } pub inline fn sharedFenceRelease(shared_fence: *sysgpu.SharedFence) void { _ = shared_fence; @panic("unimplemented"); } pub inline fn sharedTextureMemoryBeginAccess(shared_texture_memory: *sysgpu.SharedTextureMemory, texture: *sysgpu.Texture, descriptor: *const sysgpu.SharedTextureMemory.BeginAccessDescriptor) void { _ = shared_texture_memory; _ = texture; _ = descriptor; @panic("unimplemented"); } pub inline fn 
sharedTextureMemoryCreateTexture(shared_texture_memory: *sysgpu.SharedTextureMemory, descriptor: *const sysgpu.Texture.Descriptor) *sysgpu.Texture { _ = shared_texture_memory; _ = descriptor; @panic("unimplemented"); } pub inline fn sharedTextureMemoryEndAccess(shared_texture_memory: *sysgpu.SharedTextureMemory, texture: *sysgpu.Texture, descriptor: *sysgpu.SharedTextureMemory.EndAccessState) void { _ = shared_texture_memory; _ = texture; _ = descriptor; @panic("unimplemented"); } pub inline fn sharedTextureMemoryEndAccessStateFreeMembers(value: sysgpu.SharedTextureMemory.EndAccessState) void { _ = value; @panic("unimplemented"); } pub inline fn sharedTextureMemoryGetProperties(shared_texture_memory: *sysgpu.SharedTextureMemory, properties: *sysgpu.SharedTextureMemory.Properties) void { _ = shared_texture_memory; _ = properties; @panic("unimplemented"); } pub inline fn sharedTextureMemorySetLabel(shared_texture_memory: *sysgpu.SharedTextureMemory, label: [*:0]const u8) void { _ = shared_texture_memory; _ = label; @panic("unimplemented"); } pub inline fn sharedTextureMemoryReference(shared_texture_memory: *sysgpu.SharedTextureMemory) void { _ = shared_texture_memory; @panic("unimplemented"); } pub inline fn sharedTextureMemoryRelease(shared_texture_memory: *sysgpu.SharedTextureMemory) void { _ = shared_texture_memory; @panic("unimplemented"); } pub inline fn surfaceReference(surface_raw: *sysgpu.Surface) void { const surface: *impl.Surface = @ptrCast(@alignCast(surface_raw)); surface.manager.reference(); } pub inline fn surfaceRelease(surface_raw: *sysgpu.Surface) void { const surface: *impl.Surface = @ptrCast(@alignCast(surface_raw)); surface.manager.release(); } pub inline fn swapChainConfigure(swap_chain: *sysgpu.SwapChain, format: sysgpu.Texture.Format, allowed_usage: sysgpu.Texture.UsageFlags, width: u32, height: u32) void { _ = swap_chain; _ = format; _ = allowed_usage; _ = width; _ = height; @panic("unimplemented"); } pub inline fn swapChainGetCurrentTexture(swap_chain: *sysgpu.SwapChain) ?*sysgpu.Texture { _ = swap_chain; @panic("unimplemented"); } pub inline fn swapChainGetCurrentTextureView(swap_chain_raw: *sysgpu.SwapChain) ?*sysgpu.TextureView { const swap_chain: *impl.SwapChain = @ptrCast(@alignCast(swap_chain_raw)); const texture_view = swap_chain.getCurrentTextureView() catch @panic("api error"); return @ptrCast(texture_view); } pub inline fn swapChainPresent(swap_chain_raw: *sysgpu.SwapChain) void { const swap_chain: *impl.SwapChain = @ptrCast(@alignCast(swap_chain_raw)); swap_chain.present() catch @panic("api error"); } pub inline fn swapChainReference(swap_chain_raw: *sysgpu.SwapChain) void { const swap_chain: *impl.SwapChain = @ptrCast(@alignCast(swap_chain_raw)); swap_chain.manager.reference(); } pub inline fn swapChainRelease(swap_chain_raw: *sysgpu.SwapChain) void { const swap_chain: *impl.SwapChain = @ptrCast(@alignCast(swap_chain_raw)); swap_chain.manager.release(); } pub inline fn textureCreateView(texture_raw: *sysgpu.Texture, descriptor: ?*const sysgpu.TextureView.Descriptor) *sysgpu.TextureView { const texture: *impl.Texture = @ptrCast(@alignCast(texture_raw)); const texture_view = texture.createView(descriptor orelse &sysgpu.TextureView.Descriptor{}) catch @panic("api error"); return @ptrCast(texture_view); } pub inline fn textureDestroy(texture: *sysgpu.Texture) void { _ = texture; @panic("unimplemented"); } pub inline fn textureGetDepthOrArrayLayers(texture: *sysgpu.Texture) u32 { _ = texture; @panic("unimplemented"); } pub inline fn 
textureGetDimension(texture: *sysgpu.Texture) sysgpu.Texture.Dimension {
    _ = texture;
    @panic("unimplemented");
}

pub inline fn textureGetFormat(texture: *sysgpu.Texture) sysgpu.Texture.Format {
    _ = texture;
    @panic("unimplemented");
}

pub inline fn textureGetHeight(texture: *sysgpu.Texture) u32 {
    _ = texture;
    @panic("unimplemented");
}

pub inline fn textureGetMipLevelCount(texture: *sysgpu.Texture) u32 {
    _ = texture;
    @panic("unimplemented");
}

pub inline fn textureGetSampleCount(texture: *sysgpu.Texture) u32 {
    _ = texture;
    @panic("unimplemented");
}

pub inline fn textureGetUsage(texture: *sysgpu.Texture) sysgpu.Texture.UsageFlags {
    _ = texture;
    @panic("unimplemented");
}

pub inline fn textureGetWidth(texture: *sysgpu.Texture) u32 {
    _ = texture;
    @panic("unimplemented");
}

pub inline fn textureSetLabel(texture: *sysgpu.Texture, label: [*:0]const u8) void {
    _ = texture;
    _ = label;
    @panic("unimplemented");
}

pub inline fn textureReference(texture_raw: *sysgpu.Texture) void {
    const texture: *impl.Texture = @ptrCast(@alignCast(texture_raw));
    texture.manager.reference();
}

pub inline fn textureRelease(texture_raw: *sysgpu.Texture) void {
    const texture: *impl.Texture = @ptrCast(@alignCast(texture_raw));
    texture.manager.release();
}

pub inline fn textureViewSetLabel(texture_view: *sysgpu.TextureView, label: [*:0]const u8) void {
    _ = texture_view;
    _ = label;
    @panic("unimplemented");
}

pub inline fn textureViewReference(texture_view_raw: *sysgpu.TextureView) void {
    const texture_view: *impl.TextureView = @ptrCast(@alignCast(texture_view_raw));
    texture_view.manager.reference();
}

pub inline fn textureViewRelease(texture_view_raw: *sysgpu.TextureView) void {
    const texture_view: *impl.TextureView = @ptrCast(@alignCast(texture_view_raw));
    texture_view.manager.release();
}
});

test "refAllDeclsRecursive" {
    // std.testing.refAllDeclsRecursive(@This());
    _ = @import("shader/test.zig");

    // // Force inline functions to be analyzed for semantic errors
    // // see e.g. https://github.com/ziglang/zig/issues/17390
    // _ = &struct {
    //     fn f() void {
    //         foo1();
    //         foo2();
    //         foo3();
    //     }
    // }.f;
}

test "export" {
    _ = sysgpu.Export(Impl);
}
0
repos/mach-sysgpu
repos/mach-sysgpu/src/vulkan.zig
const std = @import("std");
const builtin = @import("builtin");
const vk = @import("vulkan");
const sysgpu = @import("sysgpu/main.zig");
const limits = @import("limits.zig");
const shader = @import("shader.zig");
const utils = @import("utils.zig");
const conv = @import("vulkan/conv.zig");
const proc = @import("vulkan/proc.zig");

const log = std.log.scoped(.vulkan);
const api_version = vk.makeApiVersion(0, 1, 1, 0);
const upload_page_size = 64 * 1024 * 1024; // TODO - split writes and/or support large uploads
const use_semaphore_wait = false;

var allocator: std.mem.Allocator = undefined;
var libvulkan: ?std.DynLib = null;
var vkb: proc.BaseFunctions = undefined;
var vki: proc.InstanceFunctions = undefined;
var vkd: proc.DeviceFunctions = undefined;

pub const InitOptions = struct {
    baseLoader: ?proc.BaseLoader = null,
};

pub fn init(alloc: std.mem.Allocator, options: InitOptions) !void {
    allocator = alloc;
    if (options.baseLoader) |baseLoader| {
        vkb = try proc.loadBase(baseLoader);
    } else {
        libvulkan = try std.DynLib.openZ(switch (builtin.target.os.tag) {
            .windows => "vulkan-1.dll",
            .linux => "libvulkan.so.1",
            .macos => "libvulkan.1.dylib",
            else => @compileError("Unknown OS!"),
        });
        vkb = try proc.loadBase(libVulkanBaseLoader);
    }
}

pub fn libVulkanBaseLoader(_: vk.Instance, name_ptr: [*:0]const u8) vk.PfnVoidFunction {
    const name = std.mem.span(name_ptr);
    return libvulkan.?.lookup(vk.PfnVoidFunction, name) orelse null;
}

const MapCallback = struct {
    buffer: *Buffer,
    callback: sysgpu.Buffer.MapCallback,
    userdata: ?*anyopaque,
};

pub const Instance = struct {
    manager: utils.Manager(Instance) = .{},
    vk_instance: vk.Instance,

    pub fn init(desc: *const sysgpu.Instance.Descriptor) !*Instance {
        _ = desc;

        // Query layers
        var count: u32 = 0;
        _ = try vkb.enumerateInstanceLayerProperties(&count, null);

        const available_layers = try allocator.alloc(vk.LayerProperties, count);
        defer allocator.free(available_layers);
        _ = try vkb.enumerateInstanceLayerProperties(&count, available_layers.ptr);

        var layers = std.BoundedArray([*:0]const u8, instance_layers.len){};
        for (instance_layers) |optional| {
            for (available_layers) |available| {
                if (std.mem.eql(
                    u8,
                    std.mem.sliceTo(optional, 0),
                    std.mem.sliceTo(&available.layer_name, 0),
                )) {
                    layers.appendAssumeCapacity(optional);
                    break;
                }
            }
        }

        // Query extensions
        _ = try vkb.enumerateInstanceExtensionProperties(null, &count, null);

        const available_extensions = try allocator.alloc(vk.ExtensionProperties, count);
        defer allocator.free(available_extensions);
        _ = try vkb.enumerateInstanceExtensionProperties(null, &count, available_extensions.ptr);

        var extensions = std.BoundedArray([*:0]const u8, instance_extensions.len){};
        for (instance_extensions) |required| {
            for (available_extensions) |available| {
                if (std.mem.eql(
                    u8,
                    std.mem.sliceTo(required, 0),
                    std.mem.sliceTo(&available.extension_name, 0),
                )) {
                    extensions.appendAssumeCapacity(required);
                    break;
                }
            } else {
                log.warn("unable to find required instance extension: {s}", .{required});
            }
        }

        // Create instance
        const application_info = vk.ApplicationInfo{
            .p_engine_name = "Banana",
            .application_version = 0,
            .engine_version = vk.makeApiVersion(0, 0, 1, 0), // TODO: get this from build.zig.zon
            .api_version = api_version,
        };
        const instance_info = vk.InstanceCreateInfo{
            .p_application_info = &application_info,
            .enabled_layer_count = layers.len,
            .pp_enabled_layer_names = layers.slice().ptr,
            .enabled_extension_count = extensions.len,
            .pp_enabled_extension_names = extensions.slice().ptr,
        };

        const vk_instance = try vkb.createInstance(&instance_info, null);
        // Load instance functions
        vki = try proc.loadInstance(vk_instance, vkb.dispatch.vkGetInstanceProcAddr);

        const instance = try allocator.create(Instance);
        instance.* = .{ .vk_instance = vk_instance };
        return instance;
    }

    const instance_layers = if (builtin.mode == .Debug)
        &[_][*:0]const u8{"VK_LAYER_KHRONOS_validation"}
    else
        &.{};

    const instance_extensions: []const [*:0]const u8 = switch (builtin.target.os.tag) {
        .linux => &.{
            vk.extension_info.khr_surface.name,
            vk.extension_info.khr_xlib_surface.name,
            vk.extension_info.khr_xcb_surface.name,
            // TODO: renderdoc will not work with this extension
            // vk.extension_info.khr_wayland_surface.name,
        },
        .windows => &.{
            vk.extension_info.khr_surface.name,
            vk.extension_info.khr_win_32_surface.name,
        },
        .macos, .ios => &.{
            vk.extension_info.khr_surface.name,
            vk.extension_info.ext_metal_surface.name,
        },
        else => |tag| if (builtin.target.abi == .android)
            &.{
                vk.extension_info.khr_surface.name,
                vk.extension_info.khr_android_surface.name,
            }
        else
            @compileError(std.fmt.comptimePrint("unsupported platform ({s})", .{@tagName(tag)})),
    };

    pub fn deinit(instance: *Instance) void {
        const vk_instance = instance.vk_instance;
        vki.destroyInstance(vk_instance, null);
        allocator.destroy(instance);
        if (libvulkan) |*lib| lib.close();
    }

    pub fn requestAdapter(
        instance: *Instance,
        options: ?*const sysgpu.RequestAdapterOptions,
        callback: sysgpu.RequestAdapterCallback,
        userdata: ?*anyopaque,
    ) !*Adapter {
        return Adapter.init(instance, options orelse &sysgpu.RequestAdapterOptions{}) catch |err| {
            callback(.err, undefined, @errorName(err), userdata);
            @panic("unimplemented"); // TODO - return dummy adapter
        };
    }

    pub fn createSurface(instance: *Instance, desc: *const sysgpu.Surface.Descriptor) !*Surface {
        return Surface.init(instance, desc);
    }
};

pub const Adapter = struct {
    manager: utils.Manager(Adapter) = .{},
    instance: *Instance,
    physical_device: vk.PhysicalDevice,
    props: vk.PhysicalDeviceProperties,
    queue_family: u32,
    extensions: []const vk.ExtensionProperties,
    driver_desc: [:0]const u8,
    vendor_id: VendorID,

    pub fn init(instance: *Instance, options: *const sysgpu.RequestAdapterOptions) !*Adapter {
        const vk_instance = instance.vk_instance;

        var count: u32 = 0;
        _ = try vki.enumeratePhysicalDevices(vk_instance, &count, null);

        var physical_devices = try allocator.alloc(vk.PhysicalDevice, count);
        defer allocator.free(physical_devices);
        _ = try vki.enumeratePhysicalDevices(vk_instance, &count, physical_devices.ptr);

        // Find best device based on power preference
        var physical_device_info: ?struct {
            physical_device: vk.PhysicalDevice,
            props: vk.PhysicalDeviceProperties,
            queue_family: u32,
            score: u32,
        } = null;
        for (physical_devices[0..count]) |physical_device| {
            const props = vki.getPhysicalDeviceProperties(physical_device);
            const features = vki.getPhysicalDeviceFeatures(physical_device);
            const queue_family = try findQueueFamily(physical_device) orelse continue;

            if (isDeviceSuitable(props, features)) {
                const score = rateDevice(props, features, options.power_preference);
                if (score == 0) continue;

                if (physical_device_info == null or score > physical_device_info.?.score) {
                    physical_device_info = .{
                        .physical_device = physical_device,
                        .props = props,
                        .queue_family = queue_family,
                        .score = score,
                    };
                }
            }
        }

        if (physical_device_info) |info| {
            _ = try vki.enumerateDeviceExtensionProperties(info.physical_device, null, &count, null);

            const extensions = try allocator.alloc(vk.ExtensionProperties, count);
            errdefer allocator.free(extensions);
            _ = try vki.enumerateDeviceExtensionProperties(info.physical_device, null, &count, extensions.ptr);

            const driver_desc = try std.fmt.allocPrintZ(
                allocator,
                "Vulkan driver version {}.{}.{}",
                .{
                    vk.apiVersionMajor(info.props.driver_version),
                    vk.apiVersionMinor(info.props.driver_version),
                    vk.apiVersionPatch(info.props.driver_version),
                },
            );

            const adapter = try allocator.create(Adapter);
            adapter.* = .{
                .instance = instance,
                .physical_device = info.physical_device,
                .props = info.props,
                .queue_family = info.queue_family,
                .extensions = extensions,
                .driver_desc = driver_desc,
                .vendor_id = @enumFromInt(info.props.vendor_id),
            };
            return adapter;
        }

        return error.NoAdapterFound;
    }

    pub fn deinit(adapter: *Adapter) void {
        allocator.free(adapter.extensions);
        allocator.free(adapter.driver_desc);
        allocator.destroy(adapter);
    }

    pub fn createDevice(adapter: *Adapter, desc: ?*const sysgpu.Device.Descriptor) !*Device {
        return Device.init(adapter, desc);
    }

    pub fn getProperties(adapter: *Adapter) sysgpu.Adapter.Properties {
        return .{
            .vendor_id = @intFromEnum(adapter.vendor_id),
            .vendor_name = adapter.vendor_id.name(),
            .architecture = "", // TODO
            .device_id = adapter.props.device_id,
            .name = @ptrCast(&adapter.props.device_name),
            .driver_description = adapter.driver_desc,
            .adapter_type = conv.sysgpuAdapterType(adapter.props.device_type),
            .backend_type = .vulkan,
            .compatibility_mode = .false, // TODO
        };
    }

    pub fn hasExtension(adapter: *Adapter, name: []const u8) bool {
        for (adapter.extensions) |ext| {
            if (std.mem.eql(u8, name, std.mem.sliceTo(&ext.extension_name, 0))) {
                return true;
            }
        }
        return false;
    }

    fn isDeviceSuitable(props: vk.PhysicalDeviceProperties, features: vk.PhysicalDeviceFeatures) bool {
        return props.api_version >= api_version and
            // WebGPU features
            features.depth_bias_clamp == vk.TRUE and
            features.fragment_stores_and_atomics == vk.TRUE and
            features.full_draw_index_uint_32 == vk.TRUE and
            features.image_cube_array == vk.TRUE and
            features.independent_blend == vk.TRUE and
            features.sample_rate_shading == vk.TRUE and
            // At least one of the following texture compression forms
            (features.texture_compression_bc == vk.TRUE or
            features.texture_compression_etc2 == vk.TRUE or
            features.texture_compression_astc_ldr == vk.TRUE);
    }

    fn rateDevice(
        props: vk.PhysicalDeviceProperties,
        features: vk.PhysicalDeviceFeatures,
        power_preference: sysgpu.PowerPreference,
    ) u32 {
        _ = features;

        var score: u32 = 0;
        switch (props.device_type) {
            .integrated_gpu => if (power_preference == .low_power) {
                score += 1000;
            },
            .discrete_gpu => if (power_preference == .high_performance) {
                score += 1000;
            },
            else => {},
        }

        score += props.limits.max_image_dimension_2d;
        return score;
    }

    fn findQueueFamily(device: vk.PhysicalDevice) !?u32 {
        var count: u32 = 0;
        _ = vki.getPhysicalDeviceQueueFamilyProperties(device, &count, null);

        const queue_families = try allocator.alloc(vk.QueueFamilyProperties, count);
        defer allocator.free(queue_families);
        _ = vki.getPhysicalDeviceQueueFamilyProperties(device, &count, queue_families.ptr);

        for (queue_families, 0..) |family, i| {
            if (family.queue_flags.graphics_bit and family.queue_flags.compute_bit) {
                return @intCast(i);
            }
        }
        return null;
    }

    const VendorID = enum(u32) {
        amd = 0x1002,
        apple = 0x106b,
        arm = 0x13B5,
        google = 0x1AE0,
        img_tec = 0x1010,
        intel = 0x8086,
        mesa = 0x10005,
        microsoft = 0x1414,
        nvidia = 0x10DE,
        qualcomm = 0x5143,
        samsung = 0x144d,
        _,

        pub fn name(vendor_id: VendorID) [:0]const u8 {
            return switch (vendor_id) {
                .amd => "AMD",
                .apple => "Apple",
                .arm => "ARM",
                .google => "Google",
                .img_tec => "Img Tec",
                .intel => "Intel",
                .mesa => "Mesa",
                .microsoft => "Microsoft",
                .nvidia => "Nvidia",
                .qualcomm => "Qualcomm",
                .samsung => "Samsung",
                _ => "Unknown",
            };
        }
    };
};

pub const Surface = struct {
    manager: utils.Manager(Surface) = .{},
    instance: *Instance,
    vk_surface: vk.SurfaceKHR,

    pub fn init(instance: *Instance, desc: *const sysgpu.Surface.Descriptor) !*Surface {
        const vk_instance = instance.vk_instance;

        const vk_surface = switch (builtin.target.os.tag) {
            .linux => blk: {
                if (utils.findChained(sysgpu.Surface.DescriptorFromXlibWindow, desc.next_in_chain.generic)) |x_desc| {
                    break :blk try vki.createXlibSurfaceKHR(
                        vk_instance,
                        &vk.XlibSurfaceCreateInfoKHR{
                            .dpy = @ptrCast(x_desc.display),
                            .window = x_desc.window,
                        },
                        null,
                    );
                } else if (utils.findChained(sysgpu.Surface.DescriptorFromWaylandSurface, desc.next_in_chain.generic)) |wayland_desc| {
                    _ = wayland_desc;
                    @panic("unimplemented");
                    // TODO: renderdoc will not work with wayland
                    // break :blk try vki.createWaylandSurfaceKHR(
                    //     vk_instance,
                    //     &vk.WaylandSurfaceCreateInfoKHR{
                    //         .display = @ptrCast(wayland_desc.display),
                    //         .surface = @ptrCast(wayland_desc.surface),
                    //     },
                    //     null,
                    // );
                }

                return error.InvalidDescriptor;
            },
            .windows => blk: {
                if (utils.findChained(sysgpu.Surface.DescriptorFromWindowsHWND, desc.next_in_chain.generic)) |win_desc| {
                    break :blk try vki.createWin32SurfaceKHR(
                        vk_instance,
                        &vk.Win32SurfaceCreateInfoKHR{
                            .hinstance = @ptrCast(win_desc.hinstance),
                            .hwnd = @ptrCast(win_desc.hwnd),
                        },
                        null,
                    );
                }

                return error.InvalidDescriptor;
            },
            else => @compileError("unsupported platform"),
        };

        const surface = try allocator.create(Surface);
        surface.* = .{
            .instance = instance,
            .vk_surface = vk_surface,
        };
        return surface;
    }

    pub fn deinit(surface: *Surface) void {
        const vk_instance = surface.instance.vk_instance;
        vki.destroySurfaceKHR(vk_instance, surface.vk_surface, null);
        allocator.destroy(surface);
    }
};

pub const Device = struct {
    manager: utils.Manager(Device) = .{},
    adapter: *Adapter,
    vk_device: vk.Device,
    render_passes: std.AutoHashMapUnmanaged(RenderPassKey, vk.RenderPass) = .{},
    cmd_pool: vk.CommandPool,
    memory_allocator: MemoryAllocator,
    queue: ?Queue = null,
    streaming_manager: StreamingManager = undefined,
    submit_objects: std.ArrayListUnmanaged(SubmitObject) = .{},
    map_callbacks: std.ArrayListUnmanaged(MapCallback) = .{},
    /// Supported Depth-Stencil formats
    supported_ds_formats: std.AutoHashMapUnmanaged(vk.Format, void),

    lost_cb: ?sysgpu.Device.LostCallback = null,
    lost_cb_userdata: ?*anyopaque = null,
    log_cb: ?sysgpu.LoggingCallback = null,
    log_cb_userdata: ?*anyopaque = null,
    err_cb: ?sysgpu.ErrorCallback = null,
    err_cb_userdata: ?*anyopaque = null,

    pub fn init(adapter: *Adapter, descriptor: ?*const sysgpu.Device.Descriptor) !*Device {
        const queue_infos = &[_]vk.DeviceQueueCreateInfo{.{
            .queue_family_index = adapter.queue_family,
            .queue_count = 1,
            .p_queue_priorities = &[_]f32{1.0},
        }};

        var features = vk.PhysicalDeviceFeatures2{ .features = .{} };
        // NOTE: any struct chained into features.p_next must stay alive until
        // vkCreateDevice reads the chain below, so it is declared at function
        // scope here; a switch-prong local would leave a dangling pointer.
        var shader_f16_feature = vk.PhysicalDeviceShaderFloat16Int8FeaturesKHR{
            .s_type = .physical_device_shader_float16_int8_features_khr,
            .shader_float_16 = vk.FALSE,
        };
        if (descriptor) |desc| {
            if (desc.required_features) |required_features| {
                for (required_features[0..desc.required_features_count]) |req_feature| {
                    switch (req_feature) {
                        .undefined => break,
                        .depth_clip_control => features.features.depth_clamp = vk.TRUE,
                        .pipeline_statistics_query => features.features.pipeline_statistics_query = vk.TRUE,
                        .texture_compression_bc => features.features.texture_compression_bc = vk.TRUE,
                        .texture_compression_etc2 => features.features.texture_compression_etc2 = vk.TRUE,
                        .texture_compression_astc => features.features.texture_compression_astc_ldr = vk.TRUE,
                        .indirect_first_instance => features.features.draw_indirect_first_instance = vk.TRUE,
                        .shader_f16 => {
                            shader_f16_feature.shader_float_16 = vk.TRUE;
                            features.p_next = @ptrCast(&shader_f16_feature);
                        },
                        else => log.warn("unimplemented feature: {s}", .{@tagName(req_feature)}),
                    }
                }
            }
        }

        // Query layers
        var count: u32 = 0;
        _ = try vki.enumerateDeviceLayerProperties(adapter.physical_device, &count, null);

        const available_layers = try allocator.alloc(vk.LayerProperties, count);
        defer allocator.free(available_layers);
        _ = try vki.enumerateDeviceLayerProperties(adapter.physical_device, &count, available_layers.ptr);

        var layers = std.BoundedArray([*:0]const u8, device_layers.len){};
        for (device_layers) |optional| {
            for (available_layers) |available| {
                if (std.mem.eql(
                    u8,
                    std.mem.sliceTo(optional, 0),
                    std.mem.sliceTo(&available.layer_name, 0),
                )) {
                    layers.appendAssumeCapacity(optional);
                    break;
                }
            }
        }

        // Query extensions
        _ = try vki.enumerateDeviceExtensionProperties(adapter.physical_device, null, &count, null);

        const available_extensions = try allocator.alloc(vk.ExtensionProperties, count);
        defer allocator.free(available_extensions);
        _ = try vki.enumerateDeviceExtensionProperties(adapter.physical_device, null, &count, available_extensions.ptr);

        var extensions = std.BoundedArray([*:0]const u8, device_extensions.len){};
        for (device_extensions) |required| {
            for (available_extensions) |available| {
                if (std.mem.eql(
                    u8,
                    std.mem.sliceTo(required, 0),
                    std.mem.sliceTo(&available.extension_name, 0),
                )) {
                    extensions.appendAssumeCapacity(required);
                    break;
                }
            } else {
                log.warn("unable to find required device extension: {s}", .{required});
            }
        }

        var create_info = vk.DeviceCreateInfo{
            .queue_create_info_count = @intCast(queue_infos.len),
            .p_queue_create_infos = queue_infos.ptr,
            .enabled_layer_count = @intCast(layers.len),
            .pp_enabled_layer_names = layers.slice().ptr,
            .enabled_extension_count = @intCast(extensions.len),
            .pp_enabled_extension_names = extensions.slice().ptr,
        };
        if (adapter.hasExtension("GetPhysicalDeviceProperties2")) {
            create_info.p_next = &features;
        } else {
            create_info.p_enabled_features = &features.features;
        }

        const vk_device = try vki.createDevice(adapter.physical_device, &create_info, null);

        vkd = try proc.loadDevice(vk_device, vki.dispatch.vkGetDeviceProcAddr);

        var supported_ds_formats = std.AutoHashMapUnmanaged(vk.Format, void){};
        for ([_]vk.Format{ .d24_unorm_s8_uint, .s8_uint }) |format| {
            const properties = vki.getPhysicalDeviceFormatProperties(adapter.physical_device, format);
            if (properties.optimal_tiling_features.depth_stencil_attachment_bit) {
                try supported_ds_formats.put(allocator, format, {});
            }
        }

        const cmd_pool = try vkd.createCommandPool(vk_device, &.{
            .queue_family_index = adapter.queue_family,
            .flags = .{ .reset_command_buffer_bit = true },
        }, null);

        const memory_allocator = MemoryAllocator.init(adapter.physical_device);

        var device = try allocator.create(Device);
        device.* = .{
            .adapter =
adapter, .vk_device = vk_device, .cmd_pool = cmd_pool, .memory_allocator = memory_allocator, .supported_ds_formats = supported_ds_formats, }; device.streaming_manager = try StreamingManager.init(device); errdefer device.streaming_manager.deinit(); return device; } pub fn deinit(device: *Device) void { const vk_device = device.vk_device; if (device.lost_cb) |lost_cb| { lost_cb(.destroyed, "Device was destroyed.", device.lost_cb_userdata); } device.waitAll() catch {}; device.processQueuedOperations(); device.map_callbacks.deinit(allocator); device.submit_objects.deinit(allocator); device.streaming_manager.deinit(); var rp_iter = device.render_passes.valueIterator(); while (rp_iter.next()) |render_pass| { vkd.destroyRenderPass(vk_device, render_pass.*, null); } device.render_passes.deinit(allocator); device.supported_ds_formats.deinit(allocator); vkd.destroyCommandPool(vk_device, device.cmd_pool, null); if (device.queue) |*queue| queue.manager.release(); vkd.destroyDevice(vk_device, null); allocator.destroy(device); } fn waitAll(device: *Device) !void { for (device.submit_objects.items) |*submit_object| try submit_object.wait(); } pub fn createBindGroup(device: *Device, desc: *const sysgpu.BindGroup.Descriptor) !*BindGroup { return BindGroup.init(device, desc); } pub fn createBindGroupLayout(device: *Device, desc: *const sysgpu.BindGroupLayout.Descriptor) !*BindGroupLayout { return BindGroupLayout.init(device, desc); } pub fn createBuffer(device: *Device, desc: *const sysgpu.Buffer.Descriptor) !*Buffer { return Buffer.init(device, desc); } pub fn createCommandEncoder(device: *Device, desc: *const sysgpu.CommandEncoder.Descriptor) !*CommandEncoder { return CommandEncoder.init(device, desc); } pub fn createComputePipeline(device: *Device, desc: *const sysgpu.ComputePipeline.Descriptor) !*ComputePipeline { return ComputePipeline.init(device, desc); } pub fn createPipelineLayout(device: *Device, desc: *const sysgpu.PipelineLayout.Descriptor) !*PipelineLayout { return PipelineLayout.init(device, desc); } pub fn createRenderPipeline(device: *Device, desc: *const sysgpu.RenderPipeline.Descriptor) !*RenderPipeline { return RenderPipeline.init(device, desc); } pub fn createSampler(device: *Device, desc: *const sysgpu.Sampler.Descriptor) !*Sampler { return Sampler.init(device, desc); } pub fn createShaderModuleAir(device: *Device, air: *shader.Air, label: [*:0]const u8) !*ShaderModule { _ = label; return ShaderModule.initAir(device, air); } pub fn createShaderModuleSpirv(device: *Device, code: [*]const u32, code_size: u32) !*ShaderModule { const vk_shader_module = try vkd.createShaderModule(device.vk_device, &vk.ShaderModuleCreateInfo{ .code_size = code_size, .p_code = code, }, null); const module = try allocator.create(ShaderModule); module.* = .{ .device = device, .vk_shader_module = vk_shader_module, }; return module; } pub fn createShaderModuleHLSL(device: *Device, code: []const u8) !*ShaderModule { _ = code; _ = device; return error.Unsupported; } pub fn createShaderModuleMSL( device: *Device, label: [*:0]const u8, code: []const u8, workgroup_size: sysgpu.ShaderModule.WorkgroupSize, ) !*ShaderModule { _ = label; _ = code; _ = device; _ = workgroup_size; return error.Unsupported; } pub fn createSwapChain(device: *Device, surface: *Surface, desc: *const sysgpu.SwapChain.Descriptor) !*SwapChain { return SwapChain.init(device, surface, desc); } pub fn createTexture(device: *Device, desc: *const sysgpu.Texture.Descriptor) !*Texture { return Texture.init(device, desc); } pub fn getQueue(device: *Device) 
!*Queue { if (device.queue == null) { device.queue = try Queue.init(device); } return &device.queue.?; } pub fn tick(device: *Device) !void { if (device.queue) |*queue| try queue.flush(); device.processQueuedOperations(); } const device_layers = if (builtin.mode == .Debug) &[_][*:0]const u8{"VK_LAYER_KHRONOS_validation"} else &.{}; const device_extensions = &[_][*:0]const u8{vk.extension_info.khr_swapchain.name}; pub const ResolveKey = struct { format: vk.Format, layout: vk.ImageLayout, }; pub const ColorAttachmentKey = struct { format: vk.Format, samples: u32, load_op: sysgpu.LoadOp, store_op: sysgpu.StoreOp, layout: vk.ImageLayout, resolve: ?ResolveKey, }; pub const DepthStencilAttachmentKey = struct { format: vk.Format, samples: u32, depth_load_op: sysgpu.LoadOp, depth_store_op: sysgpu.StoreOp, stencil_load_op: sysgpu.LoadOp, stencil_store_op: sysgpu.StoreOp, layout: vk.ImageLayout, read_only: bool, }; pub const RenderPassKey = struct { colors: std.BoundedArray(ColorAttachmentKey, 8), depth_stencil: ?DepthStencilAttachmentKey, pub fn init() RenderPassKey { var colors = std.BoundedArray(ColorAttachmentKey, 8){}; for (&colors.buffer) |*color| { color.* = .{ .format = .undefined, .samples = 1, .load_op = .load, .store_op = .store, .layout = .undefined, .resolve = null, }; } return .{ .colors = .{}, .depth_stencil = null, }; } }; fn createRenderPass(device: *Device, key: RenderPassKey) !vk.RenderPass { const vk_device = device.vk_device; if (device.render_passes.get(key)) |render_pass| return render_pass; var attachments = std.BoundedArray(vk.AttachmentDescription, 8){}; var color_refs = std.BoundedArray(vk.AttachmentReference, 8){}; var resolve_refs = std.BoundedArray(vk.AttachmentReference, 8){}; for (key.colors.slice()) |attach| { attachments.appendAssumeCapacity(.{ .format = attach.format, .samples = conv.vulkanSampleCount(attach.samples), .load_op = conv.vulkanLoadOp(attach.load_op), .store_op = conv.vulkanStoreOp(attach.store_op), .stencil_load_op = .dont_care, .stencil_store_op = .dont_care, .initial_layout = attach.layout, .final_layout = attach.layout, }); color_refs.appendAssumeCapacity(.{ .attachment = @intCast(attachments.len - 1), .layout = .color_attachment_optimal, }); if (attach.resolve) |resolve| { attachments.appendAssumeCapacity(.{ .format = resolve.format, .samples = conv.vulkanSampleCount(1), .load_op = .dont_care, .store_op = .store, .stencil_load_op = .dont_care, .stencil_store_op = .dont_care, .initial_layout = resolve.layout, .final_layout = resolve.layout, }); resolve_refs.appendAssumeCapacity(.{ .attachment = @intCast(attachments.len - 1), .layout = .color_attachment_optimal, }); } } const depth_stencil_ref = if (key.depth_stencil) |depth_stencil| blk: { const layout: vk.ImageLayout = if (depth_stencil.read_only) .depth_stencil_read_only_optimal else .depth_stencil_attachment_optimal; attachments.appendAssumeCapacity(.{ .format = depth_stencil.format, .samples = conv.vulkanSampleCount(depth_stencil.samples), .load_op = conv.vulkanLoadOp(depth_stencil.depth_load_op), .store_op = conv.vulkanStoreOp(depth_stencil.depth_store_op), .stencil_load_op = conv.vulkanLoadOp(depth_stencil.stencil_load_op), .stencil_store_op = conv.vulkanStoreOp(depth_stencil.stencil_store_op), .initial_layout = depth_stencil.layout, .final_layout = depth_stencil.layout, }); break :blk &vk.AttachmentReference{ .attachment = @intCast(attachments.len - 1), .layout = layout, }; } else null; const render_pass = try vkd.createRenderPass(vk_device, &vk.RenderPassCreateInfo{ .attachment_count = 
@intCast(attachments.len), .p_attachments = attachments.slice().ptr, .subpass_count = 1, .p_subpasses = &[_]vk.SubpassDescription{ .{ .pipeline_bind_point = .graphics, .color_attachment_count = @intCast(color_refs.len), .p_color_attachments = color_refs.slice().ptr, .p_resolve_attachments = if (resolve_refs.len != 0) resolve_refs.slice().ptr else null, .p_depth_stencil_attachment = depth_stencil_ref, }, }, }, null); try device.render_passes.put(allocator, key, render_pass); return render_pass; } pub fn processQueuedOperations(device: *Device) void { const vk_device = device.vk_device; // Submit objects { var i: usize = 0; while (i < device.submit_objects.items.len) { var submit_object = device.submit_objects.items[i]; const status = vkd.getFenceStatus(vk_device, submit_object.fence) catch unreachable; if (status == .success) { submit_object.deinit(); _ = device.submit_objects.swapRemove(i); } else { i += 1; } } } // MapAsync { var i: usize = 0; while (i < device.map_callbacks.items.len) { const map_callback = device.map_callbacks.items[i]; if (map_callback.buffer.gpu_count == 0) { map_callback.buffer.executeMapAsync(map_callback); _ = device.map_callbacks.swapRemove(i); } else { i += 1; } } } } }; pub const SubmitObject = struct { device: *Device, fence: vk.Fence, reference_trackers: std.ArrayListUnmanaged(*ReferenceTracker) = .{}, pub fn init(device: *Device) !SubmitObject { const vk_device = device.vk_device; const fence = try vkd.createFence(vk_device, &.{ .flags = .{ .signaled_bit = false } }, null); return .{ .device = device, .fence = fence, }; } pub fn deinit(object: *SubmitObject) void { const vk_device = object.device.vk_device; for (object.reference_trackers.items) |reference_tracker| reference_tracker.deinit(); vkd.destroyFence(vk_device, object.fence, null); object.reference_trackers.deinit(allocator); } pub fn wait(object: *SubmitObject) !void { const vk_device = object.device.vk_device; _ = try vkd.waitForFences(vk_device, 1, &[_]vk.Fence{object.fence}, vk.TRUE, std.math.maxInt(u64)); } }; pub const StreamingManager = struct { device: *Device, free_buffers: std.ArrayListUnmanaged(*Buffer) = .{}, pub fn init(device: *Device) !StreamingManager { return .{ .device = device, }; } pub fn deinit(manager: *StreamingManager) void { for (manager.free_buffers.items) |buffer| buffer.manager.release(); manager.free_buffers.deinit(allocator); } pub fn acquire(manager: *StreamingManager) !*Buffer { const device = manager.device; // Recycle finished buffers if (manager.free_buffers.items.len == 0) { device.processQueuedOperations(); } // Create new buffer if (manager.free_buffers.items.len == 0) { const buffer = try Buffer.init(device, &.{ .label = "upload", .usage = .{ .copy_src = true, .map_write = true, }, .size = upload_page_size, .mapped_at_creation = .true, }); errdefer _ = buffer.manager.release(); try manager.free_buffers.append(allocator, buffer); } // Result return manager.free_buffers.pop(); } pub fn release(manager: *StreamingManager, buffer: *Buffer) void { manager.free_buffers.append(allocator, buffer) catch { std.debug.panic("OutOfMemory", .{}); }; } }; pub const SwapChain = struct { manager: utils.Manager(SwapChain) = .{}, device: *Device, vk_swapchain: vk.SwapchainKHR, fence: vk.Fence, wait_semaphore: vk.Semaphore, signal_semaphore: vk.Semaphore, textures: []*Texture, texture_views: []*TextureView, texture_index: u32 = 0, current_texture_view: ?*TextureView = null, format: sysgpu.Texture.Format, pub fn init(device: *Device, surface: *Surface, desc: *const 
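// SwapChain.init (below) wraps vkCreateSwapchainKHR and pre-creates one Texture/TextureView pair per swapchain image.
// Typical frame flow against this type (a sketch assuming the refcounting pattern used in this file, not code from it):
//   const view = try swap_chain.getCurrentTextureView();
//   defer view.manager.release();
//   ... encode and submit work targeting `view` ...
//   try swap_chain.present();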
sysgpu.SwapChain.Descriptor) !*SwapChain { const vk_device = device.vk_device; const sc = try allocator.create(SwapChain); const capabilities = try vki.getPhysicalDeviceSurfaceCapabilitiesKHR( device.adapter.physical_device, surface.vk_surface, ); // TODO: query surface formats // TODO: query surface present modes const composite_alpha = blk: { const composite_alpha_flags = [_]vk.CompositeAlphaFlagsKHR{ .{ .opaque_bit_khr = true }, .{ .pre_multiplied_bit_khr = true }, .{ .post_multiplied_bit_khr = true }, .{ .inherit_bit_khr = true }, }; for (composite_alpha_flags) |flag| { if (@as(vk.Flags, @bitCast(flag)) & @as(vk.Flags, @bitCast(capabilities.supported_composite_alpha)) != 0) { break :blk flag; } } break :blk vk.CompositeAlphaFlagsKHR{}; }; const image_count = @max(capabilities.min_image_count + 1, capabilities.max_image_count); const format = conv.vulkanFormat(device, desc.format); const extent = vk.Extent2D{ .width = std.math.clamp( desc.width, capabilities.min_image_extent.width, capabilities.max_image_extent.width, ), .height = std.math.clamp( desc.height, capabilities.min_image_extent.height, capabilities.max_image_extent.height, ), }; const image_usage = conv.vulkanImageUsageFlags(desc.usage, desc.format); const present_mode = conv.vulkanPresentMode(desc.present_mode); const vk_swapchain = try vkd.createSwapchainKHR(vk_device, &.{ .surface = surface.vk_surface, .min_image_count = image_count, .image_format = format, .image_color_space = .srgb_nonlinear_khr, .image_extent = extent, .image_array_layers = 1, .image_usage = image_usage, .image_sharing_mode = .exclusive, .pre_transform = .{ .identity_bit_khr = true }, .composite_alpha = composite_alpha, .present_mode = present_mode, .clipped = vk.FALSE, }, null); const fence = try vkd.createFence(vk_device, &.{ .flags = .{ .signaled_bit = false } }, null); errdefer vkd.destroyFence(vk_device, fence, null); const wait_semaphore = try vkd.createSemaphore(vk_device, &.{}, null); errdefer vkd.destroySemaphore(vk_device, wait_semaphore, null); const signal_semaphore = try vkd.createSemaphore(vk_device, &.{}, null); errdefer vkd.destroySemaphore(vk_device, signal_semaphore, null); var images_len: u32 = 0; _ = try vkd.getSwapchainImagesKHR(vk_device, vk_swapchain, &images_len, null); const images = try allocator.alloc(vk.Image, images_len); defer allocator.free(images); _ = try vkd.getSwapchainImagesKHR(vk_device, vk_swapchain, &images_len, images.ptr); const textures = try allocator.alloc(*Texture, images_len); errdefer allocator.free(textures); const texture_views = try allocator.alloc(*TextureView, images_len); errdefer allocator.free(texture_views); for (0..images_len) |i| { const texture = try Texture.initForSwapChain(device, desc, images[i], sc); textures[i] = texture; texture_views[i] = try texture.createView(&.{ .format = desc.format, .dimension = .dimension_2d, }); } sc.* = .{ .device = device, .vk_swapchain = vk_swapchain, .fence = fence, .wait_semaphore = wait_semaphore, .signal_semaphore = signal_semaphore, .textures = textures, .texture_views = texture_views, .format = desc.format, }; return sc; } pub fn deinit(sc: *SwapChain) void { const vk_device = sc.device.vk_device; sc.device.waitAll() catch {}; for (sc.texture_views) |view| view.manager.release(); for (sc.textures) |texture| texture.manager.release(); vkd.destroySemaphore(vk_device, sc.wait_semaphore, null); vkd.destroySemaphore(vk_device, sc.signal_semaphore, null); vkd.destroyFence(vk_device, sc.fence, null); vkd.destroySwapchainKHR(vk_device, sc.vk_swapchain, null); 
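// All GPU work was drained via waitAll above, so the handles were safe to destroy; now free the CPU-side arrays.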
allocator.free(sc.textures); allocator.free(sc.texture_views); allocator.destroy(sc); } pub fn getCurrentTextureView(sc: *SwapChain) !*TextureView { const vk_device = sc.device.vk_device; if (sc.current_texture_view) |view| { view.manager.reference(); return view; } const result = try vkd.acquireNextImageKHR( vk_device, sc.vk_swapchain, std.math.maxInt(u64), if (use_semaphore_wait) sc.wait_semaphore else .null_handle, if (!use_semaphore_wait) sc.fence else .null_handle, ); // Wait on the CPU so that GPU does not stall later during present. // This should be similar to using DXGI Waitable Object. if (!use_semaphore_wait) { _ = try vkd.waitForFences(vk_device, 1, &[_]vk.Fence{sc.fence}, vk.TRUE, std.math.maxInt(u64)); try vkd.resetFences(vk_device, 1, &[_]vk.Fence{sc.fence}); } sc.texture_index = result.image_index; var view = sc.texture_views[sc.texture_index]; view.manager.reference(); sc.current_texture_view = view; return view; } pub fn present(sc: *SwapChain) !void { const queue = try sc.device.getQueue(); const vk_queue = queue.vk_queue; const semaphore = sc.signal_semaphore; try queue.signal_semaphores.append(allocator, semaphore); try queue.flush(); _ = try vkd.queuePresentKHR(vk_queue, &.{ .wait_semaphore_count = 1, .p_wait_semaphores = &[_]vk.Semaphore{semaphore}, .swapchain_count = 1, .p_swapchains = &[_]vk.SwapchainKHR{sc.vk_swapchain}, .p_image_indices = &[_]u32{sc.texture_index}, }); sc.current_texture_view = null; } }; pub const Buffer = struct { manager: utils.Manager(Buffer) = .{}, device: *Device, vk_buffer: vk.Buffer, memory: vk.DeviceMemory, // NOTE - this is a naive sync solution as a placeholder until render graphs are implemented read_stage_mask: vk.PipelineStageFlags, read_access_mask: vk.AccessFlags, stage_buffer: ?*Buffer, gpu_count: u32 = 0, map: ?[*]u8, // TODO - packed buffer descriptor struct size: u64, usage: sysgpu.Buffer.UsageFlags, pub fn init(device: *Device, desc: *const sysgpu.Buffer.Descriptor) !*Buffer { const vk_device = device.vk_device; // Buffer const size = @max(4, desc.size); var usage = desc.usage; if (desc.mapped_at_creation == .true and !desc.usage.map_write) usage.copy_dst = true; const vk_buffer = try vkd.createBuffer(vk_device, &.{ .size = size, .usage = conv.vulkanBufferUsageFlags(usage), .sharing_mode = .exclusive, }, null); // Memory const requirements = vkd.getBufferMemoryRequirements(vk_device, vk_buffer); const mem_type: MemoryAllocator.MemoryKind = blk: { if (desc.usage.map_read) break :blk .linear_read_mappable; if (desc.usage.map_write) break :blk .linear_write_mappable; break :blk .linear; }; const mem_type_index = device.memory_allocator.findBestAllocator(requirements, mem_type) orelse @panic("unimplemented"); // TODO const memory = try vkd.allocateMemory(vk_device, &.{ .allocation_size = requirements.size, .memory_type_index = mem_type_index, }, null); try vkd.bindBufferMemory(vk_device, vk_buffer, memory, 0); // Upload buffer var stage_buffer: ?*Buffer = null; var map: ?*anyopaque = null; if (desc.mapped_at_creation == .true) { if (!desc.usage.map_write) { stage_buffer = try Buffer.init(device, &.{ .usage = .{ .copy_src = true, .map_write = true, }, .size = size, }); map = try vkd.mapMemory(vk_device, stage_buffer.?.memory, 0, size, .{}); } else { map = try vkd.mapMemory(vk_device, memory, 0, size, .{}); } } // Result const buffer = try allocator.create(Buffer); buffer.* = .{ .device = device, .vk_buffer = vk_buffer, .memory = memory, .read_stage_mask = conv.vulkanPipelineStageFlagsForBufferRead(desc.usage), .read_access_mask = 
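// read_stage_mask/read_access_mask record the buffer's steady-state "read" usage; StateTracker.endPass transitions written buffers back to this state.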
conv.vulkanAccessFlagsForBufferRead(desc.usage), .stage_buffer = stage_buffer, .map = @ptrCast(map), .size = desc.size, .usage = desc.usage, }; return buffer; } pub fn deinit(buffer: *Buffer) void { const vk_device = buffer.device.vk_device; if (buffer.stage_buffer) |stage_buffer| stage_buffer.manager.release(); vkd.freeMemory(vk_device, buffer.memory, null); vkd.destroyBuffer(vk_device, buffer.vk_buffer, null); allocator.destroy(buffer); } pub fn getMappedRange(buffer: *Buffer, offset: usize, size: usize) !?*anyopaque { return @ptrCast(buffer.map.?[offset .. offset + size]); } pub fn getSize(buffer: *Buffer) u64 { return buffer.size; } pub fn getUsage(buffer: *Buffer) sysgpu.Buffer.UsageFlags { return buffer.usage; } pub fn mapAsync( buffer: *Buffer, mode: sysgpu.MapModeFlags, offset: usize, size: usize, callback: sysgpu.Buffer.MapCallback, userdata: ?*anyopaque, ) !void { _ = size; _ = offset; _ = mode; const map_callback = MapCallback{ .buffer = buffer, .callback = callback, .userdata = userdata }; if (buffer.gpu_count == 0) { buffer.executeMapAsync(map_callback); } else { try buffer.device.map_callbacks.append(allocator, map_callback); } } pub fn setLabel(buffer: *Buffer, label: [*:0]const u8) void { _ = label; _ = buffer; @panic("unimplemented"); } pub fn unmap(buffer: *Buffer) !void { const vk_device = buffer.device.vk_device; const queue = try buffer.device.getQueue(); var unmap_memory: vk.DeviceMemory = undefined; if (buffer.stage_buffer) |stage_buffer| { unmap_memory = stage_buffer.memory; const encoder = try queue.getCommandEncoder(); try encoder.copyBufferToBuffer(stage_buffer, 0, buffer, 0, buffer.size); stage_buffer.manager.release(); buffer.stage_buffer = null; } else { unmap_memory = buffer.memory; } vkd.unmapMemory(vk_device, unmap_memory); } // Internal pub fn executeMapAsync(buffer: *Buffer, map_callback: MapCallback) void { const vk_device = buffer.device.vk_device; const map = vkd.mapMemory(vk_device, buffer.memory, 0, buffer.size, .{}) catch { map_callback.callback(.unknown, map_callback.userdata); return; }; buffer.map = @ptrCast(map); map_callback.callback(.success, map_callback.userdata); } }; pub const Texture = struct { manager: utils.Manager(Texture) = .{}, device: *Device, extent: vk.Extent2D, image: vk.Image, memory: vk.DeviceMemory, swapchain: ?*SwapChain = null, // NOTE - this is a naive sync solution as a placeholder until render graphs are implemented read_stage_mask: vk.PipelineStageFlags, read_access_mask: vk.AccessFlags, read_image_layout: vk.ImageLayout, // TODO - packed texture descriptor struct usage: sysgpu.Texture.UsageFlags, dimension: sysgpu.Texture.Dimension, size: sysgpu.Extent3D, format: sysgpu.Texture.Format, mip_level_count: u32, sample_count: u32, pub fn init(device: *Device, desc: *const sysgpu.Texture.Descriptor) !*Texture { const vk_device = device.vk_device; // Image const cube_compatible = desc.dimension == .dimension_2d and desc.size.width == desc.size.height and desc.size.depth_or_array_layers >= 6; const extent = utils.calcExtent(desc.dimension, desc.size); const vk_image = try vkd.createImage(vk_device, &.{ .flags = conv.vulkanImageCreateFlags(cube_compatible, desc.view_format_count), .image_type = conv.vulkanImageType(desc.dimension), .format = conv.vulkanFormat(device, desc.format), .extent = .{ .width = extent.width, .height = extent.height, .depth = extent.depth }, .mip_levels = desc.mip_level_count, .array_layers = extent.array_count, .samples = conv.vulkanSampleCount(desc.sample_count), .tiling = .optimal, .usage = 
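// NOTE: usage translation is format-aware, hence the extra format argument below.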
conv.vulkanImageUsageFlags(desc.usage, desc.format), .sharing_mode = .exclusive, .initial_layout = .undefined, }, null); // Memory const requirements = vkd.getImageMemoryRequirements(vk_device, vk_image); const mem_type = .linear; const mem_type_index = device.memory_allocator.findBestAllocator(requirements, mem_type) orelse @panic("unimplemented"); // TODO const memory = try vkd.allocateMemory(vk_device, &.{ .allocation_size = requirements.size, .memory_type_index = mem_type_index, }, null); try vkd.bindImageMemory(vk_device, vk_image, memory, 0); // Result var texture = try allocator.create(Texture); texture.* = .{ .device = device, .extent = .{ .width = extent.width, .height = extent.height }, .image = vk_image, .memory = memory, .swapchain = null, .read_stage_mask = conv.vulkanPipelineStageFlagsForImageRead(desc.usage, desc.format), .read_access_mask = conv.vulkanAccessFlagsForImageRead(desc.usage, desc.format), .read_image_layout = conv.vulkanImageLayoutForRead(desc.usage, desc.format), .usage = desc.usage, .dimension = desc.dimension, .size = desc.size, .format = desc.format, .mip_level_count = desc.mip_level_count, .sample_count = desc.sample_count, }; errdefer texture.manager.release(); // Transition to read-state const queue = try device.getQueue(); const encoder = try queue.getCommandEncoder(); try encoder.state_tracker.initTexture(texture); return texture; } pub fn initForSwapChain( device: *Device, desc: *const sysgpu.SwapChain.Descriptor, image: vk.Image, swapchain: *SwapChain, ) !*Texture { var texture = try allocator.create(Texture); texture.* = .{ .device = device, .extent = .{ .width = desc.width, .height = desc.height }, .image = image, .memory = .null_handle, .swapchain = swapchain, .read_stage_mask = conv.vulkanPipelineStageFlagsForImageRead(desc.usage, desc.format), .read_access_mask = conv.vulkanAccessFlagsForImageRead(desc.usage, desc.format), .read_image_layout = .present_src_khr, .usage = desc.usage, .dimension = .dimension_2d, .size = .{ .width = desc.width, .height = desc.height, .depth_or_array_layers = 1 }, .format = desc.format, .mip_level_count = 1, .sample_count = 1, }; errdefer texture.manager.release(); // Transition to read-state const queue = try device.getQueue(); const encoder = try queue.getCommandEncoder(); try encoder.state_tracker.initTexture(texture); return texture; } pub fn deinit(texture: *Texture) void { const vk_device = texture.device.vk_device; if (texture.swapchain == null) { vkd.freeMemory(vk_device, texture.memory, null); vkd.destroyImage(vk_device, texture.image, null); } allocator.destroy(texture); } pub fn createView(texture: *Texture, desc: *const sysgpu.TextureView.Descriptor) !*TextureView { return TextureView.init(texture, desc, texture.extent); } }; pub const TextureView = struct { manager: utils.Manager(TextureView) = .{}, device: *Device, texture: *Texture, vk_view: vk.ImageView, vk_format: vk.Format, extent: vk.Extent2D, pub fn init(texture: *Texture, desc: *const sysgpu.TextureView.Descriptor, extent: vk.Extent2D) !*TextureView { const vk_device = texture.device.vk_device; texture.manager.reference(); const texture_dimension: sysgpu.TextureView.Dimension = switch (texture.dimension) { .dimension_1d => .dimension_1d, .dimension_2d => .dimension_2d, .dimension_3d => .dimension_3d, }; const format = if (desc.format != .undefined) desc.format else texture.format; const dimension = if (desc.dimension != .dimension_undefined) desc.dimension else texture_dimension; const vk_format = conv.vulkanFormat(texture.device, format); const 
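// An .undefined view format or dimension inherits the parent texture's, resolved just above.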
vk_view = try vkd.createImageView(vk_device, &.{ .image = texture.image, .view_type = conv.vulkanImageViewType(dimension), .format = vk_format, .components = .{ .r = .identity, .g = .identity, .b = .identity, .a = .identity, }, .subresource_range = .{ .aspect_mask = conv.vulkanImageAspectFlags(desc.aspect, format), .base_mip_level = desc.base_mip_level, .level_count = desc.mip_level_count, .base_array_layer = desc.base_array_layer, .layer_count = desc.array_layer_count, }, }, null); const view = try allocator.create(TextureView); view.* = .{ .device = texture.device, .texture = texture, .vk_view = vk_view, .vk_format = vk_format, .extent = extent, }; return view; } pub fn deinit(view: *TextureView) void { const vk_device = view.device.vk_device; vkd.destroyImageView(vk_device, view.vk_view, null); view.texture.manager.release(); allocator.destroy(view); } }; pub const Sampler = struct { manager: utils.Manager(Sampler) = .{}, device: *Device, vk_sampler: vk.Sampler, pub fn init(device: *Device, desc: *const sysgpu.Sampler.Descriptor) !*Sampler { const vk_device = device.vk_device; const vk_sampler = try vkd.createSampler(vk_device, &.{ .flags = .{}, .mag_filter = conv.vulkanFilter(desc.mag_filter), .min_filter = conv.vulkanFilter(desc.min_filter), .mipmap_mode = conv.vulkanSamplerMipmapMode(desc.mipmap_filter), .address_mode_u = conv.vulkanSamplerAddressMode(desc.address_mode_u), .address_mode_v = conv.vulkanSamplerAddressMode(desc.address_mode_v), .address_mode_w = conv.vulkanSamplerAddressMode(desc.address_mode_w), .mip_lod_bias = 0, .anisotropy_enable = @intFromBool(desc.max_anisotropy > 1), .max_anisotropy = @floatFromInt(desc.max_anisotropy), .compare_enable = @intFromBool(desc.compare != .undefined), .compare_op = if (desc.compare != .undefined) conv.vulkanCompareOp(desc.compare) else .never, .min_lod = desc.lod_min_clamp, .max_lod = desc.lod_max_clamp, .border_color = .float_transparent_black, .unnormalized_coordinates = vk.FALSE, }, null); // Result const sampler = try allocator.create(Sampler); sampler.* = .{ .device = device, .vk_sampler = vk_sampler, }; return sampler; } pub fn deinit(sampler: *Sampler) void { const vk_device = sampler.device.vk_device; vkd.destroySampler(vk_device, sampler.vk_sampler, null); allocator.destroy(sampler); } }; pub const BindGroupLayout = struct { const Entry = struct { binding: u32, descriptor_type: vk.DescriptorType, image_layout: vk.ImageLayout, }; manager: utils.Manager(BindGroupLayout) = .{}, device: *Device, vk_layout: vk.DescriptorSetLayout, desc_pool: vk.DescriptorPool, entries: std.ArrayListUnmanaged(Entry), const max_sets = 512; pub fn init(device: *Device, desc: *const sysgpu.BindGroupLayout.Descriptor) !*BindGroupLayout { const vk_device = device.vk_device; var bindings = try std.ArrayListUnmanaged(vk.DescriptorSetLayoutBinding).initCapacity(allocator, desc.entry_count); defer bindings.deinit(allocator); var desc_types = std.AutoArrayHashMap(vk.DescriptorType, u32).init(allocator); defer desc_types.deinit(); var entries = try std.ArrayListUnmanaged(Entry).initCapacity(allocator, desc.entry_count); errdefer entries.deinit(allocator); for (0..desc.entry_count) |entry_index| { const entry = desc.entries.?[entry_index]; const descriptor_type = conv.vulkanDescriptorType(entry); if (desc_types.getPtr(descriptor_type)) |count| { count.* += 1; } else { try desc_types.put(descriptor_type, 1); } bindings.appendAssumeCapacity(.{ .binding = entry.binding, .descriptor_type = descriptor_type, .descriptor_count = 1, .stage_flags = 
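// desc_types tallies bindings per descriptor type; the counts size this layout's private descriptor pool (max_sets sets) below.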
conv.vulkanShaderStageFlags(entry.visibility), }); entries.appendAssumeCapacity(.{ .binding = entry.binding, .descriptor_type = descriptor_type, .image_layout = conv.vulkanImageLayoutForTextureBinding(entry.texture.sample_type), }); } const vk_layout = try vkd.createDescriptorSetLayout(vk_device, &vk.DescriptorSetLayoutCreateInfo{ .binding_count = @intCast(bindings.items.len), .p_bindings = bindings.items.ptr, }, null); // Descriptor Pool var pool_sizes = try std.ArrayList(vk.DescriptorPoolSize).initCapacity(allocator, desc_types.count()); defer pool_sizes.deinit(); var desc_types_iter = desc_types.iterator(); while (desc_types_iter.next()) |entry| { pool_sizes.appendAssumeCapacity(.{ .type = entry.key_ptr.*, .descriptor_count = max_sets * entry.value_ptr.*, }); } const desc_pool = try vkd.createDescriptorPool(vk_device, &vk.DescriptorPoolCreateInfo{ .flags = .{ .free_descriptor_set_bit = true }, .max_sets = max_sets, .pool_size_count = @intCast(pool_sizes.items.len), .p_pool_sizes = pool_sizes.items.ptr, }, null); // Result const layout = try allocator.create(BindGroupLayout); layout.* = .{ .device = device, .vk_layout = vk_layout, .desc_pool = desc_pool, .entries = entries, }; return layout; } pub fn deinit(layout: *BindGroupLayout) void { const vk_device = layout.device.vk_device; vkd.destroyDescriptorSetLayout(vk_device, layout.vk_layout, null); vkd.destroyDescriptorPool(vk_device, layout.desc_pool, null); layout.entries.deinit(allocator); allocator.destroy(layout); } // Internal pub fn getEntry(layout: *BindGroupLayout, binding: u32) ?*const Entry { for (layout.entries.items) |*entry| { if (entry.binding == binding) return entry; } return null; } }; pub const BindGroup = struct { const BufferAccess = struct { buffer: *Buffer, storage: bool, }; const TextureViewAccess = struct { texture_view: *TextureView, storage: bool, }; manager: utils.Manager(BindGroup) = .{}, device: *Device, layout: *BindGroupLayout, desc_set: vk.DescriptorSet, buffers: std.ArrayListUnmanaged(BufferAccess), texture_views: std.ArrayListUnmanaged(TextureViewAccess), samplers: std.ArrayListUnmanaged(*Sampler), pub fn init(device: *Device, desc: *const sysgpu.BindGroup.Descriptor) !*BindGroup { const vk_device = device.vk_device; const layout: *BindGroupLayout = @ptrCast(@alignCast(desc.layout)); layout.manager.reference(); var desc_set: vk.DescriptorSet = undefined; try vkd.allocateDescriptorSets(vk_device, &vk.DescriptorSetAllocateInfo{ .descriptor_pool = layout.desc_pool, .descriptor_set_count = 1, .p_set_layouts = @ptrCast(&layout.vk_layout), }, @ptrCast(&desc_set)); var writes = try allocator.alloc(vk.WriteDescriptorSet, layout.entries.items.len); defer allocator.free(writes); var write_image_info = try allocator.alloc(vk.DescriptorImageInfo, layout.entries.items.len); defer allocator.free(write_image_info); var write_buffer_info = try allocator.alloc(vk.DescriptorBufferInfo, layout.entries.items.len); defer allocator.free(write_buffer_info); for (0..desc.entry_count) |i| { const entry = desc.entries.?[i]; const layout_entry = layout.getEntry(entry.binding) orelse return error.UnknownBinding; writes[i] = .{ .dst_set = desc_set, .dst_binding = layout_entry.binding, .dst_array_element = 0, .descriptor_count = 1, .descriptor_type = layout_entry.descriptor_type, .p_image_info = undefined, .p_buffer_info = undefined, .p_texel_buffer_view = undefined, }; switch (layout_entry.descriptor_type) { .sampler => { const sampler: *Sampler = @ptrCast(@alignCast(entry.sampler.?)); write_image_info[i] = .{ .sampler = 
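// write_image_info/write_buffer_info are indexed per entry so each p_*_info pointer stays stable until updateDescriptorSets runs.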
sampler.vk_sampler, .image_view = .null_handle, .image_layout = .undefined, }; writes[i].p_image_info = @ptrCast(&write_image_info[i]); }, .sampled_image, .storage_image => { const texture_view: *TextureView = @ptrCast(@alignCast(entry.texture_view.?)); write_image_info[i] = .{ .sampler = .null_handle, .image_view = texture_view.vk_view, .image_layout = layout_entry.image_layout, }; writes[i].p_image_info = @ptrCast(&write_image_info[i]); }, .uniform_buffer, .storage_buffer, .uniform_buffer_dynamic, .storage_buffer_dynamic, => { const buffer: *Buffer = @ptrCast(@alignCast(entry.buffer.?)); write_buffer_info[i] = .{ .buffer = buffer.vk_buffer, .offset = desc.entries.?[i].offset, .range = desc.entries.?[i].size, }; writes[i].p_buffer_info = @ptrCast(&write_buffer_info[i]); }, else => unreachable, } } vkd.updateDescriptorSets(vk_device, @intCast(writes.len), writes.ptr, 0, undefined); // Resource tracking var buffers = std.ArrayListUnmanaged(BufferAccess){}; errdefer buffers.deinit(allocator); var texture_views = std.ArrayListUnmanaged(TextureViewAccess){}; errdefer texture_views.deinit(allocator); var samplers = std.ArrayListUnmanaged(*Sampler){}; errdefer samplers.deinit(allocator); for (0..desc.entry_count) |i| { const entry = desc.entries.?[i]; const layout_entry = layout.getEntry(entry.binding) orelse return error.UnknownBinding; switch (layout_entry.descriptor_type) { .sampler => { const sampler: *Sampler = @ptrCast(@alignCast(entry.sampler.?)); try samplers.append(allocator, sampler); sampler.manager.reference(); }, .sampled_image, .storage_image => { const texture_view: *TextureView = @ptrCast(@alignCast(entry.texture_view.?)); const storage = layout_entry.descriptor_type == .storage_image; try texture_views.append(allocator, .{ .texture_view = texture_view, .storage = storage }); texture_view.manager.reference(); }, .uniform_buffer, .uniform_buffer_dynamic, .storage_buffer, .storage_buffer_dynamic, => { const buffer: *Buffer = @ptrCast(@alignCast(entry.buffer.?)); const storage = layout_entry.descriptor_type == .storage_buffer or layout_entry.descriptor_type == .storage_buffer_dynamic; try buffers.append(allocator, .{ .buffer = buffer, .storage = storage }); buffer.manager.reference(); }, else => unreachable, } } // Result const bind_group = try allocator.create(BindGroup); bind_group.* = .{ .device = device, .layout = layout, .desc_set = desc_set, .buffers = buffers, .texture_views = texture_views, .samplers = samplers, }; return bind_group; } pub fn deinit(group: *BindGroup) void { const vk_device = group.device.vk_device; vkd.freeDescriptorSets(vk_device, group.layout.desc_pool, 1, @ptrCast(&group.desc_set)) catch unreachable; for (group.buffers.items) |access| access.buffer.manager.release(); for (group.texture_views.items) |access| access.texture_view.manager.release(); for (group.samplers.items) |sampler| sampler.manager.release(); group.layout.manager.release(); group.buffers.deinit(allocator); group.texture_views.deinit(allocator); group.samplers.deinit(allocator); allocator.destroy(group); } }; pub const PipelineLayout = struct { manager: utils.Manager(PipelineLayout) = .{}, device: *Device, vk_layout: vk.PipelineLayout, group_layouts: []*BindGroupLayout, pub fn init(device: *Device, desc: *const sysgpu.PipelineLayout.Descriptor) !*PipelineLayout { const vk_device = device.vk_device; var group_layouts = try allocator.alloc(*BindGroupLayout, desc.bind_group_layout_count); errdefer allocator.free(group_layouts); const set_layouts = try allocator.alloc(vk.DescriptorSetLayout, 
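// set_layouts is a temporary array of Vulkan handles fed to vkCreatePipelineLayout; group_layouts keeps the long-lived references.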
desc.bind_group_layout_count); defer allocator.free(set_layouts); for (0..desc.bind_group_layout_count) |i| { const layout: *BindGroupLayout = @ptrCast(@alignCast(desc.bind_group_layouts.?[i])); layout.manager.reference(); group_layouts[i] = layout; set_layouts[i] = layout.vk_layout; } const vk_layout = try vkd.createPipelineLayout(vk_device, &.{ .set_layout_count = @intCast(set_layouts.len), .p_set_layouts = set_layouts.ptr, }, null); const layout = try allocator.create(PipelineLayout); layout.* = .{ .device = device, .vk_layout = vk_layout, .group_layouts = group_layouts, }; return layout; } pub fn initDefault(device: *Device, default_pipeline_layout: utils.DefaultPipelineLayoutDescriptor) !*PipelineLayout { const groups = default_pipeline_layout.groups; var bind_group_layouts = std.BoundedArray(*sysgpu.BindGroupLayout, limits.max_bind_groups){}; defer { for (bind_group_layouts.slice()) |bind_group_layout| bind_group_layout.release(); } for (groups.slice()) |entries| { const bind_group_layout = try device.createBindGroupLayout( &sysgpu.BindGroupLayout.Descriptor.init(.{ .entries = entries.items }), ); bind_group_layouts.appendAssumeCapacity(@ptrCast(bind_group_layout)); } return device.createPipelineLayout( &sysgpu.PipelineLayout.Descriptor.init(.{ .bind_group_layouts = bind_group_layouts.slice() }), ); } pub fn deinit(layout: *PipelineLayout) void { const vk_device = layout.device.vk_device; for (layout.group_layouts) |group_layout| group_layout.manager.release(); vkd.destroyPipelineLayout(vk_device, layout.vk_layout, null); allocator.free(layout.group_layouts); allocator.destroy(layout); } }; pub const ShaderModule = struct { manager: utils.Manager(ShaderModule) = .{}, device: *Device, vk_shader_module: vk.ShaderModule, air: ?*shader.Air = null, pub fn initAir(device: *Device, air: *shader.Air) !*ShaderModule { const vk_device = device.vk_device; const code = try shader.CodeGen.generate(allocator, air, .spirv, true, .{ .emit_source_file = "" }, null, null, null); defer allocator.free(code); const vk_shader_module = try vkd.createShaderModule(vk_device, &vk.ShaderModuleCreateInfo{ .code_size = code.len, .p_code = @ptrCast(@alignCast(code.ptr)), }, null); const module = try allocator.create(ShaderModule); module.* = .{ .device = device, .vk_shader_module = vk_shader_module, .air = air, }; return module; } pub fn deinit(module: *ShaderModule) void { const vk_device = module.device.vk_device; vkd.destroyShaderModule(vk_device, module.vk_shader_module, null); if (module.air) |air| { air.deinit(allocator); allocator.destroy(air); } allocator.destroy(module); } }; pub const ComputePipeline = struct { manager: utils.Manager(ComputePipeline) = .{}, device: *Device, layout: *PipelineLayout, vk_pipeline: vk.Pipeline, pub fn init(device: *Device, desc: *const sysgpu.ComputePipeline.Descriptor) !*ComputePipeline { const vk_device = device.vk_device;
// Shaders
const compute_module: *ShaderModule = @ptrCast(@alignCast(desc.compute.module));
// Pipeline Layout
var layout: *PipelineLayout = undefined; if (desc.layout) |layout_raw| { layout = @ptrCast(@alignCast(layout_raw)); layout.manager.reference(); } else if (compute_module.air) |air| { var layout_desc = utils.DefaultPipelineLayoutDescriptor.init(allocator); defer layout_desc.deinit(); try layout_desc.addFunction(air, .{ .compute = true }, desc.compute.entry_point); layout = try PipelineLayout.initDefault(device, layout_desc); } else { @panic(
\\Cannot create pipeline descriptor automatically.
\\Please provide it yourself or write the shader in WGSL.
); } errdefer layout.manager.release();
// PSO
const stage = vk.PipelineShaderStageCreateInfo{ .stage = .{ .compute_bit = true }, .module = compute_module.vk_shader_module, .p_name = desc.compute.entry_point, }; var vk_pipeline: vk.Pipeline = undefined; _ = try vkd.createComputePipelines(vk_device, .null_handle, 1, &[_]vk.ComputePipelineCreateInfo{.{ .base_pipeline_index = -1, .layout = layout.vk_layout, .stage = stage, }}, null, @ptrCast(&vk_pipeline));
// Result
const pipeline = try allocator.create(ComputePipeline); pipeline.* = .{ .device = device, .vk_pipeline = vk_pipeline, .layout = layout, }; return pipeline; } pub fn deinit(pipeline: *ComputePipeline) void { const vk_device = pipeline.device.vk_device; pipeline.layout.manager.release(); vkd.destroyPipeline(vk_device, pipeline.vk_pipeline, null); allocator.destroy(pipeline); } pub fn getBindGroupLayout(pipeline: *ComputePipeline, group_index: u32) *BindGroupLayout { return @ptrCast(pipeline.layout.group_layouts[group_index]); } }; pub const RenderPipeline = struct { manager: utils.Manager(RenderPipeline) = .{}, device: *Device, vk_pipeline: vk.Pipeline, layout: *PipelineLayout, pub fn init(device: *Device, desc: *const sysgpu.RenderPipeline.Descriptor) !*RenderPipeline { const vk_device = device.vk_device; var stages = std.BoundedArray(vk.PipelineShaderStageCreateInfo, 2){}; const vertex_module: *ShaderModule = @ptrCast(@alignCast(desc.vertex.module)); stages.appendAssumeCapacity(.{ .stage = .{ .vertex_bit = true }, .module = vertex_module.vk_shader_module, .p_name = desc.vertex.entry_point, .p_specialization_info = null, }); if (desc.fragment) |frag| { const frag_module: *ShaderModule = @ptrCast(@alignCast(frag.module)); stages.appendAssumeCapacity(.{ .stage = .{ .fragment_bit = true }, .module = frag_module.vk_shader_module, .p_name = frag.entry_point, .p_specialization_info = null, }); } var vertex_bindings = try std.ArrayList(vk.VertexInputBindingDescription).initCapacity(allocator, desc.vertex.buffer_count); var vertex_attrs = try std.ArrayList(vk.VertexInputAttributeDescription).initCapacity(allocator, desc.vertex.buffer_count); defer { vertex_bindings.deinit(); vertex_attrs.deinit(); } for (0..desc.vertex.buffer_count) |i| { const buf = desc.vertex.buffers.?[i]; const input_rate = conv.vulkanVertexInputRate(buf.step_mode); vertex_bindings.appendAssumeCapacity(.{ .binding = @intCast(i), .stride = @intCast(buf.array_stride), .input_rate = input_rate, }); for (0..buf.attribute_count) |j| { const attr = buf.attributes.?[j]; try vertex_attrs.append(.{ .location = attr.shader_location, .binding = @intCast(i), .format = conv.vulkanVertexFormat(attr.format), .offset = @intCast(attr.offset), }); } } const vertex_input = vk.PipelineVertexInputStateCreateInfo{ .vertex_binding_description_count = @intCast(vertex_bindings.items.len), .p_vertex_binding_descriptions = vertex_bindings.items.ptr, .vertex_attribute_description_count = @intCast(vertex_attrs.items.len), .p_vertex_attribute_descriptions = vertex_attrs.items.ptr, }; const input_assembly = vk.PipelineInputAssemblyStateCreateInfo{ .topology = conv.vulkanPrimitiveTopology(desc.primitive.topology), .primitive_restart_enable = @intFromBool(desc.primitive.strip_index_format != .undefined), }; const viewport = vk.PipelineViewportStateCreateInfo{ .viewport_count = 1, .p_viewports = &[_]vk.Viewport{.{ .x = 0, .y = 0, .width = 1.0, .height = 1.0, .min_depth = 0.0, .max_depth = 1.0 }}, .scissor_count = 1, .p_scissors = 
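// 1x1 placeholder viewport/scissor: both are dynamic states (see dynamic_states below) and are set at encode time.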
&[_]vk.Rect2D{.{ .offset = .{ .x = 0, .y = 0 }, .extent = .{ .width = 1, .height = 1 } }}, }; const rasterization = vk.PipelineRasterizationStateCreateInfo{ .depth_clamp_enable = vk.FALSE, .rasterizer_discard_enable = vk.FALSE, .polygon_mode = .fill, .cull_mode = conv.vulkanCullMode(desc.primitive.cull_mode), .front_face = conv.vulkanFrontFace(desc.primitive.front_face), .depth_bias_enable = isDepthBiasEnabled(desc.depth_stencil), .depth_bias_constant_factor = conv.vulkanDepthBias(desc.depth_stencil), .depth_bias_clamp = conv.vulkanDepthBiasClamp(desc.depth_stencil), .depth_bias_slope_factor = conv.vulkanDepthBiasSlopeScale(desc.depth_stencil), .line_width = 1, }; const sample_count = conv.vulkanSampleCount(desc.multisample.count); const multisample = vk.PipelineMultisampleStateCreateInfo{ .rasterization_samples = sample_count, .sample_shading_enable = vk.FALSE, .min_sample_shading = 0, .p_sample_mask = &[_]u32{desc.multisample.mask}, .alpha_to_coverage_enable = @intFromEnum(desc.multisample.alpha_to_coverage_enabled), .alpha_to_one_enable = vk.FALSE, }; var layout: *PipelineLayout = undefined; if (desc.layout) |layout_raw| { layout = @ptrCast(@alignCast(layout_raw)); layout.manager.reference(); } else if (vertex_module.air) |vertex_air| { var layout_desc = utils.DefaultPipelineLayoutDescriptor.init(allocator); defer layout_desc.deinit(); try layout_desc.addFunction(vertex_air, .{ .vertex = true }, desc.vertex.entry_point); if (desc.fragment) |frag| { const frag_module: *ShaderModule = @ptrCast(@alignCast(frag.module)); if (frag_module.air) |frag_air| { try layout_desc.addFunction(frag_air, .{ .fragment = true }, frag.entry_point); } else { @panic(
\\Cannot create pipeline descriptor automatically.
\\Please provide it yourself or write the shader in WGSL.
); } } layout = try PipelineLayout.initDefault(device, layout_desc); } else { @panic(
\\Cannot create pipeline descriptor automatically.
\\Please provide it yourself or write the shader in WGSL.
); } errdefer layout.manager.release(); var blend_attachments: []vk.PipelineColorBlendAttachmentState = &.{}; defer if (desc.fragment != null) allocator.free(blend_attachments); var rp_key = Device.RenderPassKey.init(); if (desc.fragment) |frag| { blend_attachments = try allocator.alloc(vk.PipelineColorBlendAttachmentState, frag.target_count); for (0..frag.target_count) |i| { const target = frag.targets.?[i]; const blend = target.blend orelse &sysgpu.BlendState{}; blend_attachments[i] = .{ .blend_enable = if (target.blend != null) vk.TRUE else vk.FALSE, .src_color_blend_factor = conv.vulkanBlendFactor(blend.color.src_factor, true), .dst_color_blend_factor = conv.vulkanBlendFactor(blend.color.dst_factor, true), .color_blend_op = conv.vulkanBlendOp(blend.color.operation), .src_alpha_blend_factor = conv.vulkanBlendFactor(blend.alpha.src_factor, false), .dst_alpha_blend_factor = conv.vulkanBlendFactor(blend.alpha.dst_factor, false), .alpha_blend_op = conv.vulkanBlendOp(blend.alpha.operation), .color_write_mask = .{ .r_bit = target.write_mask.red, .g_bit = target.write_mask.green, .b_bit = target.write_mask.blue, .a_bit = target.write_mask.alpha, }, }; rp_key.colors.appendAssumeCapacity(.{ .format = conv.vulkanFormat(device, target.format), .samples = desc.multisample.count, .load_op = .clear, .store_op = .store, .layout = .color_attachment_optimal, .resolve = null, }); } } var depth_stencil_state = vk.PipelineDepthStencilStateCreateInfo{ .depth_test_enable = vk.FALSE, .depth_write_enable = vk.FALSE, .depth_compare_op = .never, .depth_bounds_test_enable = vk.FALSE, .stencil_test_enable = vk.FALSE, .front = .{ .fail_op = .keep, .depth_fail_op = .keep, .pass_op = .keep, .compare_op = .never, .compare_mask = 0, .write_mask = 0, .reference = 0, }, .back = .{ .fail_op = .keep, .depth_fail_op = .keep, .pass_op = .keep, .compare_op = .never, .compare_mask = 0, .write_mask = 0, .reference = 0, }, .min_depth_bounds = 0, .max_depth_bounds = 1, }; if (desc.depth_stencil) |ds| { depth_stencil_state.depth_test_enable = @intFromBool(ds.depth_compare != .always or ds.depth_write_enabled == .true); depth_stencil_state.depth_write_enable = @intFromBool(ds.depth_write_enabled == .true); depth_stencil_state.depth_compare_op = conv.vulkanCompareOp(ds.depth_compare); depth_stencil_state.stencil_test_enable = @intFromBool(conv.stencilEnable(ds.stencil_front) or conv.stencilEnable(ds.stencil_back)); depth_stencil_state.front = .{ .fail_op = conv.vulkanStencilOp(ds.stencil_front.fail_op), .depth_fail_op = conv.vulkanStencilOp(ds.stencil_front.depth_fail_op), .pass_op = conv.vulkanStencilOp(ds.stencil_front.pass_op), .compare_op = conv.vulkanCompareOp(ds.stencil_front.compare), .compare_mask = ds.stencil_read_mask, .write_mask = ds.stencil_write_mask, .reference = 0, }; depth_stencil_state.back = .{ .fail_op = conv.vulkanStencilOp(ds.stencil_back.fail_op), .depth_fail_op = conv.vulkanStencilOp(ds.stencil_back.depth_fail_op), .pass_op = conv.vulkanStencilOp(ds.stencil_back.pass_op), .compare_op = conv.vulkanCompareOp(ds.stencil_back.compare), .compare_mask = ds.stencil_read_mask, .write_mask = ds.stencil_write_mask, .reference = 0, }; rp_key.depth_stencil = .{ .format = conv.vulkanFormat(device, ds.format), .samples = desc.multisample.count, .depth_load_op = .load, .depth_store_op = .store, .stencil_load_op = .load, .stencil_store_op = .store, .layout = .depth_stencil_attachment_optimal, .read_only = ds.depth_write_enabled == .false and ds.stencil_write_mask == 0, }; } const color_blend = 
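// One blend-attachment state per color target, mirroring the rp_key colors recorded above.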
vk.PipelineColorBlendStateCreateInfo{ .logic_op_enable = vk.FALSE, .logic_op = .clear, .attachment_count = @intCast(blend_attachments.len), .p_attachments = blend_attachments.ptr, .blend_constants = .{ 0, 0, 0, 0 }, }; const dynamic_states = [_]vk.DynamicState{ .viewport, .scissor, .line_width, .blend_constants, .depth_bounds, .stencil_reference, }; const dynamic = vk.PipelineDynamicStateCreateInfo{ .dynamic_state_count = dynamic_states.len, .p_dynamic_states = &dynamic_states, }; const render_pass = try device.createRenderPass(rp_key); var vk_pipeline: vk.Pipeline = undefined; _ = try vkd.createGraphicsPipelines(vk_device, .null_handle, 1, &[_]vk.GraphicsPipelineCreateInfo{.{ .stage_count = stages.len, .p_stages = stages.slice().ptr, .p_vertex_input_state = &vertex_input, .p_input_assembly_state = &input_assembly, .p_viewport_state = &viewport, .p_rasterization_state = &rasterization, .p_multisample_state = &multisample, .p_depth_stencil_state = &depth_stencil_state, .p_color_blend_state = &color_blend, .p_dynamic_state = &dynamic, .layout = layout.vk_layout, .render_pass = render_pass, .subpass = 0, .base_pipeline_index = -1, }}, null, @ptrCast(&vk_pipeline)); const pipeline = try allocator.create(RenderPipeline); pipeline.* = .{ .device = device, .vk_pipeline = vk_pipeline, .layout = layout, }; return pipeline; } pub fn deinit(pipeline: *RenderPipeline) void { const vk_device = pipeline.device.vk_device; pipeline.layout.manager.release(); vkd.destroyPipeline(vk_device, pipeline.vk_pipeline, null); allocator.destroy(pipeline); } pub fn getBindGroupLayout(pipeline: *RenderPipeline, group_index: u32) *BindGroupLayout { return @ptrCast(pipeline.layout.group_layouts[group_index]); } fn isDepthBiasEnabled(ds: ?*const sysgpu.DepthStencilState) vk.Bool32 { if (ds == null) return vk.FALSE; return @intFromBool(ds.?.depth_bias != 0 or ds.?.depth_bias_slope_scale != 0); } }; pub const CommandBuffer = struct { pub const StreamingResult = struct { buffer: *Buffer, map: [*]u8, offset: u32, }; manager: utils.Manager(CommandBuffer) = .{}, device: *Device, vk_command_buffer: vk.CommandBuffer, wait_semaphores: std.ArrayListUnmanaged(vk.Semaphore) = .{}, wait_dst_stage_masks: std.ArrayListUnmanaged(vk.PipelineStageFlags) = .{}, reference_tracker: *ReferenceTracker, upload_buffer: ?*Buffer = null, upload_map: ?[*]u8 = null, upload_next_offset: u32 = upload_page_size, pub fn init(device: *Device) !*CommandBuffer { const vk_device = device.vk_device; var vk_command_buffer: vk.CommandBuffer = undefined; try vkd.allocateCommandBuffers(vk_device, &.{ .command_pool = device.cmd_pool, .level = .primary, .command_buffer_count = 1, }, @ptrCast(&vk_command_buffer)); try vkd.beginCommandBuffer(vk_command_buffer, &.{ .flags = .{ .one_time_submit_bit = true } }); const reference_tracker = try ReferenceTracker.init(device, vk_command_buffer); errdefer reference_tracker.deinit(); const command_buffer = try allocator.create(CommandBuffer); command_buffer.* = .{ .device = device, .vk_command_buffer = vk_command_buffer, .reference_tracker = reference_tracker, }; return command_buffer; } pub fn deinit(command_buffer: *CommandBuffer) void { // reference_tracker lifetime is managed externally // vk_command_buffer lifetime is managed externally command_buffer.wait_dst_stage_masks.deinit(allocator); command_buffer.wait_semaphores.deinit(allocator); allocator.destroy(command_buffer); } // Internal pub fn upload(command_buffer: *CommandBuffer, size: u64) !StreamingResult { if (command_buffer.upload_next_offset + size > 
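// Out of space on the current page: acquire a fresh upload page from the streaming manager.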
upload_page_size) { const streaming_manager = &command_buffer.device.streaming_manager; std.debug.assert(size <= upload_page_size); // TODO - support large uploads const buffer = try streaming_manager.acquire(); try command_buffer.reference_tracker.referenceUploadPage(buffer); command_buffer.upload_buffer = buffer; command_buffer.upload_map = buffer.map; command_buffer.upload_next_offset = 0; } const offset = command_buffer.upload_next_offset; command_buffer.upload_next_offset = @intCast(utils.alignUp(offset + size, limits.min_uniform_buffer_offset_alignment)); return StreamingResult{ .buffer = command_buffer.upload_buffer.?, .map = command_buffer.upload_map.? + offset, .offset = offset, }; } }; pub const ReferenceTracker = struct { device: *Device, vk_command_buffer: vk.CommandBuffer, buffers: std.ArrayListUnmanaged(*Buffer) = .{}, textures: std.ArrayListUnmanaged(*Texture) = .{}, texture_views: std.ArrayListUnmanaged(*TextureView) = .{}, bind_groups: std.ArrayListUnmanaged(*BindGroup) = .{}, compute_pipelines: std.ArrayListUnmanaged(*ComputePipeline) = .{}, render_pipelines: std.ArrayListUnmanaged(*RenderPipeline) = .{}, upload_pages: std.ArrayListUnmanaged(*Buffer) = .{}, framebuffers: std.ArrayListUnmanaged(vk.Framebuffer) = .{}, pub fn init(device: *Device, vk_command_buffer: vk.CommandBuffer) !*ReferenceTracker { const tracker = try allocator.create(ReferenceTracker); tracker.* = .{ .device = device, .vk_command_buffer = vk_command_buffer, }; return tracker; } pub fn deinit(tracker: *ReferenceTracker) void { const device = tracker.device; const vk_device = tracker.device.vk_device; vkd.freeCommandBuffers(vk_device, device.cmd_pool, 1, @ptrCast(&tracker.vk_command_buffer)); for (tracker.buffers.items) |buffer| { buffer.gpu_count -= 1; buffer.manager.release(); } for (tracker.textures.items) |texture| { texture.manager.release(); } for (tracker.texture_views.items) |texture_view| { texture_view.manager.release(); } for (tracker.bind_groups.items) |group| { for (group.buffers.items) |access| access.buffer.gpu_count -= 1; group.manager.release(); } for (tracker.compute_pipelines.items) |pipeline| { pipeline.manager.release(); } for (tracker.render_pipelines.items) |pipeline| { pipeline.manager.release(); } for (tracker.upload_pages.items) |buffer| { device.streaming_manager.release(buffer); } for (tracker.framebuffers.items) |fb| vkd.destroyFramebuffer(vk_device, fb, null); tracker.buffers.deinit(allocator); tracker.textures.deinit(allocator); tracker.texture_views.deinit(allocator); tracker.bind_groups.deinit(allocator); tracker.compute_pipelines.deinit(allocator); tracker.render_pipelines.deinit(allocator); tracker.upload_pages.deinit(allocator); tracker.framebuffers.deinit(allocator); allocator.destroy(tracker); } pub fn referenceBuffer(tracker: *ReferenceTracker, buffer: *Buffer) !void { buffer.manager.reference(); try tracker.buffers.append(allocator, buffer); } pub fn referenceTexture(tracker: *ReferenceTracker, texture: *Texture) !void { texture.manager.reference(); try tracker.textures.append(allocator, texture); } pub fn referenceTextureView(tracker: *ReferenceTracker, texture_view: *TextureView) !void { texture_view.manager.reference(); try tracker.texture_views.append(allocator, texture_view); } pub fn referenceBindGroup(tracker: *ReferenceTracker, group: *BindGroup) !void { group.manager.reference(); try tracker.bind_groups.append(allocator, group); } pub fn referenceComputePipeline(tracker: *ReferenceTracker, pipeline: *ComputePipeline) !void { pipeline.manager.reference(); 
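// References taken here are dropped in ReferenceTracker.deinit, i.e. only after the GPU has finished with this submission.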
try tracker.compute_pipelines.append(allocator, pipeline); } pub fn referenceRenderPipeline(tracker: *ReferenceTracker, pipeline: *RenderPipeline) !void { pipeline.manager.reference(); try tracker.render_pipelines.append(allocator, pipeline); } pub fn referenceUploadPage(tracker: *ReferenceTracker, upload_page: *Buffer) !void { try tracker.upload_pages.append(allocator, upload_page); } pub fn submit(tracker: *ReferenceTracker) !void { for (tracker.buffers.items) |buffer| { buffer.gpu_count += 1; } for (tracker.bind_groups.items) |group| { for (group.buffers.items) |access| access.buffer.gpu_count += 1; } } }; pub const CommandEncoder = struct { manager: utils.Manager(CommandEncoder) = .{}, device: *Device, command_buffer: *CommandBuffer, reference_tracker: *ReferenceTracker, state_tracker: StateTracker = .{}, pub fn init(device: *Device, desc: ?*const sysgpu.CommandEncoder.Descriptor) !*CommandEncoder { _ = desc; const command_buffer = try CommandBuffer.init(device); const cmd_encoder = try allocator.create(CommandEncoder); cmd_encoder.* = .{ .device = device, .command_buffer = command_buffer, .reference_tracker = command_buffer.reference_tracker, }; return cmd_encoder; } pub fn deinit(cmd_encoder: *CommandEncoder) void { cmd_encoder.state_tracker.deinit(); cmd_encoder.command_buffer.manager.release(); allocator.destroy(cmd_encoder); } pub fn beginComputePass(encoder: *CommandEncoder, desc: *const sysgpu.ComputePassDescriptor) !*ComputePassEncoder { return ComputePassEncoder.init(encoder, desc); } pub fn beginRenderPass(encoder: *CommandEncoder, desc: *const sysgpu.RenderPassDescriptor) !*RenderPassEncoder { try encoder.state_tracker.endPass(); return RenderPassEncoder.init(encoder, desc); } pub fn copyBufferToBuffer( encoder: *CommandEncoder, source: *Buffer, source_offset: u64, destination: *Buffer, destination_offset: u64, size: u64, ) !void { const vk_command_buffer = encoder.command_buffer.vk_command_buffer; try encoder.reference_tracker.referenceBuffer(source); try encoder.reference_tracker.referenceBuffer(destination); try encoder.state_tracker.copyFromBuffer(source); try encoder.state_tracker.writeToBuffer(destination, .{ .transfer_bit = true }, .{ .transfer_write_bit = true }); encoder.state_tracker.flush(vk_command_buffer); const region = vk.BufferCopy{ .src_offset = source_offset, .dst_offset = destination_offset, .size = size, }; vkd.cmdCopyBuffer(vk_command_buffer, source.vk_buffer, destination.vk_buffer, 1, @ptrCast(&region)); } pub fn copyBufferToTexture( encoder: *CommandEncoder, source: *const sysgpu.ImageCopyBuffer, destination: *const sysgpu.ImageCopyTexture, copy_size_raw: *const sysgpu.Extent3D, ) !void { const vk_command_buffer = encoder.command_buffer.vk_command_buffer; const source_buffer: *Buffer = @ptrCast(@alignCast(source.buffer)); const destination_texture: *Texture = @ptrCast(@alignCast(destination.texture)); try encoder.reference_tracker.referenceBuffer(source_buffer); try encoder.reference_tracker.referenceTexture(destination_texture); try encoder.state_tracker.copyFromBuffer(source_buffer); try encoder.state_tracker.writeToTexture( destination_texture, .{ .transfer_bit = true }, .{ .transfer_write_bit = true }, .transfer_dst_optimal, ); encoder.state_tracker.flush(vk_command_buffer); const copy_size = utils.calcExtent(destination_texture.dimension, copy_size_raw.*); const destination_origin = utils.calcOrigin(destination_texture.dimension, destination.origin); const region = vk.BufferImageCopy{ .buffer_offset = source.layout.offset, .buffer_row_length = 
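// NOTE: Vulkan's bufferRowLength is measured in texels, not bytes; the division by 4 below assumes a 4-byte-per-texel format (hence the TODO).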
source.layout.bytes_per_row / 4, // TODO .buffer_image_height = source.layout.rows_per_image, .image_subresource = .{ .aspect_mask = conv.vulkanImageAspectFlags(destination.aspect, destination_texture.format), .mip_level = destination.mip_level, .base_array_layer = destination_origin.array_slice, .layer_count = copy_size.array_count, }, .image_offset = .{ .x = @intCast(destination_origin.x), .y = @intCast(destination_origin.y), .z = @intCast(destination_origin.z), }, .image_extent = .{ .width = copy_size.width, .height = copy_size.height, .depth = copy_size.depth }, }; vkd.cmdCopyBufferToImage( vk_command_buffer, source_buffer.vk_buffer, destination_texture.image, .transfer_dst_optimal, 1, @ptrCast(&region), ); } pub fn copyTextureToTexture( encoder: *CommandEncoder, source: *const sysgpu.ImageCopyTexture, destination: *const sysgpu.ImageCopyTexture, copy_size_raw: *const sysgpu.Extent3D, ) !void { const vk_command_buffer = encoder.command_buffer.vk_command_buffer; const source_texture: *Texture = @ptrCast(@alignCast(source.texture)); const destination_texture: *Texture = @ptrCast(@alignCast(destination.texture)); try encoder.reference_tracker.referenceTexture(source_texture); try encoder.reference_tracker.referenceTexture(destination_texture); try encoder.state_tracker.copyFromTexture(source_texture); try encoder.state_tracker.writeToTexture( destination_texture, .{ .transfer_bit = true }, .{ .transfer_write_bit = true }, .transfer_dst_optimal, ); encoder.state_tracker.flush(vk_command_buffer); const copy_size = utils.calcExtent(destination_texture.dimension, copy_size_raw.*); const source_origin = utils.calcOrigin(source_texture.dimension, source.origin); const destination_origin = utils.calcOrigin(destination_texture.dimension, destination.origin); const region = vk.ImageCopy{ .src_subresource = .{ .aspect_mask = conv.vulkanImageAspectFlags(source.aspect, source_texture.format), .mip_level = source.mip_level, .base_array_layer = source_origin.array_slice, .layer_count = copy_size.array_count, }, .src_offset = .{ .x = @intCast(source_origin.x), .y = @intCast(source_origin.y), .z = @intCast(source_origin.z), }, .dst_subresource = .{ .aspect_mask = conv.vulkanImageAspectFlags(destination.aspect, destination_texture.format), .mip_level = destination.mip_level, .base_array_layer = destination_origin.array_slice, .layer_count = copy_size.array_count, }, .dst_offset = .{ .x = @intCast(destination_origin.x), .y = @intCast(destination_origin.y), .z = @intCast(destination_origin.z), }, .extent = .{ .width = copy_size.width, .height = copy_size.height, .depth = copy_size.depth }, }; vkd.cmdCopyImage( vk_command_buffer, source_texture.image, .transfer_src_optimal, destination_texture.image, .transfer_dst_optimal, 1, @ptrCast(&region), ); } pub fn finish(encoder: *CommandEncoder, desc: *const sysgpu.CommandBuffer.Descriptor) !*CommandBuffer { _ = desc; const vk_command_buffer = encoder.command_buffer.vk_command_buffer; try encoder.state_tracker.endPass(); encoder.state_tracker.flush(vk_command_buffer); try vkd.endCommandBuffer(vk_command_buffer); return encoder.command_buffer; } pub fn writeBuffer(encoder: *CommandEncoder, buffer: *Buffer, offset: u64, data: [*]const u8, size: u64) !void { const stream = try encoder.command_buffer.upload(size); @memcpy(stream.map[0..size], data[0..size]); try encoder.copyBufferToBuffer(stream.buffer, stream.offset, buffer, offset, size); } pub fn writeTexture( encoder: *CommandEncoder, destination: *const sysgpu.ImageCopyTexture, data: [*]const u8, data_size: usize, 
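// writeTexture stages `data` in an upload page, then records a buffer-to-texture copy (see body below).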
data_layout: *const sysgpu.Texture.DataLayout, write_size: *const sysgpu.Extent3D, ) !void { const stream = try encoder.command_buffer.upload(data_size); @memcpy(stream.map[0..data_size], data[0..data_size]); try encoder.copyBufferToTexture( &.{ .layout = .{ .offset = stream.offset, .bytes_per_row = data_layout.bytes_per_row, .rows_per_image = data_layout.rows_per_image, }, .buffer = @ptrCast(stream.buffer), }, destination, write_size, ); } }; pub const StateTracker = struct { const BufferState = struct { stage_mask: vk.PipelineStageFlags, access_mask: vk.AccessFlags, }; const TextureState = struct { stage_mask: vk.PipelineStageFlags, access_mask: vk.AccessFlags, image_layout: vk.ImageLayout, }; device: *Device = undefined, written_buffers: std.AutoHashMapUnmanaged(*Buffer, BufferState) = .{}, copy_buffers: std.AutoHashMapUnmanaged(*Buffer, void) = .{}, written_textures: std.AutoHashMapUnmanaged(*Texture, TextureState) = .{}, copy_textures: std.AutoHashMapUnmanaged(*Texture, void) = .{}, image_barriers: std.ArrayListUnmanaged(vk.ImageMemoryBarrier) = .{}, src_stage_mask: vk.PipelineStageFlags = .{}, dst_stage_mask: vk.PipelineStageFlags = .{}, src_access_mask: vk.AccessFlags = .{}, dst_access_mask: vk.AccessFlags = .{}, pub fn init(tracker: *StateTracker, device: *Device) void { tracker.device = device; } pub fn deinit(tracker: *StateTracker) void { tracker.written_buffers.deinit(allocator); tracker.copy_buffers.deinit(allocator); tracker.written_textures.deinit(allocator); tracker.copy_textures.deinit(allocator); tracker.image_barriers.deinit(allocator); } pub fn accessBindGroup( tracker: *StateTracker, group: *BindGroup, stage_mask: vk.PipelineStageFlags, access_mask: vk.AccessFlags, image_layout: vk.ImageLayout, ) !void { for (group.buffers.items) |access| { const buffer = access.buffer; if (access.storage) { try tracker.writeToBuffer(buffer, stage_mask, access_mask); } else { try tracker.readFromBuffer(buffer); } } for (group.texture_views.items) |access| { const texture = access.texture_view.texture; if (access.storage) { try tracker.writeToTexture(texture, stage_mask, access_mask, image_layout); } else { try tracker.readFromTexture(texture); } } } pub fn writeToBuffer( tracker: *StateTracker, buffer: *Buffer, stage_mask: vk.PipelineStageFlags, access_mask: vk.AccessFlags, ) !void { if (tracker.written_buffers.fetchRemove(buffer)) |write| { // WAW hazard tracker.src_stage_mask = tracker.src_stage_mask.merge(write.value.stage_mask); tracker.dst_stage_mask = tracker.dst_stage_mask.merge(stage_mask); tracker.src_access_mask = tracker.src_access_mask.merge(write.value.access_mask); tracker.dst_access_mask = tracker.dst_access_mask.merge(access_mask); } else if (tracker.copy_buffers.fetchRemove(buffer)) |_| { // WAR hazard tracker.src_stage_mask = tracker.src_stage_mask.merge(.{ .transfer_bit = true }); tracker.dst_stage_mask = tracker.dst_stage_mask.merge(stage_mask); } else { // WAR hazard tracker.src_stage_mask = tracker.src_stage_mask.merge(buffer.read_stage_mask); tracker.dst_stage_mask = tracker.dst_stage_mask.merge(stage_mask); } try tracker.written_buffers.put(allocator, buffer, .{ .stage_mask = stage_mask, .access_mask = access_mask }); } pub fn writeToTexture( tracker: *StateTracker, texture: *Texture, stage_mask: vk.PipelineStageFlags, access_mask: vk.AccessFlags, image_layout: vk.ImageLayout, ) !void { var src_access_mask: vk.AccessFlags = undefined; var old_layout: vk.ImageLayout = undefined; if (tracker.written_textures.fetchRemove(texture)) |write| { // WAW hazard 
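// (a write-after-write needs a memory dependency as well: the previous write's access mask is carried into the barrier; the WAR cases below need only an execution dependency)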
tracker.src_stage_mask = tracker.src_stage_mask.merge(write.value.stage_mask); tracker.dst_stage_mask = tracker.dst_stage_mask.merge(stage_mask); src_access_mask = write.value.access_mask; old_layout = write.value.image_layout; } else if (tracker.copy_textures.fetchRemove(texture)) |_| { // WAR hazard tracker.src_stage_mask = tracker.src_stage_mask.merge(.{ .transfer_bit = true }); tracker.dst_stage_mask = tracker.dst_stage_mask.merge(stage_mask); src_access_mask = .{}; old_layout = .transfer_src_optimal; } else { // WAR hazard tracker.src_stage_mask = tracker.src_stage_mask.merge(texture.read_stage_mask); tracker.dst_stage_mask = tracker.dst_stage_mask.merge(stage_mask); src_access_mask = .{}; old_layout = texture.read_image_layout; } if (old_layout != image_layout) { try tracker.addImageBarrier(texture, src_access_mask, access_mask, old_layout, image_layout); } try tracker.written_textures.put( allocator, texture, .{ .stage_mask = stage_mask, .access_mask = access_mask, .image_layout = image_layout }, ); } pub fn readFromBufferEx( tracker: *StateTracker, buffer: *Buffer, stage_mask: vk.PipelineStageFlags, access_mask: vk.AccessFlags, ) !void { if (tracker.written_buffers.fetchRemove(buffer)) |write| { // RAW hazard tracker.src_stage_mask = tracker.src_stage_mask.merge(write.value.stage_mask); tracker.dst_stage_mask = tracker.dst_stage_mask.merge(stage_mask); tracker.src_access_mask = tracker.src_access_mask.merge(write.value.access_mask); tracker.dst_access_mask = tracker.dst_access_mask.merge(access_mask); } else if (tracker.copy_buffers.fetchRemove(buffer)) |_| { // RAR hazard - no hazard } } pub fn readFromBuffer(tracker: *StateTracker, buffer: *Buffer) !void { try tracker.readFromBufferEx(buffer, buffer.read_stage_mask, buffer.read_access_mask); } pub fn copyFromBuffer(tracker: *StateTracker, buffer: *Buffer) !void { try tracker.readFromBufferEx(buffer, .{ .transfer_bit = true }, .{ .transfer_read_bit = true }); try tracker.copy_buffers.put(allocator, buffer, {}); } pub fn readFromTextureEx( tracker: *StateTracker, texture: *Texture, stage_mask: vk.PipelineStageFlags, access_mask: vk.AccessFlags, image_layout: vk.ImageLayout, ) !void { var src_access_mask: vk.AccessFlags = undefined; var old_layout: vk.ImageLayout = undefined; if (tracker.written_textures.fetchRemove(texture)) |write| { // RAW hazard tracker.src_stage_mask = tracker.src_stage_mask.merge(write.value.stage_mask); tracker.dst_stage_mask = tracker.dst_stage_mask.merge(stage_mask); src_access_mask = write.value.access_mask; old_layout = write.value.image_layout; } else if (tracker.copy_textures.fetchRemove(texture)) |_| { // RAR - no execution hazard but needed for layout transition tracker.dst_stage_mask = tracker.dst_stage_mask.merge(stage_mask); src_access_mask = .{}; old_layout = .transfer_src_optimal; } else { // RAR - no hazard tracker.dst_stage_mask = tracker.dst_stage_mask.merge(stage_mask); src_access_mask = .{}; old_layout = texture.read_image_layout; } if (old_layout != image_layout) { try tracker.addImageBarrier(texture, src_access_mask, access_mask, old_layout, image_layout); } } pub fn readFromTexture(tracker: *StateTracker, texture: *Texture) !void { try tracker.readFromTextureEx(texture, texture.read_stage_mask, texture.read_access_mask, texture.read_image_layout); } pub fn copyFromTexture(tracker: *StateTracker, texture: *Texture) !void { try tracker.readFromTextureEx(texture, .{ .transfer_bit = true }, .{ .transfer_read_bit = true }, .transfer_src_optimal); try tracker.copy_textures.put(allocator, 
texture, {}); } pub fn initTexture(tracker: *StateTracker, texture: *Texture) !void { const src_access_mask = .{}; const old_layout = .undefined; const access_mask = texture.read_access_mask; const image_layout = texture.read_image_layout; tracker.dst_stage_mask = tracker.dst_stage_mask.merge(texture.read_stage_mask); try tracker.addImageBarrier(texture, src_access_mask, access_mask, old_layout, image_layout); } pub fn flush(tracker: *StateTracker, vk_command_buffer: vk.CommandBuffer) void { if (tracker.src_stage_mask.merge(tracker.dst_stage_mask).toInt() == 0 and tracker.image_barriers.items.len == 0) return; var memory_barriers = std.BoundedArray(vk.MemoryBarrier, 1){}; if (tracker.src_access_mask.merge(tracker.dst_access_mask).toInt() != 0) { memory_barriers.appendAssumeCapacity(.{ .src_access_mask = tracker.src_access_mask, .dst_access_mask = tracker.dst_access_mask, }); } // If the synchronization2 feature is not enabled, srcStageMask must not be 0 const src_stage_mask = if (tracker.src_stage_mask.toInt() != 0) tracker.src_stage_mask else vk.PipelineStageFlags{ .top_of_pipe_bit = true }; vkd.cmdPipelineBarrier( vk_command_buffer, src_stage_mask, tracker.dst_stage_mask, .{}, memory_barriers.len, &memory_barriers.buffer, 0, undefined, @intCast(tracker.image_barriers.items.len), tracker.image_barriers.items.ptr, ); tracker.src_stage_mask = .{}; tracker.dst_stage_mask = .{}; tracker.src_access_mask = .{}; tracker.dst_access_mask = .{}; tracker.image_barriers.clearRetainingCapacity(); } pub fn endPass(tracker: *StateTracker) !void { { var it = tracker.written_buffers.iterator(); while (it.next()) |entry| { const buffer = entry.key_ptr.*; const write = entry.value_ptr.*; tracker.src_stage_mask = tracker.src_stage_mask.merge(write.stage_mask); tracker.dst_stage_mask = tracker.dst_stage_mask.merge(buffer.read_stage_mask); tracker.src_access_mask = tracker.src_access_mask.merge(write.access_mask); tracker.dst_access_mask = tracker.dst_access_mask.merge(buffer.read_access_mask); } tracker.written_buffers.clearRetainingCapacity(); } { // no hazard tracker.copy_buffers.clearRetainingCapacity(); } { var it = tracker.written_textures.iterator(); while (it.next()) |entry| { const texture = entry.key_ptr.*; const write = entry.value_ptr.*; tracker.src_stage_mask = tracker.src_stage_mask.merge(write.stage_mask); tracker.dst_stage_mask = tracker.dst_stage_mask.merge(texture.read_stage_mask); const src_access_mask = write.access_mask; const old_layout = write.image_layout; const access_mask = texture.read_access_mask; const image_layout = texture.read_image_layout; if (old_layout != image_layout) { try tracker.addImageBarrier(texture, src_access_mask, access_mask, old_layout, image_layout); } } tracker.written_textures.clearRetainingCapacity(); } { var it = tracker.copy_textures.iterator(); while (it.next()) |entry| { const texture = entry.key_ptr.*; const src_access_mask: vk.AccessFlags = .{}; const old_layout: vk.ImageLayout = .transfer_src_optimal; const access_mask = texture.read_access_mask; const image_layout = texture.read_image_layout; if (old_layout != image_layout) { try tracker.addImageBarrier(texture, src_access_mask, access_mask, old_layout, image_layout); } } tracker.copy_textures.clearRetainingCapacity(); } } fn addImageBarrier( tracker: *StateTracker, texture: *Texture, src_access_mask: vk.AccessFlags, dst_access_mask: vk.AccessFlags, old_layout: vk.ImageLayout, new_layout: vk.ImageLayout, ) !void { const size = utils.calcExtent(texture.dimension, texture.size); try 
tracker.image_barriers.append(allocator, .{ .src_access_mask = src_access_mask, .dst_access_mask = dst_access_mask, .old_layout = old_layout, .new_layout = new_layout, .src_queue_family_index = vk.QUEUE_FAMILY_IGNORED, .dst_queue_family_index = vk.QUEUE_FAMILY_IGNORED, .image = texture.image, .subresource_range = .{ .aspect_mask = conv.vulkanImageAspectFlags(.all, texture.format), .base_mip_level = 0, .level_count = texture.mip_level_count, .base_array_layer = 0, .layer_count = size.array_count, }, }); } }; pub const ComputePassEncoder = struct { manager: utils.Manager(ComputePassEncoder) = .{}, vk_command_buffer: vk.CommandBuffer, reference_tracker: *ReferenceTracker, state_tracker: *StateTracker, pipeline: ?*ComputePipeline = null, bind_groups: [limits.max_bind_groups]*BindGroup = undefined, pub fn init(cmd_encoder: *CommandEncoder, desc: *const sysgpu.ComputePassDescriptor) !*ComputePassEncoder { _ = desc; const vk_command_buffer = cmd_encoder.command_buffer.vk_command_buffer; const encoder = try allocator.create(ComputePassEncoder); encoder.* = .{ .vk_command_buffer = vk_command_buffer, .reference_tracker = cmd_encoder.reference_tracker, .state_tracker = &cmd_encoder.state_tracker, }; return encoder; } pub fn deinit(encoder: *ComputePassEncoder) void { allocator.destroy(encoder); } pub fn dispatchWorkgroups( encoder: *ComputePassEncoder, workgroup_count_x: u32, workgroup_count_y: u32, workgroup_count_z: u32, ) !void { const vk_command_buffer = encoder.vk_command_buffer; const bind_group_count = encoder.pipeline.?.layout.group_layouts.len; for (encoder.bind_groups[0..bind_group_count]) |group| { try encoder.state_tracker.accessBindGroup( group, .{ .compute_shader_bit = true }, .{ .shader_write_bit = true }, .general, ); } encoder.state_tracker.flush(vk_command_buffer); vkd.cmdDispatch(vk_command_buffer, workgroup_count_x, workgroup_count_y, workgroup_count_z); } pub fn end(encoder: *ComputePassEncoder) void { _ = encoder; } pub fn setBindGroup( encoder: *ComputePassEncoder, group_index: u32, group: *BindGroup, dynamic_offset_count: usize, dynamic_offsets: ?[*]const u32, ) !void { const vk_command_buffer = encoder.vk_command_buffer; try encoder.reference_tracker.referenceBindGroup(group); encoder.bind_groups[group_index] = group; vkd.cmdBindDescriptorSets( vk_command_buffer, .compute, encoder.pipeline.?.layout.vk_layout, group_index, 1, @ptrCast(&group.desc_set), @intCast(dynamic_offset_count), if (dynamic_offsets) |offsets| offsets else &[_]u32{}, ); } pub fn setPipeline(encoder: *ComputePassEncoder, pipeline: *ComputePipeline) !void { const vk_command_buffer = encoder.vk_command_buffer; try encoder.reference_tracker.referenceComputePipeline(pipeline); vkd.cmdBindPipeline( vk_command_buffer, .compute, pipeline.vk_pipeline, ); encoder.pipeline = pipeline; } }; pub const RenderPassEncoder = struct { manager: utils.Manager(RenderPassEncoder) = .{}, device: *Device, encoder: *CommandEncoder, vk_command_buffer: vk.CommandBuffer, reference_tracker: *ReferenceTracker, render_pass: vk.RenderPass, framebuffer: vk.Framebuffer, extent: vk.Extent2D, pipeline: ?*RenderPipeline = null, pub fn init(cmd_encoder: *CommandEncoder, desc: *const sysgpu.RenderPassDescriptor) !*RenderPassEncoder { const device = cmd_encoder.device; const vk_device = device.vk_device; const vk_command_buffer = cmd_encoder.command_buffer.vk_command_buffer; const depth_stencil_attachment_count = @intFromBool(desc.depth_stencil_attachment != null); const max_attachment_count = 2 * (desc.color_attachment_count + 
depth_stencil_attachment_count); var image_views = try std.ArrayList(vk.ImageView).initCapacity(allocator, max_attachment_count); defer image_views.deinit(); var clear_values = std.ArrayList(vk.ClearValue).init(allocator); defer clear_values.deinit(); var rp_key = Device.RenderPassKey.init(); var extent: vk.Extent2D = .{ .width = 0, .height = 0 }; for (0..desc.color_attachment_count) |i| { const attach = desc.color_attachments.?[i]; if (attach.view) |view_raw| { const view: *TextureView = @ptrCast(@alignCast(view_raw)); const resolve_view: ?*TextureView = @ptrCast(@alignCast(attach.resolve_target)); try cmd_encoder.reference_tracker.referenceTextureView(view); if (resolve_view) |v| try cmd_encoder.reference_tracker.referenceTextureView(v); if (use_semaphore_wait) { if (view.texture.swapchain) |sc| { try cmd_encoder.command_buffer.wait_semaphores.append(allocator, sc.wait_semaphore); try cmd_encoder.command_buffer.wait_dst_stage_masks.append(allocator, .{ .all_commands_bit = true }); } } image_views.appendAssumeCapacity(view.vk_view); if (resolve_view) |rv| image_views.appendAssumeCapacity(rv.vk_view); rp_key.colors.appendAssumeCapacity(.{ .format = view.vk_format, .samples = view.texture.sample_count, .load_op = attach.load_op, .store_op = attach.store_op, .layout = view.texture.read_image_layout, .resolve = if (resolve_view) |rv| .{ .format = rv.vk_format, .layout = rv.texture.read_image_layout, } else null, }); if (attach.load_op == .clear) { try clear_values.append(.{ .color = .{ .float_32 = [4]f32{ @floatCast(attach.clear_value.r), @floatCast(attach.clear_value.g), @floatCast(attach.clear_value.b), @floatCast(attach.clear_value.a), }, }, }); } extent = view.extent; } } if (desc.depth_stencil_attachment) |attach| { const view: *TextureView = @ptrCast(@alignCast(attach.view)); try cmd_encoder.reference_tracker.referenceTextureView(view); image_views.appendAssumeCapacity(view.vk_view); rp_key.depth_stencil = .{ .format = view.vk_format, .samples = view.texture.sample_count, .depth_load_op = attach.depth_load_op, .depth_store_op = attach.depth_store_op, .stencil_load_op = attach.stencil_load_op, .stencil_store_op = attach.stencil_store_op, .layout = view.texture.read_image_layout, .read_only = attach.depth_read_only == .true or attach.stencil_read_only == .true, }; if (attach.depth_load_op == .clear or attach.stencil_load_op == .clear) { try clear_values.append(.{ .depth_stencil = .{ .depth = attach.depth_clear_value, .stencil = attach.stencil_clear_value, }, }); } extent = view.extent; } const render_pass = try device.createRenderPass(rp_key); const framebuffer = try vkd.createFramebuffer(vk_device, &.{ .render_pass = render_pass, .attachment_count = @as(u32, @intCast(image_views.items.len)), .p_attachments = image_views.items.ptr, .width = extent.width, .height = extent.height, .layers = 1, }, null); try cmd_encoder.reference_tracker.framebuffers.append(allocator, framebuffer); cmd_encoder.state_tracker.flush(vk_command_buffer); const rect = vk.Rect2D{ .offset = .{ .x = 0, .y = 0 }, .extent = extent, }; vkd.cmdBeginRenderPass(vk_command_buffer, &vk.RenderPassBeginInfo{ .render_pass = render_pass, .framebuffer = framebuffer, .render_area = rect, .clear_value_count = @as(u32, @intCast(clear_values.items.len)), .p_clear_values = clear_values.items.ptr, }, .@"inline"); vkd.cmdSetViewport(vk_command_buffer, 0, 1, @as(*const [1]vk.Viewport, &vk.Viewport{ .x = 0, .y = @as(f32, @floatFromInt(extent.height)), .width = @as(f32, @floatFromInt(extent.width)), .height = -@as(f32, 
@floatFromInt(extent.height)), .min_depth = 0, .max_depth = 1, })); vkd.cmdSetScissor(vk_command_buffer, 0, 1, @as(*const [1]vk.Rect2D, &rect)); vkd.cmdSetStencilReference(vk_command_buffer, .{ .front_bit = true, .back_bit = true }, 0); // Result const rpe = try allocator.create(RenderPassEncoder); errdefer allocator.destroy(rpe); rpe.* = .{ .device = device, .encoder = cmd_encoder, .vk_command_buffer = vk_command_buffer, .reference_tracker = cmd_encoder.reference_tracker, .render_pass = render_pass, .framebuffer = framebuffer, .extent = extent, }; return rpe; } pub fn deinit(encoder: *RenderPassEncoder) void { allocator.destroy(encoder); } pub fn draw( encoder: *RenderPassEncoder, vertex_count: u32, instance_count: u32, first_vertex: u32, first_instance: u32, ) !void { const vk_command_buffer = encoder.vk_command_buffer; vkd.cmdDraw(vk_command_buffer, vertex_count, instance_count, first_vertex, first_instance); } pub fn drawIndexed( encoder: *RenderPassEncoder, index_count: u32, instance_count: u32, first_index: u32, base_vertex: i32, first_instance: u32, ) !void { const vk_command_buffer = encoder.vk_command_buffer; vkd.cmdDrawIndexed(vk_command_buffer, index_count, instance_count, first_index, base_vertex, first_instance); } pub fn setBindGroup( encoder: *RenderPassEncoder, group_index: u32, group: *BindGroup, dynamic_offset_count: usize, dynamic_offsets: ?[*]const u32, ) !void { const vk_command_buffer = encoder.vk_command_buffer; try encoder.reference_tracker.referenceBindGroup(group); vkd.cmdBindDescriptorSets( vk_command_buffer, .graphics, encoder.pipeline.?.layout.vk_layout, group_index, 1, @ptrCast(&group.desc_set), @intCast(dynamic_offset_count), if (dynamic_offsets) |offsets| offsets else &[_]u32{}, ); } pub fn end(encoder: *RenderPassEncoder) !void { const vk_command_buffer = encoder.vk_command_buffer; vkd.cmdEndRenderPass(vk_command_buffer); } pub fn setIndexBuffer( encoder: *RenderPassEncoder, buffer: *Buffer, format: sysgpu.IndexFormat, offset: u64, size: u64, ) !void { _ = size; const vk_command_buffer = encoder.vk_command_buffer; try encoder.reference_tracker.referenceBuffer(buffer); vkd.cmdBindIndexBuffer(vk_command_buffer, buffer.vk_buffer, offset, conv.vulkanIndexType(format)); } pub fn setPipeline(encoder: *RenderPassEncoder, pipeline: *RenderPipeline) !void { const vk_command_buffer = encoder.vk_command_buffer; try encoder.reference_tracker.referenceRenderPipeline(pipeline); vkd.cmdBindPipeline(vk_command_buffer, .graphics, pipeline.vk_pipeline); encoder.pipeline = pipeline; } pub fn setScissorRect(encoder: *RenderPassEncoder, x: u32, y: u32, width: u32, height: u32) !void { const vk_command_buffer = encoder.vk_command_buffer; const rect = vk.Rect2D{ .offset = .{ .x = @intCast(x), .y = @intCast(y) }, .extent = .{ .width = width, .height = height }, }; vkd.cmdSetScissor(vk_command_buffer, 0, 1, @as(*const [1]vk.Rect2D, &rect)); } pub fn setVertexBuffer(encoder: *RenderPassEncoder, slot: u32, buffer: *Buffer, offset: u64, size: u64) !void { _ = size; const vk_command_buffer = encoder.vk_command_buffer; try encoder.reference_tracker.referenceBuffer(buffer); vkd.cmdBindVertexBuffers(vk_command_buffer, slot, 1, @ptrCast(&.{buffer.vk_buffer}), @ptrCast(&offset)); } pub fn setViewport( encoder: *RenderPassEncoder, x: f32, y: f32, width: f32, height: f32, min_depth: f32, max_depth: f32, ) !void { const vk_command_buffer = encoder.vk_command_buffer; vkd.cmdSetViewport(vk_command_buffer, 0, 1, @as(*const [1]vk.Viewport, &vk.Viewport{ .x = x, .y = @as(f32, 
@floatFromInt(encoder.extent.height)) - y, .width = width, .height = -height, .min_depth = min_depth, .max_depth = max_depth, })); } }; pub const Queue = struct { manager: utils.Manager(Queue) = .{}, device: *Device, vk_queue: vk.Queue, command_buffers: std.ArrayListUnmanaged(*CommandBuffer) = .{}, wait_semaphores: std.ArrayListUnmanaged(vk.Semaphore) = .{}, wait_dst_stage_masks: std.ArrayListUnmanaged(vk.PipelineStageFlags) = .{}, signal_semaphores: std.ArrayListUnmanaged(vk.Semaphore) = .{}, command_encoder: ?*CommandEncoder = null, pub fn init(device: *Device) !Queue { const vk_device = device.vk_device; const vk_queue = vkd.getDeviceQueue(vk_device, device.adapter.queue_family, 0); return .{ .device = device, .vk_queue = vk_queue, }; } pub fn deinit(queue: *Queue) void { if (queue.command_encoder) |command_encoder| command_encoder.manager.release(); queue.wait_dst_stage_masks.deinit(allocator); queue.wait_semaphores.deinit(allocator); queue.signal_semaphores.deinit(allocator); queue.command_buffers.deinit(allocator); } pub fn submit(queue: *Queue, commands: []const *CommandBuffer) !void { if (queue.command_encoder) |command_encoder| { const command_buffer = try command_encoder.finish(&.{}); command_buffer.manager.reference(); // handled in main.zig defer command_buffer.manager.release(); command_buffer.manager.reference(); try queue.command_buffers.append(allocator, command_buffer); try command_buffer.reference_tracker.submit(); command_encoder.manager.release(); queue.command_encoder = null; } for (commands) |command_buffer| { command_buffer.manager.reference(); try queue.command_buffers.append(allocator, command_buffer); try command_buffer.reference_tracker.submit(); try queue.wait_dst_stage_masks.appendSlice(allocator, command_buffer.wait_dst_stage_masks.items); try queue.wait_semaphores.appendSlice(allocator, command_buffer.wait_semaphores.items); } } pub fn flush(queue: *Queue) !void { if (queue.command_buffers.items.len == 0 and queue.signal_semaphores.items.len == 0) return; const vk_queue = queue.vk_queue; var submit_object = try SubmitObject.init(queue.device); var vk_command_buffers = try std.ArrayListUnmanaged(vk.CommandBuffer).initCapacity( allocator, queue.command_buffers.items.len, ); defer vk_command_buffers.deinit(allocator); for (queue.command_buffers.items) |command_buffer| { vk_command_buffers.appendAssumeCapacity(command_buffer.vk_command_buffer); try submit_object.reference_trackers.append(allocator, command_buffer.reference_tracker); command_buffer.manager.release(); } queue.command_buffers.clearRetainingCapacity(); const submitInfo = vk.SubmitInfo{ .command_buffer_count = @intCast(vk_command_buffers.items.len), .p_command_buffers = vk_command_buffers.items.ptr, .wait_semaphore_count = @intCast(queue.wait_semaphores.items.len), .p_wait_semaphores = queue.wait_semaphores.items.ptr, .p_wait_dst_stage_mask = queue.wait_dst_stage_masks.items.ptr, .signal_semaphore_count = @intCast(queue.signal_semaphores.items.len), .p_signal_semaphores = queue.signal_semaphores.items.ptr, }; try vkd.queueSubmit(vk_queue, 1, @ptrCast(&submitInfo), submit_object.fence); queue.wait_semaphores.clearRetainingCapacity(); queue.wait_dst_stage_masks.clearRetainingCapacity(); queue.signal_semaphores.clearRetainingCapacity(); try queue.device.submit_objects.append(allocator, submit_object); } pub fn writeBuffer(queue: *Queue, buffer: *Buffer, offset: u64, data: [*]const u8, size: u64) !void { const encoder = try queue.getCommandEncoder(); try encoder.writeBuffer(buffer, offset, data, size); } 
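// Note: writeTexture below, like writeBuffer above, stages data through an implicit
// CommandEncoder: the bytes are copied into a mapped upload stream and a GPU transfer
// command is recorded, which is then submitted together with the next submit()/flush().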
pub fn writeTexture( queue: *Queue, destination: *const sysgpu.ImageCopyTexture, data: [*]const u8, data_size: usize, data_layout: *const sysgpu.Texture.DataLayout, write_size: *const sysgpu.Extent3D, ) !void { const encoder = try queue.getCommandEncoder(); try encoder.writeTexture(destination, data, data_size, data_layout, write_size); } // Private fn getCommandEncoder(queue: *Queue) !*CommandEncoder { if (queue.command_encoder) |command_encoder| return command_encoder; const command_encoder = try CommandEncoder.init(queue.device, &.{}); queue.command_encoder = command_encoder; return command_encoder; } }; const MemoryAllocator = struct { info: vk.PhysicalDeviceMemoryProperties, const MemoryKind = enum { lazily_allocated, linear, linear_read_mappable, linear_write_mappable, }; fn init(physical_device: vk.PhysicalDevice) MemoryAllocator { const mem_info = vki.getPhysicalDeviceMemoryProperties(physical_device); return .{ .info = mem_info }; } fn findBestAllocator( mem_alloc: *MemoryAllocator, requirements: vk.MemoryRequirements, mem_kind: MemoryKind, ) ?u32 { const mem_types = mem_alloc.info.memory_types[0..mem_alloc.info.memory_type_count]; const mem_heaps = mem_alloc.info.memory_heaps[0..mem_alloc.info.memory_heap_count]; var best_type: ?u32 = null; for (mem_types, 0..) |mem_type, i| { if (requirements.memory_type_bits & (@as(u32, @intCast(1)) << @intCast(i)) == 0) continue; const flags = mem_type.property_flags; const heap_size = mem_heaps[mem_type.heap_index].size; const candidate = switch (mem_kind) { .lazily_allocated => flags.lazily_allocated_bit, .linear_write_mappable => flags.host_visible_bit and flags.host_coherent_bit and !flags.device_coherent_bit_amd, .linear_read_mappable => blk: { if (flags.host_visible_bit and flags.host_coherent_bit and !flags.device_coherent_bit_amd) { if (best_type) |best| { if (mem_types[best].property_flags.host_cached_bit) { if (flags.host_cached_bit) { const best_heap_size = mem_heaps[mem_types[best].heap_index].size; if (heap_size > best_heap_size) { break :blk true; } } break :blk false; } } break :blk true; } break :blk false; }, .linear => blk: { if (best_type) |best| { if (mem_types[best].property_flags.device_local_bit) { if (flags.device_local_bit and !flags.device_coherent_bit_amd) { const best_heap_size = mem_heaps[mem_types[best].heap_index].size; if (heap_size > best_heap_size or flags.host_visible_bit) { break :blk true; } } break :blk false; } } break :blk true; }, }; if (candidate) best_type = @intCast(i); } return best_type; } }; test "reference declarations" { std.testing.refAllDeclsRecursive(@This()); }
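A note on the StateTracker above: it classifies hazards by the pair of previous and next use of a resource. Read-after-read needs no barrier; write-after-read needs only an execution dependency (stage masks are merged); read-after-write and write-after-write additionally need memory visibility (access masks are merged, plus an image barrier when a layout transition is required). A minimal, self-contained sketch of that classification follows; the `Use`, `Hazard`, and `classify` names are hypothetical and not part of mach-sysgpu:

const std = @import("std");

const Use = enum { read, write };

const Hazard = enum {
    none, // no barrier needed
    execution_only, // stage masks only, no access masks
    memory, // stage masks plus access masks (visibility)
};

// Classify the hazard between a resource's previous use and its next use,
// mirroring the decisions the StateTracker makes when merging barrier masks.
fn classify(previous: Use, next: Use) Hazard {
    return switch (next) {
        .read => switch (previous) {
            .read => .none, // RAR: concurrent reads don't conflict
            .write => .memory, // RAW: the write must be made visible
        },
        .write => switch (previous) {
            .read => .execution_only, // WAR: ordering suffices, nothing to flush
            .write => .memory, // WAW: the first write must complete and be visible
        },
    };
}

test "hazard classification matches the StateTracker's barrier choices" {
    try std.testing.expectEqual(Hazard.none, classify(.read, .read));
    try std.testing.expectEqual(Hazard.memory, classify(.write, .read));
    try std.testing.expectEqual(Hazard.execution_only, classify(.read, .write));
    try std.testing.expectEqual(Hazard.memory, classify(.write, .write));
}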
0
repos/mach-sysgpu
repos/mach-sysgpu/src/conventions.md
### Object ordering

Backends should be a single file with objects in the following order:

- Instance
- Adapter
- Surface
- SurfaceCapabilities
- Device
- SwapChain
- Buffer
- Texture
- TextureView
- Sampler
- BindGroupLayout
- BindGroup
- PipelineLayout
- ShaderModule
- ComputePipeline
- RenderPipeline
- CommandBuffer
- CommandEncoder
- ComputePassEncoder
- RenderPassEncoder
- RenderBundle
- RenderBundleEncoder
- Queue
- QuerySet

Utility objects (e.g. StateTracker) should come after the closest object that "owns" them.
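As a sketch only (a hypothetical `foo.zig`, not an actual backend), a file following this ordering would look like the skeleton below; the StateTracker placement mirrors vulkan.zig, where it sits directly after the CommandEncoder that owns it:

```zig
// foo.zig — hypothetical backend skeleton illustrating the ordering above.
pub const Instance = struct {};
pub const Adapter = struct {};
pub const Surface = struct {};
pub const SurfaceCapabilities = struct {};
pub const Device = struct {};
pub const SwapChain = struct {};
pub const Buffer = struct {};
pub const Texture = struct {};
pub const TextureView = struct {};
pub const Sampler = struct {};
pub const BindGroupLayout = struct {};
pub const BindGroup = struct {};
pub const PipelineLayout = struct {};
pub const ShaderModule = struct {};
pub const ComputePipeline = struct {};
pub const RenderPipeline = struct {};
pub const CommandBuffer = struct {};
pub const CommandEncoder = struct {};
// Utility owned by CommandEncoder, so it follows it directly:
pub const StateTracker = struct {};
pub const ComputePassEncoder = struct {};
pub const RenderPassEncoder = struct {};
pub const RenderBundle = struct {};
pub const RenderBundleEncoder = struct {};
pub const Queue = struct {};
pub const QuerySet = struct {};
```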
0
repos/mach-sysgpu
repos/mach-sysgpu/src/shader.zig
const std = @import("std"); pub const CodeGen = @import("shader/CodeGen.zig"); pub const Air = @import("shader/Air.zig"); pub const Ast = @import("shader/Ast.zig"); pub const Parser = @import("shader/Parser.zig"); pub const Token = @import("shader/Token.zig"); pub const Tokenizer = @import("shader/Tokenizer.zig"); pub const ErrorList = @import("shader/ErrorList.zig"); pub const printAir = @import("shader/print_air.zig").printAir; test "reference declarations" { std.testing.refAllDecls(CodeGen); std.testing.refAllDecls(Air); std.testing.refAllDecls(Ast); std.testing.refAllDecls(Parser); std.testing.refAllDecls(Token); std.testing.refAllDecls(Tokenizer); std.testing.refAllDecls(ErrorList); _ = printAir; _ = @import("shader/test.zig"); }
0
repos/mach-sysgpu
repos/mach-sysgpu/src/opengl.zig
const std = @import("std"); const builtin = @import("builtin"); const sysgpu = @import("sysgpu/main.zig"); const limits = @import("limits.zig"); const utils = @import("utils.zig"); const shader = @import("shader.zig"); const c = @import("opengl/c.zig"); const conv = @import("opengl/conv.zig"); const proc = @import("opengl/proc.zig"); const log = std.log.scoped(.opengl); const instance_class_name = "sysgpu-hwnd"; const upload_page_size = 64 * 1024 * 1024; // TODO - split writes and/or support large uploads const gl_major_version = 4; const gl_minor_version = 6; // TODO - lower this after initial implementation is complete const use_buffer_storage = true; const max_back_buffer_count = 3; var allocator: std.mem.Allocator = undefined; var debug_enabled: bool = undefined; pub const InitOptions = struct { debug_enabled: bool = builtin.mode == .Debug, }; pub fn init(alloc: std.mem.Allocator, options: InitOptions) !void { allocator = alloc; debug_enabled = options.debug_enabled; } const BindingPoint = shader.CodeGen.BindingPoint; const BindingTable = shader.CodeGen.BindingTable; const MapCallback = struct { buffer: *Buffer, callback: sysgpu.Buffer.MapCallback, userdata: ?*anyopaque, }; const ActiveContext = struct { old_hdc: c.HDC, old_hglrc: c.HGLRC, pub fn init(hdc: c.HDC, hglrc: c.HGLRC) ActiveContext { const old_hdc = c.wglGetCurrentDC(); const old_hglrc = c.wglGetCurrentContext(); if (c.wglMakeCurrent(hdc, hglrc) == c.FALSE) @panic("ActiveContext failed"); return .{ .old_hdc = old_hdc, .old_hglrc = old_hglrc }; } pub fn deinit(ctx: *ActiveContext) void { _ = c.wglMakeCurrent(ctx.old_hdc, ctx.old_hglrc); } }; fn createDummyWindow() c.HWND { const hinstance = c.GetModuleHandleA(null); const dwExStyle = c.WS_EX_OVERLAPPEDWINDOW; const dwStyle = c.WS_CLIPSIBLINGS | c.WS_CLIPCHILDREN; return c.CreateWindowExA( dwExStyle, instance_class_name, instance_class_name, dwStyle, 0, 0, 1, 1, null, null, hinstance, null, ); } fn setPixelFormat(wgl: *proc.InstanceWGL, hwnd: c.HWND) !c_int { const hdc = c.GetDC(hwnd); const format_attribs = [_]c_int{ c.WGL_DRAW_TO_WINDOW_ARB, c.GL_TRUE, c.WGL_SUPPORT_OPENGL_ARB, c.GL_TRUE, c.WGL_DOUBLE_BUFFER_ARB, c.GL_TRUE, c.WGL_PIXEL_TYPE_ARB, c.WGL_TYPE_RGBA_ARB, c.WGL_COLOR_BITS_ARB, 32, 0, }; var num_formats: c_uint = undefined; var pixel_format: c_int = undefined; if (wgl.choosePixelFormatARB(hdc, &format_attribs, null, 1, &pixel_format, &num_formats) == c.FALSE) return error.ChoosePixelFormatARBFailed; if (num_formats == 0) return error.NoFormatsAvailable; var pfd: c.PIXELFORMATDESCRIPTOR = undefined; if (c.DescribePixelFormat(hdc, pixel_format, @sizeOf(@TypeOf(pfd)), &pfd) == c.FALSE) return error.DescribePixelFormatFailed; if (c.SetPixelFormat(hdc, pixel_format, &pfd) == c.FALSE) return error.SetPixelFormatFailed; return pixel_format; } fn messageCallback( source: c.GLenum, message_type: c.GLenum, id: c.GLuint, severity: c.GLenum, length: c.GLsizei, message: [*c]const c.GLchar, user_data: ?*const anyopaque, ) callconv(.C) void { _ = source; _ = length; _ = user_data; switch (id) { 0x20071 => return, // Buffer detailed info else => {}, } std.debug.print("GL CALLBACK: {s} type = 0x{x}, id = 0x{x}, severity = 0x{x}, message = {s}\n", .{ if (message_type == c.GL_DEBUG_TYPE_ERROR) "** GL ERROR **" else "", message_type, id, severity, message, }); } fn checkError(gl: *proc.DeviceGL) void { const err = gl.getError(); if (err != c.GL_NO_ERROR) { std.debug.print("glGetError {x}\n", .{err}); } } pub const Instance = struct { manager: utils.Manager(Instance) = .{}, wgl: 
proc.InstanceWGL, pub fn init(desc: *const sysgpu.Instance.Descriptor) !*Instance { // TODO _ = desc; // WNDCLASS const hinstance = c.GetModuleHandleA(null); const wc: c.WNDCLASSA = .{ .lpfnWndProc = c.DefWindowProcA, .hInstance = hinstance, .lpszClassName = instance_class_name, .style = c.CS_OWNDC, }; if (c.RegisterClassA(&wc) == 0) return error.RegisterClassFailed; // Dummy context const hwnd = createDummyWindow(); const hdc = c.GetDC(hwnd); const pfd = c.PIXELFORMATDESCRIPTOR{ .nSize = @sizeOf(c.PIXELFORMATDESCRIPTOR), .nVersion = 1, .dwFlags = c.PFD_DRAW_TO_WINDOW | c.PFD_SUPPORT_OPENGL | c.PFD_DOUBLEBUFFER, .iPixelType = c.PFD_TYPE_RGBA, .cColorBits = 32, .iLayerType = c.PFD_MAIN_PLANE, }; const pixel_format = c.ChoosePixelFormat(hdc, &pfd); if (c.SetPixelFormat(hdc, pixel_format, &pfd) == c.FALSE) return error.SetPixelFormatFailed; const hglrc = c.wglCreateContext(hdc); if (hglrc == null) return error.WGLCreateContextFailed; defer _ = c.wglDeleteContext(hglrc); // Extension procs try proc.init(); var ctx = ActiveContext.init(hdc, hglrc); defer ctx.deinit(); var wgl: proc.InstanceWGL = undefined; wgl.load(); // Result const instance = try allocator.create(Instance); instance.* = .{ .wgl = wgl, }; return instance; } pub fn deinit(instance: *Instance) void { const hinstance = c.GetModuleHandleA(null); proc.deinit(); _ = c.UnregisterClassA(instance_class_name, hinstance); allocator.destroy(instance); } pub fn createSurface(instance: *Instance, desc: *const sysgpu.Surface.Descriptor) !*Surface { return Surface.init(instance, desc); } }; pub const Adapter = struct { manager: utils.Manager(Adapter) = .{}, hwnd: ?c.HWND, hdc: c.HDC, hglrc: c.HGLRC, pixel_format: c_int, vendor: [*c]const c.GLubyte, renderer: [*c]const c.GLubyte, version: [*c]const c.GLubyte, pub fn init(instance: *Instance, options: *const sysgpu.RequestAdapterOptions) !*Adapter { const wgl = &instance.wgl; // Use hwnd from surface if provided var hwnd: c.HWND = undefined; var pixel_format: c_int = undefined; if (options.compatible_surface) |surface_raw| { const surface: *Surface = @ptrCast(@alignCast(surface_raw)); hwnd = surface.hwnd; pixel_format = surface.pixel_format; } else { hwnd = createDummyWindow(); pixel_format = try setPixelFormat(wgl, hwnd); } // GL context const hdc = c.GetDC(hwnd); if (hdc == null) return error.GetDCFailed; const context_attribs = [_]c_int{ c.WGL_CONTEXT_MAJOR_VERSION_ARB, gl_major_version, c.WGL_CONTEXT_MINOR_VERSION_ARB, gl_minor_version, c.WGL_CONTEXT_FLAGS_ARB, c.WGL_CONTEXT_DEBUG_BIT_ARB, c.WGL_CONTEXT_PROFILE_MASK_ARB, c.WGL_CONTEXT_CORE_PROFILE_BIT_ARB, 0, }; const hglrc = wgl.createContextAttribsARB(hdc, null, &context_attribs); if (hglrc == null) return error.WGLCreateContextFailed; var ctx = ActiveContext.init(hdc, hglrc); defer ctx.deinit(); var gl: proc.AdapterGL = undefined; gl.load(); const vendor = gl.getString(c.GL_VENDOR); const renderer = gl.getString(c.GL_RENDERER); const version = gl.getString(c.GL_VERSION); // Result const adapter = try allocator.create(Adapter); adapter.* = .{ .hwnd = if (options.compatible_surface == null) hwnd else null, .hdc = hdc, .pixel_format = pixel_format, .hglrc = hglrc, .vendor = vendor, .renderer = renderer, .version = version, }; return adapter; } pub fn deinit(adapter: *Adapter) void { _ = c.wglDeleteContext(adapter.hglrc); if (adapter.hwnd) |hwnd| _ = c.DestroyWindow(hwnd); allocator.destroy(adapter); } pub fn createDevice(adapter: *Adapter, desc: ?*const sysgpu.Device.Descriptor) !*Device { return Device.init(adapter, desc); } pub fn
getProperties(adapter: *Adapter) sysgpu.Adapter.Properties { return .{ .vendor_id = 0, // TODO .vendor_name = adapter.vendor, .architecture = adapter.renderer, .device_id = 0, // TODO .name = adapter.vendor, // TODO .driver_description = adapter.version, .adapter_type = .unknown, .backend_type = .opengl, .compatibility_mode = .false, }; } }; pub const Surface = struct { manager: utils.Manager(Surface) = .{}, hwnd: c.HWND, pixel_format: c_int, pub fn init(instance: *Instance, desc: *const sysgpu.Surface.Descriptor) !*Surface { const wgl = &instance.wgl; if (utils.findChained(sysgpu.Surface.DescriptorFromWindowsHWND, desc.next_in_chain.generic)) |win_desc| { // workaround issues with @alignCast panicking as HWND is not a real pointer var hwnd: c.HWND = undefined; @memcpy(std.mem.asBytes(&hwnd), std.mem.asBytes(&win_desc.hwnd)); const pixel_format = try setPixelFormat(wgl, hwnd); const surface = try allocator.create(Surface); surface.* = .{ .hwnd = hwnd, .pixel_format = pixel_format, }; return surface; } else { return error.InvalidDescriptor; } } pub fn deinit(surface: *Surface) void { allocator.destroy(surface); } }; pub const Device = struct { manager: utils.Manager(Device) = .{}, queue: *Queue, hdc: c.HDC, hglrc: c.HGLRC, pixel_format: c_int, gl: proc.DeviceGL, streaming_manager: StreamingManager = undefined, reference_trackers: std.ArrayListUnmanaged(*ReferenceTracker) = .{}, map_callbacks: std.ArrayListUnmanaged(MapCallback) = .{}, lost_cb: ?sysgpu.Device.LostCallback = null, lost_cb_userdata: ?*anyopaque = null, log_cb: ?sysgpu.LoggingCallback = null, log_cb_userdata: ?*anyopaque = null, err_cb: ?sysgpu.ErrorCallback = null, err_cb_userdata: ?*anyopaque = null, pub fn init(adapter: *Adapter, desc: ?*const sysgpu.Device.Descriptor) !*Device { // TODO _ = desc; var ctx = ActiveContext.init(adapter.hdc, adapter.hglrc); defer ctx.deinit(); var gl: proc.DeviceGL = undefined; gl.loadVersion(gl_major_version, gl_minor_version); // Default state gl.enable(c.GL_SCISSOR_TEST); gl.enable(c.GL_PRIMITIVE_RESTART_FIXED_INDEX); gl.enable(c.GL_FRAMEBUFFER_SRGB); if (debug_enabled) { gl.enable(c.GL_DEBUG_OUTPUT); gl.enable(c.GL_DEBUG_OUTPUT_SYNCHRONOUS); gl.debugMessageCallback(messageCallback, null); } // Queue const queue = try allocator.create(Queue); errdefer allocator.destroy(queue); // Object var device = try allocator.create(Device); device.* = .{ .queue = queue, .hdc = adapter.hdc, .hglrc = adapter.hglrc, .pixel_format = adapter.pixel_format, .gl = gl, }; // Initialize device.queue.* = try Queue.init(device); errdefer queue.deinit(); device.streaming_manager = try StreamingManager.init(device); errdefer device.streaming_manager.deinit(); return device; } pub fn deinit(device: *Device) void { if (device.lost_cb) |lost_cb| { lost_cb(.destroyed, "Device was destroyed.", device.lost_cb_userdata); } device.waitAll() catch {}; device.processQueuedOperations(); device.map_callbacks.deinit(allocator); device.reference_trackers.deinit(allocator); device.streaming_manager.deinit(); device.queue.manager.release(); allocator.destroy(device.queue); allocator.destroy(device); } pub fn createBindGroup(device: *Device, desc: *const sysgpu.BindGroup.Descriptor) !*BindGroup { return BindGroup.init(device, desc); } pub fn createBindGroupLayout(device: *Device, desc: *const sysgpu.BindGroupLayout.Descriptor) !*BindGroupLayout { return BindGroupLayout.init(device, desc); } pub fn createBuffer(device: *Device, desc: *const sysgpu.Buffer.Descriptor) !*Buffer { return Buffer.init(device, desc); } pub fn 
createCommandEncoder(device: *Device, desc: *const sysgpu.CommandEncoder.Descriptor) !*CommandEncoder { return CommandEncoder.init(device, desc); } pub fn createComputePipeline(device: *Device, desc: *const sysgpu.ComputePipeline.Descriptor) !*ComputePipeline { return ComputePipeline.init(device, desc); } pub fn createPipelineLayout(device: *Device, desc: *const sysgpu.PipelineLayout.Descriptor) !*PipelineLayout { return PipelineLayout.init(device, desc); } pub fn createRenderPipeline(device: *Device, desc: *const sysgpu.RenderPipeline.Descriptor) !*RenderPipeline { return RenderPipeline.init(device, desc); } pub fn createSampler(device: *Device, desc: *const sysgpu.Sampler.Descriptor) !*Sampler { return Sampler.init(device, desc); } pub fn createShaderModuleAir(device: *Device, air: *shader.Air, label: [*:0]const u8) !*ShaderModule { _ = label; return ShaderModule.initAir(device, air); } pub fn createShaderModuleSpirv(device: *Device, code: []const u8) !*ShaderModule { _ = code; _ = device; return error.Unsupported; } pub fn createSwapChain(device: *Device, surface: *Surface, desc: *const sysgpu.SwapChain.Descriptor) !*SwapChain { return SwapChain.init(device, surface, desc); } pub fn createTexture(device: *Device, desc: *const sysgpu.Texture.Descriptor) !*Texture { return Texture.init(device, desc); } pub fn getQueue(device: *Device) !*Queue { return device.queue; } pub fn tick(device: *Device) !void { device.processQueuedOperations(); } // Internal pub fn processQueuedOperations(device: *Device) void { // Reference trackers if (device.reference_trackers.items.len > 0) { const gl = &device.gl; var ctx = ActiveContext.init(device.hdc, device.hglrc); defer ctx.deinit(); var i: usize = 0; while (i < device.reference_trackers.items.len) { const reference_tracker = device.reference_trackers.items[i]; var status: c.GLenum = undefined; gl.getSynciv(reference_tracker.sync, c.GL_SYNC_STATUS, @sizeOf(c.GLenum), null, @ptrCast(&status)); if (status == c.GL_SIGNALED) { reference_tracker.deinit(); _ = device.reference_trackers.swapRemove(i); } else { i += 1; } } } // MapAsync { var i: usize = 0; while (i < device.map_callbacks.items.len) { const map_callback = device.map_callbacks.items[i]; if (map_callback.buffer.gpu_count == 0) { map_callback.buffer.executeMapAsync(map_callback); _ = device.map_callbacks.swapRemove(i); } else { i += 1; } } } } fn waitAll(device: *Device) !void { if (device.reference_trackers.items.len > 0) { const gl = &device.gl; var ctx = ActiveContext.init(device.hdc, device.hglrc); defer ctx.deinit(); for (device.reference_trackers.items) |reference_tracker| { _ = gl.clientWaitSync(reference_tracker.sync, c.GL_SYNC_FLUSH_COMMANDS_BIT, std.math.maxInt(u64)); } } } }; pub const StreamingManager = struct { device: *Device, free_buffers: std.ArrayListUnmanaged(*Buffer) = .{}, pub fn init(device: *Device) !StreamingManager { return .{ .device = device, }; } pub fn deinit(manager: *StreamingManager) void { for (manager.free_buffers.items) |buffer| buffer.manager.release(); manager.free_buffers.deinit(allocator); } pub fn acquire(manager: *StreamingManager) !*Buffer { const device = manager.device; // Recycle finished buffers if (manager.free_buffers.items.len == 0) { device.processQueuedOperations(); } // Create new buffer if (manager.free_buffers.items.len == 0) { const buffer = try Buffer.init(device, &.{ .label = "upload", .usage = .{ .copy_src = true, .map_write = true, }, .size = upload_page_size, .mapped_at_creation = .true, }); errdefer _ = buffer.manager.release(); try 
manager.free_buffers.append(allocator, buffer); } // Result return manager.free_buffers.pop(); } pub fn release(manager: *StreamingManager, buffer: *Buffer) void { manager.free_buffers.append(allocator, buffer) catch { std.debug.panic("OutOfMemory", .{}); }; } }; pub const SwapChain = struct { manager: utils.Manager(SwapChain) = .{}, device: *Device, hdc: c.HDC, pixel_format: c_int, back_buffer_count: u32, textures: [max_back_buffer_count]*Texture, views: [max_back_buffer_count]*TextureView, pub fn init(device: *Device, surface: *Surface, desc: *const sysgpu.SwapChain.Descriptor) !*SwapChain { const swapchain = try allocator.create(SwapChain); const back_buffer_count: u32 = if (desc.present_mode == .mailbox) 3 else 2; var textures = std.BoundedArray(*Texture, max_back_buffer_count){}; var views = std.BoundedArray(*TextureView, max_back_buffer_count){}; errdefer { for (views.slice()) |view| view.manager.release(); for (textures.slice()) |texture| texture.manager.release(); } for (0..back_buffer_count) |_| { const texture = try Texture.initForSwapChain(device, desc, swapchain); const view = try texture.createView(&sysgpu.TextureView.Descriptor{}); textures.appendAssumeCapacity(texture); views.appendAssumeCapacity(view); } swapchain.* = .{ .device = device, .hdc = c.GetDC(surface.hwnd), .pixel_format = surface.pixel_format, .back_buffer_count = back_buffer_count, .textures = textures.buffer, .views = views.buffer, }; return swapchain; } pub fn deinit(swapchain: *SwapChain) void { for (swapchain.views[0..swapchain.back_buffer_count]) |view| view.manager.release(); for (swapchain.textures[0..swapchain.back_buffer_count]) |texture| texture.manager.release(); allocator.destroy(swapchain); } pub fn getCurrentTextureView(swapchain: *SwapChain) !*TextureView { const index = 0; // TEMP - resolve reference tracking in main.zig swapchain.views[index].manager.reference(); return swapchain.views[index]; } pub fn present(swapchain: *SwapChain) !void { const device = swapchain.device; var ctx = ActiveContext.init(swapchain.hdc, device.hglrc); defer ctx.deinit(); if (c.SwapBuffers(swapchain.hdc) == c.FALSE) return error.SwapBuffersFailed; } }; pub const Buffer = struct { manager: utils.Manager(Buffer) = .{}, device: *Device, target: c.GLenum, handle: c.GLuint, gpu_count: u32 = 0, map: ?[*]u8, mapped_at_creation: bool, // TODO - packed buffer descriptor struct size: u64, usage: sysgpu.Buffer.UsageFlags, pub fn init(device: *Device, desc: *const sysgpu.Buffer.Descriptor) !*Buffer { const gl = &device.gl; var ctx = ActiveContext.init(device.hdc, device.hglrc); defer ctx.deinit(); const target = conv.glTargetForBuffer(desc.usage); var handle: c.GLuint = undefined; gl.genBuffers(1, &handle); gl.bindBuffer(target, handle); if (use_buffer_storage) { const flags = conv.glBufferStorageFlags(desc.usage, desc.mapped_at_creation); gl.bufferStorage(target, @intCast(desc.size), null, flags); } else { const usage = conv.glBufferDataUsage(desc.usage, desc.mapped_at_creation); gl.bufferData(target, desc.size, null, usage); } // TODO - create an upload buffer instead of using persistent mapping when map_read/write are both false var map: ?*anyopaque = null; const access = conv.glMapAccess(desc.usage, desc.mapped_at_creation); if (access != 0) { map = gl.mapBufferRange(target, 0, @intCast(desc.size), access); } const buffer = try allocator.create(Buffer); buffer.* = .{ .device = device, .target = target, .handle = handle, .size = desc.size, .usage = desc.usage, .map = @ptrCast(map), .mapped_at_creation = 
desc.mapped_at_creation == .true, }; return buffer; } pub fn deinit(buffer: *Buffer) void { const device = buffer.device; const gl = &device.gl; var ctx = ActiveContext.init(device.hdc, device.hglrc); defer ctx.deinit(); gl.deleteBuffers(1, &buffer.handle); allocator.destroy(buffer); } pub fn getMappedRange(buffer: *Buffer, offset: usize, size: usize) !?*anyopaque { return @ptrCast(buffer.map.?[offset .. offset + size]); } pub fn getSize(buffer: *Buffer) u64 { return buffer.size; } pub fn getUsage(buffer: *Buffer) sysgpu.Buffer.UsageFlags { return buffer.usage; } pub fn mapAsync( buffer: *Buffer, mode: sysgpu.MapModeFlags, offset: usize, size: usize, callback: sysgpu.Buffer.MapCallback, userdata: ?*anyopaque, ) !void { _ = mode; _ = size; _ = offset; const map_callback = MapCallback{ .buffer = buffer, .callback = callback, .userdata = userdata }; if (buffer.gpu_count == 0) { buffer.executeMapAsync(map_callback); } else { try buffer.device.map_callbacks.append(allocator, map_callback); } } pub fn setLabel(buffer: *Buffer, label: [*:0]const u8) void { _ = label; _ = buffer; } pub fn unmap(buffer: *Buffer) !void { if (buffer.mapped_at_creation) { const device = buffer.device; const gl = &device.gl; var ctx = ActiveContext.init(device.hdc, device.hglrc); defer ctx.deinit(); gl.bindBuffer(buffer.target, buffer.handle); _ = gl.unmapBuffer(buffer.target); // TODO - handle error? buffer.mapped_at_creation = false; } } // Internal pub fn executeMapAsync(buffer: *Buffer, map_callback: MapCallback) void { _ = buffer; map_callback.callback(.success, map_callback.userdata); } }; pub const Texture = struct { manager: utils.Manager(Texture) = .{}, handle: c.GLuint, swapchain: ?*SwapChain = null, // TODO - packed texture descriptor struct usage: sysgpu.Texture.UsageFlags, dimension: sysgpu.Texture.Dimension, size: sysgpu.Extent3D, format: sysgpu.Texture.Format, mip_level_count: u32, sample_count: u32, pub fn init(device: *Device, desc: *const sysgpu.Texture.Descriptor) !*Texture { _ = device; const texture = try allocator.create(Texture); texture.* = .{ .handle = 0, .swapchain = null, .usage = desc.usage, .dimension = desc.dimension, .size = desc.size, .format = desc.format, .mip_level_count = desc.mip_level_count, .sample_count = desc.sample_count, }; return texture; } pub fn initForSwapChain(device: *Device, desc: *const sysgpu.SwapChain.Descriptor, swapchain: *SwapChain) !*Texture { _ = device; const texture = try allocator.create(Texture); texture.* = .{ .handle = 0, .swapchain = swapchain, .usage = desc.usage, .dimension = .dimension_2d, .size = .{ .width = desc.width, .height = desc.height, .depth_or_array_layers = 1 }, .format = desc.format, .mip_level_count = 1, .sample_count = 1, }; return texture; } pub fn deinit(texture: *Texture) void { allocator.destroy(texture); } pub fn createView(texture: *Texture, desc: *const sysgpu.TextureView.Descriptor) !*TextureView { return TextureView.init(texture, desc); } }; pub const TextureView = struct { manager: utils.Manager(TextureView) = .{}, texture: *Texture, format: sysgpu.Texture.Format, dimension: sysgpu.TextureView.Dimension, base_mip_level: u32, mip_level_count: u32, base_array_layer: u32, array_layer_count: u32, aspect: sysgpu.Texture.Aspect, pub fn init(texture: *Texture, desc: *const sysgpu.TextureView.Descriptor) !*TextureView { texture.manager.reference(); const texture_dimension: sysgpu.TextureView.Dimension = switch (texture.dimension) { .dimension_1d => .dimension_1d, .dimension_2d => .dimension_2d, .dimension_3d => .dimension_3d, }; const 
view = try allocator.create(TextureView); view.* = .{ .texture = texture, .format = if (desc.format != .undefined) desc.format else texture.format, .dimension = if (desc.dimension != .dimension_undefined) desc.dimension else texture_dimension, .base_mip_level = desc.base_mip_level, .mip_level_count = desc.mip_level_count, .base_array_layer = desc.base_array_layer, .array_layer_count = desc.array_layer_count, .aspect = desc.aspect, }; return view; } pub fn deinit(view: *TextureView) void { view.texture.manager.release(); allocator.destroy(view); } // Internal pub fn width(view: *TextureView) u32 { return @max(1, view.texture.size.width >> @intCast(view.base_mip_level)); } pub fn height(view: *TextureView) u32 { return @max(1, view.texture.size.height >> @intCast(view.base_mip_level)); } }; pub const Sampler = struct { manager: utils.Manager(Sampler) = .{}, pub fn init(device: *Device, desc: *const sysgpu.Sampler.Descriptor) !*Sampler { _ = desc; _ = device; const sampler = try allocator.create(Sampler); sampler.* = .{}; return sampler; } pub fn deinit(sampler: *Sampler) void { allocator.destroy(sampler); } }; pub const BindGroupLayout = struct { manager: utils.Manager(BindGroupLayout) = .{}, entries: []const sysgpu.BindGroupLayout.Entry, pub fn init(device: *Device, desc: *const sysgpu.BindGroupLayout.Descriptor) !*BindGroupLayout { _ = device; var entries: []const sysgpu.BindGroupLayout.Entry = undefined; if (desc.entry_count > 0) { entries = try allocator.dupe(sysgpu.BindGroupLayout.Entry, desc.entries.?[0..desc.entry_count]); } else { entries = &[_]sysgpu.BindGroupLayout.Entry{}; } const layout = try allocator.create(BindGroupLayout); layout.* = .{ .entries = entries, }; return layout; } pub fn deinit(layout: *BindGroupLayout) void { if (layout.entries.len > 0) allocator.free(layout.entries); allocator.destroy(layout); } // Internal pub fn getEntry(layout: *BindGroupLayout, binding: u32) ?*const sysgpu.BindGroupLayout.Entry { for (layout.entries) |*entry| { if (entry.binding == binding) return entry; } return null; } pub fn getDynamicIndex(layout: *BindGroupLayout, binding: u32) ?u32 { var index: u32 = 0; for (layout.entries) |entry| { if (entry.buffer.has_dynamic_offset == .false) continue; if (entry.binding == binding) return index; index += 1; } return null; } }; pub const BindGroup = struct { const Kind = enum { buffer, sampler, texture, }; const Entry = struct { kind: Kind = undefined, binding: u32, dynamic_index: ?u32, target: c.GLenum = 0, buffer: ?*Buffer = null, offset: u32 = 0, size: u32 = 0, }; manager: utils.Manager(BindGroup) = .{}, entries: []const Entry, pub fn init(device: *Device, desc: *const sysgpu.BindGroup.Descriptor) !*BindGroup { _ = device; const layout: *BindGroupLayout = @ptrCast(@alignCast(desc.layout)); var entries = try allocator.alloc(Entry, desc.entry_count); errdefer allocator.free(entries); for (desc.entries.?[0..desc.entry_count], 0..)
|entry, i| { var gl_entry = &entries[i]; const bind_group_entry = layout.getEntry(entry.binding) orelse return error.UnknownBinding; gl_entry.* = .{ .binding = entry.binding, .dynamic_index = layout.getDynamicIndex(entry.binding), }; if (entry.buffer) |buffer_raw| { const buffer: *Buffer = @ptrCast(@alignCast(buffer_raw)); buffer.manager.reference(); gl_entry.kind = .buffer; gl_entry.target = conv.glTargetForBufferBinding(bind_group_entry.buffer.type); gl_entry.buffer = buffer; gl_entry.offset = @intCast(entry.offset); gl_entry.size = @intCast(entry.size); } } const group = try allocator.create(BindGroup); group.* = .{ .entries = entries, }; return group; } pub fn deinit(group: *BindGroup) void { for (group.entries) |entry| { if (entry.buffer) |buffer| buffer.manager.release(); } allocator.free(group.entries); allocator.destroy(group); } }; pub const PipelineLayout = struct { manager: utils.Manager(PipelineLayout) = .{}, group_layouts: []*BindGroupLayout, bindings: BindingTable, pub fn init(device: *Device, desc: *const sysgpu.PipelineLayout.Descriptor) !*PipelineLayout { _ = device; var group_layouts = try allocator.alloc(*BindGroupLayout, desc.bind_group_layout_count); errdefer allocator.free(group_layouts); for (0..desc.bind_group_layout_count) |i| { const layout: *BindGroupLayout = @ptrCast(@alignCast(desc.bind_group_layouts.?[i])); layout.manager.reference(); group_layouts[i] = layout; } var bindings: BindingTable = .{}; errdefer bindings.deinit(allocator); var buffer_index: u32 = 0; var texture_index: u32 = 0; var sampler_index: u32 = 0; for (group_layouts, 0..) |group_layout, group| { for (group_layout.entries) |entry| { const key = BindingPoint{ .group = @intCast(group), .binding = entry.binding }; if (entry.buffer.type != .undefined) { try bindings.put(allocator, key, buffer_index); buffer_index += 1; } else if (entry.sampler.type != .undefined) { try bindings.put(allocator, key, sampler_index); sampler_index += 1; } else if (entry.texture.sample_type != .undefined or entry.storage_texture.format != .undefined) { try bindings.put(allocator, key, texture_index); texture_index += 1; } } } const layout = try allocator.create(PipelineLayout); layout.* = .{ .group_layouts = group_layouts, .bindings = bindings, }; return layout; } pub fn initDefault(device: *Device, default_pipeline_layout: utils.DefaultPipelineLayoutDescriptor) !*PipelineLayout { const groups = default_pipeline_layout.groups; var bind_group_layouts = std.BoundedArray(*sysgpu.BindGroupLayout, limits.max_bind_groups){}; defer { for (bind_group_layouts.slice()) |bind_group_layout_raw| { const bind_group_layout: *BindGroupLayout = @ptrCast(@alignCast(bind_group_layout_raw)); bind_group_layout.manager.release(); } } for (groups.slice()) |entries| { const bind_group_layout = try device.createBindGroupLayout( &sysgpu.BindGroupLayout.Descriptor.init(.{ .entries = entries.items }), ); bind_group_layouts.appendAssumeCapacity(@ptrCast(bind_group_layout)); } return device.createPipelineLayout( &sysgpu.PipelineLayout.Descriptor.init(.{ .bind_group_layouts = bind_group_layouts.slice() }), ); } pub fn deinit(layout: *PipelineLayout) void { for (layout.group_layouts) |group_layout| group_layout.manager.release(); layout.bindings.deinit(allocator); allocator.free(layout.group_layouts); allocator.destroy(layout); } }; pub const ShaderModule = struct { manager: utils.Manager(ShaderModule) = .{}, device: *Device, air: *shader.Air, pub fn initAir(device: *Device, air: *shader.Air) !*ShaderModule { const module = try 
allocator.create(ShaderModule); module.* = .{ .device = device, .air = air, }; return module; } pub fn deinit(shader_module: *ShaderModule) void { shader_module.air.deinit(allocator); allocator.destroy(shader_module.air); allocator.destroy(shader_module); } pub fn compile( module: *ShaderModule, entrypoint: [*:0]const u8, shader_type: c.GLenum, bindings: *const BindingTable, ) !c.GLuint { const gl = &module.device.gl; const stage = switch (shader_type) { c.GL_VERTEX_SHADER => shader.CodeGen.Stage.vertex, c.GL_FRAGMENT_SHADER => shader.CodeGen.Stage.fragment, c.GL_COMPUTE_SHADER => shader.CodeGen.Stage.compute, else => unreachable, }; const code = try shader.CodeGen.generate( allocator, module.air, .glsl, true, .{ .emit_source_file = "" }, .{ .name = entrypoint, .stage = stage }, bindings, null, ); defer allocator.free(code); const code_z = try allocator.dupeZ(u8, code); defer allocator.free(code_z); std.debug.print("{s}\n", .{code}); const gl_shader = gl.createShader(shader_type); if (gl_shader == 0) return error.CreateShaderFailed; gl.shaderSource(gl_shader, 1, @ptrCast(&code_z), null); gl.compileShader(gl_shader); var success: c.GLint = undefined; gl.getShaderiv(gl_shader, c.GL_COMPILE_STATUS, &success); if (success == c.GL_FALSE) { var info_log: [512]c.GLchar = undefined; gl.getShaderInfoLog(gl_shader, @sizeOf(@TypeOf(info_log)), null, &info_log); std.debug.print("Compilation Failed {s}\n", .{@as([*:0]u8, @ptrCast(&info_log))}); return error.CompilationFailed; } return gl_shader; } }; pub const ComputePipeline = struct { manager: utils.Manager(ComputePipeline) = .{}, device: *Device, layout: *PipelineLayout, program: c.GLuint, pub fn init(device: *Device, desc: *const sysgpu.ComputePipeline.Descriptor) !*ComputePipeline { const gl = &device.gl; var ctx = ActiveContext.init(device.hdc, device.hglrc); defer ctx.deinit(); const compute_module: *ShaderModule = @ptrCast(@alignCast(desc.compute.module)); // Pipeline Layout var layout: *PipelineLayout = undefined; if (desc.layout) |layout_raw| { layout = @ptrCast(@alignCast(layout_raw)); layout.manager.reference(); } else { var layout_desc = utils.DefaultPipelineLayoutDescriptor.init(allocator); defer layout_desc.deinit(); try layout_desc.addFunction(compute_module.air, .{ .compute = true }, desc.compute.entry_point); layout = try PipelineLayout.initDefault(device, layout_desc); } errdefer layout.manager.release(); // Shaders const compute_shader = try compute_module.compile(desc.compute.entry_point, c.GL_COMPUTE_SHADER, &layout.bindings); defer gl.deleteShader(compute_shader); // Program const program = gl.createProgram(); errdefer gl.deleteProgram(program); gl.attachShader(program, compute_shader); gl.linkProgram(program); var success: c.GLint = undefined; gl.getProgramiv(program, c.GL_LINK_STATUS, &success); if (success == c.GL_FALSE) { var info_log: [512]c.GLchar = undefined; gl.getProgramInfoLog(program, @sizeOf(@TypeOf(info_log)), null, &info_log); std.debug.print("Link Failed {s}\n", .{@as([*:0]u8, @ptrCast(&info_log))}); return error.LinkFailed; } // Result const pipeline = try allocator.create(ComputePipeline); pipeline.* = .{ .device = device, .layout = layout, .program = program, }; return pipeline; } pub fn deinit(pipeline: *ComputePipeline) void { const device = pipeline.device; const gl = &device.gl; var ctx = ActiveContext.init(device.hdc, device.hglrc); defer ctx.deinit(); gl.deleteProgram(pipeline.program); pipeline.layout.manager.release(); allocator.destroy(pipeline); } pub fn getBindGroupLayout(pipeline: *ComputePipeline, 
group_index: u32) *BindGroupLayout { return @ptrCast(pipeline.layout.group_layouts[group_index]); } }; pub const RenderPipeline = struct { const Attribute = struct { is_int: bool, index: c.GLuint, count: c.GLint, vertex_type: c.GLenum, normalized: c.GLboolean, stride: c.GLsizei, offset: c.GLuint, }; const ColorTarget = struct { blend_enabled: bool, color_op: c.GLenum, alpha_op: c.GLenum, src_color_blend: c.GLenum, dst_color_blend: c.GLenum, src_alpha_blend: c.GLenum, dst_alpha_blend: c.GLenum, write_red: c.GLboolean, write_green: c.GLboolean, write_blue: c.GLboolean, write_alpha: c.GLboolean, }; manager: utils.Manager(RenderPipeline) = .{}, device: *Device, layout: *PipelineLayout, program: c.GLuint, vao: c.GLuint, attributes: []Attribute, buffer_attributes: [][]Attribute, mode: c.GLenum, front_face: c.GLenum, cull_enabled: bool, cull_face: c.GLenum, depth_test_enabled: bool, depth_mask: c.GLboolean, depth_func: c.GLenum, stencil_test_enabled: bool, stencil_read_mask: c.GLuint, stencil_write_mask: c.GLuint, stencil_back_compare_func: c.GLenum, stencil_back_fail_op: c.GLenum, stencil_back_depth_fail_op: c.GLenum, stencil_back_pass_op: c.GLenum, stencil_front_compare_func: c.GLenum, stencil_front_fail_op: c.GLenum, stencil_front_depth_fail_op: c.GLenum, stencil_front_pass_op: c.GLenum, polygon_offset_enabled: bool, depth_bias: f32, depth_bias_slope_scale: f32, depth_bias_clamp: f32, multisample_enabled: bool, sample_mask_enabled: bool, sample_mask_value: c.GLuint, alpha_to_coverage_enabled: bool, color_targets: []ColorTarget, pub fn init(device: *Device, desc: *const sysgpu.RenderPipeline.Descriptor) !*RenderPipeline { const gl = &device.gl; var ctx = ActiveContext.init(device.hdc, device.hglrc); defer ctx.deinit(); const vertex_module: *ShaderModule = @ptrCast(@alignCast(desc.vertex.module)); // Pipeline Layout var layout: *PipelineLayout = undefined; if (desc.layout) |layout_raw| { layout = @ptrCast(@alignCast(layout_raw)); layout.manager.reference(); } else { var layout_desc = utils.DefaultPipelineLayoutDescriptor.init(allocator); defer layout_desc.deinit(); try layout_desc.addFunction(vertex_module.air, .{ .vertex = true }, desc.vertex.entry_point); if (desc.fragment) |frag| { const frag_module: *ShaderModule = @ptrCast(@alignCast(frag.module)); try layout_desc.addFunction(frag_module.air, .{ .fragment = true }, frag.entry_point); } layout = try PipelineLayout.initDefault(device, layout_desc); } errdefer layout.manager.release(); // Shaders const vertex_shader = try vertex_module.compile(desc.vertex.entry_point, c.GL_VERTEX_SHADER, &layout.bindings); defer gl.deleteShader(vertex_shader); var opt_fragment_shader: ?c.GLuint = null; if (desc.fragment) |frag| { const frag_module: *ShaderModule = @ptrCast(@alignCast(frag.module)); opt_fragment_shader = try frag_module.compile(frag.entry_point, c.GL_FRAGMENT_SHADER, &layout.bindings); } defer if (opt_fragment_shader) |fragment_shader| gl.deleteShader(fragment_shader); // Vertex State var vao: c.GLuint = undefined; gl.genVertexArrays(1, &vao); gl.bindVertexArray(vao); var attribute_count: usize = 0; for (0..desc.vertex.buffer_count) |i| { const buffer = desc.vertex.buffers.?[i]; attribute_count += buffer.attribute_count; } var attributes = try allocator.alloc(Attribute, attribute_count); errdefer allocator.free(attributes); var buffer_attributes = try allocator.alloc([]Attribute, desc.vertex.buffer_count); errdefer allocator.free(buffer_attributes); attribute_count = 0; for (0..desc.vertex.buffer_count) |i| { const buffer = 
desc.vertex.buffers.?[i]; const attributes_begin = attribute_count; for (0..buffer.attribute_count) |j| { const attr = buffer.attributes.?[j]; const format_type = utils.vertexFormatType(attr.format); attributes[attribute_count] = .{ .is_int = conv.glAttributeIsInt(format_type), .index = attr.shader_location, .count = conv.glAttributeCount(attr.format), .vertex_type = conv.glAttributeType(attr.format), .normalized = conv.glAttributeIsNormalized(format_type), .stride = @intCast(buffer.array_stride), .offset = @intCast(attr.offset), }; gl.enableVertexAttribArray(attr.shader_location); if (buffer.step_mode == .instance) gl.vertexAttribDivisor(attr.shader_location, 1); attribute_count += 1; } buffer_attributes[i] = attributes[attributes_begin..attribute_count]; } // Primitive State const mode = conv.glPrimitiveMode(desc.primitive.topology); const front_face = conv.glFrontFace(desc.primitive.front_face); const cull_enabled = conv.glCullEnabled(desc.primitive.cull_mode); const cull_face = conv.glCullFace(desc.primitive.cull_mode); // Depth Stencil State var depth_test_enabled = false; var depth_mask: c.GLboolean = c.GL_FALSE; var depth_func: c.GLenum = c.GL_LESS; var stencil_test_enabled = false; var stencil_read_mask: c.GLuint = 0xff; var stencil_write_mask: c.GLuint = 0xff; var stencil_back_compare_func: c.GLenum = c.GL_ALWAYS; var stencil_back_fail_op: c.GLenum = c.GL_KEEP; var stencil_back_depth_fail_op: c.GLenum = c.GL_KEEP; var stencil_back_pass_op: c.GLenum = c.GL_KEEP; var stencil_front_compare_func: c.GLenum = c.GL_ALWAYS; var stencil_front_fail_op: c.GLenum = c.GL_KEEP; var stencil_front_depth_fail_op: c.GLenum = c.GL_KEEP; var stencil_front_pass_op: c.GLenum = c.GL_KEEP; var polygon_offset_enabled = false; var depth_bias: f32 = 0.0; var depth_bias_slope_scale: f32 = 0.0; var depth_bias_clamp: f32 = 0.0; if (desc.depth_stencil) |ds| { depth_test_enabled = conv.glDepthTestEnabled(ds); depth_mask = conv.glDepthMask(ds); depth_func = conv.glCompareFunc(ds.depth_compare); stencil_test_enabled = conv.glStencilTestEnabled(ds); stencil_read_mask = @intCast(ds.stencil_read_mask & 0xff); stencil_write_mask = @intCast(ds.stencil_write_mask & 0xff); stencil_back_compare_func = conv.glCompareFunc(ds.stencil_back.compare); stencil_back_fail_op = conv.glStencilOp(ds.stencil_back.fail_op); stencil_back_depth_fail_op = conv.glStencilOp(ds.stencil_back.depth_fail_op); stencil_back_pass_op = conv.glStencilOp(ds.stencil_back.pass_op); stencil_front_compare_func = conv.glCompareFunc(ds.stencil_front.compare); stencil_front_fail_op = conv.glStencilOp(ds.stencil_front.fail_op); stencil_front_depth_fail_op = conv.glStencilOp(ds.stencil_front.depth_fail_op); stencil_front_pass_op = conv.glStencilOp(ds.stencil_front.pass_op); polygon_offset_enabled = ds.depth_bias != 0; depth_bias = @floatFromInt(ds.depth_bias); depth_bias_slope_scale = ds.depth_bias_slope_scale; depth_bias_clamp = ds.depth_bias_clamp; } // Multisample const multisample_enabled = desc.multisample.count != 1; const sample_mask_enabled = desc.multisample.mask != 0xFFFFFFFF; const sample_mask_value = desc.multisample.mask; const alpha_to_coverage_enabled = desc.multisample.alpha_to_coverage_enabled == .true; // Fragment const target_count = if (desc.fragment) |fragment| fragment.target_count else 0; var color_targets = try allocator.alloc(ColorTarget, target_count); errdefer allocator.free(color_targets); if (desc.fragment) |fragment| { for (0..fragment.target_count) |i| { const target = fragment.targets.?[i]; var blend_enabled = false; var 
color_op: c.GLenum = c.GL_FUNC_ADD; var alpha_op: c.GLenum = c.GL_FUNC_ADD; var src_color_blend: c.GLenum = c.GL_ONE; var dst_color_blend: c.GLenum = c.GL_ZERO; var src_alpha_blend: c.GLenum = c.GL_ONE; var dst_alpha_blend: c.GLenum = c.GL_ZERO; const write_red: c.GLboolean = if (target.write_mask.red) c.GL_TRUE else c.GL_FALSE; const write_green: c.GLboolean = if (target.write_mask.green) c.GL_TRUE else c.GL_FALSE; const write_blue: c.GLboolean = if (target.write_mask.blue) c.GL_TRUE else c.GL_FALSE; const write_alpha: c.GLboolean = if (target.write_mask.alpha) c.GL_TRUE else c.GL_FALSE; if (target.blend) |blend| { blend_enabled = true; color_op = conv.glBlendOp(blend.color.operation); alpha_op = conv.glBlendOp(blend.alpha.operation); src_color_blend = conv.glBlendFactor(blend.color.src_factor, true); dst_color_blend = conv.glBlendFactor(blend.color.dst_factor, true); src_alpha_blend = conv.glBlendFactor(blend.alpha.src_factor, false); dst_alpha_blend = conv.glBlendFactor(blend.alpha.dst_factor, false); } color_targets[i] = .{ .blend_enabled = blend_enabled, .color_op = color_op, .alpha_op = alpha_op, .src_color_blend = src_color_blend, .dst_color_blend = dst_color_blend, .src_alpha_blend = src_alpha_blend, .dst_alpha_blend = dst_alpha_blend, .write_red = write_red, .write_green = write_green, .write_blue = write_blue, .write_alpha = write_alpha, }; } } // Object var pipeline = try allocator.create(RenderPipeline); pipeline.* = .{ .device = device, .layout = layout, .program = 0, .vao = vao, .attributes = attributes, .buffer_attributes = buffer_attributes, .mode = mode, .front_face = front_face, .cull_enabled = cull_enabled, .cull_face = cull_face, .depth_test_enabled = depth_test_enabled, .depth_mask = depth_mask, .depth_func = depth_func, .stencil_test_enabled = stencil_test_enabled, .stencil_read_mask = stencil_read_mask, .stencil_write_mask = stencil_write_mask, .stencil_back_compare_func = stencil_back_compare_func, .stencil_back_fail_op = stencil_back_fail_op, .stencil_back_depth_fail_op = stencil_back_depth_fail_op, .stencil_back_pass_op = stencil_back_pass_op, .stencil_front_compare_func = stencil_front_compare_func, .stencil_front_fail_op = stencil_front_fail_op, .stencil_front_depth_fail_op = stencil_front_depth_fail_op, .stencil_front_pass_op = stencil_front_pass_op, .polygon_offset_enabled = polygon_offset_enabled, .depth_bias = depth_bias, .depth_bias_slope_scale = depth_bias_slope_scale, .depth_bias_clamp = depth_bias_clamp, .multisample_enabled = multisample_enabled, .sample_mask_enabled = sample_mask_enabled, .sample_mask_value = sample_mask_value, .alpha_to_coverage_enabled = alpha_to_coverage_enabled, .color_targets = color_targets, }; // Apply state to avoid program recompilation pipeline.applyState(0); // Program const program = gl.createProgram(); errdefer gl.deleteProgram(program); gl.attachShader(program, vertex_shader); if (opt_fragment_shader) |fragment_shader| gl.attachShader(program, fragment_shader); gl.linkProgram(program); var success: c.GLint = undefined; gl.getProgramiv(program, c.GL_LINK_STATUS, &success); if (success == c.GL_FALSE) { var info_log: [512]c.GLchar = undefined; gl.getProgramInfoLog(program, @sizeOf(@TypeOf(info_log)), null, &info_log); std.debug.print("Link Failed {s}\n", .{@as([*:0]u8, @ptrCast(&info_log))}); return error.LinkFailed; } pipeline.program = program; return pipeline; } pub fn deinit(pipeline: *RenderPipeline) void { const device = pipeline.device; const gl = &device.gl; var ctx = ActiveContext.init(device.hdc, device.hglrc); 
defer ctx.deinit(); gl.deleteVertexArrays(1, &pipeline.vao); gl.deleteProgram(pipeline.program); pipeline.layout.manager.release(); allocator.free(pipeline.color_targets); allocator.free(pipeline.attributes); allocator.free(pipeline.buffer_attributes); allocator.destroy(pipeline); } // Internal pub fn getBindGroupLayout(pipeline: *RenderPipeline, group_index: u32) *BindGroupLayout { return @ptrCast(pipeline.layout.group_layouts[group_index]); } pub fn applyState(pipeline: *RenderPipeline, stencil_ref: c.GLint) void { const device = pipeline.device; const gl = &device.gl; var ctx = ActiveContext.init(device.hdc, device.hglrc); defer ctx.deinit(); gl.bindVertexArray(pipeline.vao); gl.frontFace(pipeline.front_face); if (pipeline.cull_enabled) { gl.enable(c.GL_CULL_FACE); gl.cullFace(pipeline.cull_face); } else { gl.disable(c.GL_CULL_FACE); } if (pipeline.depth_test_enabled) { gl.enable(c.GL_DEPTH_TEST); gl.depthMask(pipeline.depth_mask); gl.depthFunc(pipeline.depth_func); } else { gl.disable(c.GL_DEPTH_TEST); } if (pipeline.stencil_test_enabled) { gl.enable(c.GL_STENCIL_TEST); gl.stencilFuncSeparate( c.GL_BACK, pipeline.stencil_back_compare_func, stencil_ref, pipeline.stencil_read_mask, ); gl.stencilFuncSeparate( c.GL_FRONT, pipeline.stencil_front_compare_func, stencil_ref, pipeline.stencil_read_mask, ); gl.stencilOpSeparate( c.GL_BACK, pipeline.stencil_back_fail_op, pipeline.stencil_back_depth_fail_op, pipeline.stencil_back_pass_op, ); gl.stencilOpSeparate( c.GL_FRONT, pipeline.stencil_front_fail_op, pipeline.stencil_front_depth_fail_op, pipeline.stencil_front_pass_op, ); gl.stencilMask(pipeline.stencil_write_mask); } else { gl.disable(c.GL_STENCIL_TEST); } if (pipeline.polygon_offset_enabled) { gl.enable(c.GL_POLYGON_OFFSET_FILL); gl.polygonOffsetClamp( pipeline.depth_bias_slope_scale, pipeline.depth_bias, pipeline.depth_bias_clamp, ); } else { gl.disable(c.GL_POLYGON_OFFSET_FILL); } if (pipeline.multisample_enabled) { gl.enable(c.GL_MULTISAMPLE); if (pipeline.sample_mask_enabled) { gl.enable(c.GL_SAMPLE_MASK); gl.sampleMaski(0, pipeline.sample_mask_value); } else { gl.disable(c.GL_SAMPLE_MASK); } if (pipeline.alpha_to_coverage_enabled) { gl.enable(c.GL_SAMPLE_ALPHA_TO_COVERAGE); } else { gl.disable(c.GL_SAMPLE_ALPHA_TO_COVERAGE); } } else { gl.disable(c.GL_MULTISAMPLE); } for (pipeline.color_targets, 0..) 
|target, i| { const buf: c.GLuint = @intCast(i); if (target.blend_enabled) { gl.enablei(c.GL_BLEND, buf); gl.blendEquationSeparatei(buf, target.color_op, target.alpha_op); gl.blendFuncSeparatei( buf, target.src_color_blend, target.dst_color_blend, target.src_alpha_blend, target.dst_alpha_blend, ); } else { gl.disablei(c.GL_BLEND, buf); } gl.colorMaski(buf, target.write_red, target.write_green, target.write_blue, target.write_alpha); } } }; const Command = union(enum) { begin_render_pass: struct { color_attachments: std.BoundedArray(sysgpu.RenderPassColorAttachment, limits.max_color_attachments), depth_stencil_attachment: ?sysgpu.RenderPassDepthStencilAttachment, }, end_render_pass, copy_buffer_to_buffer: struct { source: *Buffer, source_offset: u64, destination: *Buffer, destination_offset: u64, size: u64, }, dispatch_workgroups: struct { workgroup_count_x: u32, workgroup_count_y: u32, workgroup_count_z: u32, }, draw: struct { vertex_count: u32, instance_count: u32, first_vertex: u32, first_instance: u32, }, draw_indexed: struct { index_count: u32, instance_count: u32, first_index: u32, base_vertex: i32, first_instance: u32, }, set_compute_bind_group: struct { group_index: u32, group: *BindGroup, dynamic_offsets: std.BoundedArray(u32, limits.max_bind_groups), }, set_compute_pipeline: struct { pipeline: *ComputePipeline, }, set_render_bind_group: struct { group_index: u32, group: *BindGroup, dynamic_offsets: std.BoundedArray(u32, limits.max_bind_groups), }, set_index_buffer: struct { buffer: *Buffer, format: sysgpu.IndexFormat, offset: u64, }, set_render_pipeline: struct { pipeline: *RenderPipeline, }, set_scissor_rect: struct { x: c.GLint, y: c.GLint, width: c.GLsizei, height: c.GLsizei, }, set_vertex_buffer: struct { slot: u32, buffer: *Buffer, offset: u64, }, set_viewport: struct { x: f32, y: f32, width: f32, height: f32, min_depth: f32, max_depth: f32, }, }; pub const CommandBuffer = struct { const VertexBuffersState = struct { apply_count: u32 = 0, buffers: [limits.max_vertex_buffers]?*Buffer = std.mem.zeroes([limits.max_vertex_buffers]?*Buffer), buffer_offsets: [limits.max_vertex_buffers]u64 = std.mem.zeroes([limits.max_vertex_buffers]u64), }; manager: utils.Manager(CommandBuffer) = .{}, device: *Device, commands: std.ArrayListUnmanaged(Command) = .{}, reference_tracker: *ReferenceTracker, pub fn init(device: *Device) !*CommandBuffer { const reference_tracker = try ReferenceTracker.init(device); errdefer reference_tracker.deinit(); const command_buffer = try allocator.create(CommandBuffer); command_buffer.* = .{ .device = device, .reference_tracker = reference_tracker, }; return command_buffer; } pub fn deinit(command_buffer: *CommandBuffer) void { // reference_tracker lifetime is managed externally command_buffer.commands.deinit(allocator); allocator.destroy(command_buffer); } // Internal pub fn execute(command_buffer: *CommandBuffer) !void { const device = command_buffer.device; const gl = &device.gl; var ctx = ActiveContext.init(device.hdc, device.hglrc); defer ctx.deinit(); var compute_pipeline: ?*ComputePipeline = null; var render_pass_fbo: ?c.GLuint = null; var render_pipeline: ?*RenderPipeline = null; const stencil_ref: c.GLint = 0; var index_type: c.GLenum = undefined; var index_element_size: usize = undefined; var index_buffer: ?*Buffer = null; var index_buffer_offset: usize = undefined; var vertex_state: VertexBuffersState = .{}; try command_buffer.reference_tracker.submit(); try device.reference_trackers.append(allocator, command_buffer.reference_tracker); for 
(command_buffer.commands.items) |command| { switch (command) { .begin_render_pass => |cmd| { // Test if rendering to default framebuffer var default_framebuffer = false; if (cmd.color_attachments.len == 1) { const attach = cmd.color_attachments.buffer[0]; if (attach.view) |view_raw| { const view: *TextureView = @ptrCast(@alignCast(view_raw)); if (view.texture.swapchain) |swapchain| { default_framebuffer = true; if (swapchain.hdc != device.hdc) { if (c.wglMakeCurrent(swapchain.hdc, device.hglrc) == c.FALSE) return error.WGLMakeCurrentFailed; } } } } // Framebuffer var width: u32 = 0; var height: u32 = 0; if (!default_framebuffer) { var fbo: c.GLuint = undefined; gl.genFramebuffers(1, &fbo); render_pass_fbo = fbo; gl.bindFramebuffer(c.GL_DRAW_FRAMEBUFFER, fbo); var draw_buffers: std.BoundedArray(c.GLenum, limits.max_color_attachments) = .{}; for (cmd.color_attachments.buffer, 0..) |attach, i| { if (attach.view) |view_raw| { const view: *TextureView = @ptrCast(@alignCast(view_raw)); width = view.width(); height = view.height(); draw_buffers.appendAssumeCapacity(@intCast(c.GL_COLOR_ATTACHMENT0 + i)); gl.framebufferTexture2D( c.GL_FRAMEBUFFER, @intCast(c.GL_COLOR_ATTACHMENT0 + i), c.GL_TEXTURE_2D, view.texture.handle, 0, ); } else { draw_buffers.appendAssumeCapacity(c.GL_NONE); } } if (cmd.depth_stencil_attachment) |attach| { const view: *TextureView = @ptrCast(@alignCast(attach.view)); width = view.width(); height = view.height(); const attachment: c.GLuint = switch (utils.textureFormatType(view.texture.format)) { .depth => c.GL_DEPTH_ATTACHMENT, .stencil => c.GL_STENCIL_ATTACHMENT, .depth_stencil => c.GL_DEPTH_STENCIL_ATTACHMENT, else => unreachable, }; gl.framebufferTexture2D( c.GL_FRAMEBUFFER, attachment, c.GL_TEXTURE_2D, view.texture.handle, 0, ); } gl.drawBuffers(draw_buffers.len, &draw_buffers.buffer); if (gl.checkFramebufferStatus(c.GL_FRAMEBUFFER) != c.GL_FRAMEBUFFER_COMPLETE) return error.CheckFramebufferStatusFailed; } else { // TODO - always render to framebuffer? gl.bindFramebuffer(c.GL_DRAW_FRAMEBUFFER, 0); const view: *TextureView = @ptrCast(@alignCast(cmd.color_attachments.buffer[0].view.?)); width = view.width(); height = view.height(); } // Default State gl.viewport(0, 0, @intCast(width), @intCast(height)); gl.depthRangef(0.0, 1.0); gl.scissor(0, 0, @intCast(width), @intCast(height)); gl.blendColor(0, 0, 0, 0); gl.colorMask(c.GL_TRUE, c.GL_TRUE, c.GL_TRUE, c.GL_TRUE); gl.depthMask(c.GL_TRUE); gl.stencilMask(0xff); // Clear color targets for (cmd.color_attachments.buffer, 0..) 
|attach, i| { if (attach.view) |view_raw| { const view: *TextureView = @ptrCast(@alignCast(view_raw)); if (attach.load_op == .clear) { switch (utils.textureFormatType(view.texture.format)) { .float, .unorm, .unorm_srgb, .snorm, => { const data = [4]f32{ @floatCast(attach.clear_value.r), @floatCast(attach.clear_value.g), @floatCast(attach.clear_value.b), @floatCast(attach.clear_value.a), }; gl.clearBufferfv(c.GL_COLOR, @intCast(i), &data); }, .uint => { const data = [4]u32{ @intFromFloat(attach.clear_value.r), @intFromFloat(attach.clear_value.g), @intFromFloat(attach.clear_value.b), @intFromFloat(attach.clear_value.a), }; gl.clearBufferuiv(c.GL_COLOR, @intCast(i), &data); }, .sint => { const data = [4]i32{ @intFromFloat(attach.clear_value.r), @intFromFloat(attach.clear_value.g), @intFromFloat(attach.clear_value.b), @intFromFloat(attach.clear_value.a), }; gl.clearBufferiv(c.GL_COLOR, @intCast(i), &data); }, else => unreachable, } } } } // Clear depth target if (cmd.depth_stencil_attachment) |attach| { const view: *TextureView = @ptrCast(@alignCast(attach.view)); const format_type = utils.textureFormatType(view.texture.format); const depth_clear = attach.depth_load_op == .clear and (format_type == .depth or format_type == .depth_stencil); const stencil_clear = attach.stencil_load_op == .clear and (format_type == .stencil or format_type == .depth_stencil); if (depth_clear and stencil_clear) { gl.clearBufferfi( c.GL_DEPTH_STENCIL, 0, attach.depth_clear_value, @intCast(attach.stencil_clear_value), ); } else if (depth_clear) { gl.clearBufferfv(c.GL_DEPTH, 0, &attach.depth_clear_value); } else if (stencil_clear) { const stencil_value: c.GLint = @intCast(attach.stencil_clear_value); gl.clearBufferiv(c.GL_STENCIL, 0, &stencil_value); } } // Release references for (cmd.color_attachments.buffer) |attach| { if (attach.view) |view_raw| { const view: *TextureView = @ptrCast(@alignCast(view_raw)); view.manager.release(); } } if (cmd.depth_stencil_attachment) |attach| { const view: *TextureView = @ptrCast(@alignCast(attach.view)); view.manager.release(); } }, .end_render_pass => { // TODO - invalidate on discard if (render_pass_fbo) |fbo| gl.deleteFramebuffers(1, &fbo); render_pass_fbo = null; }, .copy_buffer_to_buffer => |cmd| { gl.bindBuffer(c.GL_COPY_READ_BUFFER, cmd.source.handle); gl.bindBuffer(c.GL_COPY_WRITE_BUFFER, cmd.destination.handle); gl.copyBufferSubData( c.GL_COPY_READ_BUFFER, c.GL_COPY_WRITE_BUFFER, @intCast(cmd.source_offset), @intCast(cmd.destination_offset), @intCast(cmd.size), ); }, .dispatch_workgroups => |cmd| { gl.dispatchCompute(cmd.workgroup_count_x, cmd.workgroup_count_y, cmd.workgroup_count_z); }, .draw => |cmd| { if (vertex_state.apply_count > 0) applyVertexBuffers(gl, &vertex_state, render_pipeline.?); gl.drawArraysInstancedBaseInstance( render_pipeline.?.mode, @intCast(cmd.first_vertex), @intCast(cmd.vertex_count), @intCast(cmd.instance_count), cmd.first_instance, ); }, .draw_indexed => |cmd| { if (vertex_state.apply_count > 0) applyVertexBuffers(gl, &vertex_state, render_pipeline.?); gl.drawElementsInstancedBaseVertexBaseInstance( render_pipeline.?.mode, @intCast(cmd.index_count), index_type, @ptrFromInt(index_buffer_offset + cmd.first_index * index_element_size), @intCast(cmd.instance_count), cmd.base_vertex, cmd.first_instance, ); }, .set_index_buffer => |cmd| { const buffer = cmd.buffer; gl.bindBuffer(c.GL_ELEMENT_ARRAY_BUFFER, buffer.handle); index_type = conv.glIndexType(cmd.format); index_element_size = conv.glIndexElementSize(cmd.format); index_buffer_offset = cmd.offset; if (index_buffer) |old_index_buffer| 
old_index_buffer.manager.release(); index_buffer = buffer; }, .set_compute_pipeline => |cmd| { const pipeline = cmd.pipeline; gl.useProgram(pipeline.program); if (compute_pipeline) |old_pipeline| old_pipeline.manager.release(); compute_pipeline = pipeline; }, .set_compute_bind_group => |cmd| { // NOTE - this does not work yet for applications that expect bind groups to stay valid after // pipeline changes. For that we will need to defer GLSL compilation until layout is known. const group = cmd.group; for (group.entries) |entry| { const key = BindingPoint{ .group = cmd.group_index, .binding = entry.binding }; if (compute_pipeline.?.layout.bindings.get(key)) |slot| { switch (entry.kind) { .buffer => { var offset = entry.offset; if (entry.dynamic_index) |i| offset += cmd.dynamic_offsets.buffer[i]; gl.bindBufferRange(entry.target, slot, entry.buffer.?.handle, offset, entry.size); }, else => @panic("unimplemented"), } } } group.manager.release(); }, .set_render_bind_group => |cmd| { // NOTE - this does not work yet for applications that expect bind groups to stay valid after // pipeline changes. For that we will need to defer GLSL compilation until layout is known. const group = cmd.group; for (group.entries) |entry| { const key = BindingPoint{ .group = cmd.group_index, .binding = entry.binding }; if (render_pipeline.?.layout.bindings.get(key)) |slot| { switch (entry.kind) { .buffer => { var offset = entry.offset; if (entry.dynamic_index) |i| offset += cmd.dynamic_offsets.buffer[i]; gl.bindBufferRange(entry.target, slot, entry.buffer.?.handle, offset, entry.size); }, else => @panic("unimplemented"), } } } group.manager.release(); }, .set_render_pipeline => |cmd| { var pipeline = cmd.pipeline; pipeline.applyState(stencil_ref); gl.useProgram(pipeline.program); if (render_pipeline) |old_pipeline| old_pipeline.manager.release(); render_pipeline = pipeline; }, .set_scissor_rect => |cmd| { gl.scissor(cmd.x, cmd.y, cmd.width, cmd.height); }, .set_vertex_buffer => |cmd| { const buffer = cmd.buffer; vertex_state.buffers[cmd.slot] = buffer; vertex_state.buffer_offsets[cmd.slot] = cmd.offset; vertex_state.apply_count = @max(vertex_state.apply_count, cmd.slot + 1); }, .set_viewport => |cmd| { gl.viewportIndexedf(0, cmd.x, cmd.y, cmd.width, cmd.height); gl.depthRangef(cmd.min_depth, cmd.max_depth); }, } } command_buffer.reference_tracker.sync = gl.fenceSync(c.GL_SYNC_GPU_COMMANDS_COMPLETE, 0); std.debug.assert(render_pass_fbo == null); if (compute_pipeline) |pipeline| pipeline.manager.release(); if (render_pipeline) |pipeline| pipeline.manager.release(); if (index_buffer) |buffer| buffer.manager.release(); checkError(gl); } fn applyVertexBuffers(gl: *proc.DeviceGL, vertex_state: *VertexBuffersState, render_pipeline: *RenderPipeline) void { for (0..vertex_state.apply_count) |buffer_index| { if (vertex_state.buffers[buffer_index]) |buffer| { gl.bindBuffer(c.GL_ARRAY_BUFFER, buffer.handle); const offset = vertex_state.buffer_offsets[buffer_index]; for (render_pipeline.buffer_attributes[buffer_index]) |attribute| { if (attribute.is_int) { gl.vertexAttribIPointer( attribute.index, attribute.count, attribute.vertex_type, attribute.stride, @ptrFromInt(attribute.offset + offset), ); } else { gl.vertexAttribPointer( attribute.index, attribute.count, attribute.vertex_type, attribute.normalized, attribute.stride, @ptrFromInt(attribute.offset + offset), ); } } buffer.manager.release(); vertex_state.buffers[buffer_index] = null; } } vertex_state.apply_count = 0; } }; pub const ReferenceTracker = struct { device: 
*Device, sync: c.GLsync = undefined, buffers: std.ArrayListUnmanaged(*Buffer) = .{}, bind_groups: std.ArrayListUnmanaged(*BindGroup) = .{}, upload_pages: std.ArrayListUnmanaged(*Buffer) = .{}, pub fn init(device: *Device) !*ReferenceTracker { const tracker = try allocator.create(ReferenceTracker); tracker.* = .{ .device = device, }; return tracker; } pub fn deinit(tracker: *ReferenceTracker) void { const device = tracker.device; for (tracker.buffers.items) |buffer| { buffer.gpu_count -= 1; buffer.manager.release(); } for (tracker.bind_groups.items) |group| { for (group.entries) |entry| { switch (entry.kind) { .buffer => entry.buffer.?.gpu_count -= 1, else => {}, } } group.manager.release(); } for (tracker.upload_pages.items) |buffer| { device.streaming_manager.release(buffer); } tracker.buffers.deinit(allocator); tracker.bind_groups.deinit(allocator); tracker.upload_pages.deinit(allocator); allocator.destroy(tracker); } pub fn referenceBuffer(tracker: *ReferenceTracker, buffer: *Buffer) !void { buffer.manager.reference(); try tracker.buffers.append(allocator, buffer); } pub fn referenceBindGroup(tracker: *ReferenceTracker, group: *BindGroup) !void { group.manager.reference(); try tracker.bind_groups.append(allocator, group); } pub fn referenceUploadPage(tracker: *ReferenceTracker, upload_page: *Buffer) !void { try tracker.upload_pages.append(allocator, upload_page); } pub fn submit(tracker: *ReferenceTracker) !void { for (tracker.buffers.items) |buffer| { buffer.gpu_count += 1; } for (tracker.bind_groups.items) |group| { for (group.entries) |entry| { switch (entry.kind) { .buffer => entry.buffer.?.gpu_count += 1, else => {}, } } } } }; pub const CommandEncoder = struct { pub const StreamingResult = struct { buffer: *Buffer, map: [*]u8, offset: u32, }; manager: utils.Manager(CommandEncoder) = .{}, device: *Device, command_buffer: *CommandBuffer, commands: *std.ArrayListUnmanaged(Command), reference_tracker: *ReferenceTracker, upload_buffer: ?*Buffer = null, upload_map: ?[*]u8 = null, upload_next_offset: u32 = upload_page_size, pub fn init(device: *Device, desc: ?*const sysgpu.CommandEncoder.Descriptor) !*CommandEncoder { _ = desc; const command_buffer = try CommandBuffer.init(device); const encoder = try allocator.create(CommandEncoder); encoder.* = .{ .device = device, .command_buffer = command_buffer, .commands = &command_buffer.commands, .reference_tracker = command_buffer.reference_tracker, }; return encoder; } pub fn deinit(encoder: *CommandEncoder) void { encoder.command_buffer.manager.release(); allocator.destroy(encoder); } pub fn beginComputePass(encoder: *CommandEncoder, desc: *const sysgpu.ComputePassDescriptor) !*ComputePassEncoder { return ComputePassEncoder.init(encoder, desc); } pub fn beginRenderPass(encoder: *CommandEncoder, desc: *const sysgpu.RenderPassDescriptor) !*RenderPassEncoder { return RenderPassEncoder.init(encoder, desc); } pub fn copyBufferToBuffer( encoder: *CommandEncoder, source: *Buffer, source_offset: u64, destination: *Buffer, destination_offset: u64, size: u64, ) !void { try encoder.reference_tracker.referenceBuffer(source); try encoder.reference_tracker.referenceBuffer(destination); try encoder.commands.append(allocator, .{ .copy_buffer_to_buffer = .{ .source = source, .source_offset = source_offset, .destination = destination, .destination_offset = destination_offset, .size = size, } }); } pub fn copyBufferToTexture( encoder: *CommandEncoder, source: *const sysgpu.ImageCopyBuffer, destination: *const sysgpu.ImageCopyTexture, copy_size: *const 
sysgpu.Extent3D, ) !void { _ = copy_size; _ = destination; const source_buffer: *Buffer = @ptrCast(@alignCast(source.buffer)); try encoder.reference_tracker.referenceBuffer(source_buffer); } pub fn copyTextureToTexture( encoder: *CommandEncoder, source: *const sysgpu.ImageCopyTexture, destination: *const sysgpu.ImageCopyTexture, copy_size: *const sysgpu.Extent3D, ) !void { _ = copy_size; _ = destination; _ = source; _ = encoder; } pub fn finish(encoder: *CommandEncoder, desc: *const sysgpu.CommandBuffer.Descriptor) !*CommandBuffer { _ = desc; const command_buffer = encoder.command_buffer; return command_buffer; } pub fn writeBuffer(encoder: *CommandEncoder, buffer: *Buffer, offset: u64, data: [*]const u8, size: u64) !void { const stream = try encoder.upload(size); @memcpy(stream.map[0..size], data[0..size]); try encoder.copyBufferToBuffer(stream.buffer, stream.offset, buffer, offset, size); } pub fn writeTexture( encoder: *CommandEncoder, destination: *const sysgpu.ImageCopyTexture, data: [*]const u8, data_size: usize, data_layout: *const sysgpu.Texture.DataLayout, write_size: *const sysgpu.Extent3D, ) !void { const stream = try encoder.upload(data_size); @memcpy(stream.map[0..data_size], data[0..data_size]); try encoder.copyBufferToTexture( &.{ .layout = .{ .offset = stream.offset, .bytes_per_row = data_layout.bytes_per_row, .rows_per_image = data_layout.rows_per_image, }, .buffer = @ptrCast(stream.buffer), }, destination, write_size, ); } pub fn upload(encoder: *CommandEncoder, size: u64) !StreamingResult { if (encoder.upload_next_offset + size > upload_page_size) { const streaming_manager = &encoder.device.streaming_manager; std.debug.assert(size <= upload_page_size); // TODO - support large uploads const buffer = try streaming_manager.acquire(); try encoder.reference_tracker.referenceUploadPage(buffer); encoder.upload_buffer = buffer; encoder.upload_map = buffer.map; encoder.upload_next_offset = 0; } const offset = encoder.upload_next_offset; encoder.upload_next_offset = @intCast(utils.alignUp(offset + size, limits.min_uniform_buffer_offset_alignment)); return StreamingResult{ .buffer = encoder.upload_buffer.?, .map = encoder.upload_map.? 
+ offset, .offset = offset, }; } }; pub const ComputePassEncoder = struct { manager: utils.Manager(ComputePassEncoder) = .{}, commands: *std.ArrayListUnmanaged(Command), reference_tracker: *ReferenceTracker, pub fn init(cmd_encoder: *CommandEncoder, desc: *const sysgpu.ComputePassDescriptor) !*ComputePassEncoder { _ = desc; const encoder = try allocator.create(ComputePassEncoder); encoder.* = .{ .commands = &cmd_encoder.command_buffer.commands, .reference_tracker = cmd_encoder.reference_tracker, }; return encoder; } pub fn deinit(encoder: *ComputePassEncoder) void { allocator.destroy(encoder); } pub fn dispatchWorkgroups( encoder: *ComputePassEncoder, workgroup_count_x: u32, workgroup_count_y: u32, workgroup_count_z: u32, ) !void { try encoder.commands.append(allocator, .{ .dispatch_workgroups = .{ .workgroup_count_x = workgroup_count_x, .workgroup_count_y = workgroup_count_y, .workgroup_count_z = workgroup_count_z, } }); } pub fn end(encoder: *ComputePassEncoder) void { _ = encoder; } pub fn setBindGroup( encoder: *ComputePassEncoder, group_index: u32, group: *BindGroup, dynamic_offset_count: usize, dynamic_offsets: ?[*]const u32, ) !void { group.manager.reference(); var dynamic_offsets_array: std.BoundedArray(u32, limits.max_bind_groups) = .{}; if (dynamic_offset_count > 0) dynamic_offsets_array.appendSliceAssumeCapacity(dynamic_offsets.?[0..dynamic_offset_count]); try encoder.commands.append(allocator, .{ .set_compute_bind_group = .{ .group_index = group_index, .group = group, .dynamic_offsets = dynamic_offsets_array, } }); } pub fn setPipeline(encoder: *ComputePassEncoder, pipeline: *ComputePipeline) !void { pipeline.manager.reference(); try encoder.commands.append(allocator, .{ .set_compute_pipeline = .{ .pipeline = pipeline, } }); } }; pub const RenderPassEncoder = struct { manager: utils.Manager(RenderPassEncoder) = .{}, commands: *std.ArrayListUnmanaged(Command), reference_tracker: *ReferenceTracker, pub fn init(cmd_encoder: *CommandEncoder, desc: *const sysgpu.RenderPassDescriptor) !*RenderPassEncoder { var encoder = try allocator.create(RenderPassEncoder); encoder.* = .{ .commands = &cmd_encoder.command_buffer.commands, .reference_tracker = cmd_encoder.reference_tracker, }; var color_attachments: std.BoundedArray(sysgpu.RenderPassColorAttachment, limits.max_color_attachments) = .{}; for (0..desc.color_attachment_count) |i| { const attach = &desc.color_attachments.?[i]; if (attach.view) |view_raw| { const view: *TextureView = @ptrCast(@alignCast(view_raw)); view.manager.reference(); } color_attachments.appendAssumeCapacity(attach.*); } if (desc.depth_stencil_attachment) |attach| { const view: *TextureView = @ptrCast(@alignCast(attach.view)); view.manager.reference(); } try encoder.commands.append(allocator, .{ .begin_render_pass = .{ .color_attachments = color_attachments, .depth_stencil_attachment = if (desc.depth_stencil_attachment) |ds| ds.* else null, } }); return encoder; } pub fn deinit(encoder: *RenderPassEncoder) void { allocator.destroy(encoder); } pub fn draw( encoder: *RenderPassEncoder, vertex_count: u32, instance_count: u32, first_vertex: u32, first_instance: u32, ) !void { try encoder.commands.append(allocator, .{ .draw = .{ .vertex_count = vertex_count, .instance_count = instance_count, .first_vertex = first_vertex, .first_instance = first_instance, } }); } pub fn drawIndexed( encoder: *RenderPassEncoder, index_count: u32, instance_count: u32, first_index: u32, base_vertex: i32, first_instance: u32, ) !void { try encoder.commands.append(allocator, .{ .draw_indexed = 
.{ .index_count = index_count, .instance_count = instance_count, .first_index = first_index, .base_vertex = base_vertex, .first_instance = first_instance, } }); } pub fn end(encoder: *RenderPassEncoder) !void { try encoder.commands.append(allocator, .end_render_pass); } pub fn setBindGroup( encoder: *RenderPassEncoder, group_index: u32, group: *BindGroup, dynamic_offset_count: usize, dynamic_offsets: ?[*]const u32, ) !void { group.manager.reference(); var dynamic_offsets_array: std.BoundedArray(u32, limits.max_bind_groups) = .{}; if (dynamic_offset_count > 0) dynamic_offsets_array.appendSliceAssumeCapacity(dynamic_offsets.?[0..dynamic_offset_count]); try encoder.commands.append(allocator, .{ .set_render_bind_group = .{ .group_index = group_index, .group = group, .dynamic_offsets = dynamic_offsets_array, } }); } pub fn setIndexBuffer( encoder: *RenderPassEncoder, buffer: *Buffer, format: sysgpu.IndexFormat, offset: u64, size: u64, ) !void { _ = size; try encoder.reference_tracker.referenceBuffer(buffer); buffer.manager.reference(); try encoder.commands.append(allocator, .{ .set_index_buffer = .{ .buffer = buffer, .format = format, .offset = offset, } }); } pub fn setPipeline(encoder: *RenderPassEncoder, pipeline: *RenderPipeline) !void { pipeline.manager.reference(); try encoder.commands.append(allocator, .{ .set_render_pipeline = .{ .pipeline = pipeline, } }); } pub fn setScissorRect(encoder: *RenderPassEncoder, x: u32, y: u32, width: u32, height: u32) !void { try encoder.commands.append(allocator, .{ .set_scissor_rect = .{ .x = @intCast(x), .y = @intCast(y), .width = @intCast(width), .height = @intCast(height), } }); } pub fn setVertexBuffer(encoder: *RenderPassEncoder, slot: u32, buffer: *Buffer, offset: u64, size: u64) !void { _ = size; try encoder.reference_tracker.referenceBuffer(buffer); buffer.manager.reference(); try encoder.commands.append(allocator, .{ .set_vertex_buffer = .{ .slot = slot, .buffer = buffer, .offset = offset, } }); } pub fn setViewport( encoder: *RenderPassEncoder, x: f32, y: f32, width: f32, height: f32, min_depth: f32, max_depth: f32, ) !void { try encoder.commands.append(allocator, .{ .set_viewport = .{ .x = x, .y = y, .width = width, .height = height, .min_depth = min_depth, .max_depth = max_depth, } }); } }; pub const Queue = struct { manager: utils.Manager(Queue) = .{}, device: *Device, command_encoder: ?*CommandEncoder = null, pub fn init(device: *Device) !Queue { return .{ .device = device, }; } pub fn deinit(queue: *Queue) void { if (queue.command_encoder) |command_encoder| command_encoder.manager.release(); } pub fn submit(queue: *Queue, commands: []const *CommandBuffer) !void { if (queue.command_encoder) |command_encoder| { const command_buffer = try command_encoder.finish(&.{}); command_buffer.manager.reference(); // handled in main.zig defer command_buffer.manager.release(); try command_buffer.execute(); command_encoder.manager.release(); queue.command_encoder = null; } for (commands) |command_buffer| { try command_buffer.execute(); } } pub fn writeBuffer(queue: *Queue, buffer: *Buffer, offset: u64, data: [*]const u8, size: u64) !void { const encoder = try queue.getCommandEncoder(); try encoder.writeBuffer(buffer, offset, data, size); } pub fn writeTexture( queue: *Queue, destination: *const sysgpu.ImageCopyTexture, data: [*]const u8, data_size: usize, data_layout: *const sysgpu.Texture.DataLayout, write_size: *const sysgpu.Extent3D, ) !void { const encoder = try queue.getCommandEncoder(); try encoder.writeTexture(destination, data, data_size, 
data_layout, write_size); } // Private fn getCommandEncoder(queue: *Queue) !*CommandEncoder { if (queue.command_encoder) |command_encoder| return command_encoder; const command_encoder = try CommandEncoder.init(queue.device, &.{}); queue.command_encoder = command_encoder; return command_encoder; } }; test "reference declarations" { std.testing.refAllDeclsRecursive(@This()); }
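// Usage sketch (illustrative, not part of the original backend): the intended
// recording flow against the types above. `device`, `pipeline`, `vertex_buffer`
// and `view` are assumed to have been created elsewhere; descriptor field
// defaults are assumed from sysgpu's main.zig.
fn exampleDrawOneTriangle(
    device: *Device,
    pipeline: *RenderPipeline,
    vertex_buffer: *Buffer,
    view: *TextureView,
) !void {
    const encoder = try CommandEncoder.init(device, null);
    defer encoder.manager.release();

    // Recording only appends Command values; no GL work happens yet.
    const color_attachments = [_]sysgpu.RenderPassColorAttachment{.{
        .view = @ptrCast(view),
        .load_op = .clear,
        .store_op = .store,
        .clear_value = .{ .r = 0, .g = 0, .b = 0, .a = 1 },
    }};
    const pass = try encoder.beginRenderPass(&.{
        .color_attachment_count = color_attachments.len,
        .color_attachments = &color_attachments,
    });
    try pass.setPipeline(pipeline);
    try pass.setVertexBuffer(0, vertex_buffer, 0, 0);
    try pass.draw(3, 1, 0, 0);
    try pass.end();
    pass.manager.release();

    // execute() replays the command list on the GL context; the fence sync it
    // records lets the ReferenceTracker release resources once the GPU is done.
    const cmd = try encoder.finish(&.{});
    var queue = try Queue.init(device);
    defer queue.deinit();
    try queue.submit(&[_]*CommandBuffer{cmd});
}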
0
repos/mach-sysgpu/src
repos/mach-sysgpu/src/sysgpu/query_set.zig
const ChainedStruct = @import("main.zig").ChainedStruct; const PipelineStatisticName = @import("main.zig").PipelineStatisticName; const QueryType = @import("main.zig").QueryType; const Impl = @import("interface.zig").Impl; pub const QuerySet = opaque { pub const Descriptor = extern struct { next_in_chain: ?*const ChainedStruct = null, label: ?[*:0]const u8 = null, type: QueryType, count: u32, pipeline_statistics: ?[*]const PipelineStatisticName = null, pipeline_statistics_count: usize = 0, /// Provides a slightly friendlier Zig API to initialize this structure. pub inline fn init(v: struct { next_in_chain: ?*const ChainedStruct = null, label: ?[*:0]const u8 = null, type: QueryType, count: u32, pipeline_statistics: ?[]const PipelineStatisticName = null, }) Descriptor { return .{ .next_in_chain = v.next_in_chain, .label = v.label, .type = v.type, .count = v.count, .pipeline_statistics_count = if (v.pipeline_statistics) |e| e.len else 0, .pipeline_statistics = if (v.pipeline_statistics) |e| e.ptr else null, }; } }; pub inline fn destroy(query_set: *QuerySet) void { Impl.querySetDestroy(query_set); } pub inline fn getCount(query_set: *QuerySet) u32 { return Impl.querySetGetCount(query_set); } pub inline fn getType(query_set: *QuerySet) QueryType { return Impl.querySetGetType(query_set); } pub inline fn setLabel(query_set: *QuerySet, label: [*:0]const u8) void { Impl.querySetSetLabel(query_set, label); } pub inline fn reference(query_set: *QuerySet) void { Impl.querySetReference(query_set); } pub inline fn release(query_set: *QuerySet) void { Impl.querySetRelease(query_set); } };
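// Example (illustrative, not part of the original file): `Descriptor.init`
// turns an optional Zig slice into the pointer + count pair of the extern
// struct, so callers never hand-sync the two fields. The enum member names
// are assumed from main.zig.
test "QuerySet.Descriptor.init keeps count and pointer in sync" {
    const std = @import("std");
    const stats = [_]PipelineStatisticName{ .vertex_shader_invocations, .fragment_shader_invocations };
    const desc = QuerySet.Descriptor.init(.{
        .type = .pipeline_statistics,
        .count = 8,
        .pipeline_statistics = &stats,
    });
    try std.testing.expectEqual(@as(usize, stats.len), desc.pipeline_statistics_count);
    try std.testing.expectEqual(@as(u32, 8), desc.count);
}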
0
repos/mach-sysgpu/src
repos/mach-sysgpu/src/sysgpu/surface.zig
const ChainedStruct = @import("main.zig").ChainedStruct; const Impl = @import("interface.zig").Impl; pub const Surface = opaque { pub const Descriptor = extern struct { pub const NextInChain = extern union { generic: ?*const ChainedStruct, from_android_native_window: *const DescriptorFromAndroidNativeWindow, from_canvas_html_selector: *const DescriptorFromCanvasHTMLSelector, from_metal_layer: *const DescriptorFromMetalLayer, from_wayland_surface: *const DescriptorFromWaylandSurface, from_windows_core_window: *const DescriptorFromWindowsCoreWindow, from_windows_hwnd: *const DescriptorFromWindowsHWND, from_windows_swap_chain_panel: *const DescriptorFromWindowsSwapChainPanel, from_xlib_window: *const DescriptorFromXlibWindow, }; next_in_chain: NextInChain = .{ .generic = null }, label: ?[*:0]const u8 = null, }; pub const DescriptorFromAndroidNativeWindow = extern struct { chain: ChainedStruct = .{ .next = null, .s_type = .surface_descriptor_from_android_native_window }, window: *anyopaque, }; pub const DescriptorFromCanvasHTMLSelector = extern struct { chain: ChainedStruct = .{ .next = null, .s_type = .surface_descriptor_from_canvas_html_selector }, selector: [*:0]const u8, }; pub const DescriptorFromMetalLayer = extern struct { chain: ChainedStruct = .{ .next = null, .s_type = .surface_descriptor_from_metal_layer }, layer: *anyopaque, }; pub const DescriptorFromWaylandSurface = extern struct { chain: ChainedStruct = .{ .next = null, .s_type = .surface_descriptor_from_wayland_surface }, display: *anyopaque, surface: *anyopaque, }; pub const DescriptorFromWindowsCoreWindow = extern struct { chain: ChainedStruct = .{ .next = null, .s_type = .surface_descriptor_from_windows_core_window }, core_window: *anyopaque, }; pub const DescriptorFromWindowsHWND = extern struct { chain: ChainedStruct = .{ .next = null, .s_type = .surface_descriptor_from_windows_hwnd }, hinstance: *anyopaque, hwnd: *anyopaque, }; pub const DescriptorFromWindowsSwapChainPanel = extern struct { chain: ChainedStruct = .{ .next = null, .s_type = .surface_descriptor_from_windows_swap_chain_panel }, swap_chain_panel: *anyopaque, }; pub const DescriptorFromXlibWindow = extern struct { chain: ChainedStruct = .{ .next = null, .s_type = .surface_descriptor_from_xlib_window }, display: *anyopaque, window: u32, }; pub inline fn reference(surface: *Surface) void { Impl.surfaceReference(surface); } pub inline fn release(surface: *Surface) void { Impl.surfaceRelease(surface); } };
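// Example (illustrative, not part of the original file): platform selection is
// done by chaining one `DescriptorFrom*` struct; the default `chain.s_type`
// set above tells the implementation which union member is live. The
// `hinstance`/`hwnd` handles are assumed to come from the windowing layer.
fn exampleWin32SurfaceDescriptor(hinstance: *anyopaque, hwnd: *anyopaque) void {
    var from_hwnd = Surface.Descriptor.DescriptorFromWindowsHWND{
        .hinstance = hinstance,
        .hwnd = hwnd,
    };
    const desc = Surface.Descriptor{
        .label = "main window",
        .next_in_chain = .{ .from_windows_hwnd = &from_hwnd },
    };
    _ = desc; // would be passed to Instance.createSurface (not shown in this file)
}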
0
repos/mach-sysgpu/src
repos/mach-sysgpu/src/sysgpu/shared_texture_memory.zig
const Texture = @import("texture.zig").Texture; const Bool32 = @import("main.zig").Bool32; const Extent3D = @import("main.zig").Extent3D; const SharedFence = @import("shared_fence.zig").SharedFence; const ChainedStruct = @import("main.zig").ChainedStruct; const ChainedStructOut = @import("main.zig").ChainedStructOut; pub const SharedTextureMemory = opaque { pub const Properties = extern struct { next_in_chain: *const ChainedStruct, usage: Texture.UsageFlags, size: Extent3D, format: Texture.Format, }; pub const VkImageDescriptor = extern struct { chain: ChainedStruct = .{ .next = null, .s_type = .shared_texture_memory_vk_image_descriptor }, vk_format: i32, vk_usage_flags: Texture.UsageFlags, vk_extent3D: Extent3D, }; pub const AHardwareBufferDescriptor = extern struct { chain: ChainedStruct = .{ .next = null, .s_type = .shared_texture_memory_a_hardware_buffer_descriptor }, handle: *anyopaque, }; pub const BeginAccessDescriptor = extern struct { pub const NextInChain = extern union { generic: ?*const ChainedStruct, vk_image_layout_begin_state: *const VkImageLayoutBeginState, }; next_in_chain: NextInChain = .{ .generic = null }, initialized: Bool32, fence_count: usize, fences: *const SharedFence, signaled_values: *const u64, }; pub const Descriptor = extern struct { pub const NextInChain = extern union { generic: ?*const ChainedStruct, a_hardware_buffer_descriptor: *const AHardwareBufferDescriptor, dma_buf_descriptor: *const DmaBufDescriptor, dxgi_shared_handle_descriptor: *const DXGISharedHandleDescriptor, egl_image_descriptor: *const EGLImageDescriptor, io_surface_descriptor: *const IOSurfaceDescriptor, opaque_fd_descriptor: *const OpaqueFDDescriptor, vk_dedicated_allocation_descriptor: *const VkDedicatedAllocationDescriptor, zircon_handle_descriptor: *const ZirconHandleDescriptor, }; next_in_chain: NextInChain = .{ .generic = null }, label: ?[*]const u8, }; pub const DmaBufDescriptor = extern struct { chain: ChainedStruct = .{ .next = null, .s_type = .shared_texture_memory_dma_buf_descriptor }, memory_fd: c_int, allocation_size: u64, drm_modifier: u64, plane_count: usize, plane_offsets: *const u64, plane_strides: *const u32, }; pub const DXGISharedHandleDescriptor = extern struct { chain: ChainedStruct = .{ .next = null, .s_type = .shared_texture_memory_dxgi_shared_handle_descriptor }, handle: *anyopaque, }; pub const EGLImageDescriptor = extern struct { chain: ChainedStruct = .{ .next = null, .s_type = .shared_texture_memory_egl_image_descriptor }, image: *anyopaque, }; pub const EndAccessState = extern struct { pub const NextInChain = extern union { generic: ?*const ChainedStruct, vk_image_layout_end_state: *const VkImageLayoutEndState, }; next_in_chain: NextInChain = .{ .generic = null }, initialized: Bool32, fence_count: usize, fences: *const SharedFence, signaled_values: *const u64, }; pub const IOSurfaceDescriptor = extern struct { chain: ChainedStruct = .{ .next = null, .s_type = .shared_texture_memory_io_surface_descriptor }, ioSurface: *anyopaque, }; pub const OpaqueFDDescriptor = extern struct { chain: ChainedStruct = .{ .next = null, .s_type = .shared_texture_memory_opaque_fd_descriptor }, memory_fd: c_int, allocation_size: u64, }; pub const VkDedicatedAllocationDescriptor = extern struct { chain: ChainedStruct = .{ .next = null, .s_type = .shared_texture_memory_vk_dedicated_allocation_descriptor }, dedicated_allocation: Bool32, }; pub const VkImageLayoutBeginState = extern struct { chain: ChainedStruct = .{ .next = null, .s_type = 
.shared_texture_memory_vk_image_layout_begin_state }, old_layout: i32, new_layout: i32, }; pub const VkImageLayoutEndState = extern struct { chain: ChainedStruct = .{ .next = null, .s_type = .shared_texture_memory_vk_image_layout_end_state }, old_layout: i32, new_layout: i32, }; pub const ZirconHandleDescriptor = extern struct { chain: ChainedStruct = .{ .next = null, .s_type = .shared_texture_memory_zircon_handle_descriptor }, memory_fd: u32, allocation_size: u64, }; };
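// Example (illustrative, not part of the original file): importing a dma-buf
// allocation chains a DmaBufDescriptor into the base Descriptor; the plane
// arrays are passed as pointers into caller-owned storage. All values below
// are made up for the sketch.
fn exampleDmaBufImport(memory_fd: c_int) void {
    const plane_offsets = [_]u64{0};
    const plane_strides = [_]u32{4096};
    var dma_buf = SharedTextureMemory.DmaBufDescriptor{
        .memory_fd = memory_fd,
        .allocation_size = 4096 * 1024,
        .drm_modifier = 0, // linear layout
        .plane_count = plane_offsets.len,
        .plane_offsets = &plane_offsets[0],
        .plane_strides = &plane_strides[0],
    };
    const desc = SharedTextureMemory.Descriptor{
        .label = null,
        .next_in_chain = .{ .dma_buf_descriptor = &dma_buf },
    };
    _ = desc; // would be passed to the device's shared-texture-memory import entry point
}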
0
repos/mach-sysgpu/src
repos/mach-sysgpu/src/sysgpu/bind_group_layout.zig
const Bool32 = @import("main.zig").Bool32; const ChainedStruct = @import("main.zig").ChainedStruct; const ShaderStageFlags = @import("main.zig").ShaderStageFlags; const Buffer = @import("buffer.zig").Buffer; const Sampler = @import("sampler.zig").Sampler; const Texture = @import("texture.zig").Texture; const TextureView = @import("texture_view.zig").TextureView; const StorageTextureBindingLayout = @import("main.zig").StorageTextureBindingLayout; const StorageTextureAccess = @import("main.zig").StorageTextureAccess; const ExternalTexture = @import("external_texture.zig").ExternalTexture; const Impl = @import("interface.zig").Impl; pub const BindGroupLayout = opaque { pub const Entry = extern struct { pub const NextInChain = extern union { generic: ?*const ChainedStruct, external_texture_binding_layout: *const ExternalTexture.BindingLayout, }; next_in_chain: NextInChain = .{ .generic = null }, binding: u32, visibility: ShaderStageFlags, buffer: Buffer.BindingLayout = .{}, sampler: Sampler.BindingLayout = .{}, texture: Texture.BindingLayout = .{}, storage_texture: StorageTextureBindingLayout = .{}, /// Helper to create a buffer BindGroupLayout.Entry. pub fn buffer( binding: u32, visibility: ShaderStageFlags, binding_type: Buffer.BindingType, has_dynamic_offset: bool, min_binding_size: u64, ) Entry { return .{ .binding = binding, .visibility = visibility, .buffer = .{ .type = binding_type, .has_dynamic_offset = Bool32.from(has_dynamic_offset), .min_binding_size = min_binding_size, }, }; } /// Helper to create a sampler BindGroupLayout.Entry. pub fn sampler( binding: u32, visibility: ShaderStageFlags, binding_type: Sampler.BindingType, ) Entry { return .{ .binding = binding, .visibility = visibility, .sampler = .{ .type = binding_type }, }; } /// Helper to create a texture BindGroupLayout.Entry. pub fn texture( binding: u32, visibility: ShaderStageFlags, sample_type: Texture.SampleType, view_dimension: TextureView.Dimension, multisampled: bool, ) Entry { return .{ .binding = binding, .visibility = visibility, .texture = .{ .sample_type = sample_type, .view_dimension = view_dimension, .multisampled = Bool32.from(multisampled), }, }; } /// Helper to create a storage texture BindGroupLayout.Entry. pub fn storageTexture( binding: u32, visibility: ShaderStageFlags, access: StorageTextureAccess, format: Texture.Format, view_dimension: TextureView.Dimension, ) Entry { return .{ .binding = binding, .visibility = visibility, .storage_texture = .{ .access = access, .format = format, .view_dimension = view_dimension, }, }; } }; pub const Descriptor = extern struct { next_in_chain: ?*const ChainedStruct = null, label: ?[*:0]const u8 = null, entry_count: usize = 0, entries: ?[*]const Entry = null, /// Provides a slightly friendlier Zig API to initialize this structure. pub inline fn init(v: struct { next_in_chain: ?*const ChainedStruct = null, label: ?[*:0]const u8 = null, entries: ?[]const Entry = null, }) Descriptor { return .{ .next_in_chain = v.next_in_chain, .label = v.label, .entry_count = if (v.entries) |e| e.len else 0, .entries = if (v.entries) |e| e.ptr else null, }; } }; pub inline fn setLabel(bind_group_layout: *BindGroupLayout, label: [*:0]const u8) void { Impl.bindGroupLayoutSetLabel(bind_group_layout, label); } pub inline fn reference(bind_group_layout: *BindGroupLayout) void { Impl.bindGroupLayoutReference(bind_group_layout); } pub inline fn release(bind_group_layout: *BindGroupLayout) void { Impl.bindGroupLayoutRelease(bind_group_layout); } };
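// Example (illustrative, not part of the original file): the helper
// constructors above compose into a typical layout — a uniform buffer for the
// vertex stage plus a sampled texture and sampler for the fragment stage.
// The `ShaderStageFlags` field names and the enum members (`.uniform`,
// `.float`, `.dimension_2d`, `.filtering`) are assumed from main.zig.
test "BindGroupLayout.Entry helpers fill the unused binding layouts with defaults" {
    const std = @import("std");
    const desc = BindGroupLayout.Descriptor.init(.{
        .label = "example layout",
        .entries = &.{
            BindGroupLayout.Entry.buffer(0, .{ .vertex = true }, .uniform, false, 0),
            BindGroupLayout.Entry.texture(1, .{ .fragment = true }, .float, .dimension_2d, false),
            BindGroupLayout.Entry.sampler(2, .{ .fragment = true }, .filtering),
        },
    });
    try std.testing.expectEqual(@as(usize, 3), desc.entry_count);
}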
0
repos/mach-sysgpu/src
repos/mach-sysgpu/src/sysgpu/texture_view.zig
const ChainedStruct = @import("main.zig").ChainedStruct; const Texture = @import("texture.zig").Texture; const Impl = @import("interface.zig").Impl; const types = @import("main.zig"); pub const TextureView = opaque { pub const Dimension = enum(u32) { dimension_undefined = 0x00000000, dimension_1d = 0x00000001, dimension_2d = 0x00000002, dimension_2d_array = 0x00000003, dimension_cube = 0x00000004, dimension_cube_array = 0x00000005, dimension_3d = 0x00000006, }; pub const Descriptor = extern struct { next_in_chain: ?*const ChainedStruct = null, label: ?[*:0]const u8 = null, format: Texture.Format = .undefined, dimension: Dimension = .dimension_undefined, base_mip_level: u32 = 0, mip_level_count: u32 = types.mip_level_count_undefined, base_array_layer: u32 = 0, array_layer_count: u32 = types.array_layer_count_undefined, aspect: Texture.Aspect = .all, }; pub inline fn setLabel(texture_view: *TextureView, label: [*:0]const u8) void { Impl.textureViewSetLabel(texture_view, label); } pub inline fn reference(texture_view: *TextureView) void { Impl.textureViewReference(texture_view); } pub inline fn release(texture_view: *TextureView) void { Impl.textureViewRelease(texture_view); } };
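// Example (illustrative, not part of the original file): a view over the
// first mip level of a 2D texture. `format` stays `.undefined` so the
// implementation can inherit it from the parent texture, while the sentinel
// count defaults above mean "all remaining levels/layers" when left in place.
const example_first_mip_view_desc = TextureView.Descriptor{
    .label = "first mip",
    .dimension = .dimension_2d,
    .base_mip_level = 0,
    .mip_level_count = 1,
    .aspect = .all,
};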
0
repos/mach-sysgpu/src
repos/mach-sysgpu/src/sysgpu/pipeline_layout.zig
const ChainedStruct = @import("main.zig").ChainedStruct; const BindGroupLayout = @import("bind_group_layout.zig").BindGroupLayout; const Impl = @import("interface.zig").Impl; pub const PipelineLayout = opaque { pub const Descriptor = extern struct { next_in_chain: ?*const ChainedStruct = null, label: ?[*:0]const u8 = null, bind_group_layout_count: usize = 0, bind_group_layouts: ?[*]const *BindGroupLayout = null, /// Provides a slightly friendlier Zig API to initialize this structure. pub inline fn init(v: struct { next_in_chain: ?*const ChainedStruct = null, label: ?[*:0]const u8 = null, bind_group_layouts: ?[]const *BindGroupLayout = null, }) Descriptor { return .{ .next_in_chain = v.next_in_chain, .label = v.label, .bind_group_layout_count = if (v.bind_group_layouts) |e| e.len else 0, .bind_group_layouts = if (v.bind_group_layouts) |e| e.ptr else null, }; } }; pub inline fn setLabel(pipeline_layout: *PipelineLayout, label: [*:0]const u8) void { Impl.pipelineLayoutSetLabel(pipeline_layout, label); } pub inline fn reference(pipeline_layout: *PipelineLayout) void { Impl.pipelineLayoutReference(pipeline_layout); } pub inline fn release(pipeline_layout: *PipelineLayout) void { Impl.pipelineLayoutRelease(pipeline_layout); } };
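// Example (illustrative, not part of the original file): `init` mirrors the
// other descriptor helpers — a slice of layouts in, pointer + count out. The
// caller keeps ownership of the slice for as long as the descriptor is used.
fn examplePipelineLayoutDescriptor(layouts: []const *BindGroupLayout) PipelineLayout.Descriptor {
    return PipelineLayout.Descriptor.init(.{
        .label = "example pipeline layout",
        .bind_group_layouts = layouts,
    });
}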
0
repos/mach-sysgpu/src
repos/mach-sysgpu/src/sysgpu/compute_pipeline.zig
const ChainedStruct = @import("main.zig").ChainedStruct; const ProgrammableStageDescriptor = @import("main.zig").ProgrammableStageDescriptor; const PipelineLayout = @import("pipeline_layout.zig").PipelineLayout; const BindGroupLayout = @import("bind_group_layout.zig").BindGroupLayout; const Impl = @import("interface.zig").Impl; pub const ComputePipeline = opaque { pub const Descriptor = extern struct { next_in_chain: ?*const ChainedStruct = null, label: ?[*:0]const u8 = null, layout: ?*PipelineLayout = null, compute: ProgrammableStageDescriptor, }; pub inline fn getBindGroupLayout(compute_pipeline: *ComputePipeline, group_index: u32) *BindGroupLayout { return Impl.computePipelineGetBindGroupLayout(compute_pipeline, group_index); } pub inline fn setLabel(compute_pipeline: *ComputePipeline, label: [*:0]const u8) void { Impl.computePipelineSetLabel(compute_pipeline, label); } pub inline fn reference(compute_pipeline: *ComputePipeline) void { Impl.computePipelineReference(compute_pipeline); } pub inline fn release(compute_pipeline: *ComputePipeline) void { Impl.computePipelineRelease(compute_pipeline); } };
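// Example (illustrative, not part of the original file): leaving `layout`
// null asks the implementation to derive a default layout from the shader's
// bindings, which is what the OpenGL backend's ComputePipeline.init in this
// repository does when `desc.layout` is absent.
fn exampleComputePipelineDescriptor(compute_stage: ProgrammableStageDescriptor) ComputePipeline.Descriptor {
    return .{
        .label = "example compute pipeline",
        .layout = null,
        .compute = compute_stage,
    };
}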
0
repos/mach-sysgpu/src
repos/mach-sysgpu/src/sysgpu/interface.zig
const sysgpu = @import("main.zig"); /// The sysgpu.Interface implementation that is used by the entire program. Only one may exist, since /// it is resolved fully at comptime with no vtable indirection, etc. /// /// Depending on the implementation, it may need to be `.init()`ialized before use. pub const Impl = blk: { if (@import("builtin").is_test) { break :blk StubInterface; } else { const root = @import("root"); if (!@hasDecl(root, "SYSGPUInterface")) @compileError("expected to find `pub const SYSGPUInterface = T;` in root file"); _ = sysgpu.Interface(root.SYSGPUInterface); // verify the type break :blk root.SYSGPUInterface; } }; /// Verifies that a sysgpu.Interface implementation exposes the expected function declarations. pub fn Interface(comptime T: type) type { // sysgpu.Device assertDecl(T, "deviceCreateRenderPipeline", fn (device: *sysgpu.Device, descriptor: *const sysgpu.RenderPipeline.Descriptor) callconv(.Inline) *sysgpu.RenderPipeline); assertDecl(T, "deviceCreateRenderPipelineAsync", fn (device: *sysgpu.Device, descriptor: *const sysgpu.RenderPipeline.Descriptor, callback: sysgpu.CreateRenderPipelineAsyncCallback, userdata: ?*anyopaque) callconv(.Inline) void); assertDecl(T, "deviceCreatePipelineLayout", fn (device: *sysgpu.Device, pipeline_layout_descriptor: *const sysgpu.PipelineLayout.Descriptor) callconv(.Inline) *sysgpu.PipelineLayout); // sysgpu.PipelineLayout assertDecl(T, "pipelineLayoutSetLabel", fn (pipeline_layout: *sysgpu.PipelineLayout, label: [*:0]const u8) callconv(.Inline) void); assertDecl(T, "pipelineLayoutReference", fn (pipeline_layout: *sysgpu.PipelineLayout) callconv(.Inline) void); assertDecl(T, "pipelineLayoutRelease", fn (pipeline_layout: *sysgpu.PipelineLayout) callconv(.Inline) void); // sysgpu.RenderBundleEncoder assertDecl(T, "renderBundleEncoderSetPipeline", fn (render_bundle_encoder: *sysgpu.RenderBundleEncoder, pipeline: *sysgpu.RenderPipeline) callconv(.Inline) void); assertDecl(T, "renderBundleEncoderSetBindGroup", fn (render_bundle_encoder: *sysgpu.RenderBundleEncoder, group_index: u32, group: *sysgpu.BindGroup, dynamic_offset_count: usize, dynamic_offsets: ?[*]const u32) callconv(.Inline) void); // sysgpu.RenderPassEncoder assertDecl(T, "renderPassEncoderSetPipeline", fn (render_pass_encoder: *sysgpu.RenderPassEncoder, pipeline: *sysgpu.RenderPipeline) callconv(.Inline) void); assertDecl(T, "renderPassEncoderSetBindGroup", fn (render_pass_encoder: *sysgpu.RenderPassEncoder, group_index: u32, group: *sysgpu.BindGroup, dynamic_offset_count: usize, dynamic_offsets: ?[*]const u32) callconv(.Inline) void); // sysgpu.BindGroup assertDecl(T, "bindGroupSetLabel", fn (bind_group: *sysgpu.BindGroup, label: [*:0]const u8) callconv(.Inline) void); assertDecl(T, "bindGroupReference", fn (bind_group: *sysgpu.BindGroup) callconv(.Inline) void); assertDecl(T, "bindGroupRelease", fn (bind_group: *sysgpu.BindGroup) callconv(.Inline) void); // sysgpu.BindGroupLayout assertDecl(T, "bindGroupLayoutSetLabel", fn (bind_group_layout: *sysgpu.BindGroupLayout, label: [*:0]const u8) callconv(.Inline) void); assertDecl(T, "bindGroupLayoutReference", fn (bind_group_layout: *sysgpu.BindGroupLayout) callconv(.Inline) void); assertDecl(T, "bindGroupLayoutRelease", fn (bind_group_layout: *sysgpu.BindGroupLayout) callconv(.Inline) void); // sysgpu.RenderPipeline assertDecl(T, "renderPipelineGetBindGroupLayout", fn (render_pipeline: *sysgpu.RenderPipeline, group_index: u32) callconv(.Inline) *sysgpu.BindGroupLayout); assertDecl(T, "renderPipelineSetLabel", fn 
/// Verifies that a sysgpu.Interface implementation exposes the expected function declarations.
pub fn Interface(comptime T: type) type {
    // sysgpu.Device
    assertDecl(T, "deviceCreateRenderPipeline", fn (device: *sysgpu.Device, descriptor: *const sysgpu.RenderPipeline.Descriptor) callconv(.Inline) *sysgpu.RenderPipeline);
    assertDecl(T, "deviceCreateRenderPipelineAsync", fn (device: *sysgpu.Device, descriptor: *const sysgpu.RenderPipeline.Descriptor, callback: sysgpu.CreateRenderPipelineAsyncCallback, userdata: ?*anyopaque) callconv(.Inline) void);
    assertDecl(T, "deviceCreatePipelineLayout", fn (device: *sysgpu.Device, pipeline_layout_descriptor: *const sysgpu.PipelineLayout.Descriptor) callconv(.Inline) *sysgpu.PipelineLayout);

    // sysgpu.PipelineLayout
    assertDecl(T, "pipelineLayoutSetLabel", fn (pipeline_layout: *sysgpu.PipelineLayout, label: [*:0]const u8) callconv(.Inline) void);
    assertDecl(T, "pipelineLayoutReference", fn (pipeline_layout: *sysgpu.PipelineLayout) callconv(.Inline) void);
    assertDecl(T, "pipelineLayoutRelease", fn (pipeline_layout: *sysgpu.PipelineLayout) callconv(.Inline) void);

    // sysgpu.RenderBundleEncoder
    assertDecl(T, "renderBundleEncoderSetPipeline", fn (render_bundle_encoder: *sysgpu.RenderBundleEncoder, pipeline: *sysgpu.RenderPipeline) callconv(.Inline) void);
    assertDecl(T, "renderBundleEncoderSetBindGroup", fn (render_bundle_encoder: *sysgpu.RenderBundleEncoder, group_index: u32, group: *sysgpu.BindGroup, dynamic_offset_count: usize, dynamic_offsets: ?[*]const u32) callconv(.Inline) void);

    // sysgpu.RenderPassEncoder
    assertDecl(T, "renderPassEncoderSetPipeline", fn (render_pass_encoder: *sysgpu.RenderPassEncoder, pipeline: *sysgpu.RenderPipeline) callconv(.Inline) void);
    assertDecl(T, "renderPassEncoderSetBindGroup", fn (render_pass_encoder: *sysgpu.RenderPassEncoder, group_index: u32, group: *sysgpu.BindGroup, dynamic_offset_count: usize, dynamic_offsets: ?[*]const u32) callconv(.Inline) void);

    // sysgpu.BindGroup
    assertDecl(T, "bindGroupSetLabel", fn (bind_group: *sysgpu.BindGroup, label: [*:0]const u8) callconv(.Inline) void);
    assertDecl(T, "bindGroupReference", fn (bind_group: *sysgpu.BindGroup) callconv(.Inline) void);
    assertDecl(T, "bindGroupRelease", fn (bind_group: *sysgpu.BindGroup) callconv(.Inline) void);

    // sysgpu.BindGroupLayout
    assertDecl(T, "bindGroupLayoutSetLabel", fn (bind_group_layout: *sysgpu.BindGroupLayout, label: [*:0]const u8) callconv(.Inline) void);
    assertDecl(T, "bindGroupLayoutReference", fn (bind_group_layout: *sysgpu.BindGroupLayout) callconv(.Inline) void);
    assertDecl(T, "bindGroupLayoutRelease", fn (bind_group_layout: *sysgpu.BindGroupLayout) callconv(.Inline) void);

    // sysgpu.RenderPipeline
    assertDecl(T, "renderPipelineGetBindGroupLayout", fn (render_pipeline: *sysgpu.RenderPipeline, group_index: u32) callconv(.Inline) *sysgpu.BindGroupLayout);
    assertDecl(T, "renderPipelineSetLabel", fn (render_pipeline: *sysgpu.RenderPipeline, label: [*:0]const u8) callconv(.Inline) void);
    assertDecl(T, "renderPipelineReference", fn (render_pipeline: *sysgpu.RenderPipeline) callconv(.Inline) void);
    assertDecl(T, "renderPipelineRelease", fn (render_pipeline: *sysgpu.RenderPipeline) callconv(.Inline) void);

    // sysgpu.Instance
    assertDecl(T, "createInstance", fn (descriptor: ?*const sysgpu.Instance.Descriptor) callconv(.Inline) ?*sysgpu.Instance);

    // sysgpu.Adapter
    assertDecl(T, "adapterCreateDevice", fn (adapter: *sysgpu.Adapter, descriptor: ?*const sysgpu.Device.Descriptor) callconv(.Inline) ?*sysgpu.Device);
    assertDecl(T, "adapterEnumerateFeatures", fn (adapter: *sysgpu.Adapter, features: ?[*]sysgpu.FeatureName) callconv(.Inline) usize);
    assertDecl(T, "adapterGetInstance", fn (adapter: *sysgpu.Adapter) callconv(.Inline) *sysgpu.Instance);
    assertDecl(T, "adapterGetLimits", fn (adapter: *sysgpu.Adapter, limits: *sysgpu.SupportedLimits) callconv(.Inline) u32);
    assertDecl(T, "adapterGetProperties", fn (adapter: *sysgpu.Adapter, properties: *sysgpu.Adapter.Properties) callconv(.Inline) void);
    assertDecl(T, "adapterHasFeature", fn (adapter: *sysgpu.Adapter, feature: sysgpu.FeatureName) callconv(.Inline) u32);
    assertDecl(T, "adapterPropertiesFreeMembers", fn (value: sysgpu.Adapter.Properties) callconv(.Inline) void);
    assertDecl(T, "adapterRequestDevice", fn (adapter: *sysgpu.Adapter, descriptor: ?*const sysgpu.Device.Descriptor, callback: sysgpu.RequestDeviceCallback, userdata: ?*anyopaque) callconv(.Inline) void);
    assertDecl(T, "adapterReference", fn (adapter: *sysgpu.Adapter) callconv(.Inline) void);
    assertDecl(T, "adapterRelease", fn (adapter: *sysgpu.Adapter) callconv(.Inline) void);

    // sysgpu.Buffer
    assertDecl(T, "bufferDestroy", fn (buffer: *sysgpu.Buffer) callconv(.Inline) void);
    assertDecl(T, "bufferGetConstMappedRange", fn (buffer: *sysgpu.Buffer, offset: usize, size: usize) callconv(.Inline) ?*const anyopaque);
    assertDecl(T, "bufferGetMappedRange", fn (buffer: *sysgpu.Buffer, offset: usize, size: usize) callconv(.Inline) ?*anyopaque);
    assertDecl(T, "bufferGetSize", fn (buffer: *sysgpu.Buffer) callconv(.Inline) u64);
    assertDecl(T, "bufferGetUsage", fn (buffer: *sysgpu.Buffer) callconv(.Inline) sysgpu.Buffer.UsageFlags);
    assertDecl(T, "bufferMapAsync", fn (buffer: *sysgpu.Buffer, mode: sysgpu.MapModeFlags, offset: usize, size: usize, callback: sysgpu.Buffer.MapCallback, userdata: ?*anyopaque) callconv(.Inline) void);
    assertDecl(T, "bufferSetLabel", fn (buffer: *sysgpu.Buffer, label: [*:0]const u8) callconv(.Inline) void);
    assertDecl(T, "bufferUnmap", fn (buffer: *sysgpu.Buffer) callconv(.Inline) void);
    assertDecl(T, "bufferReference", fn (buffer: *sysgpu.Buffer) callconv(.Inline) void);
    assertDecl(T, "bufferRelease", fn (buffer: *sysgpu.Buffer) callconv(.Inline) void);

    // sysgpu.CommandBuffer
    assertDecl(T, "commandBufferSetLabel", fn (command_buffer: *sysgpu.CommandBuffer, label: [*:0]const u8) callconv(.Inline) void);
    assertDecl(T, "commandBufferReference", fn (command_buffer: *sysgpu.CommandBuffer) callconv(.Inline) void);
    assertDecl(T, "commandBufferRelease", fn (command_buffer: *sysgpu.CommandBuffer) callconv(.Inline) void);

    // sysgpu.CommandEncoder
    assertDecl(T, "commandEncoderBeginComputePass", fn (command_encoder: *sysgpu.CommandEncoder, descriptor: ?*const sysgpu.ComputePassDescriptor) callconv(.Inline) *sysgpu.ComputePassEncoder);
    assertDecl(T, "commandEncoderBeginRenderPass", fn (command_encoder: *sysgpu.CommandEncoder, descriptor: *const sysgpu.RenderPassDescriptor) callconv(.Inline) *sysgpu.RenderPassEncoder);
    assertDecl(T, "commandEncoderClearBuffer", fn (command_encoder: *sysgpu.CommandEncoder, buffer: *sysgpu.Buffer, offset: u64, size: u64) callconv(.Inline) void);
    assertDecl(T, "commandEncoderCopyBufferToBuffer", fn (command_encoder: *sysgpu.CommandEncoder, source: *sysgpu.Buffer, source_offset: u64, destination: *sysgpu.Buffer, destination_offset: u64, size: u64) callconv(.Inline) void);
    assertDecl(T, "commandEncoderCopyBufferToTexture", fn (command_encoder: *sysgpu.CommandEncoder, source: *const sysgpu.ImageCopyBuffer, destination: *const sysgpu.ImageCopyTexture, copy_size: *const sysgpu.Extent3D) callconv(.Inline) void);
    assertDecl(T, "commandEncoderCopyTextureToBuffer", fn (command_encoder: *sysgpu.CommandEncoder, source: *const sysgpu.ImageCopyTexture, destination: *const sysgpu.ImageCopyBuffer, copy_size: *const sysgpu.Extent3D) callconv(.Inline) void);
    assertDecl(T, "commandEncoderCopyTextureToTexture", fn (command_encoder: *sysgpu.CommandEncoder, source: *const sysgpu.ImageCopyTexture, destination: *const sysgpu.ImageCopyTexture, copy_size: *const sysgpu.Extent3D) callconv(.Inline) void);
    assertDecl(T, "commandEncoderFinish", fn (command_encoder: *sysgpu.CommandEncoder, descriptor: ?*const sysgpu.CommandBuffer.Descriptor) callconv(.Inline) *sysgpu.CommandBuffer);
    assertDecl(T, "commandEncoderInjectValidationError", fn (command_encoder: *sysgpu.CommandEncoder, message: [*:0]const u8) callconv(.Inline) void);
    assertDecl(T, "commandEncoderInsertDebugMarker", fn (command_encoder: *sysgpu.CommandEncoder, marker_label: [*:0]const u8) callconv(.Inline) void);
    assertDecl(T, "commandEncoderPopDebugGroup", fn (command_encoder: *sysgpu.CommandEncoder) callconv(.Inline) void);
    assertDecl(T, "commandEncoderPushDebugGroup", fn (command_encoder: *sysgpu.CommandEncoder, group_label: [*:0]const u8) callconv(.Inline) void);
    assertDecl(T, "commandEncoderResolveQuerySet", fn (command_encoder: *sysgpu.CommandEncoder, query_set: *sysgpu.QuerySet, first_query: u32, query_count: u32, destination: *sysgpu.Buffer, destination_offset: u64) callconv(.Inline) void);
    assertDecl(T, "commandEncoderSetLabel", fn (command_encoder: *sysgpu.CommandEncoder, label: [*:0]const u8) callconv(.Inline) void);
    assertDecl(T, "commandEncoderWriteBuffer", fn (command_encoder: *sysgpu.CommandEncoder, buffer: *sysgpu.Buffer, buffer_offset: u64, data: [*]const u8, size: u64) callconv(.Inline) void);
    assertDecl(T, "commandEncoderWriteTimestamp", fn (command_encoder: *sysgpu.CommandEncoder, query_set: *sysgpu.QuerySet, query_index: u32) callconv(.Inline) void);
    assertDecl(T, "commandEncoderReference", fn (command_encoder: *sysgpu.CommandEncoder) callconv(.Inline) void);
    assertDecl(T, "commandEncoderRelease", fn (command_encoder: *sysgpu.CommandEncoder) callconv(.Inline) void);

    // sysgpu.ComputePassEncoder
    assertDecl(T, "computePassEncoderDispatchWorkgroups", fn (compute_pass_encoder: *sysgpu.ComputePassEncoder, workgroup_count_x: u32, workgroup_count_y: u32, workgroup_count_z: u32) callconv(.Inline) void);
    assertDecl(T, "computePassEncoderDispatchWorkgroupsIndirect", fn (compute_pass_encoder: *sysgpu.ComputePassEncoder, indirect_buffer: *sysgpu.Buffer, indirect_offset: u64) callconv(.Inline) void);
    assertDecl(T, "computePassEncoderEnd", fn (compute_pass_encoder: *sysgpu.ComputePassEncoder) callconv(.Inline) void);
    assertDecl(T, "computePassEncoderInsertDebugMarker", fn (compute_pass_encoder: *sysgpu.ComputePassEncoder, marker_label: [*:0]const u8) callconv(.Inline) void);
    assertDecl(T, "computePassEncoderPopDebugGroup", fn (compute_pass_encoder: *sysgpu.ComputePassEncoder) callconv(.Inline) void);
    assertDecl(T, "computePassEncoderPushDebugGroup", fn (compute_pass_encoder: *sysgpu.ComputePassEncoder, group_label: [*:0]const u8) callconv(.Inline) void);
    assertDecl(T, "computePassEncoderSetBindGroup", fn (compute_pass_encoder: *sysgpu.ComputePassEncoder, group_index: u32, group: *sysgpu.BindGroup, dynamic_offset_count: usize, dynamic_offsets: ?[*]const u32) callconv(.Inline) void);
    assertDecl(T, "computePassEncoderSetLabel", fn (compute_pass_encoder: *sysgpu.ComputePassEncoder, label: [*:0]const u8) callconv(.Inline) void);
    assertDecl(T, "computePassEncoderSetPipeline", fn (compute_pass_encoder: *sysgpu.ComputePassEncoder, pipeline: *sysgpu.ComputePipeline) callconv(.Inline) void);
    assertDecl(T, "computePassEncoderWriteTimestamp", fn (compute_pass_encoder: *sysgpu.ComputePassEncoder, query_set: *sysgpu.QuerySet, query_index: u32) callconv(.Inline) void);
    assertDecl(T, "computePassEncoderReference", fn (compute_pass_encoder: *sysgpu.ComputePassEncoder) callconv(.Inline) void);
    assertDecl(T, "computePassEncoderRelease", fn (compute_pass_encoder: *sysgpu.ComputePassEncoder) callconv(.Inline) void);

    // sysgpu.ComputePipeline
    assertDecl(T, "computePipelineGetBindGroupLayout", fn (compute_pipeline: *sysgpu.ComputePipeline, group_index: u32) callconv(.Inline) *sysgpu.BindGroupLayout);
    assertDecl(T, "computePipelineSetLabel", fn (compute_pipeline: *sysgpu.ComputePipeline, label: [*:0]const u8) callconv(.Inline) void);
    assertDecl(T, "computePipelineReference", fn (compute_pipeline: *sysgpu.ComputePipeline) callconv(.Inline) void);
    assertDecl(T, "computePipelineRelease", fn (compute_pipeline: *sysgpu.ComputePipeline) callconv(.Inline) void);

    // sysgpu.Device
    assertDecl(T, "getProcAddress", fn (device: *sysgpu.Device, proc_name: [*:0]const u8) callconv(.Inline) ?sysgpu.Proc);
    assertDecl(T, "deviceCreateBindGroup", fn (device: *sysgpu.Device, descriptor: *const sysgpu.BindGroup.Descriptor) callconv(.Inline) *sysgpu.BindGroup);
    assertDecl(T, "deviceCreateBindGroupLayout", fn (device: *sysgpu.Device, descriptor: *const sysgpu.BindGroupLayout.Descriptor) callconv(.Inline) *sysgpu.BindGroupLayout);
    assertDecl(T, "deviceCreateBuffer", fn (device: *sysgpu.Device, descriptor: *const sysgpu.Buffer.Descriptor) callconv(.Inline) *sysgpu.Buffer);
    assertDecl(T, "deviceCreateCommandEncoder", fn (device: *sysgpu.Device, descriptor: ?*const sysgpu.CommandEncoder.Descriptor) callconv(.Inline) *sysgpu.CommandEncoder);
    assertDecl(T, "deviceCreateComputePipeline", fn (device: *sysgpu.Device, descriptor: *const sysgpu.ComputePipeline.Descriptor) callconv(.Inline) *sysgpu.ComputePipeline);
    assertDecl(T, "deviceCreateComputePipelineAsync", fn (device: *sysgpu.Device, descriptor: *const sysgpu.ComputePipeline.Descriptor, callback: sysgpu.CreateComputePipelineAsyncCallback, userdata: ?*anyopaque) callconv(.Inline) void);
    assertDecl(T, "deviceCreateErrorBuffer", fn (device: *sysgpu.Device, descriptor: *const sysgpu.Buffer.Descriptor) callconv(.Inline) *sysgpu.Buffer);
    assertDecl(T, "deviceCreateErrorExternalTexture", fn (device: *sysgpu.Device) callconv(.Inline) *sysgpu.ExternalTexture);
    assertDecl(T, "deviceCreateErrorTexture", fn (device: *sysgpu.Device, descriptor: *const sysgpu.Texture.Descriptor) callconv(.Inline) *sysgpu.Texture);
    assertDecl(T, "deviceCreateExternalTexture", fn (device: *sysgpu.Device, external_texture_descriptor: *const sysgpu.ExternalTexture.Descriptor) callconv(.Inline) *sysgpu.ExternalTexture);
    assertDecl(T, "deviceCreateQuerySet", fn (device: *sysgpu.Device, descriptor: *const sysgpu.QuerySet.Descriptor) callconv(.Inline) *sysgpu.QuerySet);
    assertDecl(T, "deviceCreateRenderBundleEncoder", fn (device: *sysgpu.Device, descriptor: *const sysgpu.RenderBundleEncoder.Descriptor) callconv(.Inline) *sysgpu.RenderBundleEncoder);
    // TODO(self-hosted): this cannot be marked as inline for some reason:
    // https://github.com/ziglang/zig/issues/12545
    assertDecl(T, "deviceCreateSampler", fn (device: *sysgpu.Device, descriptor: ?*const sysgpu.Sampler.Descriptor) callconv(.Inline) *sysgpu.Sampler);
    assertDecl(T, "deviceCreateShaderModule", fn (device: *sysgpu.Device, descriptor: *const sysgpu.ShaderModule.Descriptor) callconv(.Inline) *sysgpu.ShaderModule);
    assertDecl(T, "deviceCreateSwapChain", fn (device: *sysgpu.Device, surface: ?*sysgpu.Surface, descriptor: *const sysgpu.SwapChain.Descriptor) callconv(.Inline) *sysgpu.SwapChain);
    assertDecl(T, "deviceCreateTexture", fn (device: *sysgpu.Device, descriptor: *const sysgpu.Texture.Descriptor) callconv(.Inline) *sysgpu.Texture);
    assertDecl(T, "deviceDestroy", fn (device: *sysgpu.Device) callconv(.Inline) void);
    assertDecl(T, "deviceEnumerateFeatures", fn (device: *sysgpu.Device, features: ?[*]sysgpu.FeatureName) callconv(.Inline) usize);
    assertDecl(T, "deviceGetLimits", fn (device: *sysgpu.Device, limits: *sysgpu.SupportedLimits) callconv(.Inline) u32);
    assertDecl(T, "deviceGetQueue", fn (device: *sysgpu.Device) callconv(.Inline) *sysgpu.Queue);
    assertDecl(T, "deviceHasFeature", fn (device: *sysgpu.Device, feature: sysgpu.FeatureName) callconv(.Inline) u32);
    assertDecl(T, "deviceImportSharedFence", fn (device: *sysgpu.Device, descriptor: *const sysgpu.SharedFence.Descriptor) callconv(.Inline) *sysgpu.SharedFence);
    assertDecl(T, "deviceImportSharedTextureMemory", fn (device: *sysgpu.Device, descriptor: *const sysgpu.SharedTextureMemory.Descriptor) callconv(.Inline) *sysgpu.SharedTextureMemory);
    assertDecl(T, "deviceInjectError", fn (device: *sysgpu.Device, typ: sysgpu.ErrorType, message: [*:0]const u8) callconv(.Inline) void);
    assertDecl(T, "devicePopErrorScope", fn (device: *sysgpu.Device, callback: sysgpu.ErrorCallback, userdata: ?*anyopaque) callconv(.Inline) void);
    assertDecl(T, "devicePushErrorScope", fn (device: *sysgpu.Device, filter: sysgpu.ErrorFilter) callconv(.Inline) void);
    assertDecl(T, "deviceSetDeviceLostCallback", fn (device: *sysgpu.Device, callback: ?sysgpu.Device.LostCallback, userdata: ?*anyopaque) callconv(.Inline) void);
    assertDecl(T, "deviceSetLabel", fn (device: *sysgpu.Device, label: [*:0]const u8) callconv(.Inline) void);
    assertDecl(T, "deviceSetLoggingCallback", fn (device: *sysgpu.Device, callback: ?sysgpu.LoggingCallback, userdata: ?*anyopaque) callconv(.Inline) void);
    assertDecl(T, "deviceSetUncapturedErrorCallback", fn (device: *sysgpu.Device, callback: ?sysgpu.ErrorCallback, userdata: ?*anyopaque) callconv(.Inline) void);
    assertDecl(T, "deviceTick", fn (device: *sysgpu.Device) callconv(.Inline) void);
    assertDecl(T, "machDeviceWaitForCommandsToBeScheduled", fn (device: *sysgpu.Device) callconv(.Inline) void);
    assertDecl(T, "deviceReference", fn (device: *sysgpu.Device) callconv(.Inline) void);
    assertDecl(T, "deviceRelease", fn (device: *sysgpu.Device) callconv(.Inline) void);

    // sysgpu.ExternalTexture
    assertDecl(T, "externalTextureDestroy", fn (external_texture: *sysgpu.ExternalTexture) callconv(.Inline) void);
    assertDecl(T, "externalTextureSetLabel", fn (external_texture: *sysgpu.ExternalTexture, label: [*:0]const u8) callconv(.Inline) void);
    assertDecl(T, "externalTextureReference", fn (external_texture: *sysgpu.ExternalTexture) callconv(.Inline) void);
    assertDecl(T, "externalTextureRelease", fn (external_texture: *sysgpu.ExternalTexture) callconv(.Inline) void);

    // sysgpu.Instance
    assertDecl(T, "instanceCreateSurface", fn (instance: *sysgpu.Instance, descriptor: *const sysgpu.Surface.Descriptor) callconv(.Inline) *sysgpu.Surface);
    assertDecl(T, "instanceProcessEvents", fn (instance: *sysgpu.Instance) callconv(.Inline) void);
    assertDecl(T, "instanceRequestAdapter", fn (instance: *sysgpu.Instance, options: ?*const sysgpu.RequestAdapterOptions, callback: sysgpu.RequestAdapterCallback, userdata: ?*anyopaque) callconv(.Inline) void);
    assertDecl(T, "instanceReference", fn (instance: *sysgpu.Instance) callconv(.Inline) void);
    assertDecl(T, "instanceRelease", fn (instance: *sysgpu.Instance) callconv(.Inline) void);

    // sysgpu.QuerySet
    assertDecl(T, "querySetDestroy", fn (query_set: *sysgpu.QuerySet) callconv(.Inline) void);
    assertDecl(T, "querySetGetCount", fn (query_set: *sysgpu.QuerySet) callconv(.Inline) u32);
    assertDecl(T, "querySetGetType", fn (query_set: *sysgpu.QuerySet) callconv(.Inline) sysgpu.QueryType);
    assertDecl(T, "querySetSetLabel", fn (query_set: *sysgpu.QuerySet, label: [*:0]const u8) callconv(.Inline) void);
    assertDecl(T, "querySetReference", fn (query_set: *sysgpu.QuerySet) callconv(.Inline) void);
    assertDecl(T, "querySetRelease", fn (query_set: *sysgpu.QuerySet) callconv(.Inline) void);

    // sysgpu.Queue
    assertDecl(T, "queueCopyTextureForBrowser", fn (queue: *sysgpu.Queue, source: *const sysgpu.ImageCopyTexture, destination: *const sysgpu.ImageCopyTexture, copy_size: *const sysgpu.Extent3D, options: *const sysgpu.CopyTextureForBrowserOptions) callconv(.Inline) void);
    assertDecl(T, "queueOnSubmittedWorkDone", fn (queue: *sysgpu.Queue, signal_value: u64, callback: sysgpu.Queue.WorkDoneCallback, userdata: ?*anyopaque) callconv(.Inline) void);
    assertDecl(T, "queueSetLabel", fn (queue: *sysgpu.Queue, label: [*:0]const u8) callconv(.Inline) void);
    assertDecl(T, "queueSubmit", fn (queue: *sysgpu.Queue, command_count: usize, commands: [*]const *const sysgpu.CommandBuffer) callconv(.Inline) void);
    assertDecl(T, "queueWriteBuffer", fn (queue: *sysgpu.Queue, buffer: *sysgpu.Buffer, buffer_offset: u64, data: *const anyopaque, size: usize) callconv(.Inline) void);
    assertDecl(T, "queueWriteTexture", fn (queue: *sysgpu.Queue, destination: *const sysgpu.ImageCopyTexture, data: *const anyopaque, data_size: usize, data_layout: *const sysgpu.Texture.DataLayout, write_size: *const sysgpu.Extent3D) callconv(.Inline) void);
    assertDecl(T, "queueReference", fn (queue: *sysgpu.Queue) callconv(.Inline) void);
    assertDecl(T, "queueRelease", fn (queue: *sysgpu.Queue) callconv(.Inline) void);

    // sysgpu.RenderBundle
    assertDecl(T, "renderBundleSetLabel", fn (render_bundle: *sysgpu.RenderBundle, label: [*:0]const u8) callconv(.Inline) void);
    assertDecl(T, "renderBundleReference", fn (render_bundle: *sysgpu.RenderBundle) callconv(.Inline) void);
    assertDecl(T, "renderBundleRelease", fn (render_bundle: *sysgpu.RenderBundle) callconv(.Inline) void);

    // sysgpu.RenderBundleEncoder
    assertDecl(T, "renderBundleEncoderDraw", fn (render_bundle_encoder: *sysgpu.RenderBundleEncoder, vertex_count: u32, instance_count: u32, first_vertex: u32, first_instance: u32) callconv(.Inline) void);
    assertDecl(T, "renderBundleEncoderDrawIndexed", fn (render_bundle_encoder: *sysgpu.RenderBundleEncoder, index_count: u32, instance_count: u32, first_index: u32, base_vertex: i32, first_instance: u32) callconv(.Inline) void);
    assertDecl(T, "renderBundleEncoderDrawIndexedIndirect", fn (render_bundle_encoder: *sysgpu.RenderBundleEncoder, indirect_buffer: *sysgpu.Buffer, indirect_offset: u64) callconv(.Inline) void);
    assertDecl(T, "renderBundleEncoderDrawIndirect", fn (render_bundle_encoder: *sysgpu.RenderBundleEncoder, indirect_buffer: *sysgpu.Buffer, indirect_offset: u64) callconv(.Inline) void);
    assertDecl(T, "renderBundleEncoderFinish", fn (render_bundle_encoder: *sysgpu.RenderBundleEncoder, descriptor: ?*const sysgpu.RenderBundle.Descriptor) callconv(.Inline) *sysgpu.RenderBundle);
    assertDecl(T, "renderBundleEncoderInsertDebugMarker", fn (render_bundle_encoder: *sysgpu.RenderBundleEncoder, marker_label: [*:0]const u8) callconv(.Inline) void);
    assertDecl(T, "renderBundleEncoderPopDebugGroup", fn (render_bundle_encoder: *sysgpu.RenderBundleEncoder) callconv(.Inline) void);
    assertDecl(T, "renderBundleEncoderPushDebugGroup", fn (render_bundle_encoder: *sysgpu.RenderBundleEncoder, group_label: [*:0]const u8) callconv(.Inline) void);
    assertDecl(T, "renderBundleEncoderSetIndexBuffer", fn (render_bundle_encoder: *sysgpu.RenderBundleEncoder, buffer: *sysgpu.Buffer, format: sysgpu.IndexFormat, offset: u64, size: u64) callconv(.Inline) void);
    assertDecl(T, "renderBundleEncoderSetLabel", fn (render_bundle_encoder: *sysgpu.RenderBundleEncoder, label: [*:0]const u8) callconv(.Inline) void);
    assertDecl(T, "renderBundleEncoderSetVertexBuffer", fn (render_bundle_encoder: *sysgpu.RenderBundleEncoder, slot: u32, buffer: *sysgpu.Buffer, offset: u64, size: u64) callconv(.Inline) void);
    assertDecl(T, "renderBundleEncoderReference", fn (render_bundle_encoder: *sysgpu.RenderBundleEncoder) callconv(.Inline) void);
    assertDecl(T, "renderBundleEncoderRelease", fn (render_bundle_encoder: *sysgpu.RenderBundleEncoder) callconv(.Inline) void);

    // sysgpu.RenderPassEncoder
    assertDecl(T, "renderPassEncoderBeginOcclusionQuery", fn (render_pass_encoder: *sysgpu.RenderPassEncoder, query_index: u32) callconv(.Inline) void);
    assertDecl(T, "renderPassEncoderDraw", fn (render_pass_encoder: *sysgpu.RenderPassEncoder, vertex_count: u32, instance_count: u32, first_vertex: u32, first_instance: u32) callconv(.Inline) void);
    assertDecl(T, "renderPassEncoderDrawIndexed", fn (render_pass_encoder: *sysgpu.RenderPassEncoder, index_count: u32, instance_count: u32, first_index: u32, base_vertex: i32, first_instance: u32) callconv(.Inline) void);
    assertDecl(T, "renderPassEncoderDrawIndexedIndirect", fn (render_pass_encoder: *sysgpu.RenderPassEncoder, indirect_buffer: *sysgpu.Buffer, indirect_offset: u64) callconv(.Inline) void);
    assertDecl(T, "renderPassEncoderDrawIndirect", fn (render_pass_encoder: *sysgpu.RenderPassEncoder, indirect_buffer: *sysgpu.Buffer, indirect_offset: u64) callconv(.Inline) void);
    assertDecl(T, "renderPassEncoderEnd", fn (render_pass_encoder: *sysgpu.RenderPassEncoder) callconv(.Inline) void);
    assertDecl(T, "renderPassEncoderEndOcclusionQuery", fn (render_pass_encoder: *sysgpu.RenderPassEncoder) callconv(.Inline) void);
    assertDecl(T, "renderPassEncoderExecuteBundles", fn (render_pass_encoder: *sysgpu.RenderPassEncoder, bundles_count: usize, bundles: [*]const *const sysgpu.RenderBundle) callconv(.Inline) void);
    assertDecl(T, "renderPassEncoderInsertDebugMarker", fn (render_pass_encoder: *sysgpu.RenderPassEncoder, marker_label: [*:0]const u8) callconv(.Inline) void);
    assertDecl(T, "renderPassEncoderPopDebugGroup", fn (render_pass_encoder: *sysgpu.RenderPassEncoder) callconv(.Inline) void);
    assertDecl(T, "renderPassEncoderPushDebugGroup", fn (render_pass_encoder: *sysgpu.RenderPassEncoder, group_label: [*:0]const u8) callconv(.Inline) void);
    assertDecl(T, "renderPassEncoderSetBlendConstant", fn (render_pass_encoder: *sysgpu.RenderPassEncoder, color: *const sysgpu.Color) callconv(.Inline) void);
    assertDecl(T, "renderPassEncoderSetIndexBuffer", fn (render_pass_encoder: *sysgpu.RenderPassEncoder, buffer: *sysgpu.Buffer, format: sysgpu.IndexFormat, offset: u64, size: u64) callconv(.Inline) void);
    assertDecl(T, "renderPassEncoderSetLabel", fn (render_pass_encoder: *sysgpu.RenderPassEncoder, label: [*:0]const u8) callconv(.Inline) void);
    assertDecl(T, "renderPassEncoderSetScissorRect", fn (render_pass_encoder: *sysgpu.RenderPassEncoder, x: u32, y: u32, width: u32, height: u32) callconv(.Inline) void);
    assertDecl(T, "renderPassEncoderSetStencilReference", fn (render_pass_encoder: *sysgpu.RenderPassEncoder, reference: u32) callconv(.Inline) void);
    assertDecl(T, "renderPassEncoderSetVertexBuffer", fn (render_pass_encoder: *sysgpu.RenderPassEncoder, slot: u32, buffer: *sysgpu.Buffer, offset: u64, size: u64) callconv(.Inline) void);
    assertDecl(T, "renderPassEncoderSetViewport", fn (render_pass_encoder: *sysgpu.RenderPassEncoder, x: f32, y: f32, width: f32, height: f32, min_depth: f32, max_depth: f32) callconv(.Inline) void);
    assertDecl(T, "renderPassEncoderWriteTimestamp", fn (render_pass_encoder: *sysgpu.RenderPassEncoder, query_set: *sysgpu.QuerySet, query_index: u32) callconv(.Inline) void);
    assertDecl(T, "renderPassEncoderReference", fn (render_pass_encoder: *sysgpu.RenderPassEncoder) callconv(.Inline) void);
    assertDecl(T, "renderPassEncoderRelease", fn (render_pass_encoder: *sysgpu.RenderPassEncoder) callconv(.Inline) void);

    // sysgpu.Sampler
    assertDecl(T, "samplerSetLabel", fn (sampler: *sysgpu.Sampler, label: [*:0]const u8) callconv(.Inline) void);
    assertDecl(T, "samplerReference", fn (sampler: *sysgpu.Sampler) callconv(.Inline) void);
    assertDecl(T, "samplerRelease", fn (sampler: *sysgpu.Sampler) callconv(.Inline) void);

    // sysgpu.ShaderModule
    assertDecl(T, "shaderModuleGetCompilationInfo", fn (shader_module: *sysgpu.ShaderModule, callback: sysgpu.CompilationInfoCallback, userdata: ?*anyopaque) callconv(.Inline) void);
    assertDecl(T, "shaderModuleSetLabel", fn (shader_module: *sysgpu.ShaderModule, label: [*:0]const u8) callconv(.Inline) void);
    assertDecl(T, "shaderModuleReference", fn (shader_module: *sysgpu.ShaderModule) callconv(.Inline) void);
    assertDecl(T, "shaderModuleRelease", fn (shader_module: *sysgpu.ShaderModule) callconv(.Inline) void);

    // sysgpu.SharedFence
    assertDecl(T, "sharedFenceExportInfo", fn (shared_fence: *sysgpu.SharedFence, info: *sysgpu.SharedFence.ExportInfo) callconv(.Inline) void);
    assertDecl(T, "sharedFenceReference", fn (shared_fence: *sysgpu.SharedFence) callconv(.Inline) void);
    assertDecl(T, "sharedFenceRelease", fn (shared_fence: *sysgpu.SharedFence) callconv(.Inline) void);

    // sysgpu.SharedTextureMemory
    assertDecl(T, "sharedTextureMemoryBeginAccess", fn (shared_texture_memory: *sysgpu.SharedTextureMemory, texture: *sysgpu.Texture, descriptor: *const sysgpu.SharedTextureMemory.BeginAccessDescriptor) callconv(.Inline) void);
    assertDecl(T, "sharedTextureMemoryCreateTexture", fn (shared_texture_memory: *sysgpu.SharedTextureMemory, descriptor: *const sysgpu.Texture.Descriptor) callconv(.Inline) *sysgpu.Texture);
    assertDecl(T, "sharedTextureMemoryEndAccess", fn (shared_texture_memory: *sysgpu.SharedTextureMemory, texture: *sysgpu.Texture, descriptor: *sysgpu.SharedTextureMemory.EndAccessState) callconv(.Inline) void);
    assertDecl(T, "sharedTextureMemoryEndAccessStateFreeMembers", fn (value: sysgpu.SharedTextureMemory.EndAccessState) callconv(.Inline) void);
    assertDecl(T, "sharedTextureMemoryGetProperties", fn (shared_texture_memory: *sysgpu.SharedTextureMemory, properties: *sysgpu.SharedTextureMemory.Properties) callconv(.Inline) void);
    assertDecl(T, "sharedTextureMemorySetLabel", fn (shared_texture_memory: *sysgpu.SharedTextureMemory, label: [*:0]const u8) callconv(.Inline) void);
    assertDecl(T, "sharedTextureMemoryReference", fn (shared_texture_memory: *sysgpu.SharedTextureMemory) callconv(.Inline) void);
    assertDecl(T, "sharedTextureMemoryRelease", fn (shared_texture_memory: *sysgpu.SharedTextureMemory) callconv(.Inline) void);

    // sysgpu.Surface
    assertDecl(T, "surfaceReference", fn (surface: *sysgpu.Surface) callconv(.Inline) void);
    assertDecl(T, "surfaceRelease", fn (surface: *sysgpu.Surface) callconv(.Inline) void);

    // sysgpu.SwapChain
    assertDecl(T, "swapChainGetCurrentTexture", fn (swap_chain: *sysgpu.SwapChain) callconv(.Inline) ?*sysgpu.Texture);
    assertDecl(T, "swapChainGetCurrentTextureView", fn (swap_chain: *sysgpu.SwapChain) callconv(.Inline) ?*sysgpu.TextureView);
    assertDecl(T, "swapChainPresent", fn (swap_chain: *sysgpu.SwapChain) callconv(.Inline) void);
    assertDecl(T, "swapChainReference", fn (swap_chain: *sysgpu.SwapChain) callconv(.Inline) void);
    assertDecl(T, "swapChainRelease", fn (swap_chain: *sysgpu.SwapChain) callconv(.Inline) void);

    // sysgpu.Texture
    assertDecl(T, "textureCreateView", fn (texture: *sysgpu.Texture, descriptor: ?*const sysgpu.TextureView.Descriptor) callconv(.Inline) *sysgpu.TextureView);
    assertDecl(T, "textureDestroy", fn (texture: *sysgpu.Texture) callconv(.Inline) void);
    assertDecl(T, "textureGetDepthOrArrayLayers", fn (texture: *sysgpu.Texture) callconv(.Inline) u32);
    assertDecl(T, "textureGetDimension", fn (texture: *sysgpu.Texture) callconv(.Inline) sysgpu.Texture.Dimension);
    assertDecl(T, "textureGetFormat", fn (texture: *sysgpu.Texture) callconv(.Inline) sysgpu.Texture.Format);
    assertDecl(T, "textureGetHeight", fn (texture: *sysgpu.Texture) callconv(.Inline) u32);
    assertDecl(T, "textureGetMipLevelCount", fn (texture: *sysgpu.Texture) callconv(.Inline) u32);
    assertDecl(T, "textureGetSampleCount", fn (texture: *sysgpu.Texture) callconv(.Inline) u32);
    assertDecl(T, "textureGetUsage", fn (texture: *sysgpu.Texture) callconv(.Inline) sysgpu.Texture.UsageFlags);
    assertDecl(T, "textureGetWidth", fn (texture: *sysgpu.Texture) callconv(.Inline) u32);
    assertDecl(T, "textureSetLabel", fn (texture: *sysgpu.Texture, label: [*:0]const u8) callconv(.Inline) void);
    assertDecl(T, "textureReference", fn (texture: *sysgpu.Texture) callconv(.Inline) void);
    assertDecl(T, "textureRelease", fn (texture: *sysgpu.Texture) callconv(.Inline) void);
    assertDecl(T, "textureViewSetLabel", fn (texture_view: *sysgpu.TextureView, label: [*:0]const u8) callconv(.Inline) void);
    assertDecl(T, "textureViewReference", fn (texture_view: *sysgpu.TextureView) callconv(.Inline) void);
    assertDecl(T, "textureViewRelease", fn (texture_view: *sysgpu.TextureView) callconv(.Inline) void);

    return T;
}
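
// Illustrative sketch: because `Interface` returns its argument after the
// assertions run, a backend can be verified at comptime the same way the
// `Impl` resolution above does it (`MyBackend` is a hypothetical type):
//
//     comptime {
//         _ = sysgpu.Interface(MyBackend);
//     }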
@compileError("sysgpu.Interface field '" ++ name ++ "'\n\texpected type: " ++ @typeName(Decl) ++ "\n\t found type: " ++ @typeName(FoundDecl)); } /// Exports C ABI function declarations for the given sysgpu.Interface implementation. pub fn Export(comptime T: type) type { _ = Interface(T); // verify implementation is a valid interface return struct { // SYSGPU_EXPORT WGPUInstance sysgpuCreateInstance(WGPUInstanceDescriptor const * descriptor); export fn sysgpuCreateInstance(descriptor: ?*const sysgpu.Instance.Descriptor) ?*sysgpu.Instance { return T.createInstance(descriptor); } // SYSGPU_EXPORT WGPUProc sysgpuGetProcAddress(WGPUDevice device, char const * procName); export fn sysgpuGetProcAddress(device: *sysgpu.Device, proc_name: [*:0]const u8) ?sysgpu.Proc { return T.getProcAddress(device, proc_name); } // SYSGPU_EXPORT WGPUDevice sysgpuAdapterCreateDevice(WGPUAdapter adapter, WGPUDeviceDescriptor const * descriptor /* nullable */); export fn sysgpuAdapterCreateDevice(adapter: *sysgpu.Adapter, descriptor: ?*const sysgpu.Device.Descriptor) ?*sysgpu.Device { return T.adapterCreateDevice(adapter, descriptor); } // SYSGPU_EXPORT size_t sysgpuAdapterEnumerateFeatures(WGPUAdapter adapter, WGPUFeatureName * features); export fn sysgpuAdapterEnumerateFeatures(adapter: *sysgpu.Adapter, features: ?[*]sysgpu.FeatureName) usize { return T.adapterEnumerateFeatures(adapter, features); } // SYSGPU_EXPORT WGPUInstance sysgpuAdapterGetInstance(WGPUAdapter adapter); export fn sysgpuAdapterGetInstance(adapter: *sysgpu.Adapter) *sysgpu.Instance { return T.adapterGetInstance(adapter); } // SYSGPU_EXPORT WGPUBool sysgpuAdapterGetLimits(WGPUAdapter adapter, WGPUSupportedLimits * limits); export fn sysgpuAdapterGetLimits(adapter: *sysgpu.Adapter, limits: *sysgpu.SupportedLimits) u32 { return T.adapterGetLimits(adapter, limits); } // SYSGPU_EXPORT void sysgpuAdapterGetProperties(WGPUAdapter adapter, WGPUAdapterProperties * properties); export fn sysgpuAdapterGetProperties(adapter: *sysgpu.Adapter, properties: *sysgpu.Adapter.Properties) void { return T.adapterGetProperties(adapter, properties); } // SYSGPU_EXPORT WGPUBool sysgpuAdapterHasFeature(WGPUAdapter adapter, WGPUFeatureName feature); export fn sysgpuAdapterHasFeature(adapter: *sysgpu.Adapter, feature: sysgpu.FeatureName) u32 { return T.adapterHasFeature(adapter, feature); } // SYSGPU_EXPORT void sysgpuAdapterPropertiesFreeMembers(WGPUAdapterProperties value); export fn sysgpuAdapterPropertiesFreeMembers(value: sysgpu.Adapter.Properties) void { T.adapterPropertiesFreeMembers(value); } // SYSGPU_EXPORT void sysgpuAdapterRequestDevice(WGPUAdapter adapter, WGPUDeviceDescriptor const * descriptor /* nullable */, WGPURequestDeviceCallback callback, void * userdata); export fn sysgpuAdapterRequestDevice(adapter: *sysgpu.Adapter, descriptor: ?*const sysgpu.Device.Descriptor, callback: sysgpu.RequestDeviceCallback, userdata: ?*anyopaque) void { T.adapterRequestDevice(adapter, descriptor, callback, userdata); } // SYSGPU_EXPORT void sysgpuAdapterReference(WGPUAdapter adapter); export fn sysgpuAdapterReference(adapter: *sysgpu.Adapter) void { T.adapterReference(adapter); } // SYSGPU_EXPORT void sysgpuAdapterRelease(WGPUAdapter adapter); export fn sysgpuAdapterRelease(adapter: *sysgpu.Adapter) void { T.adapterRelease(adapter); } // SYSGPU_EXPORT void sysgpuBindGroupSetLabel(WGPUBindGroup bindGroup, char const * label); export fn sysgpuBindGroupSetLabel(bind_group: *sysgpu.BindGroup, label: [*:0]const u8) void { T.bindGroupSetLabel(bind_group, label); } // 
/// Exports C ABI function declarations for the given sysgpu.Interface implementation.
pub fn Export(comptime T: type) type {
    _ = Interface(T); // verify implementation is a valid interface
    return struct {
        // SYSGPU_EXPORT WGPUInstance sysgpuCreateInstance(WGPUInstanceDescriptor const * descriptor);
        export fn sysgpuCreateInstance(descriptor: ?*const sysgpu.Instance.Descriptor) ?*sysgpu.Instance {
            return T.createInstance(descriptor);
        }

        // SYSGPU_EXPORT WGPUProc sysgpuGetProcAddress(WGPUDevice device, char const * procName);
        export fn sysgpuGetProcAddress(device: *sysgpu.Device, proc_name: [*:0]const u8) ?sysgpu.Proc {
            return T.getProcAddress(device, proc_name);
        }

        // SYSGPU_EXPORT WGPUDevice sysgpuAdapterCreateDevice(WGPUAdapter adapter, WGPUDeviceDescriptor const * descriptor /* nullable */);
        export fn sysgpuAdapterCreateDevice(adapter: *sysgpu.Adapter, descriptor: ?*const sysgpu.Device.Descriptor) ?*sysgpu.Device {
            return T.adapterCreateDevice(adapter, descriptor);
        }

        // SYSGPU_EXPORT size_t sysgpuAdapterEnumerateFeatures(WGPUAdapter adapter, WGPUFeatureName * features);
        export fn sysgpuAdapterEnumerateFeatures(adapter: *sysgpu.Adapter, features: ?[*]sysgpu.FeatureName) usize {
            return T.adapterEnumerateFeatures(adapter, features);
        }

        // SYSGPU_EXPORT WGPUInstance sysgpuAdapterGetInstance(WGPUAdapter adapter);
        export fn sysgpuAdapterGetInstance(adapter: *sysgpu.Adapter) *sysgpu.Instance {
            return T.adapterGetInstance(adapter);
        }

        // SYSGPU_EXPORT WGPUBool sysgpuAdapterGetLimits(WGPUAdapter adapter, WGPUSupportedLimits * limits);
        export fn sysgpuAdapterGetLimits(adapter: *sysgpu.Adapter, limits: *sysgpu.SupportedLimits) u32 {
            return T.adapterGetLimits(adapter, limits);
        }

        // SYSGPU_EXPORT void sysgpuAdapterGetProperties(WGPUAdapter adapter, WGPUAdapterProperties * properties);
        export fn sysgpuAdapterGetProperties(adapter: *sysgpu.Adapter, properties: *sysgpu.Adapter.Properties) void {
            return T.adapterGetProperties(adapter, properties);
        }

        // SYSGPU_EXPORT WGPUBool sysgpuAdapterHasFeature(WGPUAdapter adapter, WGPUFeatureName feature);
        export fn sysgpuAdapterHasFeature(adapter: *sysgpu.Adapter, feature: sysgpu.FeatureName) u32 {
            return T.adapterHasFeature(adapter, feature);
        }

        // SYSGPU_EXPORT void sysgpuAdapterPropertiesFreeMembers(WGPUAdapterProperties value);
        export fn sysgpuAdapterPropertiesFreeMembers(value: sysgpu.Adapter.Properties) void {
            T.adapterPropertiesFreeMembers(value);
        }

        // SYSGPU_EXPORT void sysgpuAdapterRequestDevice(WGPUAdapter adapter, WGPUDeviceDescriptor const * descriptor /* nullable */, WGPURequestDeviceCallback callback, void * userdata);
        export fn sysgpuAdapterRequestDevice(adapter: *sysgpu.Adapter, descriptor: ?*const sysgpu.Device.Descriptor, callback: sysgpu.RequestDeviceCallback, userdata: ?*anyopaque) void {
            T.adapterRequestDevice(adapter, descriptor, callback, userdata);
        }

        // SYSGPU_EXPORT void sysgpuAdapterReference(WGPUAdapter adapter);
        export fn sysgpuAdapterReference(adapter: *sysgpu.Adapter) void {
            T.adapterReference(adapter);
        }

        // SYSGPU_EXPORT void sysgpuAdapterRelease(WGPUAdapter adapter);
        export fn sysgpuAdapterRelease(adapter: *sysgpu.Adapter) void {
            T.adapterRelease(adapter);
        }

        // SYSGPU_EXPORT void sysgpuBindGroupSetLabel(WGPUBindGroup bindGroup, char const * label);
        export fn sysgpuBindGroupSetLabel(bind_group: *sysgpu.BindGroup, label: [*:0]const u8) void {
            T.bindGroupSetLabel(bind_group, label);
        }

        // SYSGPU_EXPORT void sysgpuBindGroupReference(WGPUBindGroup bindGroup);
        export fn sysgpuBindGroupReference(bind_group: *sysgpu.BindGroup) void {
            T.bindGroupReference(bind_group);
        }

        // SYSGPU_EXPORT void sysgpuBindGroupRelease(WGPUBindGroup bindGroup);
        export fn sysgpuBindGroupRelease(bind_group: *sysgpu.BindGroup) void {
            T.bindGroupRelease(bind_group);
        }

        // SYSGPU_EXPORT void sysgpuBindGroupLayoutSetLabel(WGPUBindGroupLayout bindGroupLayout, char const * label);
        export fn sysgpuBindGroupLayoutSetLabel(bind_group_layout: *sysgpu.BindGroupLayout, label: [*:0]const u8) void {
            T.bindGroupLayoutSetLabel(bind_group_layout, label);
        }

        // SYSGPU_EXPORT void sysgpuBindGroupLayoutReference(WGPUBindGroupLayout bindGroupLayout);
        export fn sysgpuBindGroupLayoutReference(bind_group_layout: *sysgpu.BindGroupLayout) void {
            T.bindGroupLayoutReference(bind_group_layout);
        }

        // SYSGPU_EXPORT void sysgpuBindGroupLayoutRelease(WGPUBindGroupLayout bindGroupLayout);
        export fn sysgpuBindGroupLayoutRelease(bind_group_layout: *sysgpu.BindGroupLayout) void {
            T.bindGroupLayoutRelease(bind_group_layout);
        }

        // SYSGPU_EXPORT void sysgpuBufferDestroy(WGPUBuffer buffer);
        export fn sysgpuBufferDestroy(buffer: *sysgpu.Buffer) void {
            T.bufferDestroy(buffer);
        }

        // SYSGPU_EXPORT void const * sysgpuBufferGetConstMappedRange(WGPUBuffer buffer, size_t offset, size_t size);
        export fn sysgpuBufferGetConstMappedRange(buffer: *sysgpu.Buffer, offset: usize, size: usize) ?*const anyopaque {
            return T.bufferGetConstMappedRange(buffer, offset, size);
        }

        // SYSGPU_EXPORT void * sysgpuBufferGetMappedRange(WGPUBuffer buffer, size_t offset, size_t size);
        export fn sysgpuBufferGetMappedRange(buffer: *sysgpu.Buffer, offset: usize, size: usize) ?*anyopaque {
            return T.bufferGetMappedRange(buffer, offset, size);
        }

        // SYSGPU_EXPORT uint64_t sysgpuBufferGetSize(WGPUBuffer buffer);
        export fn sysgpuBufferGetSize(buffer: *sysgpu.Buffer) u64 {
            return T.bufferGetSize(buffer);
        }

        // SYSGPU_EXPORT WGPUBufferUsage sysgpuBufferGetUsage(WGPUBuffer buffer);
        export fn sysgpuBufferGetUsage(buffer: *sysgpu.Buffer) sysgpu.Buffer.UsageFlags {
            return T.bufferGetUsage(buffer);
        }

        // SYSGPU_EXPORT void sysgpuBufferMapAsync(WGPUBuffer buffer, WGPUMapModeFlags mode, size_t offset, size_t size, WGPUBufferMapCallback callback, void * userdata);
        export fn sysgpuBufferMapAsync(buffer: *sysgpu.Buffer, mode: u32, offset: usize, size: usize, callback: sysgpu.Buffer.MapCallback, userdata: ?*anyopaque) void {
            T.bufferMapAsync(buffer, @as(sysgpu.MapModeFlags, @bitCast(mode)), offset, size, callback, userdata);
        }

        // SYSGPU_EXPORT void sysgpuBufferSetLabel(WGPUBuffer buffer, char const * label);
        export fn sysgpuBufferSetLabel(buffer: *sysgpu.Buffer, label: [*:0]const u8) void {
            T.bufferSetLabel(buffer, label);
        }

        // SYSGPU_EXPORT void sysgpuBufferUnmap(WGPUBuffer buffer);
        export fn sysgpuBufferUnmap(buffer: *sysgpu.Buffer) void {
            T.bufferUnmap(buffer);
        }

        // SYSGPU_EXPORT void sysgpuBufferReference(WGPUBuffer buffer);
        export fn sysgpuBufferReference(buffer: *sysgpu.Buffer) void {
            T.bufferReference(buffer);
        }

        // SYSGPU_EXPORT void sysgpuBufferRelease(WGPUBuffer buffer);
        export fn sysgpuBufferRelease(buffer: *sysgpu.Buffer) void {
            T.bufferRelease(buffer);
        }

        // SYSGPU_EXPORT void sysgpuCommandBufferSetLabel(WGPUCommandBuffer commandBuffer, char const * label);
        export fn sysgpuCommandBufferSetLabel(command_buffer: *sysgpu.CommandBuffer, label: [*:0]const u8) void {
            T.commandBufferSetLabel(command_buffer, label);
        }

        // SYSGPU_EXPORT void sysgpuCommandBufferReference(WGPUCommandBuffer commandBuffer);
        export fn sysgpuCommandBufferReference(command_buffer: *sysgpu.CommandBuffer) void {
            T.commandBufferReference(command_buffer);
        }

        // SYSGPU_EXPORT void sysgpuCommandBufferRelease(WGPUCommandBuffer commandBuffer);
        export fn sysgpuCommandBufferRelease(command_buffer: *sysgpu.CommandBuffer) void {
            T.commandBufferRelease(command_buffer);
        }

        // SYSGPU_EXPORT WGPUComputePassEncoder sysgpuCommandEncoderBeginComputePass(WGPUCommandEncoder commandEncoder, WGPUComputePassDescriptor const * descriptor /* nullable */);
        export fn sysgpuCommandEncoderBeginComputePass(command_encoder: *sysgpu.CommandEncoder, descriptor: ?*const sysgpu.ComputePassDescriptor) *sysgpu.ComputePassEncoder {
            return T.commandEncoderBeginComputePass(command_encoder, descriptor);
        }

        // SYSGPU_EXPORT WGPURenderPassEncoder sysgpuCommandEncoderBeginRenderPass(WGPUCommandEncoder commandEncoder, WGPURenderPassDescriptor const * descriptor);
        export fn sysgpuCommandEncoderBeginRenderPass(command_encoder: *sysgpu.CommandEncoder, descriptor: *const sysgpu.RenderPassDescriptor) *sysgpu.RenderPassEncoder {
            return T.commandEncoderBeginRenderPass(command_encoder, descriptor);
        }

        // SYSGPU_EXPORT void sysgpuCommandEncoderClearBuffer(WGPUCommandEncoder commandEncoder, WGPUBuffer buffer, uint64_t offset, uint64_t size);
        export fn sysgpuCommandEncoderClearBuffer(command_encoder: *sysgpu.CommandEncoder, buffer: *sysgpu.Buffer, offset: u64, size: u64) void {
            T.commandEncoderClearBuffer(command_encoder, buffer, offset, size);
        }

        // SYSGPU_EXPORT void sysgpuCommandEncoderCopyBufferToBuffer(WGPUCommandEncoder commandEncoder, WGPUBuffer source, uint64_t sourceOffset, WGPUBuffer destination, uint64_t destinationOffset, uint64_t size);
        export fn sysgpuCommandEncoderCopyBufferToBuffer(command_encoder: *sysgpu.CommandEncoder, source: *sysgpu.Buffer, source_offset: u64, destination: *sysgpu.Buffer, destination_offset: u64, size: u64) void {
            T.commandEncoderCopyBufferToBuffer(command_encoder, source, source_offset, destination, destination_offset, size);
        }

        // SYSGPU_EXPORT void sysgpuCommandEncoderCopyBufferToTexture(WGPUCommandEncoder commandEncoder, WGPUImageCopyBuffer const * source, WGPUImageCopyTexture const * destination, WGPUExtent3D const * copySize);
        export fn sysgpuCommandEncoderCopyBufferToTexture(command_encoder: *sysgpu.CommandEncoder, source: *const sysgpu.ImageCopyBuffer, destination: *const sysgpu.ImageCopyTexture, copy_size: *const sysgpu.Extent3D) void {
            T.commandEncoderCopyBufferToTexture(command_encoder, source, destination, copy_size);
        }

        // SYSGPU_EXPORT void sysgpuCommandEncoderCopyTextureToBuffer(WGPUCommandEncoder commandEncoder, WGPUImageCopyTexture const * source, WGPUImageCopyBuffer const * destination, WGPUExtent3D const * copySize);
        export fn sysgpuCommandEncoderCopyTextureToBuffer(command_encoder: *sysgpu.CommandEncoder, source: *const sysgpu.ImageCopyTexture, destination: *const sysgpu.ImageCopyBuffer, copy_size: *const sysgpu.Extent3D) void {
            T.commandEncoderCopyTextureToBuffer(command_encoder, source, destination, copy_size);
        }

        // SYSGPU_EXPORT void sysgpuCommandEncoderCopyTextureToTexture(WGPUCommandEncoder commandEncoder, WGPUImageCopyTexture const * source, WGPUImageCopyTexture const * destination, WGPUExtent3D const * copySize);
        export fn sysgpuCommandEncoderCopyTextureToTexture(command_encoder: *sysgpu.CommandEncoder, source: *const sysgpu.ImageCopyTexture, destination: *const sysgpu.ImageCopyTexture, copy_size: *const sysgpu.Extent3D) void {
            T.commandEncoderCopyTextureToTexture(command_encoder, source, destination, copy_size);
        }

        // SYSGPU_EXPORT WGPUCommandBuffer sysgpuCommandEncoderFinish(WGPUCommandEncoder commandEncoder, WGPUCommandBufferDescriptor const * descriptor /* nullable */);
        export fn sysgpuCommandEncoderFinish(command_encoder: *sysgpu.CommandEncoder, descriptor: ?*const sysgpu.CommandBuffer.Descriptor) *sysgpu.CommandBuffer {
            return T.commandEncoderFinish(command_encoder, descriptor);
        }

        // SYSGPU_EXPORT void sysgpuCommandEncoderInjectValidationError(WGPUCommandEncoder commandEncoder, char const * message);
        export fn sysgpuCommandEncoderInjectValidationError(command_encoder: *sysgpu.CommandEncoder, message: [*:0]const u8) void {
            T.commandEncoderInjectValidationError(command_encoder, message);
        }

        // SYSGPU_EXPORT void sysgpuCommandEncoderInsertDebugMarker(WGPUCommandEncoder commandEncoder, char const * markerLabel);
        export fn sysgpuCommandEncoderInsertDebugMarker(command_encoder: *sysgpu.CommandEncoder, marker_label: [*:0]const u8) void {
            T.commandEncoderInsertDebugMarker(command_encoder, marker_label);
        }

        // SYSGPU_EXPORT void sysgpuCommandEncoderPopDebugGroup(WGPUCommandEncoder commandEncoder);
        export fn sysgpuCommandEncoderPopDebugGroup(command_encoder: *sysgpu.CommandEncoder) void {
            T.commandEncoderPopDebugGroup(command_encoder);
        }

        // SYSGPU_EXPORT void sysgpuCommandEncoderPushDebugGroup(WGPUCommandEncoder commandEncoder, char const * groupLabel);
        export fn sysgpuCommandEncoderPushDebugGroup(command_encoder: *sysgpu.CommandEncoder, group_label: [*:0]const u8) void {
            T.commandEncoderPushDebugGroup(command_encoder, group_label);
        }

        // SYSGPU_EXPORT void sysgpuCommandEncoderResolveQuerySet(WGPUCommandEncoder commandEncoder, WGPUQuerySet querySet, uint32_t firstQuery, uint32_t queryCount, WGPUBuffer destination, uint64_t destinationOffset);
        export fn sysgpuCommandEncoderResolveQuerySet(command_encoder: *sysgpu.CommandEncoder, query_set: *sysgpu.QuerySet, first_query: u32, query_count: u32, destination: *sysgpu.Buffer, destination_offset: u64) void {
            T.commandEncoderResolveQuerySet(command_encoder, query_set, first_query, query_count, destination, destination_offset);
        }

        // SYSGPU_EXPORT void sysgpuCommandEncoderSetLabel(WGPUCommandEncoder commandEncoder, char const * label);
        export fn sysgpuCommandEncoderSetLabel(command_encoder: *sysgpu.CommandEncoder, label: [*:0]const u8) void {
            T.commandEncoderSetLabel(command_encoder, label);
        }

        // SYSGPU_EXPORT void sysgpuCommandEncoderWriteBuffer(WGPUCommandEncoder commandEncoder, WGPUBuffer buffer, uint64_t bufferOffset, uint8_t const * data, uint64_t size);
        export fn sysgpuCommandEncoderWriteBuffer(command_encoder: *sysgpu.CommandEncoder, buffer: *sysgpu.Buffer, buffer_offset: u64, data: [*]const u8, size: u64) void {
            T.commandEncoderWriteBuffer(command_encoder, buffer, buffer_offset, data, size);
        }

        // SYSGPU_EXPORT void sysgpuCommandEncoderWriteTimestamp(WGPUCommandEncoder commandEncoder, WGPUQuerySet querySet, uint32_t queryIndex);
        export fn sysgpuCommandEncoderWriteTimestamp(command_encoder: *sysgpu.CommandEncoder, query_set: *sysgpu.QuerySet, query_index: u32) void {
            T.commandEncoderWriteTimestamp(command_encoder, query_set, query_index);
        }

        // SYSGPU_EXPORT void sysgpuCommandEncoderReference(WGPUCommandEncoder commandEncoder);
        export fn sysgpuCommandEncoderReference(command_encoder: *sysgpu.CommandEncoder) void {
            T.commandEncoderReference(command_encoder);
        }

        // SYSGPU_EXPORT void sysgpuCommandEncoderRelease(WGPUCommandEncoder commandEncoder);
        export fn sysgpuCommandEncoderRelease(command_encoder: *sysgpu.CommandEncoder) void {
            T.commandEncoderRelease(command_encoder);
        }

        // SYSGPU_EXPORT void sysgpuComputePassEncoderDispatchWorkgroups(WGPUComputePassEncoder computePassEncoder, uint32_t workgroupCountX, uint32_t workgroupCountY, uint32_t workgroupCountZ);
        export fn sysgpuComputePassEncoderDispatchWorkgroups(compute_pass_encoder: *sysgpu.ComputePassEncoder, workgroup_count_x: u32, workgroup_count_y: u32, workgroup_count_z: u32) void {
            T.computePassEncoderDispatchWorkgroups(compute_pass_encoder, workgroup_count_x, workgroup_count_y, workgroup_count_z);
        }

        // SYSGPU_EXPORT void sysgpuComputePassEncoderDispatchWorkgroupsIndirect(WGPUComputePassEncoder computePassEncoder, WGPUBuffer indirectBuffer, uint64_t indirectOffset);
        export fn sysgpuComputePassEncoderDispatchWorkgroupsIndirect(compute_pass_encoder: *sysgpu.ComputePassEncoder, indirect_buffer: *sysgpu.Buffer, indirect_offset: u64) void {
            T.computePassEncoderDispatchWorkgroupsIndirect(compute_pass_encoder, indirect_buffer, indirect_offset);
        }

        // SYSGPU_EXPORT void sysgpuComputePassEncoderEnd(WGPUComputePassEncoder computePassEncoder);
        export fn sysgpuComputePassEncoderEnd(compute_pass_encoder: *sysgpu.ComputePassEncoder) void {
            T.computePassEncoderEnd(compute_pass_encoder);
        }

        // SYSGPU_EXPORT void sysgpuComputePassEncoderInsertDebugMarker(WGPUComputePassEncoder computePassEncoder, char const * markerLabel);
        export fn sysgpuComputePassEncoderInsertDebugMarker(compute_pass_encoder: *sysgpu.ComputePassEncoder, marker_label: [*:0]const u8) void {
            T.computePassEncoderInsertDebugMarker(compute_pass_encoder, marker_label);
        }

        // SYSGPU_EXPORT void sysgpuComputePassEncoderPopDebugGroup(WGPUComputePassEncoder computePassEncoder);
        export fn sysgpuComputePassEncoderPopDebugGroup(compute_pass_encoder: *sysgpu.ComputePassEncoder) void {
            T.computePassEncoderPopDebugGroup(compute_pass_encoder);
        }

        // SYSGPU_EXPORT void sysgpuComputePassEncoderPushDebugGroup(WGPUComputePassEncoder computePassEncoder, char const * groupLabel);
        export fn sysgpuComputePassEncoderPushDebugGroup(compute_pass_encoder: *sysgpu.ComputePassEncoder, group_label: [*:0]const u8) void {
            T.computePassEncoderPushDebugGroup(compute_pass_encoder, group_label);
        }

        // SYSGPU_EXPORT void sysgpuComputePassEncoderSetBindGroup(WGPUComputePassEncoder computePassEncoder, uint32_t groupIndex, WGPUBindGroup group, size_t dynamicOffsetCount, uint32_t const * dynamicOffsets);
        export fn sysgpuComputePassEncoderSetBindGroup(compute_pass_encoder: *sysgpu.ComputePassEncoder, group_index: u32, group: *sysgpu.BindGroup, dynamic_offset_count: usize, dynamic_offsets: ?[*]const u32) void {
            T.computePassEncoderSetBindGroup(compute_pass_encoder, group_index, group, dynamic_offset_count, dynamic_offsets);
        }

        // SYSGPU_EXPORT void sysgpuComputePassEncoderSetLabel(WGPUComputePassEncoder computePassEncoder, char const * label);
        export fn sysgpuComputePassEncoderSetLabel(compute_pass_encoder: *sysgpu.ComputePassEncoder, label: [*:0]const u8) void {
            T.computePassEncoderSetLabel(compute_pass_encoder, label);
        }

        // SYSGPU_EXPORT void sysgpuComputePassEncoderSetPipeline(WGPUComputePassEncoder computePassEncoder, WGPUComputePipeline pipeline);
        export fn sysgpuComputePassEncoderSetPipeline(compute_pass_encoder: *sysgpu.ComputePassEncoder, pipeline: *sysgpu.ComputePipeline) void {
            T.computePassEncoderSetPipeline(compute_pass_encoder, pipeline);
        }

        // SYSGPU_EXPORT void sysgpuComputePassEncoderWriteTimestamp(WGPUComputePassEncoder computePassEncoder, WGPUQuerySet querySet, uint32_t queryIndex);
        export fn sysgpuComputePassEncoderWriteTimestamp(compute_pass_encoder: *sysgpu.ComputePassEncoder, query_set: *sysgpu.QuerySet, query_index: u32) void {
            T.computePassEncoderWriteTimestamp(compute_pass_encoder, query_set, query_index);
        }

        // SYSGPU_EXPORT void sysgpuComputePassEncoderReference(WGPUComputePassEncoder computePassEncoder);
        export fn sysgpuComputePassEncoderReference(compute_pass_encoder: *sysgpu.ComputePassEncoder) void {
            T.computePassEncoderReference(compute_pass_encoder);
        }

        // SYSGPU_EXPORT void sysgpuComputePassEncoderRelease(WGPUComputePassEncoder computePassEncoder);
        export fn sysgpuComputePassEncoderRelease(compute_pass_encoder: *sysgpu.ComputePassEncoder) void {
            T.computePassEncoderRelease(compute_pass_encoder);
        }

        // SYSGPU_EXPORT WGPUBindGroupLayout sysgpuComputePipelineGetBindGroupLayout(WGPUComputePipeline computePipeline, uint32_t groupIndex);
        export fn sysgpuComputePipelineGetBindGroupLayout(compute_pipeline: *sysgpu.ComputePipeline, group_index: u32) *sysgpu.BindGroupLayout {
            return T.computePipelineGetBindGroupLayout(compute_pipeline, group_index);
        }

        // SYSGPU_EXPORT void sysgpuComputePipelineSetLabel(WGPUComputePipeline computePipeline, char const * label);
        export fn sysgpuComputePipelineSetLabel(compute_pipeline: *sysgpu.ComputePipeline, label: [*:0]const u8) void {
            T.computePipelineSetLabel(compute_pipeline, label);
        }

        // SYSGPU_EXPORT void sysgpuComputePipelineReference(WGPUComputePipeline computePipeline);
        export fn sysgpuComputePipelineReference(compute_pipeline: *sysgpu.ComputePipeline) void {
            T.computePipelineReference(compute_pipeline);
        }

        // SYSGPU_EXPORT void sysgpuComputePipelineRelease(WGPUComputePipeline computePipeline);
        export fn sysgpuComputePipelineRelease(compute_pipeline: *sysgpu.ComputePipeline) void {
            T.computePipelineRelease(compute_pipeline);
        }

        // SYSGPU_EXPORT WGPUBindGroup sysgpuDeviceCreateBindGroup(WGPUDevice device, WGPUBindGroupDescriptor const * descriptor);
        export fn sysgpuDeviceCreateBindGroup(device: *sysgpu.Device, descriptor: *const sysgpu.BindGroup.Descriptor) *sysgpu.BindGroup {
            return T.deviceCreateBindGroup(device, descriptor);
        }

        // SYSGPU_EXPORT WGPUBindGroupLayout sysgpuDeviceCreateBindGroupLayout(WGPUDevice device, WGPUBindGroupLayout.Descriptor const * descriptor);
        export fn sysgpuDeviceCreateBindGroupLayout(device: *sysgpu.Device, descriptor: *const sysgpu.BindGroupLayout.Descriptor) *sysgpu.BindGroupLayout {
            return T.deviceCreateBindGroupLayout(device, descriptor);
        }

        // SYSGPU_EXPORT WGPUBuffer sysgpuDeviceCreateBuffer(WGPUDevice device, WGPUBuffer.Descriptor const * descriptor);
        export fn sysgpuDeviceCreateBuffer(device: *sysgpu.Device, descriptor: *const sysgpu.Buffer.Descriptor) *sysgpu.Buffer {
            return T.deviceCreateBuffer(device, descriptor);
        }

        // SYSGPU_EXPORT WGPUCommandEncoder sysgpuDeviceCreateCommandEncoder(WGPUDevice device, WGPUCommandEncoderDescriptor const * descriptor /* nullable */);
        export fn sysgpuDeviceCreateCommandEncoder(device: *sysgpu.Device, descriptor: ?*const sysgpu.CommandEncoder.Descriptor) *sysgpu.CommandEncoder {
            return T.deviceCreateCommandEncoder(device, descriptor);
        }

        // SYSGPU_EXPORT WGPUComputePipeline sysgpuDeviceCreateComputePipeline(WGPUDevice device, WGPUComputePipelineDescriptor const * descriptor);
        export fn sysgpuDeviceCreateComputePipeline(device: *sysgpu.Device, descriptor: *const sysgpu.ComputePipeline.Descriptor) *sysgpu.ComputePipeline {
            return T.deviceCreateComputePipeline(device, descriptor);
        }
WGPUComputePipelineDescriptor const * descriptor, WGPUCreateComputePipelineAsyncCallback callback, void * userdata); export fn sysgpuDeviceCreateComputePipelineAsync(device: *sysgpu.Device, descriptor: *const sysgpu.ComputePipeline.Descriptor, callback: sysgpu.CreateComputePipelineAsyncCallback, userdata: ?*anyopaque) void { T.deviceCreateComputePipelineAsync(device, descriptor, callback, userdata); } // SYSGPU_EXPORT WGPUBuffer sysgpuDeviceCreateErrorBuffer(WGPUDevice device, WGPUBufferDescriptor const * descriptor); export fn sysgpuDeviceCreateErrorBuffer(device: *sysgpu.Device, descriptor: *const sysgpu.Buffer.Descriptor) *sysgpu.Buffer { return T.deviceCreateErrorBuffer(device, descriptor); } // SYSGPU_EXPORT WGPUExternalTexture sysgpuDeviceCreateErrorExternalTexture(WGPUDevice device); export fn sysgpuDeviceCreateErrorExternalTexture(device: *sysgpu.Device) *sysgpu.ExternalTexture { return T.deviceCreateErrorExternalTexture(device); } // SYSGPU_EXPORT WGPUTexture sysgpuDeviceCreateErrorTexture(WGPUDevice device, WGPUTextureDescriptor const * descriptor); export fn sysgpuDeviceCreateErrorTexture(device: *sysgpu.Device, descriptor: *const sysgpu.Texture.Descriptor) *sysgpu.Texture { return T.deviceCreateErrorTexture(device, descriptor); } // SYSGPU_EXPORT WGPUExternalTexture sysgpuDeviceCreateExternalTexture(WGPUDevice device, WGPUExternalTextureDescriptor const * externalTextureDescriptor); export fn sysgpuDeviceCreateExternalTexture(device: *sysgpu.Device, external_texture_descriptor: *const sysgpu.ExternalTexture.Descriptor) *sysgpu.ExternalTexture { return T.deviceCreateExternalTexture(device, external_texture_descriptor); } // SYSGPU_EXPORT WGPUPipelineLayout sysgpuDeviceCreatePipelineLayout(WGPUDevice device, WGPUPipelineLayoutDescriptor const * descriptor); export fn sysgpuDeviceCreatePipelineLayout(device: *sysgpu.Device, pipeline_layout_descriptor: *const sysgpu.PipelineLayout.Descriptor) *sysgpu.PipelineLayout { return T.deviceCreatePipelineLayout(device, pipeline_layout_descriptor); } // SYSGPU_EXPORT WGPUQuerySet sysgpuDeviceCreateQuerySet(WGPUDevice device, WGPUQuerySetDescriptor const * descriptor); export fn sysgpuDeviceCreateQuerySet(device: *sysgpu.Device, descriptor: *const sysgpu.QuerySet.Descriptor) *sysgpu.QuerySet { return T.deviceCreateQuerySet(device, descriptor); } // SYSGPU_EXPORT WGPURenderBundleEncoder sysgpuDeviceCreateRenderBundleEncoder(WGPUDevice device, WGPURenderBundleEncoderDescriptor const * descriptor); export fn sysgpuDeviceCreateRenderBundleEncoder(device: *sysgpu.Device, descriptor: *const sysgpu.RenderBundleEncoder.Descriptor) *sysgpu.RenderBundleEncoder { return T.deviceCreateRenderBundleEncoder(device, descriptor); } // SYSGPU_EXPORT WGPURenderPipeline sysgpuDeviceCreateRenderPipeline(WGPUDevice device, WGPURenderPipelineDescriptor const * descriptor); export fn sysgpuDeviceCreateRenderPipeline(device: *sysgpu.Device, descriptor: *const sysgpu.RenderPipeline.Descriptor) *sysgpu.RenderPipeline { return T.deviceCreateRenderPipeline(device, descriptor); } // SYSGPU_EXPORT void sysgpuDeviceCreateRenderPipelineAsync(WGPUDevice device, WGPURenderPipelineDescriptor const * descriptor, WGPUCreateRenderPipelineAsyncCallback callback, void * userdata); export fn sysgpuDeviceCreateRenderPipelineAsync(device: *sysgpu.Device, descriptor: *const sysgpu.RenderPipeline.Descriptor, callback: sysgpu.CreateRenderPipelineAsyncCallback, userdata: ?*anyopaque) void { T.deviceCreateRenderPipelineAsync(device, descriptor, callback, userdata); } // SYSGPU_EXPORT 
WGPUSampler sysgpuDeviceCreateSampler(WGPUDevice device, WGPUSamplerDescriptor const * descriptor /* nullable */); export fn sysgpuDeviceCreateSampler(device: *sysgpu.Device, descriptor: ?*const sysgpu.Sampler.Descriptor) *sysgpu.Sampler { return T.deviceCreateSampler(device, descriptor); } // SYSGPU_EXPORT WGPUShaderModule sysgpuDeviceCreateShaderModule(WGPUDevice device, WGPUShaderModuleDescriptor const * descriptor); export fn sysgpuDeviceCreateShaderModule(device: *sysgpu.Device, descriptor: *const sysgpu.ShaderModule.Descriptor) *sysgpu.ShaderModule { return T.deviceCreateShaderModule(device, descriptor); } // SYSGPU_EXPORT WGPUSwapChain sysgpuDeviceCreateSwapChain(WGPUDevice device, WGPUSurface surface /* nullable */, WGPUSwapChainDescriptor const * descriptor); export fn sysgpuDeviceCreateSwapChain(device: *sysgpu.Device, surface: ?*sysgpu.Surface, descriptor: *const sysgpu.SwapChain.Descriptor) *sysgpu.SwapChain { return T.deviceCreateSwapChain(device, surface, descriptor); } // SYSGPU_EXPORT WGPUTexture sysgpuDeviceCreateTexture(WGPUDevice device, WGPUTextureDescriptor const * descriptor); export fn sysgpuDeviceCreateTexture(device: *sysgpu.Device, descriptor: *const sysgpu.Texture.Descriptor) *sysgpu.Texture { return T.deviceCreateTexture(device, descriptor); } // SYSGPU_EXPORT void sysgpuDeviceDestroy(WGPUDevice device); export fn sysgpuDeviceDestroy(device: *sysgpu.Device) void { T.deviceDestroy(device); } // SYSGPU_EXPORT size_t sysgpuDeviceEnumerateFeatures(WGPUDevice device, WGPUFeatureName * features); export fn sysgpuDeviceEnumerateFeatures(device: *sysgpu.Device, features: ?[*]sysgpu.FeatureName) usize { return T.deviceEnumerateFeatures(device, features); } // SYSGPU_EXPORT WGPUBool sysgpuDeviceGetLimits(WGPUDevice device, WGPUSupportedLimits * limits); export fn sysgpuDeviceGetLimits(device: *sysgpu.Device, limits: *sysgpu.SupportedLimits) u32 { return T.deviceGetLimits(device, limits); } // SYSGPU_EXPORT WGPUSharedFence sysgpuDeviceImportSharedFence(WGPUDevice device, WGPUSharedFenceDescriptor const * descriptor); export fn sysgpuDeviceImportSharedFence(device: *sysgpu.Device, descriptor: *const sysgpu.SharedFence.Descriptor) *sysgpu.SharedFence { return T.deviceImportSharedFence(device, descriptor); } // SYSGPU_EXPORT WGPUSharedTextureMemory sysgpuDeviceImportSharedTextureMemory(WGPUDevice device, WGPUSharedTextureMemoryDescriptor const * descriptor); export fn sysgpuDeviceImportSharedTextureMemory(device: *sysgpu.Device, descriptor: *const sysgpu.SharedTextureMemory.Descriptor) *sysgpu.SharedTextureMemory { return T.deviceImportSharedTextureMemory(device, descriptor); } // SYSGPU_EXPORT WGPUQueue sysgpuDeviceGetQueue(WGPUDevice device); export fn sysgpuDeviceGetQueue(device: *sysgpu.Device) *sysgpu.Queue { return T.deviceGetQueue(device); } // SYSGPU_EXPORT bool sysgpuDeviceHasFeature(WGPUDevice device, WGPUFeatureName feature); export fn sysgpuDeviceHasFeature(device: *sysgpu.Device, feature: sysgpu.FeatureName) u32 { return T.deviceHasFeature(device, feature); } // SYSGPU_EXPORT void sysgpuDeviceInjectError(WGPUDevice device, WGPUErrorType type, char const * message); export fn sysgpuDeviceInjectError(device: *sysgpu.Device, typ: sysgpu.ErrorType, message: [*:0]const u8) void { T.deviceInjectError(device, typ, message); } // SYSGPU_EXPORT void sysgpuDevicePopErrorScope(WGPUDevice device, WGPUErrorCallback callback, void * userdata); export fn sysgpuDevicePopErrorScope(device: *sysgpu.Device, callback: sysgpu.ErrorCallback, userdata: ?*anyopaque) void { 
            T.devicePopErrorScope(device, callback, userdata);
        }
        // SYSGPU_EXPORT void sysgpuDevicePushErrorScope(WGPUDevice device, WGPUErrorFilter filter);
        export fn sysgpuDevicePushErrorScope(device: *sysgpu.Device, filter: sysgpu.ErrorFilter) void {
            T.devicePushErrorScope(device, filter);
        }
        // TODO: dawn: callback not marked as nullable in dawn.json but in fact is.
        // SYSGPU_EXPORT void sysgpuDeviceSetDeviceLostCallback(WGPUDevice device, WGPUDeviceLostCallback callback, void * userdata);
        export fn sysgpuDeviceSetDeviceLostCallback(device: *sysgpu.Device, callback: ?sysgpu.Device.LostCallback, userdata: ?*anyopaque) void {
            T.deviceSetDeviceLostCallback(device, callback, userdata);
        }
        // SYSGPU_EXPORT void sysgpuDeviceSetLabel(WGPUDevice device, char const * label);
        export fn sysgpuDeviceSetLabel(device: *sysgpu.Device, label: [*:0]const u8) void {
            T.deviceSetLabel(device, label);
        }
        // TODO: dawn: callback not marked as nullable in dawn.json but in fact is.
        // SYSGPU_EXPORT void sysgpuDeviceSetLoggingCallback(WGPUDevice device, WGPULoggingCallback callback, void * userdata);
        export fn sysgpuDeviceSetLoggingCallback(device: *sysgpu.Device, callback: ?sysgpu.LoggingCallback, userdata: ?*anyopaque) void {
            T.deviceSetLoggingCallback(device, callback, userdata);
        }
        // TODO: dawn: callback not marked as nullable in dawn.json but in fact is.
        // SYSGPU_EXPORT void sysgpuDeviceSetUncapturedErrorCallback(WGPUDevice device, WGPUErrorCallback callback, void * userdata);
        export fn sysgpuDeviceSetUncapturedErrorCallback(device: *sysgpu.Device, callback: ?sysgpu.ErrorCallback, userdata: ?*anyopaque) void {
            T.deviceSetUncapturedErrorCallback(device, callback, userdata);
        }
        // SYSGPU_EXPORT void sysgpuDeviceTick(WGPUDevice device);
        export fn sysgpuDeviceTick(device: *sysgpu.Device) void {
            T.deviceTick(device);
        }
        // SYSGPU_EXPORT void sysgpuMachDeviceWaitForCommandsToBeScheduled(WGPUDevice device);
        export fn sysgpuMachDeviceWaitForCommandsToBeScheduled(device: *sysgpu.Device) void {
            T.machDeviceWaitForCommandsToBeScheduled(device);
        }
        // SYSGPU_EXPORT void sysgpuDeviceReference(WGPUDevice device);
        export fn sysgpuDeviceReference(device: *sysgpu.Device) void {
            T.deviceReference(device);
        }
        // SYSGPU_EXPORT void sysgpuDeviceRelease(WGPUDevice device);
        export fn sysgpuDeviceRelease(device: *sysgpu.Device) void {
            T.deviceRelease(device);
        }
        // SYSGPU_EXPORT void sysgpuExternalTextureDestroy(WGPUExternalTexture externalTexture);
        export fn sysgpuExternalTextureDestroy(external_texture: *sysgpu.ExternalTexture) void {
            T.externalTextureDestroy(external_texture);
        }
        // SYSGPU_EXPORT void sysgpuExternalTextureSetLabel(WGPUExternalTexture externalTexture, char const * label);
        export fn sysgpuExternalTextureSetLabel(external_texture: *sysgpu.ExternalTexture, label: [*:0]const u8) void {
            T.externalTextureSetLabel(external_texture, label);
        }
        // SYSGPU_EXPORT void sysgpuExternalTextureReference(WGPUExternalTexture externalTexture);
        export fn sysgpuExternalTextureReference(external_texture: *sysgpu.ExternalTexture) void {
            T.externalTextureReference(external_texture);
        }
        // SYSGPU_EXPORT void sysgpuExternalTextureRelease(WGPUExternalTexture externalTexture);
        export fn sysgpuExternalTextureRelease(external_texture: *sysgpu.ExternalTexture) void {
            T.externalTextureRelease(external_texture);
        }
        // SYSGPU_EXPORT WGPUSurface sysgpuInstanceCreateSurface(WGPUInstance instance, WGPUSurfaceDescriptor const * descriptor);
        export fn sysgpuInstanceCreateSurface(instance: *sysgpu.Instance, descriptor: *const sysgpu.Surface.Descriptor) *sysgpu.Surface {
            return T.instanceCreateSurface(instance, descriptor);
        }
        // SYSGPU_EXPORT void sysgpuInstanceProcessEvents(WGPUInstance instance);
        export fn sysgpuInstanceProcessEvents(instance: *sysgpu.Instance) void {
            T.instanceProcessEvents(instance);
        }
        // SYSGPU_EXPORT void sysgpuInstanceRequestAdapter(WGPUInstance instance, WGPURequestAdapterOptions const * options /* nullable */, WGPURequestAdapterCallback callback, void * userdata);
        export fn sysgpuInstanceRequestAdapter(instance: *sysgpu.Instance, options: ?*const sysgpu.RequestAdapterOptions, callback: sysgpu.RequestAdapterCallback, userdata: ?*anyopaque) void {
            T.instanceRequestAdapter(instance, options, callback, userdata);
        }
        // SYSGPU_EXPORT void sysgpuInstanceReference(WGPUInstance instance);
        export fn sysgpuInstanceReference(instance: *sysgpu.Instance) void {
            T.instanceReference(instance);
        }
        // SYSGPU_EXPORT void sysgpuInstanceRelease(WGPUInstance instance);
        export fn sysgpuInstanceRelease(instance: *sysgpu.Instance) void {
            T.instanceRelease(instance);
        }
        // SYSGPU_EXPORT void sysgpuPipelineLayoutSetLabel(WGPUPipelineLayout pipelineLayout, char const * label);
        export fn sysgpuPipelineLayoutSetLabel(pipeline_layout: *sysgpu.PipelineLayout, label: [*:0]const u8) void {
            T.pipelineLayoutSetLabel(pipeline_layout, label);
        }
        // SYSGPU_EXPORT void sysgpuPipelineLayoutReference(WGPUPipelineLayout pipelineLayout);
        export fn sysgpuPipelineLayoutReference(pipeline_layout: *sysgpu.PipelineLayout) void {
            T.pipelineLayoutReference(pipeline_layout);
        }
        // SYSGPU_EXPORT void sysgpuPipelineLayoutRelease(WGPUPipelineLayout pipelineLayout);
        export fn sysgpuPipelineLayoutRelease(pipeline_layout: *sysgpu.PipelineLayout) void {
            T.pipelineLayoutRelease(pipeline_layout);
        }
        // SYSGPU_EXPORT void sysgpuQuerySetDestroy(WGPUQuerySet querySet);
        export fn sysgpuQuerySetDestroy(query_set: *sysgpu.QuerySet) void {
            T.querySetDestroy(query_set);
        }
        // SYSGPU_EXPORT uint32_t sysgpuQuerySetGetCount(WGPUQuerySet querySet);
        export fn sysgpuQuerySetGetCount(query_set: *sysgpu.QuerySet) u32 {
            return T.querySetGetCount(query_set);
        }
        // SYSGPU_EXPORT WGPUQueryType sysgpuQuerySetGetType(WGPUQuerySet querySet);
        export fn sysgpuQuerySetGetType(query_set: *sysgpu.QuerySet) sysgpu.QueryType {
            return T.querySetGetType(query_set);
        }
        // SYSGPU_EXPORT void sysgpuQuerySetSetLabel(WGPUQuerySet querySet, char const * label);
        export fn sysgpuQuerySetSetLabel(query_set: *sysgpu.QuerySet, label: [*:0]const u8) void {
            T.querySetSetLabel(query_set, label);
        }
        // SYSGPU_EXPORT void sysgpuQuerySetReference(WGPUQuerySet querySet);
        export fn sysgpuQuerySetReference(query_set: *sysgpu.QuerySet) void {
            T.querySetReference(query_set);
        }
        // SYSGPU_EXPORT void sysgpuQuerySetRelease(WGPUQuerySet querySet);
        export fn sysgpuQuerySetRelease(query_set: *sysgpu.QuerySet) void {
            T.querySetRelease(query_set);
        }
        // SYSGPU_EXPORT void sysgpuQueueCopyTextureForBrowser(WGPUQueue queue, WGPUImageCopyTexture const * source, WGPUImageCopyTexture const * destination, WGPUExtent3D const * copySize, WGPUCopyTextureForBrowserOptions const * options);
        export fn sysgpuQueueCopyTextureForBrowser(queue: *sysgpu.Queue, source: *const sysgpu.ImageCopyTexture, destination: *const sysgpu.ImageCopyTexture, copy_size: *const sysgpu.Extent3D, options: *const sysgpu.CopyTextureForBrowserOptions) void {
            T.queueCopyTextureForBrowser(queue, source, destination, copy_size, options);
        }
        // SYSGPU_EXPORT void sysgpuQueueOnSubmittedWorkDone(WGPUQueue queue, uint64_t signalValue, WGPUQueueWorkDoneCallback callback, void * userdata);
        export fn sysgpuQueueOnSubmittedWorkDone(queue: *sysgpu.Queue, signal_value: u64, callback: sysgpu.Queue.WorkDoneCallback, userdata: ?*anyopaque) void {
            T.queueOnSubmittedWorkDone(queue, signal_value, callback, userdata);
        }
        // SYSGPU_EXPORT void sysgpuQueueSetLabel(WGPUQueue queue, char const * label);
        export fn sysgpuQueueSetLabel(queue: *sysgpu.Queue, label: [*:0]const u8) void {
            T.queueSetLabel(queue, label);
        }
        // SYSGPU_EXPORT void sysgpuQueueSubmit(WGPUQueue queue, size_t commandCount, WGPUCommandBuffer const * commands);
        export fn sysgpuQueueSubmit(queue: *sysgpu.Queue, command_count: usize, commands: [*]const *const sysgpu.CommandBuffer) void {
            T.queueSubmit(queue, command_count, commands);
        }
        // SYSGPU_EXPORT void sysgpuQueueWriteBuffer(WGPUQueue queue, WGPUBuffer buffer, uint64_t bufferOffset, void const * data, size_t size);
        export fn sysgpuQueueWriteBuffer(queue: *sysgpu.Queue, buffer: *sysgpu.Buffer, buffer_offset: u64, data: *const anyopaque, size: usize) void {
            T.queueWriteBuffer(queue, buffer, buffer_offset, data, size);
        }
        // SYSGPU_EXPORT void sysgpuQueueWriteTexture(WGPUQueue queue, WGPUImageCopyTexture const * destination, void const * data, size_t dataSize, WGPUTextureDataLayout const * dataLayout, WGPUExtent3D const * writeSize);
        export fn sysgpuQueueWriteTexture(queue: *sysgpu.Queue, destination: *const sysgpu.ImageCopyTexture, data: *const anyopaque, data_size: usize, data_layout: *const sysgpu.Texture.DataLayout, write_size: *const sysgpu.Extent3D) void {
            T.queueWriteTexture(queue, destination, data, data_size, data_layout, write_size);
        }
        // SYSGPU_EXPORT void sysgpuQueueReference(WGPUQueue queue);
        export fn sysgpuQueueReference(queue: *sysgpu.Queue) void {
            T.queueReference(queue);
        }
        // SYSGPU_EXPORT void sysgpuQueueRelease(WGPUQueue queue);
        export fn sysgpuQueueRelease(queue: *sysgpu.Queue) void {
            T.queueRelease(queue);
        }
        // SYSGPU_EXPORT void sysgpuRenderBundleSetLabel(WGPURenderBundle renderBundle, char const * label);
        export fn sysgpuRenderBundleSetLabel(render_bundle: *sysgpu.RenderBundle, label: [*:0]const u8) void {
            T.renderBundleSetLabel(render_bundle, label);
        }
        // SYSGPU_EXPORT void sysgpuRenderBundleReference(WGPURenderBundle renderBundle);
        export fn sysgpuRenderBundleReference(render_bundle: *sysgpu.RenderBundle) void {
            T.renderBundleReference(render_bundle);
        }
        // SYSGPU_EXPORT void sysgpuRenderBundleRelease(WGPURenderBundle renderBundle);
        export fn sysgpuRenderBundleRelease(render_bundle: *sysgpu.RenderBundle) void {
            T.renderBundleRelease(render_bundle);
        }
        // SYSGPU_EXPORT void sysgpuRenderBundleEncoderDraw(WGPURenderBundleEncoder renderBundleEncoder, uint32_t vertexCount, uint32_t instanceCount, uint32_t firstVertex, uint32_t firstInstance);
        export fn sysgpuRenderBundleEncoderDraw(render_bundle_encoder: *sysgpu.RenderBundleEncoder, vertex_count: u32, instance_count: u32, first_vertex: u32, first_instance: u32) void {
            T.renderBundleEncoderDraw(render_bundle_encoder, vertex_count, instance_count, first_vertex, first_instance);
        }
        // SYSGPU_EXPORT void sysgpuRenderBundleEncoderDrawIndexed(WGPURenderBundleEncoder renderBundleEncoder, uint32_t indexCount, uint32_t instanceCount, uint32_t firstIndex, int32_t baseVertex, uint32_t firstInstance);
        export fn sysgpuRenderBundleEncoderDrawIndexed(render_bundle_encoder: *sysgpu.RenderBundleEncoder, index_count: u32, instance_count: u32, first_index: u32, base_vertex: i32, first_instance: u32) void {
            T.renderBundleEncoderDrawIndexed(render_bundle_encoder, index_count, instance_count, first_index, base_vertex, first_instance);
        }
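        // Every object type here exposes a matching Reference/Release pair, mirroring
        // the WebGPU C API's manual reference counting. A minimal usage sketch from
        // the C side (assuming a valid `device` handle):
        //
        //   sysgpuDeviceReference(device); // +1 before sharing the handle elsewhere
        //   sysgpuDeviceRelease(device);   // -1; the object is freed once the count reaches zero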
        // SYSGPU_EXPORT void sysgpuRenderBundleEncoderDrawIndexedIndirect(WGPURenderBundleEncoder renderBundleEncoder, WGPUBuffer indirectBuffer, uint64_t indirectOffset);
        export fn sysgpuRenderBundleEncoderDrawIndexedIndirect(render_bundle_encoder: *sysgpu.RenderBundleEncoder, indirect_buffer: *sysgpu.Buffer, indirect_offset: u64) void {
            T.renderBundleEncoderDrawIndexedIndirect(render_bundle_encoder, indirect_buffer, indirect_offset);
        }
        // SYSGPU_EXPORT void sysgpuRenderBundleEncoderDrawIndirect(WGPURenderBundleEncoder renderBundleEncoder, WGPUBuffer indirectBuffer, uint64_t indirectOffset);
        export fn sysgpuRenderBundleEncoderDrawIndirect(render_bundle_encoder: *sysgpu.RenderBundleEncoder, indirect_buffer: *sysgpu.Buffer, indirect_offset: u64) void {
            T.renderBundleEncoderDrawIndirect(render_bundle_encoder, indirect_buffer, indirect_offset);
        }
        // SYSGPU_EXPORT WGPURenderBundle sysgpuRenderBundleEncoderFinish(WGPURenderBundleEncoder renderBundleEncoder, WGPURenderBundleDescriptor const * descriptor /* nullable */);
        export fn sysgpuRenderBundleEncoderFinish(render_bundle_encoder: *sysgpu.RenderBundleEncoder, descriptor: ?*const sysgpu.RenderBundle.Descriptor) *sysgpu.RenderBundle {
            return T.renderBundleEncoderFinish(render_bundle_encoder, descriptor);
        }
        // SYSGPU_EXPORT void sysgpuRenderBundleEncoderInsertDebugMarker(WGPURenderBundleEncoder renderBundleEncoder, char const * markerLabel);
        export fn sysgpuRenderBundleEncoderInsertDebugMarker(render_bundle_encoder: *sysgpu.RenderBundleEncoder, marker_label: [*:0]const u8) void {
            T.renderBundleEncoderInsertDebugMarker(render_bundle_encoder, marker_label);
        }
        // SYSGPU_EXPORT void sysgpuRenderBundleEncoderPopDebugGroup(WGPURenderBundleEncoder renderBundleEncoder);
        export fn sysgpuRenderBundleEncoderPopDebugGroup(render_bundle_encoder: *sysgpu.RenderBundleEncoder) void {
            T.renderBundleEncoderPopDebugGroup(render_bundle_encoder);
        }
        // SYSGPU_EXPORT void sysgpuRenderBundleEncoderPushDebugGroup(WGPURenderBundleEncoder renderBundleEncoder, char const * groupLabel);
        export fn sysgpuRenderBundleEncoderPushDebugGroup(render_bundle_encoder: *sysgpu.RenderBundleEncoder, group_label: [*:0]const u8) void {
            T.renderBundleEncoderPushDebugGroup(render_bundle_encoder, group_label);
        }
        // SYSGPU_EXPORT void sysgpuRenderBundleEncoderSetBindGroup(WGPURenderBundleEncoder renderBundleEncoder, uint32_t groupIndex, WGPUBindGroup group, size_t dynamicOffsetCount, uint32_t const * dynamicOffsets);
        export fn sysgpuRenderBundleEncoderSetBindGroup(render_bundle_encoder: *sysgpu.RenderBundleEncoder, group_index: u32, group: *sysgpu.BindGroup, dynamic_offset_count: usize, dynamic_offsets: ?[*]const u32) void {
            T.renderBundleEncoderSetBindGroup(render_bundle_encoder, group_index, group, dynamic_offset_count, dynamic_offsets);
        }
        // SYSGPU_EXPORT void sysgpuRenderBundleEncoderSetIndexBuffer(WGPURenderBundleEncoder renderBundleEncoder, WGPUBuffer buffer, WGPUIndexFormat format, uint64_t offset, uint64_t size);
        export fn sysgpuRenderBundleEncoderSetIndexBuffer(render_bundle_encoder: *sysgpu.RenderBundleEncoder, buffer: *sysgpu.Buffer, format: sysgpu.IndexFormat, offset: u64, size: u64) void {
            T.renderBundleEncoderSetIndexBuffer(render_bundle_encoder, buffer, format, offset, size);
        }
        // SYSGPU_EXPORT void sysgpuRenderBundleEncoderSetLabel(WGPURenderBundleEncoder renderBundleEncoder, char const * label);
        export fn sysgpuRenderBundleEncoderSetLabel(render_bundle_encoder: *sysgpu.RenderBundleEncoder, label: [*:0]const u8) void {
            T.renderBundleEncoderSetLabel(render_bundle_encoder, label);
        }
        // SYSGPU_EXPORT void sysgpuRenderBundleEncoderSetPipeline(WGPURenderBundleEncoder renderBundleEncoder, WGPURenderPipeline pipeline);
        export fn sysgpuRenderBundleEncoderSetPipeline(render_bundle_encoder: *sysgpu.RenderBundleEncoder, pipeline: *sysgpu.RenderPipeline) void {
            T.renderBundleEncoderSetPipeline(render_bundle_encoder, pipeline);
        }
        // SYSGPU_EXPORT void sysgpuRenderBundleEncoderSetVertexBuffer(WGPURenderBundleEncoder renderBundleEncoder, uint32_t slot, WGPUBuffer buffer, uint64_t offset, uint64_t size);
        export fn sysgpuRenderBundleEncoderSetVertexBuffer(render_bundle_encoder: *sysgpu.RenderBundleEncoder, slot: u32, buffer: *sysgpu.Buffer, offset: u64, size: u64) void {
            T.renderBundleEncoderSetVertexBuffer(render_bundle_encoder, slot, buffer, offset, size);
        }
        // SYSGPU_EXPORT void sysgpuRenderBundleEncoderReference(WGPURenderBundleEncoder renderBundleEncoder);
        export fn sysgpuRenderBundleEncoderReference(render_bundle_encoder: *sysgpu.RenderBundleEncoder) void {
            T.renderBundleEncoderReference(render_bundle_encoder);
        }
        // SYSGPU_EXPORT void sysgpuRenderBundleEncoderRelease(WGPURenderBundleEncoder renderBundleEncoder);
        export fn sysgpuRenderBundleEncoderRelease(render_bundle_encoder: *sysgpu.RenderBundleEncoder) void {
            T.renderBundleEncoderRelease(render_bundle_encoder);
        }
        // SYSGPU_EXPORT void sysgpuRenderPassEncoderBeginOcclusionQuery(WGPURenderPassEncoder renderPassEncoder, uint32_t queryIndex);
        export fn sysgpuRenderPassEncoderBeginOcclusionQuery(render_pass_encoder: *sysgpu.RenderPassEncoder, query_index: u32) void {
            T.renderPassEncoderBeginOcclusionQuery(render_pass_encoder, query_index);
        }
        // SYSGPU_EXPORT void sysgpuRenderPassEncoderDraw(WGPURenderPassEncoder renderPassEncoder, uint32_t vertexCount, uint32_t instanceCount, uint32_t firstVertex, uint32_t firstInstance);
        export fn sysgpuRenderPassEncoderDraw(render_pass_encoder: *sysgpu.RenderPassEncoder, vertex_count: u32, instance_count: u32, first_vertex: u32, first_instance: u32) void {
            T.renderPassEncoderDraw(render_pass_encoder, vertex_count, instance_count, first_vertex, first_instance);
        }
        // SYSGPU_EXPORT void sysgpuRenderPassEncoderDrawIndexed(WGPURenderPassEncoder renderPassEncoder, uint32_t indexCount, uint32_t instanceCount, uint32_t firstIndex, int32_t baseVertex, uint32_t firstInstance);
        export fn sysgpuRenderPassEncoderDrawIndexed(render_pass_encoder: *sysgpu.RenderPassEncoder, index_count: u32, instance_count: u32, first_index: u32, base_vertex: i32, first_instance: u32) void {
            T.renderPassEncoderDrawIndexed(render_pass_encoder, index_count, instance_count, first_index, base_vertex, first_instance);
        }
        // SYSGPU_EXPORT void sysgpuRenderPassEncoderDrawIndexedIndirect(WGPURenderPassEncoder renderPassEncoder, WGPUBuffer indirectBuffer, uint64_t indirectOffset);
        export fn sysgpuRenderPassEncoderDrawIndexedIndirect(render_pass_encoder: *sysgpu.RenderPassEncoder, indirect_buffer: *sysgpu.Buffer, indirect_offset: u64) void {
            T.renderPassEncoderDrawIndexedIndirect(render_pass_encoder, indirect_buffer, indirect_offset);
        }
        // SYSGPU_EXPORT void sysgpuRenderPassEncoderDrawIndirect(WGPURenderPassEncoder renderPassEncoder, WGPUBuffer indirectBuffer, uint64_t indirectOffset);
        export fn sysgpuRenderPassEncoderDrawIndirect(render_pass_encoder: *sysgpu.RenderPassEncoder, indirect_buffer: *sysgpu.Buffer, indirect_offset: u64) void {
            T.renderPassEncoderDrawIndirect(render_pass_encoder, indirect_buffer, indirect_offset);
        }
        // SYSGPU_EXPORT void sysgpuRenderPassEncoderEnd(WGPURenderPassEncoder renderPassEncoder);
        export fn sysgpuRenderPassEncoderEnd(render_pass_encoder: *sysgpu.RenderPassEncoder) void {
            T.renderPassEncoderEnd(render_pass_encoder);
        }
        // SYSGPU_EXPORT void sysgpuRenderPassEncoderEndOcclusionQuery(WGPURenderPassEncoder renderPassEncoder);
        export fn sysgpuRenderPassEncoderEndOcclusionQuery(render_pass_encoder: *sysgpu.RenderPassEncoder) void {
            T.renderPassEncoderEndOcclusionQuery(render_pass_encoder);
        }
        // SYSGPU_EXPORT void sysgpuRenderPassEncoderExecuteBundles(WGPURenderPassEncoder renderPassEncoder, size_t bundleCount, WGPURenderBundle const * bundles);
        export fn sysgpuRenderPassEncoderExecuteBundles(render_pass_encoder: *sysgpu.RenderPassEncoder, bundles_count: usize, bundles: [*]const *const sysgpu.RenderBundle) void {
            T.renderPassEncoderExecuteBundles(render_pass_encoder, bundles_count, bundles);
        }
        // SYSGPU_EXPORT void sysgpuRenderPassEncoderInsertDebugMarker(WGPURenderPassEncoder renderPassEncoder, char const * markerLabel);
        export fn sysgpuRenderPassEncoderInsertDebugMarker(render_pass_encoder: *sysgpu.RenderPassEncoder, marker_label: [*:0]const u8) void {
            T.renderPassEncoderInsertDebugMarker(render_pass_encoder, marker_label);
        }
        // SYSGPU_EXPORT void sysgpuRenderPassEncoderPopDebugGroup(WGPURenderPassEncoder renderPassEncoder);
        export fn sysgpuRenderPassEncoderPopDebugGroup(render_pass_encoder: *sysgpu.RenderPassEncoder) void {
            T.renderPassEncoderPopDebugGroup(render_pass_encoder);
        }
        // SYSGPU_EXPORT void sysgpuRenderPassEncoderPushDebugGroup(WGPURenderPassEncoder renderPassEncoder, char const * groupLabel);
        export fn sysgpuRenderPassEncoderPushDebugGroup(render_pass_encoder: *sysgpu.RenderPassEncoder, group_label: [*:0]const u8) void {
            T.renderPassEncoderPushDebugGroup(render_pass_encoder, group_label);
        }
        // SYSGPU_EXPORT void sysgpuRenderPassEncoderSetBindGroup(WGPURenderPassEncoder renderPassEncoder, uint32_t groupIndex, WGPUBindGroup group, size_t dynamicOffsetCount, uint32_t const * dynamicOffsets);
        export fn sysgpuRenderPassEncoderSetBindGroup(render_pass_encoder: *sysgpu.RenderPassEncoder, group_index: u32, group: *sysgpu.BindGroup, dynamic_offset_count: usize, dynamic_offsets: ?[*]const u32) void {
            T.renderPassEncoderSetBindGroup(render_pass_encoder, group_index, group, dynamic_offset_count, dynamic_offsets);
        }
        // SYSGPU_EXPORT void sysgpuRenderPassEncoderSetBlendConstant(WGPURenderPassEncoder renderPassEncoder, WGPUColor const * color);
        export fn sysgpuRenderPassEncoderSetBlendConstant(render_pass_encoder: *sysgpu.RenderPassEncoder, color: *const sysgpu.Color) void {
            T.renderPassEncoderSetBlendConstant(render_pass_encoder, color);
        }
        // SYSGPU_EXPORT void sysgpuRenderPassEncoderSetIndexBuffer(WGPURenderPassEncoder renderPassEncoder, WGPUBuffer buffer, WGPUIndexFormat format, uint64_t offset, uint64_t size);
        export fn sysgpuRenderPassEncoderSetIndexBuffer(render_pass_encoder: *sysgpu.RenderPassEncoder, buffer: *sysgpu.Buffer, format: sysgpu.IndexFormat, offset: u64, size: u64) void {
            T.renderPassEncoderSetIndexBuffer(render_pass_encoder, buffer, format, offset, size);
        }
        // SYSGPU_EXPORT void sysgpuRenderPassEncoderSetLabel(WGPURenderPassEncoder renderPassEncoder, char const * label);
        export fn sysgpuRenderPassEncoderSetLabel(render_pass_encoder: *sysgpu.RenderPassEncoder, label: [*:0]const u8) void {
            T.renderPassEncoderSetLabel(render_pass_encoder, label);
        }
        // SYSGPU_EXPORT void sysgpuRenderPassEncoderSetPipeline(WGPURenderPassEncoder renderPassEncoder, WGPURenderPipeline pipeline);
        export fn sysgpuRenderPassEncoderSetPipeline(render_pass_encoder: *sysgpu.RenderPassEncoder, pipeline: *sysgpu.RenderPipeline) void {
            T.renderPassEncoderSetPipeline(render_pass_encoder, pipeline);
        }
        // SYSGPU_EXPORT void sysgpuRenderPassEncoderSetScissorRect(WGPURenderPassEncoder renderPassEncoder, uint32_t x, uint32_t y, uint32_t width, uint32_t height);
        export fn sysgpuRenderPassEncoderSetScissorRect(render_pass_encoder: *sysgpu.RenderPassEncoder, x: u32, y: u32, width: u32, height: u32) void {
            T.renderPassEncoderSetScissorRect(render_pass_encoder, x, y, width, height);
        }
        // SYSGPU_EXPORT void sysgpuRenderPassEncoderSetStencilReference(WGPURenderPassEncoder renderPassEncoder, uint32_t reference);
        export fn sysgpuRenderPassEncoderSetStencilReference(render_pass_encoder: *sysgpu.RenderPassEncoder, reference: u32) void {
            T.renderPassEncoderSetStencilReference(render_pass_encoder, reference);
        }
        // SYSGPU_EXPORT void sysgpuRenderPassEncoderSetVertexBuffer(WGPURenderPassEncoder renderPassEncoder, uint32_t slot, WGPUBuffer buffer, uint64_t offset, uint64_t size);
        export fn sysgpuRenderPassEncoderSetVertexBuffer(render_pass_encoder: *sysgpu.RenderPassEncoder, slot: u32, buffer: *sysgpu.Buffer, offset: u64, size: u64) void {
            T.renderPassEncoderSetVertexBuffer(render_pass_encoder, slot, buffer, offset, size);
        }
        // SYSGPU_EXPORT void sysgpuRenderPassEncoderSetViewport(WGPURenderPassEncoder renderPassEncoder, float x, float y, float width, float height, float minDepth, float maxDepth);
        export fn sysgpuRenderPassEncoderSetViewport(render_pass_encoder: *sysgpu.RenderPassEncoder, x: f32, y: f32, width: f32, height: f32, min_depth: f32, max_depth: f32) void {
            T.renderPassEncoderSetViewport(render_pass_encoder, x, y, width, height, min_depth, max_depth);
        }
        // SYSGPU_EXPORT void sysgpuRenderPassEncoderWriteTimestamp(WGPURenderPassEncoder renderPassEncoder, WGPUQuerySet querySet, uint32_t queryIndex);
        export fn sysgpuRenderPassEncoderWriteTimestamp(render_pass_encoder: *sysgpu.RenderPassEncoder, query_set: *sysgpu.QuerySet, query_index: u32) void {
            T.renderPassEncoderWriteTimestamp(render_pass_encoder, query_set, query_index);
        }
        // SYSGPU_EXPORT void sysgpuRenderPassEncoderReference(WGPURenderPassEncoder renderPassEncoder);
        export fn sysgpuRenderPassEncoderReference(render_pass_encoder: *sysgpu.RenderPassEncoder) void {
            T.renderPassEncoderReference(render_pass_encoder);
        }
        // SYSGPU_EXPORT void sysgpuRenderPassEncoderRelease(WGPURenderPassEncoder renderPassEncoder);
        export fn sysgpuRenderPassEncoderRelease(render_pass_encoder: *sysgpu.RenderPassEncoder) void {
            T.renderPassEncoderRelease(render_pass_encoder);
        }
        // SYSGPU_EXPORT WGPUBindGroupLayout sysgpuRenderPipelineGetBindGroupLayout(WGPURenderPipeline renderPipeline, uint32_t groupIndex);
        export fn sysgpuRenderPipelineGetBindGroupLayout(render_pipeline: *sysgpu.RenderPipeline, group_index: u32) *sysgpu.BindGroupLayout {
            return T.renderPipelineGetBindGroupLayout(render_pipeline, group_index);
        }
        // SYSGPU_EXPORT void sysgpuRenderPipelineSetLabel(WGPURenderPipeline renderPipeline, char const * label);
        export fn sysgpuRenderPipelineSetLabel(render_pipeline: *sysgpu.RenderPipeline, label: [*:0]const u8) void {
            T.renderPipelineSetLabel(render_pipeline, label);
        }
        // SYSGPU_EXPORT void sysgpuRenderPipelineReference(WGPURenderPipeline renderPipeline);
        export fn sysgpuRenderPipelineReference(render_pipeline: *sysgpu.RenderPipeline) void {
            T.renderPipelineReference(render_pipeline);
        }
        // SYSGPU_EXPORT void sysgpuRenderPipelineRelease(WGPURenderPipeline renderPipeline);
        export fn sysgpuRenderPipelineRelease(render_pipeline: *sysgpu.RenderPipeline) void {
            T.renderPipelineRelease(render_pipeline);
        }
        // SYSGPU_EXPORT void sysgpuSamplerSetLabel(WGPUSampler sampler, char const * label);
        export fn sysgpuSamplerSetLabel(sampler: *sysgpu.Sampler, label: [*:0]const u8) void {
            T.samplerSetLabel(sampler, label);
        }
        // SYSGPU_EXPORT void sysgpuSamplerReference(WGPUSampler sampler);
        export fn sysgpuSamplerReference(sampler: *sysgpu.Sampler) void {
            T.samplerReference(sampler);
        }
        // SYSGPU_EXPORT void sysgpuSamplerRelease(WGPUSampler sampler);
        export fn sysgpuSamplerRelease(sampler: *sysgpu.Sampler) void {
            T.samplerRelease(sampler);
        }
        // SYSGPU_EXPORT void sysgpuShaderModuleGetCompilationInfo(WGPUShaderModule shaderModule, WGPUCompilationInfoCallback callback, void * userdata);
        export fn sysgpuShaderModuleGetCompilationInfo(shader_module: *sysgpu.ShaderModule, callback: sysgpu.CompilationInfoCallback, userdata: ?*anyopaque) void {
            T.shaderModuleGetCompilationInfo(shader_module, callback, userdata);
        }
        // SYSGPU_EXPORT void sysgpuShaderModuleSetLabel(WGPUShaderModule shaderModule, char const * label);
        export fn sysgpuShaderModuleSetLabel(shader_module: *sysgpu.ShaderModule, label: [*:0]const u8) void {
            T.shaderModuleSetLabel(shader_module, label);
        }
        // SYSGPU_EXPORT void sysgpuShaderModuleReference(WGPUShaderModule shaderModule);
        export fn sysgpuShaderModuleReference(shader_module: *sysgpu.ShaderModule) void {
            T.shaderModuleReference(shader_module);
        }
        // SYSGPU_EXPORT void sysgpuShaderModuleRelease(WGPUShaderModule shaderModule);
        export fn sysgpuShaderModuleRelease(shader_module: *sysgpu.ShaderModule) void {
            T.shaderModuleRelease(shader_module);
        }
        // SYSGPU_EXPORT void sysgpuSharedFenceExportInfo(WGPUSharedFence sharedFence, WGPUSharedFenceExportInfo * info);
        export fn sysgpuSharedFenceExportInfo(shared_fence: *sysgpu.SharedFence, info: *sysgpu.SharedFence.ExportInfo) void {
            T.sharedFenceExportInfo(shared_fence, info);
        }
        // SYSGPU_EXPORT void sysgpuSharedFenceReference(WGPUSharedFence sharedFence);
        export fn sysgpuSharedFenceReference(shared_fence: *sysgpu.SharedFence) void {
            T.sharedFenceReference(shared_fence);
        }
        // SYSGPU_EXPORT void sysgpuSharedFenceRelease(WGPUSharedFence sharedFence);
        export fn sysgpuSharedFenceRelease(shared_fence: *sysgpu.SharedFence) void {
            T.sharedFenceRelease(shared_fence);
        }
        // SYSGPU_EXPORT void sysgpuSharedTextureMemoryBeginAccess(WGPUSharedTextureMemory sharedTextureMemory, WGPUTexture texture, WGPUSharedTextureMemoryBeginAccessDescriptor const * descriptor);
        export fn sysgpuSharedTextureMemoryBeginAccess(shared_texture_memory: *sysgpu.SharedTextureMemory, texture: *sysgpu.Texture, descriptor: *const sysgpu.SharedTextureMemory.BeginAccessDescriptor) void {
            T.sharedTextureMemoryBeginAccess(shared_texture_memory, texture, descriptor);
        }
        // SYSGPU_EXPORT WGPUTexture sysgpuSharedTextureMemoryCreateTexture(WGPUSharedTextureMemory sharedTextureMemory, WGPUTextureDescriptor const * descriptor);
        export fn sysgpuSharedTextureMemoryCreateTexture(shared_texture_memory: *sysgpu.SharedTextureMemory, descriptor: *const sysgpu.Texture.Descriptor) *sysgpu.Texture {
            return T.sharedTextureMemoryCreateTexture(shared_texture_memory, descriptor);
        }
        // SYSGPU_EXPORT void sysgpuSharedTextureMemoryEndAccess(WGPUSharedTextureMemory sharedTextureMemory, WGPUTexture texture, WGPUSharedTextureMemoryEndAccessState * descriptor);
        export fn sysgpuSharedTextureMemoryEndAccess(shared_texture_memory: *sysgpu.SharedTextureMemory, texture: *sysgpu.Texture, descriptor: *sysgpu.SharedTextureMemory.EndAccessState) void {
            T.sharedTextureMemoryEndAccess(shared_texture_memory, texture, descriptor);
        }
        // SYSGPU_EXPORT void sysgpuSharedTextureMemoryEndAccessStateFreeMembers(WGPUSharedTextureMemoryEndAccessState value);
        export fn sysgpuSharedTextureMemoryEndAccessStateFreeMembers(value: sysgpu.SharedTextureMemory.EndAccessState) void {
            T.sharedTextureMemoryEndAccessStateFreeMembers(value);
        }
        // SYSGPU_EXPORT void sysgpuSharedTextureMemoryGetProperties(WGPUSharedTextureMemory sharedTextureMemory, WGPUSharedTextureMemoryProperties * properties);
        export fn sysgpuSharedTextureMemoryGetProperties(shared_texture_memory: *sysgpu.SharedTextureMemory, properties: *sysgpu.SharedTextureMemory.Properties) void {
            T.sharedTextureMemoryGetProperties(shared_texture_memory, properties);
        }
        // SYSGPU_EXPORT void sysgpuSharedTextureMemorySetLabel(WGPUSharedTextureMemory sharedTextureMemory, char const * label);
        export fn sysgpuSharedTextureMemorySetLabel(shared_texture_memory: *sysgpu.SharedTextureMemory, label: [*:0]const u8) void {
            T.sharedTextureMemorySetLabel(shared_texture_memory, label);
        }
        // SYSGPU_EXPORT void sysgpuSharedTextureMemoryReference(WGPUSharedTextureMemory sharedTextureMemory);
        export fn sysgpuSharedTextureMemoryReference(shared_texture_memory: *sysgpu.SharedTextureMemory) void {
            T.sharedTextureMemoryReference(shared_texture_memory);
        }
        // SYSGPU_EXPORT void sysgpuSharedTextureMemoryRelease(WGPUSharedTextureMemory sharedTextureMemory);
        export fn sysgpuSharedTextureMemoryRelease(shared_texture_memory: *sysgpu.SharedTextureMemory) void {
            T.sharedTextureMemoryRelease(shared_texture_memory);
        }
        // SYSGPU_EXPORT void sysgpuSurfaceReference(WGPUSurface surface);
        export fn sysgpuSurfaceReference(surface: *sysgpu.Surface) void {
            T.surfaceReference(surface);
        }
        // SYSGPU_EXPORT void sysgpuSurfaceRelease(WGPUSurface surface);
        export fn sysgpuSurfaceRelease(surface: *sysgpu.Surface) void {
            T.surfaceRelease(surface);
        }
        // SYSGPU_EXPORT WGPUTexture sysgpuSwapChainGetCurrentTexture(WGPUSwapChain swapChain);
        export fn sysgpuSwapChainGetCurrentTexture(swap_chain: *sysgpu.SwapChain) ?*sysgpu.Texture {
            return T.swapChainGetCurrentTexture(swap_chain);
        }
        // SYSGPU_EXPORT WGPUTextureView sysgpuSwapChainGetCurrentTextureView(WGPUSwapChain swapChain);
        export fn sysgpuSwapChainGetCurrentTextureView(swap_chain: *sysgpu.SwapChain) ?*sysgpu.TextureView {
            return T.swapChainGetCurrentTextureView(swap_chain);
        }
        // SYSGPU_EXPORT void sysgpuSwapChainPresent(WGPUSwapChain swapChain);
        export fn sysgpuSwapChainPresent(swap_chain: *sysgpu.SwapChain) void {
            T.swapChainPresent(swap_chain);
        }
        // SYSGPU_EXPORT void sysgpuSwapChainReference(WGPUSwapChain swapChain);
        export fn sysgpuSwapChainReference(swap_chain: *sysgpu.SwapChain) void {
            T.swapChainReference(swap_chain);
        }
        // SYSGPU_EXPORT void sysgpuSwapChainRelease(WGPUSwapChain swapChain);
        export fn sysgpuSwapChainRelease(swap_chain: *sysgpu.SwapChain) void {
            T.swapChainRelease(swap_chain);
        }
        // SYSGPU_EXPORT WGPUTextureView sysgpuTextureCreateView(WGPUTexture texture, WGPUTextureViewDescriptor const * descriptor /* nullable */);
        export fn sysgpuTextureCreateView(texture: *sysgpu.Texture, descriptor: ?*const sysgpu.TextureView.Descriptor) *sysgpu.TextureView {
            return T.textureCreateView(texture, descriptor);
        }
        // SYSGPU_EXPORT void sysgpuTextureDestroy(WGPUTexture texture);
        export fn sysgpuTextureDestroy(texture: *sysgpu.Texture) void {
            T.textureDestroy(texture);
        }
        // SYSGPU_EXPORT uint32_t sysgpuTextureGetDepthOrArrayLayers(WGPUTexture texture);
        export fn sysgpuTextureGetDepthOrArrayLayers(texture: *sysgpu.Texture) u32 {
            return T.textureGetDepthOrArrayLayers(texture);
        }
        // SYSGPU_EXPORT WGPUTextureDimension sysgpuTextureGetDimension(WGPUTexture texture);
        export fn sysgpuTextureGetDimension(texture: *sysgpu.Texture) sysgpu.Texture.Dimension {
            return T.textureGetDimension(texture);
        }
        // SYSGPU_EXPORT WGPUTextureFormat sysgpuTextureGetFormat(WGPUTexture texture);
        export fn sysgpuTextureGetFormat(texture: *sysgpu.Texture) sysgpu.Texture.Format {
            return T.textureGetFormat(texture);
        }
        // SYSGPU_EXPORT uint32_t sysgpuTextureGetHeight(WGPUTexture texture);
        export fn sysgpuTextureGetHeight(texture: *sysgpu.Texture) u32 {
            return T.textureGetHeight(texture);
        }
        // SYSGPU_EXPORT uint32_t sysgpuTextureGetMipLevelCount(WGPUTexture texture);
        export fn sysgpuTextureGetMipLevelCount(texture: *sysgpu.Texture) u32 {
            return T.textureGetMipLevelCount(texture);
        }
        // SYSGPU_EXPORT uint32_t sysgpuTextureGetSampleCount(WGPUTexture texture);
        export fn sysgpuTextureGetSampleCount(texture: *sysgpu.Texture) u32 {
            return T.textureGetSampleCount(texture);
        }
        // SYSGPU_EXPORT WGPUTextureUsage sysgpuTextureGetUsage(WGPUTexture texture);
        export fn sysgpuTextureGetUsage(texture: *sysgpu.Texture) sysgpu.Texture.UsageFlags {
            return T.textureGetUsage(texture);
        }
        // SYSGPU_EXPORT uint32_t sysgpuTextureGetWidth(WGPUTexture texture);
        export fn sysgpuTextureGetWidth(texture: *sysgpu.Texture) u32 {
            return T.textureGetWidth(texture);
        }
        // SYSGPU_EXPORT void sysgpuTextureSetLabel(WGPUTexture texture, char const * label);
        export fn sysgpuTextureSetLabel(texture: *sysgpu.Texture, label: [*:0]const u8) void {
            T.textureSetLabel(texture, label);
        }
        // SYSGPU_EXPORT void sysgpuTextureReference(WGPUTexture texture);
        export fn sysgpuTextureReference(texture: *sysgpu.Texture) void {
            T.textureReference(texture);
        }
        // SYSGPU_EXPORT void sysgpuTextureRelease(WGPUTexture texture);
        export fn sysgpuTextureRelease(texture: *sysgpu.Texture) void {
            T.textureRelease(texture);
        }
        // SYSGPU_EXPORT void sysgpuTextureViewSetLabel(WGPUTextureView textureView, char const * label);
        export fn sysgpuTextureViewSetLabel(texture_view: *sysgpu.TextureView, label: [*:0]const u8) void {
            T.textureViewSetLabel(texture_view, label);
        }
        // SYSGPU_EXPORT void sysgpuTextureViewReference(WGPUTextureView textureView);
        export fn sysgpuTextureViewReference(texture_view: *sysgpu.TextureView) void {
            T.textureViewReference(texture_view);
        }
        // SYSGPU_EXPORT void sysgpuTextureViewRelease(WGPUTextureView textureView);
        export fn sysgpuTextureViewRelease(texture_view: *sysgpu.TextureView) void {
            T.textureViewRelease(texture_view);
        }
    };
}
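// `Interface(T)` is a comptime shim: each exported C symbol above simply forwards
// to the function of the same name on the implementation type `T`. A minimal
// sketch of the shape `T` must have (`MyBackend` is a hypothetical example, not
// part of this module):
//
//   const MyBackend = struct {
//       pub inline fn deviceTick(device: *sysgpu.Device) void {
//           // drive the backend's event processing here
//       }
//       // ...one pub inline fn per entry point above...
//   };
//   const Exports = Interface(MyBackend);
//
// Because `Interface` declares `export fn`s (C symbols), only one instantiation
// can be linked into a given binary without duplicate-symbol errors.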
/// A stub sysgpu.Interface in which every function is implemented by `unreachable;`
pub const StubInterface = Interface(struct {
    pub inline fn createInstance(descriptor: ?*const sysgpu.Instance.Descriptor) ?*sysgpu.Instance { _ = descriptor; unreachable; }
    pub inline fn getProcAddress(device: *sysgpu.Device, proc_name: [*:0]const u8) ?sysgpu.Proc { _ = device; _ = proc_name; unreachable; }
    pub inline fn adapterCreateDevice(adapter: *sysgpu.Adapter, descriptor: ?*const sysgpu.Device.Descriptor) ?*sysgpu.Device { _ = adapter; _ = descriptor; unreachable; }
    pub inline fn adapterEnumerateFeatures(adapter: *sysgpu.Adapter, features: ?[*]sysgpu.FeatureName) usize { _ = adapter; _ = features; unreachable; }
    pub inline fn adapterGetInstance(adapter: *sysgpu.Adapter) *sysgpu.Instance { _ = adapter; unreachable; }
    pub inline fn adapterGetLimits(adapter: *sysgpu.Adapter, limits: *sysgpu.SupportedLimits) u32 { _ = adapter; _ = limits; unreachable; }
    pub inline fn adapterGetProperties(adapter: *sysgpu.Adapter, properties: *sysgpu.Adapter.Properties) void { _ = adapter; _ = properties; unreachable; }
    pub inline fn adapterHasFeature(adapter: *sysgpu.Adapter, feature: sysgpu.FeatureName) u32 { _ = adapter; _ = feature; unreachable; }
    pub inline fn adapterPropertiesFreeMembers(value: sysgpu.Adapter.Properties) void { _ = value; unreachable; }
    pub inline fn adapterRequestDevice(adapter: *sysgpu.Adapter, descriptor: ?*const sysgpu.Device.Descriptor, callback: sysgpu.RequestDeviceCallback, userdata: ?*anyopaque) void { _ = adapter; _ = descriptor; _ = callback; _ = userdata; unreachable; }
    pub inline fn adapterReference(adapter: *sysgpu.Adapter) void { _ = adapter; unreachable; }
    pub inline fn adapterRelease(adapter: *sysgpu.Adapter) void { _ = adapter; unreachable; }
    pub inline fn bindGroupSetLabel(bind_group: *sysgpu.BindGroup, label: [*:0]const u8) void { _ = bind_group; _ = label; unreachable; }
    pub inline fn bindGroupReference(bind_group: *sysgpu.BindGroup) void { _ = bind_group; unreachable; }
    pub inline fn bindGroupRelease(bind_group: *sysgpu.BindGroup) void { _ = bind_group; unreachable; }
    pub inline fn bindGroupLayoutSetLabel(bind_group_layout: *sysgpu.BindGroupLayout, label: [*:0]const u8) void { _ = bind_group_layout; _ = label; unreachable; }
    pub inline fn bindGroupLayoutReference(bind_group_layout: *sysgpu.BindGroupLayout) void { _ = bind_group_layout; unreachable; }
    pub inline fn bindGroupLayoutRelease(bind_group_layout: *sysgpu.BindGroupLayout) void { _ = bind_group_layout; unreachable; }
    pub inline fn bufferDestroy(buffer: *sysgpu.Buffer) void { _ = buffer; unreachable; }
    // TODO: dawn: return value not marked as nullable in dawn.json but in fact is.
    pub inline fn bufferGetConstMappedRange(buffer: *sysgpu.Buffer, offset: usize, size: usize) ?*const anyopaque { _ = buffer; _ = offset; _ = size; unreachable; }
    // TODO: dawn: return value not marked as nullable in dawn.json but in fact is.
    pub inline fn bufferGetMappedRange(buffer: *sysgpu.Buffer, offset: usize, size: usize) ?*anyopaque { _ = buffer; _ = offset; _ = size; unreachable; }
    pub inline fn bufferGetSize(buffer: *sysgpu.Buffer) u64 { _ = buffer; unreachable; }
    pub inline fn bufferGetUsage(buffer: *sysgpu.Buffer) sysgpu.Buffer.UsageFlags { _ = buffer; unreachable; }
    pub inline fn bufferMapAsync(buffer: *sysgpu.Buffer, mode: sysgpu.MapModeFlags, offset: usize, size: usize, callback: sysgpu.Buffer.MapCallback, userdata: ?*anyopaque) void { _ = buffer; _ = mode; _ = offset; _ = size; _ = callback; _ = userdata; unreachable; }
    pub inline fn bufferSetLabel(buffer: *sysgpu.Buffer, label: [*:0]const u8) void { _ = buffer; _ = label; unreachable; }
    pub inline fn bufferUnmap(buffer: *sysgpu.Buffer) void { _ = buffer; unreachable; }
    pub inline fn bufferReference(buffer: *sysgpu.Buffer) void { _ = buffer; unreachable; }
    pub inline fn bufferRelease(buffer: *sysgpu.Buffer) void { _ = buffer; unreachable; }
    pub inline fn commandBufferSetLabel(command_buffer: *sysgpu.CommandBuffer, label: [*:0]const u8) void { _ = command_buffer; _ = label; unreachable; }
    pub inline fn commandBufferReference(command_buffer: *sysgpu.CommandBuffer) void { _ = command_buffer; unreachable; }
    pub inline fn commandBufferRelease(command_buffer: *sysgpu.CommandBuffer) void { _ = command_buffer; unreachable; }
    pub inline fn commandEncoderBeginComputePass(command_encoder: *sysgpu.CommandEncoder, descriptor: ?*const sysgpu.ComputePassDescriptor) *sysgpu.ComputePassEncoder { _ = command_encoder; _ = descriptor; unreachable; }
    pub inline fn commandEncoderBeginRenderPass(command_encoder: *sysgpu.CommandEncoder, descriptor: *const sysgpu.RenderPassDescriptor) *sysgpu.RenderPassEncoder { _ = command_encoder; _ = descriptor; unreachable; }
    pub inline fn commandEncoderClearBuffer(command_encoder: *sysgpu.CommandEncoder, buffer: *sysgpu.Buffer, offset: u64, size: u64) void { _ = command_encoder; _ = buffer; _ = offset; _ = size; unreachable; }
    pub inline fn commandEncoderCopyBufferToBuffer(command_encoder: *sysgpu.CommandEncoder, source: *sysgpu.Buffer, source_offset: u64, destination: *sysgpu.Buffer, destination_offset: u64, size: u64) void { _ = command_encoder; _ = source; _ = source_offset; _ = destination; _ = destination_offset; _ = size; unreachable; }
    pub inline fn commandEncoderCopyBufferToTexture(command_encoder: *sysgpu.CommandEncoder, source: *const sysgpu.ImageCopyBuffer, destination: *const sysgpu.ImageCopyTexture, copy_size: *const sysgpu.Extent3D) void { _ = command_encoder; _ = source; _ = destination; _ = copy_size; unreachable; }
    pub inline fn commandEncoderCopyTextureToBuffer(command_encoder: *sysgpu.CommandEncoder, source: *const sysgpu.ImageCopyTexture, destination: *const sysgpu.ImageCopyBuffer, copy_size: *const sysgpu.Extent3D) void { _ = command_encoder; _ = source; _ = destination; _ = copy_size; unreachable; }
    pub inline fn commandEncoderCopyTextureToTexture(command_encoder: *sysgpu.CommandEncoder, source: *const sysgpu.ImageCopyTexture, destination: *const sysgpu.ImageCopyTexture, copy_size: *const sysgpu.Extent3D) void { _ = command_encoder; _ = source; _ = destination; _ = copy_size; unreachable; }
    pub inline fn commandEncoderFinish(command_encoder: *sysgpu.CommandEncoder, descriptor: ?*const sysgpu.CommandBuffer.Descriptor) *sysgpu.CommandBuffer { _ = command_encoder; _ = descriptor; unreachable; }
    pub inline fn commandEncoderInjectValidationError(command_encoder: *sysgpu.CommandEncoder, message: [*:0]const u8) void { _ = command_encoder; _ = message; unreachable; }
    pub inline fn commandEncoderInsertDebugMarker(command_encoder: *sysgpu.CommandEncoder, marker_label: [*:0]const u8) void { _ = command_encoder; _ = marker_label; unreachable; }
    pub inline fn commandEncoderPopDebugGroup(command_encoder: *sysgpu.CommandEncoder) void { _ = command_encoder; unreachable; }
    pub inline fn commandEncoderPushDebugGroup(command_encoder: *sysgpu.CommandEncoder, group_label: [*:0]const u8) void { _ = command_encoder; _ = group_label; unreachable; }
    pub inline fn commandEncoderResolveQuerySet(command_encoder: *sysgpu.CommandEncoder, query_set: *sysgpu.QuerySet, first_query: u32, query_count: u32, destination: *sysgpu.Buffer, destination_offset: u64) void { _ = command_encoder; _ = query_set; _ = first_query; _ = query_count; _ = destination; _ = destination_offset; unreachable; }
    pub inline fn commandEncoderSetLabel(command_encoder: *sysgpu.CommandEncoder, label: [*:0]const u8) void { _ = command_encoder; _ = label; unreachable; }
    pub inline fn commandEncoderWriteBuffer(command_encoder: *sysgpu.CommandEncoder, buffer: *sysgpu.Buffer, buffer_offset: u64, data: [*]const u8, size: u64) void { _ = command_encoder; _ = buffer; _ = buffer_offset; _ = data; _ = size; unreachable; }
    pub inline fn commandEncoderWriteTimestamp(command_encoder: *sysgpu.CommandEncoder, query_set: *sysgpu.QuerySet, query_index: u32) void { _ = command_encoder; _ = query_set; _ = query_index; unreachable; }
    pub inline fn commandEncoderReference(command_encoder: *sysgpu.CommandEncoder) void { _ = command_encoder; unreachable; }
    pub inline fn commandEncoderRelease(command_encoder: *sysgpu.CommandEncoder) void { _ = command_encoder; unreachable; }
    pub inline fn computePassEncoderDispatchWorkgroups(compute_pass_encoder: *sysgpu.ComputePassEncoder, workgroup_count_x: u32, workgroup_count_y: u32, workgroup_count_z: u32) void { _ = compute_pass_encoder; _ = workgroup_count_x; _ = workgroup_count_y; _ = workgroup_count_z; unreachable; }
    pub inline fn computePassEncoderDispatchWorkgroupsIndirect(compute_pass_encoder: *sysgpu.ComputePassEncoder, indirect_buffer: *sysgpu.Buffer, indirect_offset: u64) void { _ = compute_pass_encoder; _ = indirect_buffer; _ = indirect_offset; unreachable; }
    pub inline fn computePassEncoderEnd(compute_pass_encoder: *sysgpu.ComputePassEncoder) void { _ = compute_pass_encoder; unreachable; }
    pub inline fn computePassEncoderInsertDebugMarker(compute_pass_encoder: *sysgpu.ComputePassEncoder, marker_label: [*:0]const u8) void { _ = compute_pass_encoder; _ = marker_label; unreachable; }
    pub inline fn computePassEncoderPopDebugGroup(compute_pass_encoder: *sysgpu.ComputePassEncoder) void { _ = compute_pass_encoder; unreachable; }
    pub inline fn computePassEncoderPushDebugGroup(compute_pass_encoder: *sysgpu.ComputePassEncoder, group_label: [*:0]const u8) void { _ = compute_pass_encoder; _ = group_label; unreachable; }
    pub inline fn computePassEncoderSetBindGroup(compute_pass_encoder: *sysgpu.ComputePassEncoder, group_index: u32, group: *sysgpu.BindGroup, dynamic_offset_count: usize, dynamic_offsets: ?[*]const u32) void { _ = compute_pass_encoder; _ = group_index; _ = group; _ = dynamic_offset_count; _ = dynamic_offsets; unreachable; }
    pub inline fn computePassEncoderSetLabel(compute_pass_encoder: *sysgpu.ComputePassEncoder, label: [*:0]const u8) void { _ = compute_pass_encoder; _ = label; unreachable; }
    pub inline fn computePassEncoderSetPipeline(compute_pass_encoder: *sysgpu.ComputePassEncoder, pipeline: *sysgpu.ComputePipeline) void { _ = compute_pass_encoder; _ = pipeline; unreachable; }
    pub inline fn computePassEncoderWriteTimestamp(compute_pass_encoder: *sysgpu.ComputePassEncoder, query_set: *sysgpu.QuerySet, query_index: u32) void { _ = compute_pass_encoder; _ = query_set; _ = query_index; unreachable; }
    pub inline fn computePassEncoderReference(compute_pass_encoder: *sysgpu.ComputePassEncoder) void { _ = compute_pass_encoder; unreachable; }
    pub inline fn computePassEncoderRelease(compute_pass_encoder: *sysgpu.ComputePassEncoder) void { _ = compute_pass_encoder; unreachable; }
    pub inline fn computePipelineGetBindGroupLayout(compute_pipeline: *sysgpu.ComputePipeline, group_index: u32) *sysgpu.BindGroupLayout { _ = compute_pipeline; _ = group_index; unreachable; }
    pub inline fn computePipelineSetLabel(compute_pipeline: *sysgpu.ComputePipeline, label: [*:0]const u8) void { _ = compute_pipeline; _ = label; unreachable; }
    pub inline fn computePipelineReference(compute_pipeline: *sysgpu.ComputePipeline) void { _ = compute_pipeline; unreachable; }
    pub inline fn computePipelineRelease(compute_pipeline: *sysgpu.ComputePipeline) void { _ = compute_pipeline; unreachable; }
    pub inline fn deviceCreateBindGroup(device: *sysgpu.Device, descriptor: *const sysgpu.BindGroup.Descriptor) *sysgpu.BindGroup { _ = device; _ = descriptor; unreachable; }
    pub inline fn deviceCreateBindGroupLayout(device: *sysgpu.Device, descriptor: *const sysgpu.BindGroupLayout.Descriptor) *sysgpu.BindGroupLayout { _ = device; _ = descriptor; unreachable; }
    pub inline fn deviceCreateBuffer(device: *sysgpu.Device, descriptor: *const sysgpu.Buffer.Descriptor) *sysgpu.Buffer { _ = device; _ = descriptor; unreachable; }
    pub inline fn deviceCreateCommandEncoder(device: *sysgpu.Device, descriptor: ?*const sysgpu.CommandEncoder.Descriptor) *sysgpu.CommandEncoder { _ = device; _ = descriptor; unreachable; }
    pub inline fn deviceCreateComputePipeline(device: *sysgpu.Device, descriptor: *const sysgpu.ComputePipeline.Descriptor) *sysgpu.ComputePipeline { _ = device; _ = descriptor; unreachable; }
    pub inline fn deviceCreateComputePipelineAsync(device: *sysgpu.Device, descriptor: *const sysgpu.ComputePipeline.Descriptor, callback: sysgpu.CreateComputePipelineAsyncCallback, userdata: ?*anyopaque) void { _ = device; _ = descriptor; _ = callback; _ = userdata; unreachable; }
    pub inline fn deviceCreateErrorBuffer(device: *sysgpu.Device, descriptor: *const sysgpu.Buffer.Descriptor) *sysgpu.Buffer { _ = device; _ = descriptor; unreachable; }
    pub inline fn deviceCreateErrorExternalTexture(device: *sysgpu.Device) *sysgpu.ExternalTexture { _ = device; unreachable; }
    pub inline fn deviceCreateErrorTexture(device: *sysgpu.Device, descriptor: *const sysgpu.Texture.Descriptor) *sysgpu.Texture { _ = device; _ = descriptor; unreachable; }
    pub inline fn deviceCreateExternalTexture(device: *sysgpu.Device, external_texture_descriptor: *const sysgpu.ExternalTexture.Descriptor) *sysgpu.ExternalTexture { _ = device; _ = external_texture_descriptor; unreachable; }
    pub inline fn deviceCreatePipelineLayout(device: *sysgpu.Device, pipeline_layout_descriptor: *const sysgpu.PipelineLayout.Descriptor) *sysgpu.PipelineLayout { _ = device; _ = pipeline_layout_descriptor; unreachable; }
    pub inline fn deviceCreateQuerySet(device: *sysgpu.Device, descriptor: *const sysgpu.QuerySet.Descriptor) *sysgpu.QuerySet { _ = device; _ = descriptor; unreachable; }
    pub inline fn deviceCreateRenderBundleEncoder(device: *sysgpu.Device, descriptor: *const sysgpu.RenderBundleEncoder.Descriptor) *sysgpu.RenderBundleEncoder { _ = device; _ = descriptor; unreachable; }
    pub inline fn deviceCreateRenderPipeline(device: *sysgpu.Device, descriptor: *const sysgpu.RenderPipeline.Descriptor) *sysgpu.RenderPipeline { _ = device; _ = descriptor; unreachable; }
    pub inline fn deviceCreateRenderPipelineAsync(device: *sysgpu.Device, descriptor: *const sysgpu.RenderPipeline.Descriptor, callback: sysgpu.CreateRenderPipelineAsyncCallback, userdata: ?*anyopaque) void { _ = device; _ = descriptor; _ = callback; _ = userdata; unreachable; }
    pub inline fn deviceCreateSampler(device: *sysgpu.Device, descriptor: ?*const sysgpu.Sampler.Descriptor) *sysgpu.Sampler { _ = device; _ = descriptor; unreachable; }
    pub inline fn deviceCreateShaderModule(device: *sysgpu.Device, descriptor: *const sysgpu.ShaderModule.Descriptor) *sysgpu.ShaderModule { _ = device; _ = descriptor; unreachable; }
    pub inline fn deviceCreateSwapChain(device: *sysgpu.Device, surface: ?*sysgpu.Surface, descriptor: *const sysgpu.SwapChain.Descriptor) *sysgpu.SwapChain { _ = device; _ = surface; _ = descriptor; unreachable; }
    pub inline fn deviceCreateTexture(device: *sysgpu.Device, descriptor: *const sysgpu.Texture.Descriptor) *sysgpu.Texture { _ = device; _ = descriptor; unreachable; }
    pub inline fn deviceDestroy(device: *sysgpu.Device) void { _ = device; unreachable; }
    pub inline fn deviceEnumerateFeatures(device: *sysgpu.Device, features: ?[*]sysgpu.FeatureName) usize { _ = device; _ = features; unreachable; }
    pub inline fn deviceGetLimits(device: *sysgpu.Device, limits: *sysgpu.SupportedLimits) u32 { _ = device; _ = limits; unreachable; }
    pub inline fn deviceGetQueue(device: *sysgpu.Device) *sysgpu.Queue { _ = device; unreachable; }
    pub inline fn deviceHasFeature(device: *sysgpu.Device, feature: sysgpu.FeatureName) u32 { _ = device; _ = feature; unreachable; }
    pub inline fn deviceImportSharedFence(device: *sysgpu.Device, descriptor: *const sysgpu.SharedFence.Descriptor) *sysgpu.SharedFence { _ = device; _ = descriptor; unreachable; }
    pub inline fn deviceImportSharedTextureMemory(device: *sysgpu.Device, descriptor: *const sysgpu.SharedTextureMemory.Descriptor) *sysgpu.SharedTextureMemory { _ = device; _ = descriptor; unreachable; }
    pub inline fn deviceInjectError(device: *sysgpu.Device, typ: sysgpu.ErrorType, message: [*:0]const u8) void { _ = device; _ = typ; _ = message; unreachable; }
    pub inline fn deviceLoseForTesting(device: *sysgpu.Device) void { _ = device; unreachable; }
    pub inline fn devicePopErrorScope(device: *sysgpu.Device, callback: sysgpu.ErrorCallback, userdata: ?*anyopaque) void { _ = device; _ = callback; _ = userdata; unreachable; }
    pub inline fn devicePushErrorScope(device: *sysgpu.Device, filter: sysgpu.ErrorFilter) void { _ = device; _ = filter; unreachable; }
    pub inline fn deviceSetDeviceLostCallback(device: *sysgpu.Device, callback: ?sysgpu.Device.LostCallback, userdata: ?*anyopaque) void { _ = device; _ = callback; _ = userdata; unreachable; }
    pub inline fn deviceSetLabel(device: *sysgpu.Device, label: [*:0]const u8) void { _ = device; _ = label; unreachable; }
    pub inline fn deviceSetLoggingCallback(device: *sysgpu.Device, callback: ?sysgpu.LoggingCallback, userdata: ?*anyopaque) void { _ = device; _ = callback; _ = userdata; unreachable; }
    pub inline fn deviceSetUncapturedErrorCallback(device: *sysgpu.Device, callback: ?sysgpu.ErrorCallback, userdata: ?*anyopaque) void { _ = device; _ = callback; _ = userdata; unreachable; }
    pub inline fn deviceTick(device: *sysgpu.Device) void { _ = device; unreachable; }
    pub inline fn machDeviceWaitForCommandsToBeScheduled(device: *sysgpu.Device) void { _ = device; unreachable; }
    pub inline fn deviceReference(device: *sysgpu.Device) void { _ = device; unreachable; }
    pub inline fn deviceRelease(device: *sysgpu.Device) void { _ = device; unreachable; }
    pub inline fn externalTextureDestroy(external_texture: *sysgpu.ExternalTexture) void { _ = external_texture; unreachable; }
    pub inline fn externalTextureSetLabel(external_texture: *sysgpu.ExternalTexture, label: [*:0]const u8) void { _ = external_texture; _ = label; unreachable; }
    pub inline fn externalTextureReference(external_texture: *sysgpu.ExternalTexture) void { _ = external_texture; unreachable; }
    pub inline fn externalTextureRelease(external_texture: *sysgpu.ExternalTexture) void { _ = external_texture; unreachable; }
    pub inline fn instanceCreateSurface(instance: *sysgpu.Instance, descriptor: *const sysgpu.Surface.Descriptor) *sysgpu.Surface { _ = instance; _ = descriptor; unreachable; }
    pub inline fn instanceProcessEvents(instance: *sysgpu.Instance) void { _ = instance; unreachable; }
    pub inline fn instanceRequestAdapter(instance: *sysgpu.Instance, options: ?*const sysgpu.RequestAdapterOptions, callback: sysgpu.RequestAdapterCallback, userdata: ?*anyopaque) void { _ = instance; _ = options; _ = callback; _ = userdata; unreachable; }
    pub inline fn instanceReference(instance: *sysgpu.Instance) void { _ = instance; unreachable; }
    pub inline fn instanceRelease(instance: *sysgpu.Instance) void { _ = instance; unreachable; }
    pub inline fn pipelineLayoutSetLabel(pipeline_layout: *sysgpu.PipelineLayout, label: [*:0]const u8) void { _ = pipeline_layout; _ = label; unreachable; }
    pub inline fn pipelineLayoutReference(pipeline_layout: *sysgpu.PipelineLayout) void { _ = pipeline_layout; unreachable; }
    pub inline fn pipelineLayoutRelease(pipeline_layout: *sysgpu.PipelineLayout) void { _ = pipeline_layout; unreachable; }
    pub inline fn querySetDestroy(query_set: *sysgpu.QuerySet) void { _ = query_set; unreachable; }
    pub inline fn querySetGetCount(query_set: *sysgpu.QuerySet) u32 { _ = query_set; unreachable; }
    pub inline fn querySetGetType(query_set: *sysgpu.QuerySet) sysgpu.QueryType { _ = query_set; unreachable; }
    pub inline fn querySetSetLabel(query_set: *sysgpu.QuerySet, label: [*:0]const u8) void { _ = query_set; _ = label; unreachable; }
    pub inline fn querySetReference(query_set: *sysgpu.QuerySet) void { _ = query_set; unreachable; }
    pub inline fn querySetRelease(query_set: *sysgpu.QuerySet) void { _ = query_set; unreachable; }
    pub inline fn queueCopyTextureForBrowser(queue: *sysgpu.Queue, source: *const sysgpu.ImageCopyTexture, destination: *const sysgpu.ImageCopyTexture, copy_size: *const sysgpu.Extent3D, options: *const sysgpu.CopyTextureForBrowserOptions) void { _ = queue; _ = source; _ = destination; _ = copy_size; _ = options; unreachable; }
    pub inline fn queueOnSubmittedWorkDone(queue: *sysgpu.Queue, signal_value: u64, callback: sysgpu.Queue.WorkDoneCallback, userdata: ?*anyopaque) void { _ = queue; _ = signal_value; _ = callback; _ = userdata; unreachable; }
    pub inline fn queueSetLabel(queue: *sysgpu.Queue, label: [*:0]const u8) void { _ = queue; _ = label; unreachable; }
    pub inline fn queueSubmit(queue: *sysgpu.Queue, command_count: usize, commands: [*]const *const sysgpu.CommandBuffer) void { _ = queue; _ = command_count; _ = commands; unreachable; }
    pub inline fn queueWriteBuffer(queue: *sysgpu.Queue, buffer: *sysgpu.Buffer, buffer_offset: u64, data: *const anyopaque, size: usize) void { _ = queue; _ = buffer; _ = buffer_offset; _ = data; _ = size; unreachable; }
    pub inline fn queueWriteTexture(queue: *sysgpu.Queue, destination: *const sysgpu.ImageCopyTexture, data: *const anyopaque, data_size: usize, data_layout: *const sysgpu.Texture.DataLayout, write_size: *const sysgpu.Extent3D) void { _ = queue; _ = destination; _ = data; _ = data_size; _ = data_layout; _ = write_size; unreachable; }
    pub inline fn queueReference(queue: *sysgpu.Queue) void { _ = queue; unreachable; }
    pub inline fn queueRelease(queue: *sysgpu.Queue) void { _ = queue; unreachable; }
    pub inline fn renderBundleSetLabel(render_bundle: *sysgpu.RenderBundle, label: [*:0]const u8) void { _ = render_bundle; _ = label; unreachable; }
    pub inline fn renderBundleReference(render_bundle: *sysgpu.RenderBundle) void { _ = render_bundle; unreachable; }
    pub inline fn renderBundleRelease(render_bundle: *sysgpu.RenderBundle) void { _ = render_bundle; unreachable; }
    pub inline fn renderBundleEncoderDraw(render_bundle_encoder: *sysgpu.RenderBundleEncoder, vertex_count: u32, instance_count: u32, first_vertex: u32, first_instance: u32) void { _ = render_bundle_encoder; _ = vertex_count; _ = instance_count; _ = first_vertex; _ = first_instance; unreachable; }
    pub inline fn renderBundleEncoderDrawIndexed(render_bundle_encoder: *sysgpu.RenderBundleEncoder, index_count: u32, instance_count: u32, first_index: u32, base_vertex: i32, first_instance: u32) void { _ = render_bundle_encoder; _ = index_count; _ = instance_count; _ = first_index; _ = base_vertex; _ = first_instance; unreachable; }
    pub inline fn renderBundleEncoderDrawIndexedIndirect(render_bundle_encoder: *sysgpu.RenderBundleEncoder, indirect_buffer: *sysgpu.Buffer, indirect_offset: u64) void { _ = render_bundle_encoder; _ = indirect_buffer; _ = indirect_offset; unreachable; }
    pub inline fn renderBundleEncoderDrawIndirect(render_bundle_encoder: *sysgpu.RenderBundleEncoder, indirect_buffer: *sysgpu.Buffer, indirect_offset: u64) void { _ = render_bundle_encoder; _ = indirect_buffer; _ = indirect_offset; unreachable; }
    pub inline fn renderBundleEncoderFinish(render_bundle_encoder: *sysgpu.RenderBundleEncoder, descriptor: ?*const sysgpu.RenderBundle.Descriptor) *sysgpu.RenderBundle { _ = render_bundle_encoder; _ = descriptor; unreachable; }
    pub inline fn renderBundleEncoderInsertDebugMarker(render_bundle_encoder: *sysgpu.RenderBundleEncoder, marker_label: [*:0]const u8) void { _ = render_bundle_encoder; _ = marker_label; unreachable; }
    pub inline fn renderBundleEncoderPopDebugGroup(render_bundle_encoder: *sysgpu.RenderBundleEncoder) void { _ = render_bundle_encoder; unreachable; }
    pub inline fn renderBundleEncoderPushDebugGroup(render_bundle_encoder: *sysgpu.RenderBundleEncoder, group_label: [*:0]const u8) void { _ = render_bundle_encoder; _ = group_label; unreachable; }
    pub inline fn renderBundleEncoderSetBindGroup(render_bundle_encoder: *sysgpu.RenderBundleEncoder, group_index: u32, group: *sysgpu.BindGroup, dynamic_offset_count: usize, dynamic_offsets: ?[*]const u32) void { _ = render_bundle_encoder; _ = group_index; _ = group; _ = dynamic_offset_count; _ = dynamic_offsets; unreachable; }
    pub inline fn renderBundleEncoderSetIndexBuffer(render_bundle_encoder: *sysgpu.RenderBundleEncoder, buffer: *sysgpu.Buffer, format: sysgpu.IndexFormat, offset: u64, size: u64) void { _ = render_bundle_encoder; _ = buffer; _ = format; _ = offset; _ = size; unreachable; }
    pub inline fn renderBundleEncoderSetLabel(render_bundle_encoder: *sysgpu.RenderBundleEncoder, label: [*:0]const u8) void { _ = render_bundle_encoder; _ = label; unreachable; }
    pub inline fn renderBundleEncoderSetPipeline(render_bundle_encoder: *sysgpu.RenderBundleEncoder, pipeline: *sysgpu.RenderPipeline) void { _ = render_bundle_encoder; _ = pipeline; unreachable; }
    pub inline fn renderBundleEncoderSetVertexBuffer(render_bundle_encoder: *sysgpu.RenderBundleEncoder, slot: u32, buffer: *sysgpu.Buffer, offset: u64, size: u64) void { _ = render_bundle_encoder; _ = slot; _ = buffer; _ = offset; _ = size; unreachable; }
    pub inline fn renderBundleEncoderReference(render_bundle_encoder: *sysgpu.RenderBundleEncoder) void { _ = render_bundle_encoder; unreachable; }
    pub inline fn renderBundleEncoderRelease(render_bundle_encoder: *sysgpu.RenderBundleEncoder) void { _ = render_bundle_encoder; unreachable; }
    pub inline fn renderPassEncoderBeginOcclusionQuery(render_pass_encoder: *sysgpu.RenderPassEncoder, query_index: u32) void { _ = render_pass_encoder; _ = query_index; unreachable; }
    pub inline fn renderPassEncoderDraw(render_pass_encoder: *sysgpu.RenderPassEncoder, vertex_count: u32, instance_count: u32, first_vertex: u32, first_instance: u32) void { _ = render_pass_encoder; _ = vertex_count; _ = instance_count; _ = first_vertex; _ = first_instance; unreachable; }
    pub inline fn renderPassEncoderDrawIndexed(render_pass_encoder: *sysgpu.RenderPassEncoder, index_count: u32, instance_count: u32, first_index: u32, base_vertex: i32, first_instance: u32) void { _ = render_pass_encoder; _ = index_count; _ = instance_count; _ = first_index; _ = base_vertex; _ = first_instance; unreachable; }
    pub inline fn renderPassEncoderDrawIndexedIndirect(render_pass_encoder: *sysgpu.RenderPassEncoder, indirect_buffer: *sysgpu.Buffer, indirect_offset: u64) void { _ = render_pass_encoder; _ = indirect_buffer; _ = indirect_offset; unreachable; }
    pub inline fn renderPassEncoderDrawIndirect(render_pass_encoder: *sysgpu.RenderPassEncoder, indirect_buffer: *sysgpu.Buffer, indirect_offset: u64) void { _ = render_pass_encoder; _ = indirect_buffer; _ = indirect_offset; unreachable; }
    pub inline fn renderPassEncoderEnd(render_pass_encoder: *sysgpu.RenderPassEncoder) void { _ = render_pass_encoder; unreachable; }
    pub inline fn renderPassEncoderEndOcclusionQuery(render_pass_encoder: *sysgpu.RenderPassEncoder) void { _ = render_pass_encoder; unreachable; }
    pub inline fn renderPassEncoderExecuteBundles(render_pass_encoder: *sysgpu.RenderPassEncoder, bundles_count: usize, bundles: [*]const *const sysgpu.RenderBundle) void { _ = render_pass_encoder; _ = bundles_count; _ = bundles; unreachable; }
    pub inline fn renderPassEncoderInsertDebugMarker(render_pass_encoder: *sysgpu.RenderPassEncoder, marker_label: [*:0]const u8) void { _ = render_pass_encoder; _ = marker_label; unreachable; }
    pub inline fn renderPassEncoderPopDebugGroup(render_pass_encoder: *sysgpu.RenderPassEncoder) void { _ = render_pass_encoder; unreachable; }
    pub inline fn renderPassEncoderPushDebugGroup(render_pass_encoder: *sysgpu.RenderPassEncoder, group_label: [*:0]const u8) void { _ = render_pass_encoder; _ = group_label; unreachable; }
    pub inline fn renderPassEncoderSetBindGroup(render_pass_encoder: *sysgpu.RenderPassEncoder, group_index: u32, group: *sysgpu.BindGroup, dynamic_offset_count: usize, dynamic_offsets: ?[*]const u32) void { _ = render_pass_encoder; _ = group_index; _ = group; _ = dynamic_offset_count; _ = dynamic_offsets; unreachable; }
    pub inline fn renderPassEncoderSetBlendConstant(render_pass_encoder: *sysgpu.RenderPassEncoder, color: *const sysgpu.Color) void { _ = render_pass_encoder; _ = color; unreachable; }
    pub inline fn renderPassEncoderSetIndexBuffer(render_pass_encoder: *sysgpu.RenderPassEncoder, buffer: *sysgpu.Buffer, format: sysgpu.IndexFormat, offset: u64, size: u64) void { _ = render_pass_encoder; _ = buffer; _ = format; _ = offset; _ = size; unreachable; }
    pub inline fn renderPassEncoderSetLabel(render_pass_encoder: *sysgpu.RenderPassEncoder, label: [*:0]const u8) void { _ = render_pass_encoder; _ = label; unreachable; }
    pub inline fn renderPassEncoderSetPipeline(render_pass_encoder: *sysgpu.RenderPassEncoder, pipeline: *sysgpu.RenderPipeline) void { _ = render_pass_encoder; _ = pipeline; unreachable; }
    pub inline fn renderPassEncoderSetScissorRect(render_pass_encoder: *sysgpu.RenderPassEncoder, x: u32, y: u32, width: u32, height: u32) void { _ = render_pass_encoder; _ = x; _ = y; _ = width; _ = height; unreachable; }
    pub inline fn renderPassEncoderSetStencilReference(render_pass_encoder: *sysgpu.RenderPassEncoder, reference: u32) void { _ = render_pass_encoder; _ = reference; unreachable; }
    pub inline fn renderPassEncoderSetVertexBuffer(render_pass_encoder: *sysgpu.RenderPassEncoder, slot: u32, buffer: *sysgpu.Buffer, offset: u64, size: u64) void { _ = render_pass_encoder; _ = slot; _ = buffer; _ = offset; _ = size; unreachable; }
    pub inline fn renderPassEncoderSetViewport(render_pass_encoder: *sysgpu.RenderPassEncoder, x: f32, y: f32, width: f32, height: f32, min_depth: f32, max_depth: f32) void { _ = render_pass_encoder; _ = x; _ = y; _ = width; _ = height; _ = min_depth; _ = max_depth; unreachable; }
    pub inline fn renderPassEncoderWriteTimestamp(render_pass_encoder: *sysgpu.RenderPassEncoder, query_set: *sysgpu.QuerySet, query_index: u32) void { _ = render_pass_encoder; _ = query_set; _ = query_index; unreachable; }
    pub inline fn renderPassEncoderReference(render_pass_encoder: *sysgpu.RenderPassEncoder) void { _ = render_pass_encoder; unreachable; }
    pub inline fn renderPassEncoderRelease(render_pass_encoder: *sysgpu.RenderPassEncoder) void { _ = render_pass_encoder; unreachable; }
    pub inline fn renderPipelineGetBindGroupLayout(render_pipeline: *sysgpu.RenderPipeline, group_index: u32) *sysgpu.BindGroupLayout { _ = render_pipeline; _ = group_index; unreachable; }
    pub inline fn renderPipelineSetLabel(render_pipeline: *sysgpu.RenderPipeline, label: [*:0]const u8) void { _ = render_pipeline; _ = label; unreachable; }
    pub inline fn renderPipelineReference(render_pipeline: *sysgpu.RenderPipeline) void { _ = render_pipeline; unreachable; }
    pub inline fn renderPipelineRelease(render_pipeline: *sysgpu.RenderPipeline) void { _ = render_pipeline; unreachable; }
    pub inline fn samplerSetLabel(sampler: *sysgpu.Sampler, label: [*:0]const u8) void { _ = sampler; _ = label; unreachable; }
    pub inline fn samplerReference(sampler: *sysgpu.Sampler) void { _ = sampler; unreachable; }
    pub inline fn samplerRelease(sampler: *sysgpu.Sampler) void { _ = sampler; unreachable; }
    pub inline fn shaderModuleGetCompilationInfo(shader_module: *sysgpu.ShaderModule, callback: sysgpu.CompilationInfoCallback, userdata: ?*anyopaque) void { _ = shader_module; _ = callback; _ = userdata; unreachable; }
    pub inline fn shaderModuleSetLabel(shader_module: *sysgpu.ShaderModule, label: [*:0]const u8) void { _ = shader_module; _ = label; unreachable; }
    pub inline fn shaderModuleReference(shader_module: *sysgpu.ShaderModule) void { _ = shader_module; unreachable; }
    pub inline fn shaderModuleRelease(shader_module: *sysgpu.ShaderModule) void { _ = shader_module; unreachable; }
    pub inline fn sharedFenceExportInfo(shared_fence: *sysgpu.SharedFence, info: *sysgpu.SharedFence.ExportInfo) void { _ = shared_fence; _ = info; unreachable; }
    pub inline fn sharedFenceReference(shared_fence: *sysgpu.SharedFence) void { _ = shared_fence; unreachable; }
    pub inline fn sharedFenceRelease(shared_fence: *sysgpu.SharedFence) void { _ = shared_fence; unreachable; }
    pub inline fn sharedTextureMemoryBeginAccess(shared_texture_memory: *sysgpu.SharedTextureMemory, texture: *sysgpu.Texture, descriptor: *const sysgpu.SharedTextureMemory.BeginAccessDescriptor) void { _ = shared_texture_memory; _ = texture; _ = descriptor; unreachable; }
    pub inline fn sharedTextureMemoryCreateTexture(shared_texture_memory: *sysgpu.SharedTextureMemory, descriptor: *const sysgpu.Texture.Descriptor) *sysgpu.Texture { _ = shared_texture_memory; _ = descriptor; unreachable; }
    pub inline fn sharedTextureMemoryEndAccess(shared_texture_memory: *sysgpu.SharedTextureMemory, texture: *sysgpu.Texture, descriptor: *sysgpu.SharedTextureMemory.EndAccessState) void { _ = shared_texture_memory; _ = texture; _ = descriptor; unreachable; }
    pub inline fn sharedTextureMemoryEndAccessStateFreeMembers(value: sysgpu.SharedTextureMemory.EndAccessState) void { _ = value; unreachable; }
    pub inline fn sharedTextureMemoryGetProperties(shared_texture_memory: *sysgpu.SharedTextureMemory, properties: *sysgpu.SharedTextureMemory.Properties) void { _ = shared_texture_memory; _ = properties; unreachable; }
    pub inline fn sharedTextureMemorySetLabel(shared_texture_memory: *sysgpu.SharedTextureMemory, label: [*:0]const u8) void { _ = shared_texture_memory; _ = label; unreachable; }
    pub inline fn sharedTextureMemoryReference(shared_texture_memory: *sysgpu.SharedTextureMemory) void { _ = shared_texture_memory; unreachable; }
    pub inline fn sharedTextureMemoryRelease(shared_texture_memory: *sysgpu.SharedTextureMemory) void { _ = shared_texture_memory; unreachable; }
    pub inline fn surfaceReference(surface: *sysgpu.Surface) void { _ = surface; unreachable; }
    pub inline fn surfaceRelease(surface: *sysgpu.Surface) void { _ = surface; unreachable; }
    pub inline fn swapChainGetCurrentTexture(swap_chain: *sysgpu.SwapChain) ?*sysgpu.Texture { _ = swap_chain; unreachable; }
    pub inline fn swapChainGetCurrentTextureView(swap_chain: *sysgpu.SwapChain) ?*sysgpu.TextureView { _ = swap_chain; unreachable; }
    pub inline fn swapChainPresent(swap_chain: *sysgpu.SwapChain) void { _ = swap_chain; unreachable; }
    pub inline fn swapChainReference(swap_chain: *sysgpu.SwapChain) void { _ = swap_chain; unreachable; }
    pub inline fn swapChainRelease(swap_chain: *sysgpu.SwapChain) void { _ = swap_chain; unreachable; }
    pub inline fn textureCreateView(texture: *sysgpu.Texture, descriptor: ?*const sysgpu.TextureView.Descriptor) *sysgpu.TextureView { _ = texture; _ = descriptor; unreachable; }
    pub inline fn textureDestroy(texture: *sysgpu.Texture) void { _ = texture; unreachable; }
    pub inline fn textureGetDepthOrArrayLayers(texture: *sysgpu.Texture) u32 { _ = texture; unreachable; }
    pub inline fn textureGetDimension(texture: *sysgpu.Texture) sysgpu.Texture.Dimension { _ = texture; unreachable; }
    pub inline fn textureGetFormat(texture: *sysgpu.Texture) sysgpu.Texture.Format { _ = texture; unreachable; }
    pub inline fn textureGetHeight(texture: *sysgpu.Texture) u32 { _ = texture; unreachable; }
    pub inline fn textureGetMipLevelCount(texture: *sysgpu.Texture) u32 { _ = texture; unreachable; }
    pub inline fn textureGetSampleCount(texture: *sysgpu.Texture) u32 { _ = texture; unreachable; }
    pub inline fn textureGetUsage(texture: *sysgpu.Texture) sysgpu.Texture.UsageFlags { _ = texture; unreachable; }
    pub inline fn textureGetWidth(texture: *sysgpu.Texture) u32 { _ = texture; unreachable; }
    pub inline fn textureSetLabel(texture: *sysgpu.Texture, label: [*:0]const u8) void { _ = texture; _ = label; unreachable; }
    pub inline fn textureReference(texture: *sysgpu.Texture) void { _ = texture; unreachable; }
    pub inline fn textureRelease(texture: *sysgpu.Texture) void { _ = texture; unreachable; }
    pub inline fn textureViewSetLabel(texture_view: *sysgpu.TextureView, label: [*:0]const u8) void { _ = texture_view; _ = label; unreachable; }
    pub inline fn textureViewReference(texture_view: *sysgpu.TextureView) void { _ = texture_view; unreachable; }
    pub inline fn textureViewRelease(texture_view: *sysgpu.TextureView) void { _ = texture_view; unreachable; }
});

test "stub" {
    _ = StubInterface;
}
0
repos/mach-sysgpu/src
repos/mach-sysgpu/src/sysgpu/render_bundle_encoder.zig
const Texture = @import("texture.zig").Texture;
const Buffer = @import("buffer.zig").Buffer;
const BindGroup = @import("bind_group.zig").BindGroup;
const RenderPipeline = @import("render_pipeline.zig").RenderPipeline;
const RenderBundle = @import("render_bundle.zig").RenderBundle;
const Bool32 = @import("main.zig").Bool32;
const ChainedStruct = @import("main.zig").ChainedStruct;
const IndexFormat = @import("main.zig").IndexFormat;
const Impl = @import("interface.zig").Impl;

pub const RenderBundleEncoder = opaque {
    pub const Descriptor = extern struct {
        next_in_chain: ?*const ChainedStruct = null,
        label: ?[*:0]const u8 = null,
        color_formats_count: usize = 0,
        color_formats: ?[*]const Texture.Format = null,
        depth_stencil_format: Texture.Format = .undefined,
        sample_count: u32 = 1,
        depth_read_only: Bool32 = .false,
        stencil_read_only: Bool32 = .false,

        /// Provides a slightly friendlier Zig API to initialize this structure.
        pub inline fn init(v: struct {
            next_in_chain: ?*const ChainedStruct = null,
            label: ?[*:0]const u8 = null,
            color_formats: ?[]const Texture.Format = null,
            depth_stencil_format: Texture.Format = .undefined,
            sample_count: u32 = 1,
            depth_read_only: bool = false,
            stencil_read_only: bool = false,
        }) Descriptor {
            return .{
                .next_in_chain = v.next_in_chain,
                .label = v.label,
                .color_formats_count = if (v.color_formats) |e| e.len else 0,
                .color_formats = if (v.color_formats) |e| e.ptr else null,
                .depth_stencil_format = v.depth_stencil_format,
                .sample_count = v.sample_count,
                .depth_read_only = Bool32.from(v.depth_read_only),
                .stencil_read_only = Bool32.from(v.stencil_read_only),
            };
        }
    };

    /// Default `instance_count`: 1
    /// Default `first_vertex`: 0
    /// Default `first_instance`: 0
    pub inline fn draw(render_bundle_encoder: *RenderBundleEncoder, vertex_count: u32, instance_count: u32, first_vertex: u32, first_instance: u32) void {
        Impl.renderBundleEncoderDraw(render_bundle_encoder, vertex_count, instance_count, first_vertex, first_instance);
    }

    /// Default `instance_count`: 1
    /// Default `first_index`: 0
    /// Default `base_vertex`: 0
    /// Default `first_instance`: 0
    pub inline fn drawIndexed(render_bundle_encoder: *RenderBundleEncoder, index_count: u32, instance_count: u32, first_index: u32, base_vertex: i32, first_instance: u32) void {
        Impl.renderBundleEncoderDrawIndexed(render_bundle_encoder, index_count, instance_count, first_index, base_vertex, first_instance);
    }

    pub inline fn drawIndexedIndirect(render_bundle_encoder: *RenderBundleEncoder, indirect_buffer: *Buffer, indirect_offset: u64) void {
        Impl.renderBundleEncoderDrawIndexedIndirect(render_bundle_encoder, indirect_buffer, indirect_offset);
    }

    pub inline fn drawIndirect(render_bundle_encoder: *RenderBundleEncoder, indirect_buffer: *Buffer, indirect_offset: u64) void {
        Impl.renderBundleEncoderDrawIndirect(render_bundle_encoder, indirect_buffer, indirect_offset);
    }

    pub inline fn finish(render_bundle_encoder: *RenderBundleEncoder, descriptor: ?*const RenderBundle.Descriptor) *RenderBundle {
        return Impl.renderBundleEncoderFinish(render_bundle_encoder, descriptor);
    }

    pub inline fn insertDebugMarker(render_bundle_encoder: *RenderBundleEncoder, marker_label: [*:0]const u8) void {
        Impl.renderBundleEncoderInsertDebugMarker(render_bundle_encoder, marker_label);
    }

    pub inline fn popDebugGroup(render_bundle_encoder: *RenderBundleEncoder) void {
        Impl.renderBundleEncoderPopDebugGroup(render_bundle_encoder);
    }

    pub inline fn pushDebugGroup(render_bundle_encoder: *RenderBundleEncoder, group_label: [*:0]const u8) void {
        Impl.renderBundleEncoderPushDebugGroup(render_bundle_encoder, group_label);
    }

    /// Default `dynamic_offsets`: `null`
    pub inline fn setBindGroup(render_bundle_encoder: *RenderBundleEncoder, group_index: u32, group: *BindGroup, dynamic_offsets: ?[]const u32) void {
        Impl.renderBundleEncoderSetBindGroup(
            render_bundle_encoder,
            group_index,
            group,
            if (dynamic_offsets) |v| v.len else 0,
            if (dynamic_offsets) |v| v.ptr else null,
        );
    }

    /// Default `offset`: 0
    /// Default `size`: `gpu.whole_size`
    pub inline fn setIndexBuffer(render_bundle_encoder: *RenderBundleEncoder, buffer: *Buffer, format: IndexFormat, offset: u64, size: u64) void {
        Impl.renderBundleEncoderSetIndexBuffer(render_bundle_encoder, buffer, format, offset, size);
    }

    pub inline fn setLabel(render_bundle_encoder: *RenderBundleEncoder, label: [*:0]const u8) void {
        Impl.renderBundleEncoderSetLabel(render_bundle_encoder, label);
    }

    pub inline fn setPipeline(render_bundle_encoder: *RenderBundleEncoder, pipeline: *RenderPipeline) void {
        Impl.renderBundleEncoderSetPipeline(render_bundle_encoder, pipeline);
    }

    /// Default `offset`: 0
    /// Default `size`: `gpu.whole_size`
    pub inline fn setVertexBuffer(render_bundle_encoder: *RenderBundleEncoder, slot: u32, buffer: *Buffer, offset: u64, size: u64) void {
        Impl.renderBundleEncoderSetVertexBuffer(render_bundle_encoder, slot, buffer, offset, size);
    }

    pub inline fn reference(render_bundle_encoder: *RenderBundleEncoder) void {
        Impl.renderBundleEncoderReference(render_bundle_encoder);
    }

    pub inline fn release(render_bundle_encoder: *RenderBundleEncoder) void {
        Impl.renderBundleEncoderRelease(render_bundle_encoder);
    }
};
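A minimal usage sketch of the encoder above, assuming a `device: *sysgpu.Device` that exposes `createRenderBundleEncoder`, plus already-created `pipeline` and `vertex_buffer` objects (all three names, and the `.bgra8_unorm` format choice, are placeholders rather than part of this file):

// Record a reusable bundle once; it can later be replayed via
// RenderPassEncoder.executeBundles on a compatible render pass.
const encoder = device.createRenderBundleEncoder(&sysgpu.RenderBundleEncoder.Descriptor.init(.{
    .color_formats = &.{.bgra8_unorm},
}));
encoder.setPipeline(pipeline);
encoder.setVertexBuffer(0, vertex_buffer, 0, sysgpu.whole_size);
encoder.draw(3, 1, 0, 0);
const bundle = encoder.finish(null);
defer bundle.release();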
0
repos/mach-sysgpu/src
repos/mach-sysgpu/src/sysgpu/main.zig
const std = @import("std");
const testing = std.testing;

pub const Adapter = @import("adapter.zig").Adapter;
pub const BindGroup = @import("bind_group.zig").BindGroup;
pub const BindGroupLayout = @import("bind_group_layout.zig").BindGroupLayout;
pub const Buffer = @import("buffer.zig").Buffer;
pub const CommandBuffer = @import("command_buffer.zig").CommandBuffer;
pub const CommandEncoder = @import("command_encoder.zig").CommandEncoder;
pub const ComputePassEncoder = @import("compute_pass_encoder.zig").ComputePassEncoder;
pub const ComputePipeline = @import("compute_pipeline.zig").ComputePipeline;
pub const Device = @import("device.zig").Device;
pub const ExternalTexture = @import("external_texture.zig").ExternalTexture;
pub const Instance = @import("instance.zig").Instance;
pub const PipelineLayout = @import("pipeline_layout.zig").PipelineLayout;
pub const QuerySet = @import("query_set.zig").QuerySet;
pub const Queue = @import("queue.zig").Queue;
pub const RenderBundle = @import("render_bundle.zig").RenderBundle;
pub const RenderBundleEncoder = @import("render_bundle_encoder.zig").RenderBundleEncoder;
pub const RenderPassEncoder = @import("render_pass_encoder.zig").RenderPassEncoder;
pub const RenderPipeline = @import("render_pipeline.zig").RenderPipeline;
pub const Sampler = @import("sampler.zig").Sampler;
pub const ShaderModule = @import("shader_module.zig").ShaderModule;
pub const SharedTextureMemory = @import("shared_texture_memory.zig").SharedTextureMemory;
pub const SharedFence = @import("shared_fence.zig").SharedFence;
pub const Surface = @import("surface.zig").Surface;
pub const SwapChain = @import("swap_chain.zig").SwapChain;
pub const Texture = @import("texture.zig").Texture;
pub const TextureView = @import("texture_view.zig").TextureView;
pub const dawn = @import("dawn.zig");

const instance = @import("instance.zig");
const device = @import("device.zig");
const interface = @import("interface.zig");

pub const Impl = interface.Impl;
pub const StubInterface = interface.StubInterface;
pub const Export = interface.Export;
pub const Interface = interface.Interface;

pub inline fn createInstance(descriptor: ?*const instance.Instance.Descriptor) ?*instance.Instance {
    return Impl.createInstance(descriptor);
}

pub inline fn getProcAddress(_device: *device.Device, proc_name: [*:0]const u8) ?Proc {
    return Impl.getProcAddress(_device, proc_name);
}

pub const array_layer_count_undefined = 0xffffffff;
pub const copy_stride_undefined = 0xffffffff;
pub const limit_u32_undefined = 0xffffffff;
pub const limit_u64_undefined = 0xffffffffffffffff;
pub const mip_level_count_undefined = 0xffffffff;
pub const whole_map_size = std.math.maxInt(usize);
pub const whole_size = 0xffffffffffffffff;

/// Generic function pointer type, used for returning API function pointers. Must be
/// cast to the right `fn (...) callconv(.C) T` type before use.
pub const Proc = *const fn () callconv(.C) void;

/// 32-bit unsigned boolean type, as used in webgpu.h
pub const Bool32 = enum(u32) {
    false,
    true,

    pub inline fn from(v: bool) @This() {
        return if (v) .true else .false;
    }
};

pub const ComputePassTimestampWrite = extern struct {
    query_set: *QuerySet,
    query_index: u32,
    location: ComputePassTimestampLocation,
};

pub const RenderPassDepthStencilAttachment = extern struct {
    view: *TextureView,
    depth_load_op: LoadOp = .undefined,
    depth_store_op: StoreOp = .undefined,
    depth_clear_value: f32 = 0,
    depth_read_only: Bool32 = .false,
    stencil_load_op: LoadOp = .undefined,
    stencil_store_op: StoreOp = .undefined,
    stencil_clear_value: u32 = 0,
    stencil_read_only: Bool32 = .false,
};

pub const RenderPassTimestampWrite = extern struct {
    query_set: *QuerySet,
    query_index: u32,
    location: RenderPassTimestampLocation,
};

pub const RequestAdapterOptions = extern struct {
    pub const NextInChain = extern union {
        generic: ?*const ChainedStruct,
        dawn_toggles_descriptor: *const dawn.TogglesDescriptor,
    };

    next_in_chain: NextInChain = .{ .generic = null },
    compatible_surface: ?*Surface = null,
    power_preference: PowerPreference = .undefined,
    backend_type: BackendType = .undefined,
    force_fallback_adapter: Bool32 = .false,
    compatibility_mode: Bool32 = .false,
};

pub const ComputePassDescriptor = extern struct {
    next_in_chain: ?*const ChainedStruct = null,
    label: ?[*:0]const u8 = null,
    timestamp_write_count: usize = 0,
    timestamp_writes: ?[*]const ComputePassTimestampWrite = null,

    /// Provides a slightly friendlier Zig API to initialize this structure.
    pub inline fn init(v: struct {
        next_in_chain: ?*const ChainedStruct = null,
        label: ?[*:0]const u8 = null,
        timestamp_writes: ?[]const ComputePassTimestampWrite = null,
    }) ComputePassDescriptor {
        return .{
            .next_in_chain = v.next_in_chain,
            .label = v.label,
            .timestamp_write_count = if (v.timestamp_writes) |e| e.len else 0,
            .timestamp_writes = if (v.timestamp_writes) |e| e.ptr else null,
        };
    }
};

pub const RenderPassDescriptor = extern struct {
    pub const NextInChain = extern union {
        generic: ?*const ChainedStruct,
        max_draw_count: *const RenderPassDescriptorMaxDrawCount,
    };

    next_in_chain: NextInChain = .{ .generic = null },
    label: ?[*:0]const u8 = null,
    color_attachment_count: usize = 0,
    color_attachments: ?[*]const RenderPassColorAttachment = null,
    depth_stencil_attachment: ?*const RenderPassDepthStencilAttachment = null,
    occlusion_query_set: ?*QuerySet = null,
    timestamp_write_count: usize = 0,
    timestamp_writes: ?[*]const RenderPassTimestampWrite = null,

    /// Provides a slightly friendlier Zig API to initialize this structure.
    pub inline fn init(v: struct {
        next_in_chain: NextInChain = .{ .generic = null },
        label: ?[*:0]const u8 = null,
        color_attachments: ?[]const RenderPassColorAttachment = null,
        depth_stencil_attachment: ?*const RenderPassDepthStencilAttachment = null,
        occlusion_query_set: ?*QuerySet = null,
        timestamp_writes: ?[]const RenderPassTimestampWrite = null,
    }) RenderPassDescriptor {
        return .{
            .next_in_chain = v.next_in_chain,
            .label = v.label,
            .color_attachment_count = if (v.color_attachments) |e| e.len else 0,
            .color_attachments = if (v.color_attachments) |e| e.ptr else null,
            .depth_stencil_attachment = v.depth_stencil_attachment,
            .occlusion_query_set = v.occlusion_query_set,
            .timestamp_write_count = if (v.timestamp_writes) |e| e.len else 0,
            .timestamp_writes = if (v.timestamp_writes) |e| e.ptr else null,
        };
    }
};

pub const AlphaMode = enum(u32) { premultiplied = 0x00000000, unpremultiplied = 0x00000001, opaq = 0x00000002 };

pub const BackendType = enum(u32) {
    undefined,
    null,
    webgpu,
    d3d11,
    d3d12,
    metal,
    vulkan,
    opengl,
    opengles,

    pub fn name(t: BackendType) []const u8 {
        return switch (t) {
            .undefined => "Undefined",
            .null => "Null",
            .webgpu => "WebGPU",
            .d3d11 => "D3D11",
            .d3d12 => "D3D12",
            .metal => "Metal",
            .vulkan => "Vulkan",
            .opengl => "OpenGL",
            .opengles => "OpenGLES",
        };
    }
};

pub const BlendFactor = enum(u32) {
    zero = 0x00000000, one = 0x00000001, src = 0x00000002, one_minus_src = 0x00000003,
    src_alpha = 0x00000004, one_minus_src_alpha = 0x00000005, dst = 0x00000006, one_minus_dst = 0x00000007,
    dst_alpha = 0x00000008, one_minus_dst_alpha = 0x00000009, src_alpha_saturated = 0x0000000A, constant = 0x0000000B,
    one_minus_constant = 0x0000000C, src1 = 0x0000000D, one_minus_src1 = 0x0000000E, src1_alpha = 0x0000000F,
    one_minus_src1_alpha = 0x00000010,
};

pub const BlendOperation = enum(u32) { add = 0x00000000, subtract = 0x00000001, reverse_subtract = 0x00000002, min = 0x00000003, max = 0x00000004 };

pub const CompareFunction = enum(u32) {
    undefined = 0x00000000, never = 0x00000001, less = 0x00000002, less_equal = 0x00000003,
    greater = 0x00000004, greater_equal = 0x00000005, equal = 0x00000006, not_equal = 0x00000007,
    always = 0x00000008,
};

pub const CompilationInfoRequestStatus = enum(u32) { success = 0x00000000, err = 0x00000001, device_lost = 0x00000002, unknown = 0x00000003 };

pub const CompilationMessageType = enum(u32) { err = 0x00000000, warning = 0x00000001, info = 0x00000002 };

pub const ComputePassTimestampLocation = enum(u32) { beginning = 0x00000000, end = 0x00000001 };

pub const CreatePipelineAsyncStatus = enum(u32) {
    success = 0x00000000, validation_error = 0x00000001, internal_error = 0x00000002,
    device_lost = 0x00000003, device_destroyed = 0x00000004, unknown = 0x00000005,
};

pub const CullMode = enum(u32) { none = 0x00000000, front = 0x00000001, back = 0x00000002 };

pub const ErrorFilter = enum(u32) { validation = 0x00000000, out_of_memory = 0x00000001, internal = 0x00000002 };

pub const ErrorType = enum(u32) {
    no_error = 0x00000000, validation = 0x00000001, out_of_memory = 0x00000002,
    internal = 0x00000003, unknown = 0x00000004, device_lost = 0x00000005,
};

pub const FeatureName = enum(u32) {
    undefined = 0x00000000,
    depth_clip_control = 0x00000001,
    depth32_float_stencil8 = 0x00000002,
    timestamp_query = 0x00000003,
    pipeline_statistics_query = 0x00000004,
    texture_compression_bc = 0x00000005,
    texture_compression_etc2 = 0x00000006,
    texture_compression_astc = 0x00000007,
    indirect_first_instance = 0x00000008,
    shader_f16 = 0x00000009,
    rg11_b10_ufloat_renderable = 0x0000000A,
    bgra8_unorm_storage = 0x0000000B,
    float32_filterable = 0x0000000C,
    dawn_internal_usages = 0x000003EA,
    dawn_multi_planar_formats = 0x000003EB,
    dawn_native = 0x000003EC,
    chromium_experimental_dp4a = 0x000003ED,
    timestamp_query_inside_passes = 0x000003EE,
    implicit_device_synchronization = 0x000003EF,
    surface_capabilities = 0x000003F0,
    transient_attachments = 0x000003F1,
    msaa_render_to_single_sampled = 0x000003F2,
    dual_source_blending = 0x000003F3,
    d3d11_multithread_protected = 0x000003F4,
    angle_texture_sharing = 0x000003F5,
    shared_texture_memory_vk_image_descriptor = 0x0000044C,
    shared_texture_memory_vk_dedicated_allocation_descriptor = 0x0000044D,
    shared_texture_memory_a_hardware_buffer_descriptor = 0x0000044E,
    shared_texture_memory_dma_buf_descriptor = 0x0000044F,
    shared_texture_memory_opaque_fd_descriptor = 0x00000450,
    shared_texture_memory_zircon_handle_descriptor = 0x00000451,
    shared_texture_memory_dxgi_shared_handle_descriptor = 0x00000452,
    shared_texture_memory_d3d11_texture_2d_descriptor = 0x00000453,
    shared_texture_memory_io_surface_descriptor = 0x00000454,
    shared_texture_memory_egl_image_descriptor = 0x00000455,
    shared_texture_memory_initialized_begin_state = 0x000004B0,
    shared_texture_memory_initialized_end_state = 0x000004B1,
    shared_texture_memory_vk_image_layout_begin_state = 0x000004B2,
    shared_texture_memory_vk_image_layout_end_state = 0x000004B3,
    shared_fence_vk_semaphore_opaque_fd_descriptor = 0x000004B4,
    shared_fence_vk_semaphore_opaque_fd_export_info = 0x000004B5,
    shared_fence_vk_semaphore_sync_fd_descriptor = 0x000004B6,
    shared_fence_vk_semaphore_sync_fd_export_info = 0x000004B7,
    shared_fence_vk_semaphore_zircon_handle_descriptor = 0x000004B8,
    shared_fence_vk_semaphore_zircon_handle_export_info = 0x000004B9,
    shared_fence_dxgi_shared_handle_descriptor = 0x000004BA,
    shared_fence_dxgi_shared_handle_export_info = 0x000004BB,
    shared_fence_mtl_shared_event_descriptor = 0x000004BC,
    shared_fence_mtl_shared_event_export_info = 0x000004BD,
};

pub const FilterMode = enum(u32) { nearest = 0x00000000, linear = 0x00000001 };

pub const MipmapFilterMode = enum(u32) { nearest = 0x00000000, linear = 0x00000001 };

pub const FrontFace = enum(u32) { ccw = 0x00000000, cw = 0x00000001 };

pub const IndexFormat = enum(u32) { undefined = 0x00000000, uint16 = 0x00000001, uint32 = 0x00000002 };

pub const LoadOp = enum(u32) { undefined = 0x00000000, clear = 0x00000001, load = 0x00000002 };

pub const LoggingType = enum(u32) { verbose = 0x00000000, info = 0x00000001, warning = 0x00000002, err = 0x00000003 };

pub const PipelineStatisticName = enum(u32) {
    vertex_shader_invocations = 0x00000000,
    clipper_invocations = 0x00000001,
    clipper_primitives_out = 0x00000002,
    fragment_shader_invocations = 0x00000003,
    compute_shader_invocations = 0x00000004,
};

pub const PowerPreference = enum(u32) { undefined = 0x00000000, low_power = 0x00000001, high_performance = 0x00000002 };

pub const PresentMode = enum(u32) { immediate = 0x00000000, mailbox = 0x00000001, fifo = 0x00000002 };

pub const PrimitiveTopology = enum(u32) {
    point_list = 0x00000000,
    line_list = 0x00000001,
    line_strip = 0x00000002,
    triangle_list = 0x00000003,
    triangle_strip = 0x00000004,
};

pub const QueryType = enum(u32) { occlusion = 0x00000000, pipeline_statistics = 0x00000001, timestamp = 0x00000002 };

pub const RenderPassTimestampLocation = enum(u32) { beginning = 0x00000000, end = 0x00000001 };

pub const RequestAdapterStatus = enum(u32) { success = 0x00000000, unavailable = 0x00000001, err = 0x00000002, unknown = 0x00000003 };

pub const RequestDeviceStatus = enum(u32) { success = 0x00000000, err = 0x00000001, unknown = 0x00000002 };

pub const SType = enum(u32) {
    invalid = 0x00000000,
    surface_descriptor_from_metal_layer = 0x00000001,
    surface_descriptor_from_windows_hwnd = 0x00000002,
    surface_descriptor_from_xlib_window = 0x00000003,
    surface_descriptor_from_canvas_html_selector = 0x00000004,
    shader_module_spirv_descriptor = 0x00000005,
    shader_module_wgsl_descriptor = 0x00000006,
    primitive_depth_clip_control = 0x00000007,
    surface_descriptor_from_wayland_surface = 0x00000008,
    surface_descriptor_from_android_native_window = 0x00000009,
    surface_descriptor_from_windows_core_window = 0x0000000B,
    external_texture_binding_entry = 0x0000000C,
    external_texture_binding_layout = 0x0000000D,
    surface_descriptor_from_windows_swap_chain_panel = 0x0000000E,
    render_pass_descriptor_max_draw_count = 0x0000000F,
    dawn_texture_internal_usage_descriptor = 0x000003E8,
    dawn_encoder_internal_usage_descriptor = 0x000003EB,
    dawn_instance_descriptor = 0x000003EC,
    dawn_cache_device_descriptor = 0x000003ED,
    dawn_adapter_properties_power_preference = 0x000003EE,
    dawn_buffer_descriptor_error_info_from_wire_client = 0x000003EF,
    dawn_toggles_descriptor = 0x000003F0,
    dawn_shader_module_spirv_options_descriptor = 0x000003F1,
    request_adapter_options_luid = 0x000003F2,
    request_adapter_options_get_gl_proc = 0x000003F3,
    dawn_multisample_state_render_to_single_sampled = 0x000003F4,
    dawn_render_pass_color_attachment_render_to_single_sampled = 0x000003F5,
    shared_texture_memory_vk_image_descriptor = 0x0000044C,
    shared_texture_memory_vk_dedicated_allocation_descriptor = 0x0000044D,
    shared_texture_memory_a_hardware_buffer_descriptor = 0x0000044E,
    shared_texture_memory_dma_buf_descriptor = 0x0000044F,
    shared_texture_memory_opaque_fd_descriptor = 0x00000450,
    shared_texture_memory_zircon_handle_descriptor = 0x00000451,
    shared_texture_memory_dxgi_shared_handle_descriptor = 0x00000452,
    shared_texture_memory_d3d11_texture_2d_descriptor = 0x00000453,
    shared_texture_memory_io_surface_descriptor = 0x00000454,
    shared_texture_memory_egl_image_descriptor = 0x00000455,
    shared_texture_memory_initialized_begin_state = 0x000004B0,
    shared_texture_memory_initialized_end_state = 0x000004B1,
    shared_texture_memory_vk_image_layout_begin_state = 0x000004B2,
    shared_texture_memory_vk_image_layout_end_state = 0x000004B3,
    shared_fence_vk_semaphore_opaque_fd_descriptor = 0x000004B4,
    shared_fence_vk_semaphore_opaque_fd_export_info = 0x000004B5,
    shared_fence_vk_semaphore_sync_fd_descriptor = 0x000004B6,
    shared_fence_vk_semaphore_sync_fd_export_info = 0x000004B7,
    shared_fence_vk_semaphore_zircon_handle_descriptor = 0x000004B8,
    shared_fence_vk_semaphore_zircon_handle_export_info = 0x000004B9,
    shared_fence_dxgi_shared_handle_descriptor = 0x000004BA,
    shared_fence_dxgi_shared_handle_export_info = 0x000004BB,
    shared_fence_mtl_shared_event_descriptor = 0x000004BC,
    shared_fence_mtl_shared_event_export_info = 0x000004BD,
    shader_module_hlsl_descriptor = 0x000004BE,
    shader_module_msl_descriptor = 0x000004BF,
};

pub const StencilOperation = enum(u32) {
    keep = 0x00000000, zero = 0x00000001, replace = 0x00000002, invert = 0x00000003,
    increment_clamp = 0x00000004, decrement_clamp = 0x00000005, increment_wrap = 0x00000006, decrement_wrap = 0x00000007,
};

pub const StorageTextureAccess = enum(u32) { undefined = 0x00000000, write_only = 0x00000001 };

pub const StoreOp = enum(u32) { undefined = 0x00000000, store = 0x00000001, discard = 0x00000002 };

pub const VertexFormat = enum(u32) {
    undefined = 0x00000000, uint8x2 = 0x00000001, uint8x4 = 0x00000002, sint8x2 = 0x00000003,
    sint8x4 = 0x00000004, unorm8x2 = 0x00000005, unorm8x4 = 0x00000006, snorm8x2 = 0x00000007,
    snorm8x4 = 0x00000008, uint16x2 = 0x00000009, uint16x4 = 0x0000000a, sint16x2 = 0x0000000b,
    sint16x4 = 0x0000000c, unorm16x2 = 0x0000000d, unorm16x4 = 0x0000000e, snorm16x2 = 0x0000000f,
    snorm16x4 = 0x00000010, float16x2 = 0x00000011, float16x4 = 0x00000012, float32 = 0x00000013,
    float32x2 = 0x00000014, float32x3 = 0x00000015, float32x4 = 0x00000016, uint32 = 0x00000017,
    uint32x2 = 0x00000018, uint32x3 = 0x00000019, uint32x4 = 0x0000001a, sint32 = 0x0000001b,
    sint32x2 = 0x0000001c, sint32x3 = 0x0000001d, sint32x4 = 0x0000001e,
};

pub const VertexStepMode = enum(u32) { vertex = 0x00000000, instance = 0x00000001, vertex_buffer_not_used = 0x00000002 };

pub const ColorWriteMaskFlags = packed struct(u32) {
    red: bool = false,
    green: bool = false,
    blue: bool = false,
    alpha: bool = false,
    _padding: u28 = 0,

    comptime {
        std.debug.assert(
            @sizeOf(@This()) == @sizeOf(u32) and
                @bitSizeOf(@This()) == @bitSizeOf(u32),
        );
    }

    pub const all = ColorWriteMaskFlags{
        .red = true,
        .green = true,
        .blue = true,
        .alpha = true,
    };

    pub fn equal(a: ColorWriteMaskFlags, b: ColorWriteMaskFlags) bool {
        return @as(u4, @truncate(@as(u32, @bitCast(a)))) == @as(u4, @truncate(@as(u32, @bitCast(b))));
    }
};

pub const MapModeFlags = packed struct(u32) {
    read: bool = false,
    write: bool = false,
    _padding: u30 = 0,

    comptime {
        std.debug.assert(
            @sizeOf(@This()) == @sizeOf(u32) and
                @bitSizeOf(@This()) == @bitSizeOf(u32),
        );
    }

    pub const undef = MapModeFlags{};

    pub fn equal(a: MapModeFlags, b: MapModeFlags) bool {
        return @as(u2, @truncate(@as(u32, @bitCast(a)))) == @as(u2, @truncate(@as(u32, @bitCast(b))));
    }
};

pub const ShaderStageFlags = packed struct(u32) {
    vertex: bool = false,
    fragment: bool = false,
    compute: bool = false,
    _padding: u29 = 0,

    comptime {
        std.debug.assert(
            @sizeOf(@This()) == @sizeOf(u32) and
                @bitSizeOf(@This()) == @bitSizeOf(u32),
        );
    }

    pub const none = ShaderStageFlags{};

    pub fn equal(a: ShaderStageFlags, b: ShaderStageFlags) bool {
        return @as(u3, @truncate(@as(u32, @bitCast(a)))) == @as(u3, @truncate(@as(u32, @bitCast(b))));
    }
};

pub const ChainedStruct = extern struct {
    // TODO: dawn: not marked as nullable in dawn.json but in fact is.
    next: ?*const ChainedStruct = null,
    s_type: SType,
};

pub const ChainedStructOut = extern struct {
    // TODO: dawn: not marked as nullable in dawn.json but in fact is.
    next: ?*ChainedStructOut = null,
    s_type: SType,
};

pub const BlendComponent = extern struct {
    operation: BlendOperation = .add,
    src_factor: BlendFactor = .one,
    dst_factor: BlendFactor = .zero,
};

pub const Color = extern struct { r: f64, g: f64, b: f64, a: f64 };

pub const Extent2D = extern struct { width: u32, height: u32 };

pub const Extent3D = extern struct { width: u32, height: u32 = 1, depth_or_array_layers: u32 = 1 };

pub const Limits = extern struct {
    max_texture_dimension_1d: u32 = limit_u32_undefined,
    max_texture_dimension_2d: u32 = limit_u32_undefined,
    max_texture_dimension_3d: u32 = limit_u32_undefined,
    max_texture_array_layers: u32 = limit_u32_undefined,
    max_bind_groups: u32 = limit_u32_undefined,
    max_bind_groups_plus_vertex_buffers: u32 = limit_u32_undefined,
    max_bindings_per_bind_group: u32 = limit_u32_undefined,
    max_dynamic_uniform_buffers_per_pipeline_layout: u32 = limit_u32_undefined,
    max_dynamic_storage_buffers_per_pipeline_layout: u32 = limit_u32_undefined,
    max_sampled_textures_per_shader_stage: u32 = limit_u32_undefined,
    max_samplers_per_shader_stage: u32 = limit_u32_undefined,
    max_storage_buffers_per_shader_stage: u32 = limit_u32_undefined,
    max_storage_textures_per_shader_stage: u32 = limit_u32_undefined,
    max_uniform_buffers_per_shader_stage: u32 = limit_u32_undefined,
    max_uniform_buffer_binding_size: u64 = limit_u64_undefined,
    max_storage_buffer_binding_size: u64 = limit_u64_undefined,
    min_uniform_buffer_offset_alignment: u32 = limit_u32_undefined,
    min_storage_buffer_offset_alignment: u32 = limit_u32_undefined,
    max_vertex_buffers: u32 = limit_u32_undefined,
    max_buffer_size: u64 = limit_u64_undefined,
    max_vertex_attributes: u32 = limit_u32_undefined,
    max_vertex_buffer_array_stride: u32 = limit_u32_undefined,
    max_inter_stage_shader_components: u32 = limit_u32_undefined,
    max_inter_stage_shader_variables: u32 = limit_u32_undefined,
    max_color_attachments: u32 = limit_u32_undefined,
    max_color_attachment_bytes_per_sample: u32 = limit_u32_undefined,
    max_compute_workgroup_storage_size: u32 = limit_u32_undefined,
    max_compute_invocations_per_workgroup: u32 = limit_u32_undefined,
    max_compute_workgroup_size_x: u32 = limit_u32_undefined,
    max_compute_workgroup_size_y: u32 = limit_u32_undefined,
    max_compute_workgroup_size_z: u32 = limit_u32_undefined,
    max_compute_workgroups_per_dimension: u32 = limit_u32_undefined,
};

pub const Origin2D = extern struct { x: u32 = 0, y: u32 = 0 };

pub const Origin3D = extern struct { x: u32 = 0, y: u32 = 0, z: u32 = 0 };

pub const CompilationMessage = extern struct {
    next_in_chain: ?*const ChainedStruct = null,
    message: ?[*:0]const u8 = null,
    type: CompilationMessageType,
    line_num: u64,
    line_pos: u64,
    offset: u64,
    length: u64,
    utf16_line_pos: u64,
    utf16_offset: u64,
    utf16_length: u64,
};

pub const ConstantEntry = extern struct {
    next_in_chain: ?*const ChainedStruct = null,
    key: [*:0]const u8,
    value: f64,
};

pub const CopyTextureForBrowserOptions = extern struct {
    next_in_chain: ?*const ChainedStruct = null,
    flip_y: Bool32 = .false,
    needs_color_space_conversion: Bool32 = .false,
    src_alpha_mode: AlphaMode = .unpremultiplied,
    src_transfer_function_parameters: ?*const [7]f32 = null,
    conversion_matrix: ?*const [9]f32 = null,
    dst_transfer_function_parameters: ?*const [7]f32 = null,
    dst_alpha_mode: AlphaMode = .unpremultiplied,
    internal_usage: Bool32 = .false,
};

pub const MultisampleState = extern struct {
    pub const NextInChain = extern union {
        generic: ?*const ChainedStruct,
        dawn_multisample_state_render_to_single_sampled: *const dawn.MultisampleStateRenderToSingleSampled,
    };

    next_in_chain: NextInChain = .{ .generic = null },
    count: u32 = 1,
    mask: u32 = 0xFFFFFFFF,
    alpha_to_coverage_enabled: Bool32 = .false,
};

pub const PrimitiveDepthClipControl = extern struct {
    chain: ChainedStruct = .{ .next = null, .s_type = .primitive_depth_clip_control },
    unclipped_depth: Bool32 = .false,
};

pub const PrimitiveState = extern struct {
    pub const NextInChain = extern union {
        generic: ?*const ChainedStruct,
        primitive_depth_clip_control: *const PrimitiveDepthClipControl,
    };

    next_in_chain: NextInChain = .{ .generic = null },
    topology: PrimitiveTopology = .triangle_list,
    strip_index_format: IndexFormat = .undefined,
    front_face: FrontFace = .ccw,
    cull_mode: CullMode = .none,
};

pub const RenderPassDescriptorMaxDrawCount = extern struct {
    chain: ChainedStruct = .{ .next = null, .s_type = .render_pass_descriptor_max_draw_count },
    max_draw_count: u64 = 50000000,
};

pub const StencilFaceState = extern struct {
    compare: CompareFunction = .always,
    fail_op: StencilOperation = .keep,
    depth_fail_op: StencilOperation = .keep,
    pass_op: StencilOperation = .keep,
};

pub const StorageTextureBindingLayout = extern struct {
    next_in_chain: ?*const ChainedStruct = null,
    access: StorageTextureAccess = .undefined,
    format: Texture.Format = .undefined,
    view_dimension: TextureView.Dimension = .dimension_undefined,
};

pub const VertexAttribute = extern struct {
    format: VertexFormat,
    offset: u64,
    shader_location: u32,
};

pub const BlendState = extern struct {
    color: BlendComponent = .{},
    alpha: BlendComponent = .{},
};

pub const CompilationInfo = extern struct {
    next_in_chain: ?*const ChainedStruct = null,
    message_count: usize,
    messages: ?[*]const CompilationMessage = null,

    /// Helper to get messages as a slice.
    pub fn getMessages(info: CompilationInfo) ?[]const CompilationMessage {
        if (info.messages) |messages| {
            return messages[0..info.message_count];
        }
        return null;
    }
};

pub const DepthStencilState = extern struct {
    next_in_chain: ?*const ChainedStruct = null,
    format: Texture.Format,
    depth_write_enabled: Bool32 = .false,
    depth_compare: CompareFunction = .always,
    stencil_front: StencilFaceState = .{},
    stencil_back: StencilFaceState = .{},
    stencil_read_mask: u32 = 0xFFFFFFFF,
    stencil_write_mask: u32 = 0xFFFFFFFF,
    depth_bias: i32 = 0,
    depth_bias_slope_scale: f32 = 0.0,
    depth_bias_clamp: f32 = 0.0,
};

pub const ImageCopyBuffer = extern struct {
    next_in_chain: ?*const ChainedStruct = null,
    layout: Texture.DataLayout,
    buffer: *Buffer,
};

pub const ImageCopyExternalTexture = extern struct {
    next_in_chain: ?*const ChainedStruct = null,
    external_texture: *ExternalTexture,
    origin: Origin3D,
    natural_size: Extent2D,
};

pub const ImageCopyTexture = extern struct {
    next_in_chain: ?*const ChainedStruct = null,
    texture: *Texture,
    mip_level: u32 = 0,
    origin: Origin3D = .{},
    aspect: Texture.Aspect = .all,
};

pub const ProgrammableStageDescriptor = extern struct {
    next_in_chain: ?*const ChainedStruct = null,
    module: *ShaderModule,
    entry_point: [*:0]const u8,
    constant_count: usize = 0,
    constants: ?[*]const ConstantEntry = null,

    /// Provides a slightly friendlier Zig API to initialize this structure.
    pub inline fn init(v: struct {
        next_in_chain: ?*const ChainedStruct = null,
        module: *ShaderModule,
        entry_point: [*:0]const u8,
        constants: ?[]const ConstantEntry = null,
    }) ProgrammableStageDescriptor {
        return .{
            .next_in_chain = v.next_in_chain,
            .module = v.module,
            .entry_point = v.entry_point,
            .constant_count = if (v.constants) |e| e.len else 0,
            .constants = if (v.constants) |e| e.ptr else null,
        };
    }
};

pub const RenderPassColorAttachment = extern struct {
    pub const NextInChain = extern union {
        generic: ?*const ChainedStruct,
        dawn_render_pass_color_attachment_render_to_single_sampled: *const dawn.RenderPassColorAttachmentRenderToSingleSampled,
    };

    next_in_chain: NextInChain = .{ .generic = null },
    view: ?*TextureView = null,
    resolve_target: ?*TextureView = null,
    load_op: LoadOp,
    store_op: StoreOp,
    clear_value: Color,
};

pub const RequiredLimits = extern struct {
    next_in_chain: ?*const ChainedStruct = null,
    limits: Limits,
};

/// Used to query limits from a Device or Adapter. Can be used as follows:
///
/// ```
/// var supported: gpu.SupportedLimits = .{};
/// if (!adapter.getLimits(&supported)) @panic("unsupported options");
/// ```
///
/// Note that `getLimits` can only fail if `next_in_chain` options are invalid.
pub const SupportedLimits = extern struct {
    next_in_chain: ?*ChainedStructOut = null,
    limits: Limits = undefined,
};

pub const VertexBufferLayout = extern struct {
    array_stride: u64,
    step_mode: VertexStepMode = .vertex,
    attribute_count: usize,
    attributes: ?[*]const VertexAttribute = null,

    /// Provides a slightly friendlier Zig API to initialize this structure.
    pub inline fn init(v: struct {
        array_stride: u64,
        step_mode: VertexStepMode = .vertex,
        attributes: ?[]const VertexAttribute = null,
    }) VertexBufferLayout {
        return .{
            .array_stride = v.array_stride,
            .step_mode = v.step_mode,
            .attribute_count = if (v.attributes) |e| e.len else 0,
            .attributes = if (v.attributes) |e| e.ptr else null,
        };
    }
};

pub const ColorTargetState = extern struct {
    next_in_chain: ?*const ChainedStruct = null,
    format: Texture.Format,
    blend: ?*const BlendState = null,
    write_mask: ColorWriteMaskFlags = ColorWriteMaskFlags.all,
};

pub const VertexState = extern struct {
    next_in_chain: ?*const ChainedStruct = null,
    module: *ShaderModule,
    entry_point: [*:0]const u8,
    constant_count: usize = 0,
    constants: ?[*]const ConstantEntry = null,
    buffer_count: usize = 0,
    buffers: ?[*]const VertexBufferLayout = null,

    /// Provides a slightly friendlier Zig API to initialize this structure.
    pub inline fn init(v: struct {
        next_in_chain: ?*const ChainedStruct = null,
        module: *ShaderModule,
        entry_point: [*:0]const u8,
        constants: ?[]const ConstantEntry = null,
        buffers: ?[]const VertexBufferLayout = null,
    }) VertexState {
        return .{
            .next_in_chain = v.next_in_chain,
            .module = v.module,
            .entry_point = v.entry_point,
            .constant_count = if (v.constants) |e| e.len else 0,
            .constants = if (v.constants) |e| e.ptr else null,
            .buffer_count = if (v.buffers) |e| e.len else 0,
            .buffers = if (v.buffers) |e| e.ptr else null,
        };
    }
};

pub const FragmentState = extern struct {
    next_in_chain: ?*const ChainedStruct = null,
    module: *ShaderModule,
    entry_point: [*:0]const u8,
    constant_count: usize = 0,
    constants: ?[*]const ConstantEntry = null,
    target_count: usize,
    targets: ?[*]const ColorTargetState = null,

    /// Provides a slightly friendlier Zig API to initialize this structure.
    pub inline fn init(v: struct {
        next_in_chain: ?*const ChainedStruct = null,
        module: *ShaderModule,
        entry_point: [*:0]const u8,
        constants: ?[]const ConstantEntry = null,
        targets: ?[]const ColorTargetState = null,
    }) FragmentState {
        return .{
            .next_in_chain = v.next_in_chain,
            .module = v.module,
            .entry_point = v.entry_point,
            .constant_count = if (v.constants) |e| e.len else 0,
            .constants = if (v.constants) |e| e.ptr else null,
            .target_count = if (v.targets) |e| e.len else 0,
            .targets = if (v.targets) |e| e.ptr else null,
        };
    }
};

test "BackendType name" {
    try testing.expectEqualStrings("Vulkan", BackendType.vulkan.name());
}

test "enum name" {
    try testing.expectEqualStrings("front", @tagName(CullMode.front));
}

pub const CompilationInfoCallback = *const fn (
    status: CompilationInfoRequestStatus,
    compilation_info: *const CompilationInfo,
    userdata: ?*anyopaque,
) callconv(.C) void;

pub const ErrorCallback = *const fn (
    typ: ErrorType,
    message: [*:0]const u8,
    userdata: ?*anyopaque,
) callconv(.C) void;

pub const LoggingCallback = *const fn (
    typ: LoggingType,
    message: [*:0]const u8,
    userdata: ?*anyopaque,
) callconv(.C) void;

pub const RequestDeviceCallback = *const fn (
    status: RequestDeviceStatus,
    device: *Device,
    message: ?[*:0]const u8,
    userdata: ?*anyopaque,
) callconv(.C) void;

pub const RequestAdapterCallback = *const fn (
    status: RequestAdapterStatus,
    adapter: ?*Adapter,
    message: ?[*:0]const u8,
    userdata: ?*anyopaque,
) callconv(.C) void;

pub const CreateComputePipelineAsyncCallback = *const fn (
    status: CreatePipelineAsyncStatus,
    compute_pipeline: ?*ComputePipeline,
    message: ?[*:0]const u8,
    userdata: ?*anyopaque,
) callconv(.C) void;

pub const CreateRenderPipelineAsyncCallback = *const fn (
    status: CreatePipelineAsyncStatus,
    pipeline: ?*RenderPipeline,
    message: ?[*:0]const u8,
    userdata: ?*anyopaque,
) callconv(.C) void;

test {
    std.testing.refAllDeclsRecursive(@This());
}
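The `init` helpers above exist so callers can pass Zig slices in place of the C-style count-plus-many-item-pointer pairs the extern structs carry. A minimal sketch, assuming a `view: *TextureView` obtained elsewhere (e.g. from a swap chain):

// init fills in color_attachment_count and the [*] pointer from the slice.
const render_pass_info = RenderPassDescriptor.init(.{
    .label = "main pass",
    .color_attachments = &.{.{
        .view = view,
        .load_op = .clear,
        .store_op = .store,
        .clear_value = .{ .r = 0, .g = 0, .b = 0, .a = 1 },
    }},
});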
0
repos/mach-sysgpu/src
repos/mach-sysgpu/src/sysgpu/external_texture.zig
const Bool32 = @import("main.zig").Bool32;
const ChainedStruct = @import("main.zig").ChainedStruct;
const TextureView = @import("texture_view.zig").TextureView;
const Origin2D = @import("main.zig").Origin2D;
const Extent2D = @import("main.zig").Extent2D;
const Impl = @import("interface.zig").Impl;

pub const ExternalTexture = opaque {
    pub const BindingEntry = extern struct {
        chain: ChainedStruct = .{ .next = null, .s_type = .external_texture_binding_entry },
        external_texture: *ExternalTexture,
    };

    pub const BindingLayout = extern struct {
        chain: ChainedStruct = .{ .next = null, .s_type = .external_texture_binding_layout },
    };

    const Rotation = enum(u32) {
        rotate_0_degrees = 0x00000000,
        rotate_90_degrees = 0x00000001,
        rotate_180_degrees = 0x00000002,
        rotate_270_degrees = 0x00000003,
    };

    pub const Descriptor = extern struct {
        next_in_chain: ?*const ChainedStruct = null,
        label: ?[*:0]const u8 = null,
        plane0: *TextureView,
        plane1: ?*TextureView = null,
        visible_origin: Origin2D,
        visible_size: Extent2D,
        do_yuv_to_rgb_conversion_only: Bool32 = .false,
        yuv_to_rgb_conversion_matrix: ?*const [12]f32 = null,
        src_transfer_function_parameters: *const [7]f32,
        dst_transfer_function_parameters: *const [7]f32,
        gamut_conversion_matrix: *const [9]f32,
        flip_y: Bool32,
        rotation: Rotation,
    };

    pub inline fn destroy(external_texture: *ExternalTexture) void {
        Impl.externalTextureDestroy(external_texture);
    }

    pub inline fn setLabel(external_texture: *ExternalTexture, label: [*:0]const u8) void {
        Impl.externalTextureSetLabel(external_texture, label);
    }

    pub inline fn reference(external_texture: *ExternalTexture) void {
        Impl.externalTextureReference(external_texture);
    }

    pub inline fn release(external_texture: *ExternalTexture) void {
        Impl.externalTextureRelease(external_texture);
    }
};
0
repos/mach-sysgpu/src
repos/mach-sysgpu/src/sysgpu/bind_group.zig
const Buffer = @import("buffer.zig").Buffer;
const Sampler = @import("sampler.zig").Sampler;
const TextureView = @import("texture_view.zig").TextureView;
const ChainedStruct = @import("main.zig").ChainedStruct;
const BindGroupLayout = @import("bind_group_layout.zig").BindGroupLayout;
const ExternalTexture = @import("external_texture.zig").ExternalTexture;
const Impl = @import("interface.zig").Impl;

pub const BindGroup = opaque {
    pub const Entry = extern struct {
        pub const NextInChain = extern union {
            generic: ?*const ChainedStruct,
            external_texture_binding_entry: *const ExternalTexture.BindingEntry,
        };

        next_in_chain: NextInChain = .{ .generic = null },
        binding: u32,
        buffer: ?*Buffer = null,
        offset: u64 = 0,
        size: u64,
        elem_size: u32 = 0, // TEMP - using StructuredBuffer until we switch to DXC for templatized raw buffers
        sampler: ?*Sampler = null,
        texture_view: ?*TextureView = null,

        /// Helper to create a buffer BindGroup.Entry.
        pub fn buffer(binding: u32, buf: *Buffer, offset: u64, size: u64, elem_size: u32) Entry {
            return .{
                .binding = binding,
                .buffer = buf,
                .offset = offset,
                .size = size,
                .elem_size = elem_size,
            };
        }

        /// Helper to create a sampler BindGroup.Entry.
        pub fn sampler(binding: u32, _sampler: *Sampler) Entry {
            return .{
                .binding = binding,
                .sampler = _sampler,
                .size = 0,
            };
        }

        /// Helper to create a texture view BindGroup.Entry.
        pub fn textureView(binding: u32, texture_view: *TextureView) Entry {
            return .{
                .binding = binding,
                .texture_view = texture_view,
                .size = 0,
            };
        }
    };

    pub const Descriptor = extern struct {
        next_in_chain: ?*const ChainedStruct = null,
        label: ?[*:0]const u8 = null,
        layout: *BindGroupLayout,
        entry_count: usize = 0,
        entries: ?[*]const Entry = null,

        /// Provides a slightly friendlier Zig API to initialize this structure.
        pub inline fn init(v: struct {
            next_in_chain: ?*const ChainedStruct = null,
            label: ?[*:0]const u8 = null,
            layout: *BindGroupLayout,
            entries: ?[]const Entry = null,
        }) Descriptor {
            return .{
                .next_in_chain = v.next_in_chain,
                .label = v.label,
                .layout = v.layout,
                .entry_count = if (v.entries) |e| e.len else 0,
                .entries = if (v.entries) |e| e.ptr else null,
            };
        }
    };

    pub inline fn setLabel(bind_group: *BindGroup, label: [*:0]const u8) void {
        Impl.bindGroupSetLabel(bind_group, label);
    }

    pub inline fn reference(bind_group: *BindGroup) void {
        Impl.bindGroupReference(bind_group);
    }

    pub inline fn release(bind_group: *BindGroup) void {
        Impl.bindGroupRelease(bind_group);
    }
};
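A minimal sketch of the `Entry` helpers and `Descriptor.init` above, assuming a `device` that exposes `createBindGroup`, plus `layout`, a uniform buffer `ubo`, a `linear_sampler`, and a `Uniforms` struct (all placeholders); `elem_size` stays 0 since the buffer is not a structured buffer:

const entries = [_]BindGroup.Entry{
    BindGroup.Entry.buffer(0, ubo, 0, @sizeOf(Uniforms), 0),
    BindGroup.Entry.sampler(1, linear_sampler),
};
const bind_group = device.createBindGroup(&BindGroup.Descriptor.init(.{
    .layout = layout,
    .entries = &entries,
}));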
0
repos/mach-sysgpu/src
repos/mach-sysgpu/src/sysgpu/instance.zig
const ChainedStruct = @import("main.zig").ChainedStruct;
const RequestAdapterStatus = @import("main.zig").RequestAdapterStatus;
const Surface = @import("surface.zig").Surface;
const Adapter = @import("adapter.zig").Adapter;
const RequestAdapterOptions = @import("main.zig").RequestAdapterOptions;
const RequestAdapterCallback = @import("main.zig").RequestAdapterCallback;
const Impl = @import("interface.zig").Impl;
const dawn = @import("dawn.zig");

pub const Instance = opaque {
    pub const Descriptor = extern struct {
        pub const NextInChain = extern union {
            generic: ?*const ChainedStruct,
            dawn_toggles_descriptor: *const dawn.TogglesDescriptor,
        };

        next_in_chain: NextInChain = .{ .generic = null },
    };

    pub inline fn createSurface(instance: *Instance, descriptor: *const Surface.Descriptor) *Surface {
        return Impl.instanceCreateSurface(instance, descriptor);
    }

    pub inline fn processEvents(instance: *Instance) void {
        Impl.instanceProcessEvents(instance);
    }

    pub inline fn requestAdapter(
        instance: *Instance,
        options: ?*const RequestAdapterOptions,
        context: anytype,
        comptime callback: fn (
            ctx: @TypeOf(context),
            status: RequestAdapterStatus,
            adapter: ?*Adapter,
            message: ?[*:0]const u8,
        ) callconv(.Inline) void,
    ) void {
        const Context = @TypeOf(context);
        const Helper = struct {
            pub fn cCallback(
                status: RequestAdapterStatus,
                adapter: ?*Adapter,
                message: ?[*:0]const u8,
                userdata: ?*anyopaque,
            ) callconv(.C) void {
                callback(
                    if (Context == void) {} else @as(Context, @ptrCast(@alignCast(userdata))),
                    status,
                    adapter,
                    message,
                );
            }
        };
        Impl.instanceRequestAdapter(instance, options, Helper.cCallback, if (Context == void) null else context);
    }

    pub inline fn reference(instance: *Instance) void {
        Impl.instanceReference(instance);
    }

    pub inline fn release(instance: *Instance) void {
        Impl.instanceRelease(instance);
    }
};
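`requestAdapter` adapts a typed, inline Zig callback plus an arbitrary context pointer into the C-style callback/userdata pair at comptime. A minimal sketch that captures the result through the context, assuming an `inst: *Instance` (the `Response` wrapper is a placeholder pattern, not part of this file):

const Response = struct { adapter: ?*Adapter = null };
var response = Response{};
inst.requestAdapter(null, &response, struct {
    inline fn cb(ctx: *Response, status: RequestAdapterStatus, adapter: ?*Adapter, message: ?[*:0]const u8) void {
        _ = status;
        _ = message;
        ctx.adapter = adapter;
    }
}.cb);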
0
repos/mach-sysgpu/src
repos/mach-sysgpu/src/sysgpu/render_pass_encoder.zig
const Buffer = @import("buffer.zig").Buffer;
const RenderBundle = @import("render_bundle.zig").RenderBundle;
const BindGroup = @import("bind_group.zig").BindGroup;
const RenderPipeline = @import("render_pipeline.zig").RenderPipeline;
const QuerySet = @import("query_set.zig").QuerySet;
const Color = @import("main.zig").Color;
const IndexFormat = @import("main.zig").IndexFormat;
const Impl = @import("interface.zig").Impl;

pub const RenderPassEncoder = opaque {
    pub inline fn beginOcclusionQuery(render_pass_encoder: *RenderPassEncoder, query_index: u32) void {
        Impl.renderPassEncoderBeginOcclusionQuery(render_pass_encoder, query_index);
    }

    /// Default `instance_count`: 1
    /// Default `first_vertex`: 0
    /// Default `first_instance`: 0
    pub inline fn draw(render_pass_encoder: *RenderPassEncoder, vertex_count: u32, instance_count: u32, first_vertex: u32, first_instance: u32) void {
        Impl.renderPassEncoderDraw(render_pass_encoder, vertex_count, instance_count, first_vertex, first_instance);
    }

    /// Default `instance_count`: 1
    /// Default `first_index`: 0
    /// Default `base_vertex`: 0
    /// Default `first_instance`: 0
    pub inline fn drawIndexed(render_pass_encoder: *RenderPassEncoder, index_count: u32, instance_count: u32, first_index: u32, base_vertex: i32, first_instance: u32) void {
        Impl.renderPassEncoderDrawIndexed(render_pass_encoder, index_count, instance_count, first_index, base_vertex, first_instance);
    }

    pub inline fn drawIndexedIndirect(render_pass_encoder: *RenderPassEncoder, indirect_buffer: *Buffer, indirect_offset: u64) void {
        Impl.renderPassEncoderDrawIndexedIndirect(render_pass_encoder, indirect_buffer, indirect_offset);
    }

    pub inline fn drawIndirect(render_pass_encoder: *RenderPassEncoder, indirect_buffer: *Buffer, indirect_offset: u64) void {
        Impl.renderPassEncoderDrawIndirect(render_pass_encoder, indirect_buffer, indirect_offset);
    }

    pub inline fn end(render_pass_encoder: *RenderPassEncoder) void {
        Impl.renderPassEncoderEnd(render_pass_encoder);
    }

    pub inline fn endOcclusionQuery(render_pass_encoder: *RenderPassEncoder) void {
        Impl.renderPassEncoderEndOcclusionQuery(render_pass_encoder);
    }

    pub inline fn executeBundles(
        render_pass_encoder: *RenderPassEncoder,
        bundles: []*const RenderBundle,
    ) void {
        Impl.renderPassEncoderExecuteBundles(
            render_pass_encoder,
            bundles.len,
            bundles.ptr,
        );
    }

    pub inline fn insertDebugMarker(render_pass_encoder: *RenderPassEncoder, marker_label: [*:0]const u8) void {
        Impl.renderPassEncoderInsertDebugMarker(render_pass_encoder, marker_label);
    }

    pub inline fn popDebugGroup(render_pass_encoder: *RenderPassEncoder) void {
        Impl.renderPassEncoderPopDebugGroup(render_pass_encoder);
    }

    pub inline fn pushDebugGroup(render_pass_encoder: *RenderPassEncoder, group_label: [*:0]const u8) void {
        Impl.renderPassEncoderPushDebugGroup(render_pass_encoder, group_label);
    }

    /// Default `dynamic_offsets_count`: 0
    /// Default `dynamic_offsets`: `null`
    pub inline fn setBindGroup(render_pass_encoder: *RenderPassEncoder, group_index: u32, group: *BindGroup, dynamic_offsets: ?[]const u32) void {
        Impl.renderPassEncoderSetBindGroup(
            render_pass_encoder,
            group_index,
            group,
            if (dynamic_offsets) |v| v.len else 0,
            if (dynamic_offsets) |v| v.ptr else null,
        );
    }

    pub inline fn setBlendConstant(render_pass_encoder: *RenderPassEncoder, color: *const Color) void {
        Impl.renderPassEncoderSetBlendConstant(render_pass_encoder, color);
    }

    /// Default `offset`: 0
    /// Default `size`: `gpu.whole_size`
    pub inline fn setIndexBuffer(render_pass_encoder: *RenderPassEncoder, buffer: *Buffer, format: IndexFormat, offset: u64, size: u64) void {
        Impl.renderPassEncoderSetIndexBuffer(render_pass_encoder, buffer, format, offset, size);
    }

    pub inline fn setLabel(render_pass_encoder: *RenderPassEncoder, label: [*:0]const u8) void {
        Impl.renderPassEncoderSetLabel(render_pass_encoder, label);
    }

    pub inline fn setPipeline(render_pass_encoder: *RenderPassEncoder, pipeline: *RenderPipeline) void {
        Impl.renderPassEncoderSetPipeline(render_pass_encoder, pipeline);
    }

    pub inline fn setScissorRect(render_pass_encoder: *RenderPassEncoder, x: u32, y: u32, width: u32, height: u32) void {
        Impl.renderPassEncoderSetScissorRect(render_pass_encoder, x, y, width, height);
    }

    pub inline fn setStencilReference(render_pass_encoder: *RenderPassEncoder, _reference: u32) void {
        Impl.renderPassEncoderSetStencilReference(render_pass_encoder, _reference);
    }

    /// Default `offset`: 0
    /// Default `size`: `gpu.whole_size`
    pub inline fn setVertexBuffer(render_pass_encoder: *RenderPassEncoder, slot: u32, buffer: *Buffer, offset: u64, size: u64) void {
        Impl.renderPassEncoderSetVertexBuffer(render_pass_encoder, slot, buffer, offset, size);
    }

    pub inline fn setViewport(render_pass_encoder: *RenderPassEncoder, x: f32, y: f32, width: f32, height: f32, min_depth: f32, max_depth: f32) void {
        Impl.renderPassEncoderSetViewport(render_pass_encoder, x, y, width, height, min_depth, max_depth);
    }

    pub inline fn writeTimestamp(render_pass_encoder: *RenderPassEncoder, query_set: *QuerySet, query_index: u32) void {
        Impl.renderPassEncoderWriteTimestamp(render_pass_encoder, query_set, query_index);
    }

    pub inline fn reference(render_pass_encoder: *RenderPassEncoder) void {
        Impl.renderPassEncoderReference(render_pass_encoder);
    }

    pub inline fn release(render_pass_encoder: *RenderPassEncoder) void {
        Impl.renderPassEncoderRelease(render_pass_encoder);
    }
};
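A minimal sketch of driving the pass encoder above, assuming `command_encoder`, `render_pass_info`, `pipeline`, `bind_group`, and `vertex_buffer` were created elsewhere, and `whole_size` comes from main.zig:

const pass = command_encoder.beginRenderPass(&render_pass_info);
pass.setPipeline(pipeline);
pass.setBindGroup(0, bind_group, null); // no dynamic offsets
pass.setVertexBuffer(0, vertex_buffer, 0, whole_size);
pass.draw(3, 1, 0, 0);
pass.end();
pass.release();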
0
repos/mach-sysgpu/src
repos/mach-sysgpu/src/sysgpu/command_encoder.zig
const std = @import("std");
const ComputePassEncoder = @import("compute_pass_encoder.zig").ComputePassEncoder;
const RenderPassEncoder = @import("render_pass_encoder.zig").RenderPassEncoder;
const CommandBuffer = @import("command_buffer.zig").CommandBuffer;
const Buffer = @import("buffer.zig").Buffer;
const QuerySet = @import("query_set.zig").QuerySet;
const RenderPassDescriptor = @import("main.zig").RenderPassDescriptor;
const ComputePassDescriptor = @import("main.zig").ComputePassDescriptor;
const ChainedStruct = @import("main.zig").ChainedStruct;
const ImageCopyBuffer = @import("main.zig").ImageCopyBuffer;
const ImageCopyTexture = @import("main.zig").ImageCopyTexture;
const Extent3D = @import("main.zig").Extent3D;
const Impl = @import("interface.zig").Impl;
const dawn = @import("dawn.zig");

pub const CommandEncoder = opaque {
    pub const Descriptor = extern struct {
        pub const NextInChain = extern union {
            generic: ?*const ChainedStruct,
            dawn_encoder_internal_usage_descriptor: *const dawn.EncoderInternalUsageDescriptor,
        };

        next_in_chain: NextInChain = .{ .generic = null },
        label: ?[*:0]const u8 = null,
    };

    pub inline fn beginComputePass(command_encoder: *CommandEncoder, descriptor: ?*const ComputePassDescriptor) *ComputePassEncoder {
        return Impl.commandEncoderBeginComputePass(command_encoder, descriptor);
    }

    pub inline fn beginRenderPass(command_encoder: *CommandEncoder, descriptor: *const RenderPassDescriptor) *RenderPassEncoder {
        return Impl.commandEncoderBeginRenderPass(command_encoder, descriptor);
    }

    /// Default `offset`: 0
    /// Default `size`: `gpu.whole_size`
    pub inline fn clearBuffer(command_encoder: *CommandEncoder, buffer: *Buffer, offset: u64, size: u64) void {
        Impl.commandEncoderClearBuffer(command_encoder, buffer, offset, size);
    }

    pub inline fn copyBufferToBuffer(command_encoder: *CommandEncoder, source: *Buffer, source_offset: u64, destination: *Buffer, destination_offset: u64, size: u64) void {
        Impl.commandEncoderCopyBufferToBuffer(command_encoder, source, source_offset, destination, destination_offset, size);
    }

    pub inline fn copyBufferToTexture(command_encoder: *CommandEncoder, source: *const ImageCopyBuffer, destination: *const ImageCopyTexture, copy_size: *const Extent3D) void {
        Impl.commandEncoderCopyBufferToTexture(command_encoder, source, destination, copy_size);
    }

    pub inline fn copyTextureToBuffer(command_encoder: *CommandEncoder, source: *const ImageCopyTexture, destination: *const ImageCopyBuffer, copy_size: *const Extent3D) void {
        Impl.commandEncoderCopyTextureToBuffer(command_encoder, source, destination, copy_size);
    }

    pub inline fn copyTextureToTexture(command_encoder: *CommandEncoder, source: *const ImageCopyTexture, destination: *const ImageCopyTexture, copy_size: *const Extent3D) void {
        Impl.commandEncoderCopyTextureToTexture(command_encoder, source, destination, copy_size);
    }

    pub inline fn finish(command_encoder: *CommandEncoder, descriptor: ?*const CommandBuffer.Descriptor) *CommandBuffer {
        return Impl.commandEncoderFinish(command_encoder, descriptor);
    }

    pub inline fn injectValidationError(command_encoder: *CommandEncoder, message: [*:0]const u8) void {
        Impl.commandEncoderInjectValidationError(command_encoder, message);
    }

    pub inline fn insertDebugMarker(command_encoder: *CommandEncoder, marker_label: [*:0]const u8) void {
        Impl.commandEncoderInsertDebugMarker(command_encoder, marker_label);
    }

    pub inline fn popDebugGroup(command_encoder: *CommandEncoder) void {
        Impl.commandEncoderPopDebugGroup(command_encoder);
    }

    pub inline fn pushDebugGroup(command_encoder: *CommandEncoder, group_label: [*:0]const u8) void {
        Impl.commandEncoderPushDebugGroup(command_encoder, group_label);
    }

    pub inline fn resolveQuerySet(command_encoder: *CommandEncoder, query_set: *QuerySet, first_query: u32, query_count: u32, destination: *Buffer, destination_offset: u64) void {
        Impl.commandEncoderResolveQuerySet(command_encoder, query_set, first_query, query_count, destination, destination_offset);
    }

    pub inline fn setLabel(command_encoder: *CommandEncoder, label: [*:0]const u8) void {
        Impl.commandEncoderSetLabel(command_encoder, label);
    }

    pub inline fn writeBuffer(
        command_encoder: *CommandEncoder,
        buffer: *Buffer,
        buffer_offset_bytes: u64,
        data_slice: anytype,
    ) void {
        Impl.commandEncoderWriteBuffer(
            command_encoder,
            buffer,
            buffer_offset_bytes,
            @as([*]const u8, @ptrCast(std.mem.sliceAsBytes(data_slice).ptr)),
            @as(u64, @intCast(data_slice.len)) * @sizeOf(std.meta.Elem(@TypeOf(data_slice))),
        );
    }

    pub inline fn writeTimestamp(command_encoder: *CommandEncoder, query_set: *QuerySet, query_index: u32) void {
        Impl.commandEncoderWriteTimestamp(command_encoder, query_set, query_index);
    }

    pub inline fn reference(command_encoder: *CommandEncoder) void {
        Impl.commandEncoderReference(command_encoder);
    }

    pub inline fn release(command_encoder: *CommandEncoder) void {
        Impl.commandEncoderRelease(command_encoder);
    }
};
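`writeBuffer` above accepts any typed slice and derives the byte pointer and byte length itself. A minimal sketch, assuming a `command_encoder: *CommandEncoder` and a vertex `buffer`:

// Six f32s; the helper passes the byte pointer and
// vertices.len * @sizeOf(f32) through to the Impl call.
const vertices = [_]f32{ 0.0, 0.5, -0.5, -0.5, 0.5, -0.5 };
command_encoder.writeBuffer(buffer, 0, &vertices);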
0
repos/mach-sysgpu/src
repos/mach-sysgpu/src/sysgpu/adapter.zig
const std = @import("std");
const testing = std.testing;
const dawn = @import("dawn.zig");
const Bool32 = @import("main.zig").Bool32;
const ChainedStructOut = @import("main.zig").ChainedStructOut;
const Device = @import("device.zig").Device;
const Instance = @import("instance.zig").Instance;
const FeatureName = @import("main.zig").FeatureName;
const SupportedLimits = @import("main.zig").SupportedLimits;
const RequestDeviceStatus = @import("main.zig").RequestDeviceStatus;
const BackendType = @import("main.zig").BackendType;
const RequestDeviceCallback = @import("main.zig").RequestDeviceCallback;
const Impl = @import("interface.zig").Impl;

pub const Adapter = opaque {
    pub const Type = enum(u32) {
        discrete_gpu,
        integrated_gpu,
        cpu,
        unknown,

        pub fn name(t: Type) []const u8 {
            return switch (t) {
                .discrete_gpu => "Discrete GPU",
                .integrated_gpu => "Integrated GPU",
                .cpu => "CPU",
                .unknown => "Unknown",
            };
        }
    };

    pub const Properties = extern struct {
        pub const NextInChain = extern union {
            generic: ?*const ChainedStructOut,
            dawn_adapter_properties_power_preference: *const dawn.AdapterPropertiesPowerPreference,
        };

        next_in_chain: NextInChain = .{ .generic = null },
        vendor_id: u32,
        vendor_name: [*:0]const u8,
        architecture: [*:0]const u8,
        device_id: u32,
        name: [*:0]const u8,
        driver_description: [*:0]const u8,
        adapter_type: Type,
        backend_type: BackendType,
        compatibility_mode: Bool32 = .false,
    };

    pub inline fn createDevice(adapter: *Adapter, descriptor: ?*const Device.Descriptor) ?*Device {
        return Impl.adapterCreateDevice(adapter, descriptor);
    }

    /// Call once with null to determine the array length, and again to fetch the feature list.
    ///
    /// Consider using the enumerateFeaturesOwned helper.
    pub inline fn enumerateFeatures(adapter: *Adapter, features: ?[*]FeatureName) usize {
        return Impl.adapterEnumerateFeatures(adapter, features);
    }

    /// Enumerates the adapter features, storing the result in an allocated slice which is owned by
    /// the caller.
    pub inline fn enumerateFeaturesOwned(adapter: *Adapter, allocator: std.mem.Allocator) ![]FeatureName {
        const count = adapter.enumerateFeatures(null);
        const data = try allocator.alloc(FeatureName, count);
        _ = adapter.enumerateFeatures(data.ptr);
        return data;
    }

    pub inline fn getInstance(adapter: *Adapter) *Instance {
        return Impl.adapterGetInstance(adapter);
    }

    pub inline fn getLimits(adapter: *Adapter, limits: *SupportedLimits) bool {
        return Impl.adapterGetLimits(adapter, limits);
    }

    pub inline fn getProperties(adapter: *Adapter, properties: *Adapter.Properties) void {
        Impl.adapterGetProperties(adapter, properties);
    }

    pub inline fn hasFeature(adapter: *Adapter, feature: FeatureName) bool {
        return Impl.adapterHasFeature(adapter, feature);
    }

    pub inline fn requestDevice(
        adapter: *Adapter,
        descriptor: ?*const Device.Descriptor,
        context: anytype,
        comptime callback: fn (
            ctx: @TypeOf(context),
            status: RequestDeviceStatus,
            device: *Device,
            message: ?[*:0]const u8,
        ) callconv(.Inline) void,
    ) void {
        const Context = @TypeOf(context);
        const Helper = struct {
            pub fn cCallback(status: RequestDeviceStatus, device: *Device, message: ?[*:0]const u8, userdata: ?*anyopaque) callconv(.C) void {
                callback(
                    if (Context == void) {} else @as(Context, @ptrCast(@alignCast(userdata))),
                    status,
                    device,
                    message,
                );
            }
        };
        Impl.adapterRequestDevice(adapter, descriptor, Helper.cCallback, if (Context == void) null else context);
    }

    pub inline fn reference(adapter: *Adapter) void {
        Impl.adapterReference(adapter);
    }

    pub inline fn release(adapter: *Adapter) void {
        Impl.adapterRelease(adapter);
    }
};

test "Adapter.Type name" {
    try testing.expectEqualStrings("Discrete GPU", Adapter.Type.discrete_gpu.name());
}
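A usage sketch (not part of the dump) of the two-call enumeration pattern documented above, via the enumerateFeaturesOwned helper; the function name is hypothetical.

const std = @import("std");
const Adapter = @import("adapter.zig").Adapter;

fn logFeatures(adapter: *Adapter, allocator: std.mem.Allocator) !void {
    // First call sizes the array, second call fills it; the helper does both.
    const features = try adapter.enumerateFeaturesOwned(allocator);
    defer allocator.free(features);
    for (features) |f| std.debug.print("feature: {}\n", .{f});
}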
0
repos/mach-sysgpu/src
repos/mach-sysgpu/src/sysgpu/texture.zig
const std = @import("std");
const Bool32 = @import("main.zig").Bool32;
const ChainedStruct = @import("main.zig").ChainedStruct;
const TextureView = @import("texture_view.zig").TextureView;
const Extent3D = @import("main.zig").Extent3D;
const Impl = @import("interface.zig").Impl;
const types = @import("main.zig");
const dawn = @import("dawn.zig");

pub const Texture = opaque {
    pub const Aspect = enum(u32) {
        all = 0x00000000,
        stencil_only = 0x00000001,
        depth_only = 0x00000002,
        plane0_only = 0x00000003,
        plane1_only = 0x00000004,
    };

    pub const Dimension = enum(u32) {
        dimension_1d = 0x00000000,
        dimension_2d = 0x00000001,
        dimension_3d = 0x00000002,
    };

    pub const Format = enum(u32) {
        undefined = 0x00000000,
        r8_unorm = 0x00000001, r8_snorm = 0x00000002, r8_uint = 0x00000003, r8_sint = 0x00000004,
        r16_uint = 0x00000005, r16_sint = 0x00000006, r16_float = 0x00000007,
        rg8_unorm = 0x00000008, rg8_snorm = 0x00000009, rg8_uint = 0x0000000a, rg8_sint = 0x0000000b,
        r32_float = 0x0000000c, r32_uint = 0x0000000d, r32_sint = 0x0000000e,
        rg16_uint = 0x0000000f, rg16_sint = 0x00000010, rg16_float = 0x00000011,
        rgba8_unorm = 0x00000012, rgba8_unorm_srgb = 0x00000013, rgba8_snorm = 0x00000014, rgba8_uint = 0x00000015, rgba8_sint = 0x00000016,
        bgra8_unorm = 0x00000017, bgra8_unorm_srgb = 0x00000018,
        rgb10_a2_unorm = 0x00000019, rg11_b10_ufloat = 0x0000001a, rgb9_e5_ufloat = 0x0000001b,
        rg32_float = 0x0000001c, rg32_uint = 0x0000001d, rg32_sint = 0x0000001e,
        rgba16_uint = 0x0000001f, rgba16_sint = 0x00000020, rgba16_float = 0x00000021,
        rgba32_float = 0x00000022, rgba32_uint = 0x00000023, rgba32_sint = 0x00000024,
        stencil8 = 0x00000025, depth16_unorm = 0x00000026, depth24_plus = 0x00000027, depth24_plus_stencil8 = 0x00000028,
        depth32_float = 0x00000029, depth32_float_stencil8 = 0x0000002a,
        bc1_rgba_unorm = 0x0000002b, bc1_rgba_unorm_srgb = 0x0000002c, bc2_rgba_unorm = 0x0000002d, bc2_rgba_unorm_srgb = 0x0000002e,
        bc3_rgba_unorm = 0x0000002f, bc3_rgba_unorm_srgb = 0x00000030, bc4_runorm = 0x00000031, bc4_rsnorm = 0x00000032,
        bc5_rg_unorm = 0x00000033, bc5_rg_snorm = 0x00000034, bc6_hrgb_ufloat = 0x00000035, bc6_hrgb_float = 0x00000036,
        bc7_rgba_unorm = 0x00000037, bc7_rgba_unorm_srgb = 0x00000038,
        etc2_rgb8_unorm = 0x00000039, etc2_rgb8_unorm_srgb = 0x0000003a, etc2_rgb8_a1_unorm = 0x0000003b, etc2_rgb8_a1_unorm_srgb = 0x0000003c,
        etc2_rgba8_unorm = 0x0000003d, etc2_rgba8_unorm_srgb = 0x0000003e,
        eacr11_unorm = 0x0000003f, eacr11_snorm = 0x00000040, eacrg11_unorm = 0x00000041, eacrg11_snorm = 0x00000042,
        astc4x4_unorm = 0x00000043, astc4x4_unorm_srgb = 0x00000044, astc5x4_unorm = 0x00000045, astc5x4_unorm_srgb = 0x00000046,
        astc5x5_unorm = 0x00000047, astc5x5_unorm_srgb = 0x00000048, astc6x5_unorm = 0x00000049, astc6x5_unorm_srgb = 0x0000004a,
        astc6x6_unorm = 0x0000004b, astc6x6_unorm_srgb = 0x0000004c, astc8x5_unorm = 0x0000004d, astc8x5_unorm_srgb = 0x0000004e,
        astc8x6_unorm = 0x0000004f, astc8x6_unorm_srgb = 0x00000050, astc8x8_unorm = 0x00000051, astc8x8_unorm_srgb = 0x00000052,
        astc10x5_unorm = 0x00000053, astc10x5_unorm_srgb = 0x00000054, astc10x6_unorm = 0x00000055, astc10x6_unorm_srgb = 0x00000056,
        astc10x8_unorm = 0x00000057, astc10x8_unorm_srgb = 0x00000058, astc10x10_unorm = 0x00000059, astc10x10_unorm_srgb = 0x0000005a,
        astc12x10_unorm = 0x0000005b, astc12x10_unorm_srgb = 0x0000005c, astc12x12_unorm = 0x0000005d, astc12x12_unorm_srgb = 0x0000005e,
        r8_bg8_biplanar420_unorm = 0x0000005f,
    };

    pub const SampleType = enum(u32) {
        undefined = 0x00000000,
        float = 0x00000001,
        unfilterable_float = 0x00000002,
        depth = 0x00000003,
        sint = 0x00000004,
        uint = 0x00000005,
    };

    pub const UsageFlags = packed struct(u32) {
        copy_src: bool = false,
        copy_dst: bool = false,
        texture_binding: bool = false,
        storage_binding: bool = false,
        render_attachment: bool = false,
        transient_attachment: bool = false,
        _padding: u26 = 0,

        comptime {
            std.debug.assert(
                @sizeOf(@This()) == @sizeOf(u32) and
                    @bitSizeOf(@This()) == @bitSizeOf(u32),
            );
        }

        pub const none = UsageFlags{};

        pub fn equal(a: UsageFlags, b: UsageFlags) bool {
            return @as(u6, @truncate(@as(u32, @bitCast(a)))) == @as(u6, @truncate(@as(u32, @bitCast(b))));
        }
    };

    pub const BindingLayout = extern struct {
        next_in_chain: ?*const ChainedStruct = null,
        sample_type: SampleType = .undefined,
        view_dimension: TextureView.Dimension = .dimension_undefined,
        multisampled: Bool32 = .false,
    };

    pub const DataLayout = extern struct {
        next_in_chain: ?*const ChainedStruct = null,
        offset: u64 = 0,
        bytes_per_row: u32 = types.copy_stride_undefined,
        rows_per_image: u32 = types.copy_stride_undefined,
    };

    pub const Descriptor = extern struct {
        pub const NextInChain = extern union {
            generic: ?*const ChainedStruct,
            dawn_texture_internal_usage_descriptor: *const dawn.TextureInternalUsageDescriptor,
        };

        next_in_chain: NextInChain = .{ .generic = null },
        label: ?[*:0]const u8 = null,
        usage: UsageFlags,
        dimension: Dimension = .dimension_2d,
        size: Extent3D,
        format: Format,
        mip_level_count: u32 = 1,
        sample_count: u32 = 1,
        view_format_count: usize = 0,
        view_formats: ?[*]const Format = null,

        /// Provides a slightly friendlier Zig API to initialize this structure.
        pub inline fn init(v: struct {
            next_in_chain: NextInChain = .{ .generic = null },
            label: ?[*:0]const u8 = null,
            usage: UsageFlags,
            dimension: Dimension = .dimension_2d,
            size: Extent3D,
            format: Format,
            mip_level_count: u32 = 1,
            sample_count: u32 = 1,
            view_formats: ?[]const Format = null,
        }) Descriptor {
            return .{
                .next_in_chain = v.next_in_chain,
                .label = v.label,
                .usage = v.usage,
                .dimension = v.dimension,
                .size = v.size,
                .format = v.format,
                .mip_level_count = v.mip_level_count,
                .sample_count = v.sample_count,
                .view_format_count = if (v.view_formats) |e| e.len else 0,
                .view_formats = if (v.view_formats) |e| e.ptr else null,
            };
        }
    };

    pub inline fn createView(texture: *Texture, descriptor: ?*const TextureView.Descriptor) *TextureView {
        return Impl.textureCreateView(texture, descriptor);
    }

    pub inline fn destroy(texture: *Texture) void {
        Impl.textureDestroy(texture);
    }

    pub inline fn getDepthOrArrayLayers(texture: *Texture) u32 {
        return Impl.textureGetDepthOrArrayLayers(texture);
    }

    pub inline fn getDimension(texture: *Texture) Dimension {
        return Impl.textureGetDimension(texture);
    }

    pub inline fn getFormat(texture: *Texture) Format {
        return Impl.textureGetFormat(texture);
    }

    pub inline fn getHeight(texture: *Texture) u32 {
        return Impl.textureGetHeight(texture);
    }

    pub inline fn getMipLevelCount(texture: *Texture) u32 {
        return Impl.textureGetMipLevelCount(texture);
    }

    pub inline fn getSampleCount(texture: *Texture) u32 {
        return Impl.textureGetSampleCount(texture);
    }

    pub inline fn getUsage(texture: *Texture) UsageFlags {
        return Impl.textureGetUsage(texture);
    }

    pub inline fn getWidth(texture: *Texture) u32 {
        return Impl.textureGetWidth(texture);
    }

    pub inline fn setLabel(texture: *Texture, label: [*:0]const u8) void {
        Impl.textureSetLabel(texture, label);
    }

    pub inline fn reference(texture: *Texture) void {
        Impl.textureReference(texture);
    }

    pub inline fn release(texture: *Texture) void {
        Impl.textureRelease(texture);
    }
};
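A sketch (not part of the dump) of the slice-based Descriptor.init helper above, which fills view_format_count/view_formats from a slice. It assumes Extent3D defaults depth_or_array_layers to 1; the function name is hypothetical.

const Texture = @import("texture.zig").Texture;

fn defaultColorTarget(width: u32, height: u32) Texture.Descriptor {
    return Texture.Descriptor.init(.{
        .usage = .{ .texture_binding = true, .copy_dst = true },
        .size = .{ .width = width, .height = height }, // depth_or_array_layers assumed to default to 1
        .format = .rgba8_unorm,
        // The helper derives view_format_count and the raw pointer for us.
        .view_formats = &[_]Texture.Format{.rgba8_unorm_srgb},
    });
}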
0
repos/mach-sysgpu/src
repos/mach-sysgpu/src/sysgpu/command_buffer.zig
const ChainedStruct = @import("main.zig").ChainedStruct;
const Impl = @import("interface.zig").Impl;

pub const CommandBuffer = opaque {
    pub const Descriptor = extern struct {
        next_in_chain: ?*const ChainedStruct = null,
        label: ?[*:0]const u8 = null,
    };

    pub inline fn setLabel(command_buffer: *CommandBuffer, label: [*:0]const u8) void {
        Impl.commandBufferSetLabel(command_buffer, label);
    }

    pub inline fn reference(command_buffer: *CommandBuffer) void {
        Impl.commandBufferReference(command_buffer);
    }

    pub inline fn release(command_buffer: *CommandBuffer) void {
        Impl.commandBufferRelease(command_buffer);
    }
};
0
repos/mach-sysgpu/src
repos/mach-sysgpu/src/sysgpu/dawn.zig
const Bool32 = @import("main.zig").Bool32;
const ChainedStruct = @import("main.zig").ChainedStruct;
const ChainedStructOut = @import("main.zig").ChainedStructOut;
const PowerPreference = @import("main.zig").PowerPreference;
const Texture = @import("texture.zig").Texture;

pub const CacheDeviceDescriptor = extern struct {
    chain: ChainedStruct = .{ .next = null, .s_type = .dawn_cache_device_descriptor },
    isolation_key: [*:0]const u8 = "",
};

pub const EncoderInternalUsageDescriptor = extern struct {
    chain: ChainedStruct = .{ .next = null, .s_type = .dawn_encoder_internal_usage_descriptor },
    use_internal_usages: Bool32 = .false,
};

pub const MultisampleStateRenderToSingleSampled = extern struct {
    chain: ChainedStruct = .{ .next = null, .s_type = .dawn_multisample_state_render_to_single_sampled },
    enabled: Bool32 = .false,
};

pub const RenderPassColorAttachmentRenderToSingleSampled = extern struct {
    chain: ChainedStruct = .{ .next = null, .s_type = .dawn_render_pass_color_attachment_render_to_single_sampled },
    implicit_sample_count: u32 = 1,
};

pub const TextureInternalUsageDescriptor = extern struct {
    chain: ChainedStruct = .{ .next = null, .s_type = .dawn_texture_internal_usage_descriptor },
    internal_usage: Texture.UsageFlags = Texture.UsageFlags.none,
};

pub const TogglesDescriptor = extern struct {
    chain: ChainedStruct = .{ .next = null, .s_type = .dawn_toggles_descriptor },
    enabled_toggles_count: usize = 0,
    enabled_toggles: ?[*]const [*:0]const u8 = null,
    disabled_toggles_count: usize = 0,
    disabled_toggles: ?[*]const [*:0]const u8 = null,

    /// Provides a slightly friendlier Zig API to initialize this structure.
    pub inline fn init(v: struct {
        chain: ChainedStruct = .{ .next = null, .s_type = .dawn_toggles_descriptor },
        enabled_toggles: ?[]const [*:0]const u8 = null,
        disabled_toggles: ?[]const [*:0]const u8 = null,
    }) TogglesDescriptor {
        return .{
            .chain = v.chain,
            .enabled_toggles_count = if (v.enabled_toggles) |e| e.len else 0,
            .enabled_toggles = if (v.enabled_toggles) |e| e.ptr else null,
            .disabled_toggles_count = if (v.disabled_toggles) |e| e.len else 0,
            .disabled_toggles = if (v.disabled_toggles) |e| e.ptr else null,
        };
    }
};

pub const ShaderModuleSPIRVOptionsDescriptor = extern struct {
    chain: ChainedStruct = .{ .next = null, .s_type = .dawn_shader_module_spirv_options_descriptor },
    allow_non_uniform_derivatives: Bool32 = .false,
};

pub const AdapterPropertiesPowerPreference = extern struct {
    chain: ChainedStructOut = .{
        .next = null,
        .s_type = .dawn_adapter_properties_power_preference,
    },
    power_preference: PowerPreference = .undefined,
};

pub const BufferDescriptorErrorInfoFromWireClient = extern struct {
    chain: ChainedStruct = .{
        .next = null,
        .s_type = .dawn_buffer_descriptor_error_info_from_wire_client,
    },
    out_of_memory: Bool32 = .false,
};
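A sketch (not part of the dump) showing how a TogglesDescriptor is chained into a Device.Descriptor through its next_in_chain union. "skip_validation" is a Dawn toggle name used here purely as an illustration; the helper name is hypothetical.

const dawn = @import("dawn.zig");
const Device = @import("device.zig").Device;

// init() derives the count/pointer pairs from slices.
const debug_toggles = dawn.TogglesDescriptor.init(.{
    .enabled_toggles = &[_][*:0]const u8{"skip_validation"},
});

fn debugDeviceDescriptor() Device.Descriptor {
    return Device.Descriptor.init(.{
        .next_in_chain = .{ .dawn_toggles_descriptor = &debug_toggles },
        .label = "device with toggles",
    });
}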
0
repos/mach-sysgpu/src
repos/mach-sysgpu/src/sysgpu/buffer.zig
const std = @import("std");
const Bool32 = @import("main.zig").Bool32;
const ChainedStruct = @import("main.zig").ChainedStruct;
const dawn = @import("dawn.zig");
const MapModeFlags = @import("main.zig").MapModeFlags;
const Impl = @import("interface.zig").Impl;

pub const Buffer = opaque {
    pub const MapCallback = *const fn (status: MapAsyncStatus, userdata: ?*anyopaque) callconv(.C) void;

    pub const BindingType = enum(u32) {
        undefined = 0x00000000,
        uniform = 0x00000001,
        storage = 0x00000002,
        read_only_storage = 0x00000003,
    };

    pub const MapState = enum(u32) {
        unmapped = 0x00000000,
        pending = 0x00000001,
        mapped = 0x00000002,
    };

    pub const MapAsyncStatus = enum(u32) {
        success = 0x00000000,
        validation_error = 0x00000001,
        unknown = 0x00000002,
        device_lost = 0x00000003,
        destroyed_before_callback = 0x00000004,
        unmapped_before_callback = 0x00000005,
        mapping_already_pending = 0x00000006,
        offset_out_of_range = 0x00000007,
        size_out_of_range = 0x00000008,
    };

    pub const UsageFlags = packed struct(u32) {
        map_read: bool = false,
        map_write: bool = false,
        copy_src: bool = false,
        copy_dst: bool = false,
        index: bool = false,
        vertex: bool = false,
        uniform: bool = false,
        storage: bool = false,
        indirect: bool = false,
        query_resolve: bool = false,
        _padding: u22 = 0,

        comptime {
            std.debug.assert(
                @sizeOf(@This()) == @sizeOf(u32) and
                    @bitSizeOf(@This()) == @bitSizeOf(u32),
            );
        }

        pub const none = UsageFlags{};

        pub fn equal(a: UsageFlags, b: UsageFlags) bool {
            return @as(u10, @truncate(@as(u32, @bitCast(a)))) == @as(u10, @truncate(@as(u32, @bitCast(b))));
        }
    };

    pub const BindingLayout = extern struct {
        next_in_chain: ?*const ChainedStruct = null,
        type: BindingType = .undefined,
        has_dynamic_offset: Bool32 = .false,
        min_binding_size: u64 = 0,
    };

    pub const Descriptor = extern struct {
        pub const NextInChain = extern union {
            generic: ?*const ChainedStruct,
            dawn_buffer_descriptor_error_info_from_wire_client: *const dawn.BufferDescriptorErrorInfoFromWireClient,
        };

        next_in_chain: NextInChain = .{ .generic = null },
        label: ?[*:0]const u8 = null,
        usage: UsageFlags,
        size: u64,
        mapped_at_creation: Bool32 = .false,
    };

    pub inline fn destroy(buffer: *Buffer) void {
        Impl.bufferDestroy(buffer);
    }

    pub inline fn getMapState(buffer: *Buffer) MapState {
        return Impl.bufferGetMapState(buffer);
    }

    /// Default `offset_bytes`: 0
    /// Default `len`: `gpu.whole_map_size` / `std.math.maxInt(usize)` (whole range)
    pub inline fn getConstMappedRange(
        buffer: *Buffer,
        comptime T: type,
        offset_bytes: usize,
        len: usize,
    ) ?[]const T {
        const size = @sizeOf(T) * len;
        const data = Impl.bufferGetConstMappedRange(
            buffer,
            offset_bytes,
            size + size % 4,
        );
        return if (data) |d| @as([*]const T, @ptrCast(@alignCast(d)))[0..len] else null;
    }

    /// Default `offset_bytes`: 0
    /// Default `len`: `gpu.whole_map_size` / `std.math.maxInt(usize)` (whole range)
    pub inline fn getMappedRange(
        buffer: *Buffer,
        comptime T: type,
        offset_bytes: usize,
        len: usize,
    ) ?[]T {
        const size = @sizeOf(T) * len;
        const data = Impl.bufferGetMappedRange(
            buffer,
            offset_bytes,
            size + size % 4,
        );
        return if (data) |d| @as([*]T, @ptrCast(@alignCast(d)))[0..len] else null;
    }

    pub inline fn getSize(buffer: *Buffer) u64 {
        return Impl.bufferGetSize(buffer);
    }

    pub inline fn getUsage(buffer: *Buffer) Buffer.UsageFlags {
        return Impl.bufferGetUsage(buffer);
    }

    pub inline fn mapAsync(
        buffer: *Buffer,
        mode: MapModeFlags,
        offset: usize,
        size: usize,
        context: anytype,
        comptime callback: fn (ctx: @TypeOf(context), status: MapAsyncStatus) callconv(.Inline) void,
    ) void {
        const Context = @TypeOf(context);
        const Helper = struct {
            pub fn cCallback(status: MapAsyncStatus, userdata: ?*anyopaque) callconv(.C) void {
                callback(if (Context == void) {} else @as(Context, @ptrCast(@alignCast(userdata))), status);
            }
        };
        Impl.bufferMapAsync(buffer, mode, offset, size, Helper.cCallback, if (Context == void) null else context);
    }

    pub inline fn setLabel(buffer: *Buffer, label: [*:0]const u8) void {
        Impl.bufferSetLabel(buffer, label);
    }

    pub inline fn unmap(buffer: *Buffer) void {
        Impl.bufferUnmap(buffer);
    }

    pub inline fn reference(buffer: *Buffer) void {
        Impl.bufferReference(buffer);
    }

    pub inline fn release(buffer: *Buffer) void {
        Impl.bufferRelease(buffer);
    }
};
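A sketch (not part of the dump) of the mapAsync/getConstMappedRange readback pattern. The `.read` flag name on MapModeFlags is assumed from the WebGPU map-mode flags in main.zig, and the busy-wait on device.tick() is for illustration only.

const Buffer = @import("buffer.zig").Buffer;
const Device = @import("device.zig").Device;

fn readBack(device: *Device, buffer: *Buffer, dst: []u8) bool {
    var result: ?Buffer.MapAsyncStatus = null;
    buffer.mapAsync(.{ .read = true }, 0, dst.len, &result, struct {
        inline fn cb(ctx: *?Buffer.MapAsyncStatus, status: Buffer.MapAsyncStatus) void {
            ctx.* = status; // record whatever status the implementation reports
        }
    }.cb);
    while (result == null) device.tick(); // drive callbacks; real code should bound this
    if (result.? != .success) return false;
    defer buffer.unmap(); // the mapped slice is only valid until unmap
    const mapped = buffer.getConstMappedRange(u8, 0, dst.len) orelse return false;
    @memcpy(dst, mapped);
    return true;
}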
0
repos/mach-sysgpu/src
repos/mach-sysgpu/src/sysgpu/sampler.zig
const ChainedStruct = @import("main.zig").ChainedStruct;
const FilterMode = @import("main.zig").FilterMode;
const MipmapFilterMode = @import("main.zig").MipmapFilterMode;
const CompareFunction = @import("main.zig").CompareFunction;
const Impl = @import("interface.zig").Impl;

pub const Sampler = opaque {
    pub const AddressMode = enum(u32) {
        repeat = 0x00000000,
        mirror_repeat = 0x00000001,
        clamp_to_edge = 0x00000002,
    };

    pub const BindingType = enum(u32) {
        undefined = 0x00000000,
        filtering = 0x00000001,
        non_filtering = 0x00000002,
        comparison = 0x00000003,
    };

    pub const BindingLayout = extern struct {
        next_in_chain: ?*const ChainedStruct = null,
        type: BindingType = .undefined,
    };

    pub const Descriptor = extern struct {
        next_in_chain: ?*const ChainedStruct = null,
        label: ?[*:0]const u8 = null,
        address_mode_u: AddressMode = .clamp_to_edge,
        address_mode_v: AddressMode = .clamp_to_edge,
        address_mode_w: AddressMode = .clamp_to_edge,
        mag_filter: FilterMode = .nearest,
        min_filter: FilterMode = .nearest,
        mipmap_filter: MipmapFilterMode = .nearest,
        lod_min_clamp: f32 = 0.0,
        lod_max_clamp: f32 = 32.0,
        compare: CompareFunction = .undefined,
        max_anisotropy: u16 = 1,
    };

    pub inline fn setLabel(sampler: *Sampler, label: [*:0]const u8) void {
        Impl.samplerSetLabel(sampler, label);
    }

    pub inline fn reference(sampler: *Sampler) void {
        Impl.samplerReference(sampler);
    }

    pub inline fn release(sampler: *Sampler) void {
        Impl.samplerRelease(sampler);
    }
};
0
repos/mach-sysgpu/src
repos/mach-sysgpu/src/sysgpu/device.zig
const std = @import("std");
const Adapter = @import("adapter.zig").Adapter;
const Queue = @import("queue.zig").Queue;
const BindGroup = @import("bind_group.zig").BindGroup;
const BindGroupLayout = @import("bind_group_layout.zig").BindGroupLayout;
const Buffer = @import("buffer.zig").Buffer;
const CommandEncoder = @import("command_encoder.zig").CommandEncoder;
const ComputePipeline = @import("compute_pipeline.zig").ComputePipeline;
const ExternalTexture = @import("external_texture.zig").ExternalTexture;
const PipelineLayout = @import("pipeline_layout.zig").PipelineLayout;
const QuerySet = @import("query_set.zig").QuerySet;
const RenderBundleEncoder = @import("render_bundle_encoder.zig").RenderBundleEncoder;
const RenderPipeline = @import("render_pipeline.zig").RenderPipeline;
const Sampler = @import("sampler.zig").Sampler;
const ShaderModule = @import("shader_module.zig").ShaderModule;
const Surface = @import("surface.zig").Surface;
const SwapChain = @import("swap_chain.zig").SwapChain;
const Texture = @import("texture.zig").Texture;
const ChainedStruct = @import("main.zig").ChainedStruct;
const FeatureName = @import("main.zig").FeatureName;
const RequiredLimits = @import("main.zig").RequiredLimits;
const SupportedLimits = @import("main.zig").SupportedLimits;
const ErrorType = @import("main.zig").ErrorType;
const ErrorFilter = @import("main.zig").ErrorFilter;
const LoggingType = @import("main.zig").LoggingType;
const CreatePipelineAsyncStatus = @import("main.zig").CreatePipelineAsyncStatus;
const LoggingCallback = @import("main.zig").LoggingCallback;
const ErrorCallback = @import("main.zig").ErrorCallback;
const CreateComputePipelineAsyncCallback = @import("main.zig").CreateComputePipelineAsyncCallback;
const CreateRenderPipelineAsyncCallback = @import("main.zig").CreateRenderPipelineAsyncCallback;
const Impl = @import("interface.zig").Impl;
const dawn = @import("dawn.zig");

pub const Device = opaque {
    pub const LostCallback = *const fn (
        reason: LostReason,
        message: [*:0]const u8,
        userdata: ?*anyopaque,
    ) callconv(.C) void;

    pub const LostReason = enum(u32) {
        undefined = 0x00000000,
        destroyed = 0x00000001,
    };

    pub const Descriptor = extern struct {
        pub const NextInChain = extern union {
            generic: ?*const ChainedStruct,
            dawn_toggles_descriptor: *const dawn.TogglesDescriptor,
            dawn_cache_device_descriptor: *const dawn.CacheDeviceDescriptor,
        };

        next_in_chain: NextInChain = .{ .generic = null },
        label: ?[*:0]const u8 = null,
        required_features_count: usize = 0,
        required_features: ?[*]const FeatureName = null,
        required_limits: ?*const RequiredLimits = null,
        default_queue: Queue.Descriptor = Queue.Descriptor{},
        device_lost_callback: ?LostCallback = null,
        device_lost_userdata: ?*anyopaque = null,

        /// Provides a slightly friendlier Zig API to initialize this structure.
        pub inline fn init(v: struct {
            next_in_chain: NextInChain = .{ .generic = null },
            label: ?[*:0]const u8 = null,
            required_features: ?[]const FeatureName = null,
            required_limits: ?*const RequiredLimits = null,
            default_queue: Queue.Descriptor = Queue.Descriptor{},
        }) Descriptor {
            return .{
                .next_in_chain = v.next_in_chain,
                .label = v.label,
                .required_features_count = if (v.required_features) |e| e.len else 0,
                .required_features = if (v.required_features) |e| e.ptr else null,
                .required_limits = v.required_limits,
                .default_queue = v.default_queue,
            };
        }
    };

    pub inline fn createBindGroup(device: *Device, descriptor: *const BindGroup.Descriptor) *BindGroup {
        return Impl.deviceCreateBindGroup(device, descriptor);
    }

    pub inline fn createBindGroupLayout(device: *Device, descriptor: *const BindGroupLayout.Descriptor) *BindGroupLayout {
        return Impl.deviceCreateBindGroupLayout(device, descriptor);
    }

    pub inline fn createBuffer(device: *Device, descriptor: *const Buffer.Descriptor) *Buffer {
        return Impl.deviceCreateBuffer(device, descriptor);
    }

    pub inline fn createCommandEncoder(device: *Device, descriptor: ?*const CommandEncoder.Descriptor) *CommandEncoder {
        return Impl.deviceCreateCommandEncoder(device, descriptor);
    }

    pub inline fn createComputePipeline(device: *Device, descriptor: *const ComputePipeline.Descriptor) *ComputePipeline {
        return Impl.deviceCreateComputePipeline(device, descriptor);
    }

    pub inline fn createComputePipelineAsync(
        device: *Device,
        descriptor: *const ComputePipeline.Descriptor,
        context: anytype,
        comptime callback: fn (
            status: CreatePipelineAsyncStatus,
            compute_pipeline: *ComputePipeline,
            message: [*:0]const u8,
            ctx: @TypeOf(context),
        ) callconv(.Inline) void,
    ) void {
        const Context = @TypeOf(context);
        const Helper = struct {
            pub fn cCallback(
                status: CreatePipelineAsyncStatus,
                compute_pipeline: *ComputePipeline,
                message: [*:0]const u8,
                userdata: ?*anyopaque,
            ) callconv(.C) void {
                callback(
                    status,
                    compute_pipeline,
                    message,
                    if (Context == void) {} else @as(Context, @ptrCast(@alignCast(userdata))),
                );
            }
        };
        Impl.deviceCreateComputePipelineAsync(device, descriptor, Helper.cCallback, if (Context == void) null else context);
    }

    pub inline fn createErrorBuffer(device: *Device, descriptor: *const Buffer.Descriptor) *Buffer {
        return Impl.deviceCreateErrorBuffer(device, descriptor);
    }

    pub inline fn createErrorExternalTexture(device: *Device) *ExternalTexture {
        return Impl.deviceCreateErrorExternalTexture(device);
    }

    pub inline fn createErrorTexture(device: *Device, descriptor: *const Texture.Descriptor) *Texture {
        return Impl.deviceCreateErrorTexture(device, descriptor);
    }

    pub inline fn createExternalTexture(device: *Device, external_texture_descriptor: *const ExternalTexture.Descriptor) *ExternalTexture {
        return Impl.deviceCreateExternalTexture(device, external_texture_descriptor);
    }

    pub inline fn createPipelineLayout(device: *Device, pipeline_layout_descriptor: *const PipelineLayout.Descriptor) *PipelineLayout {
        return Impl.deviceCreatePipelineLayout(device, pipeline_layout_descriptor);
    }

    pub inline fn createQuerySet(device: *Device, descriptor: *const QuerySet.Descriptor) *QuerySet {
        return Impl.deviceCreateQuerySet(device, descriptor);
    }

    pub inline fn createRenderBundleEncoder(device: *Device, descriptor: *const RenderBundleEncoder.Descriptor) *RenderBundleEncoder {
        return Impl.deviceCreateRenderBundleEncoder(device, descriptor);
    }

    pub inline fn createRenderPipeline(device: *Device, descriptor: *const RenderPipeline.Descriptor) *RenderPipeline {
        return Impl.deviceCreateRenderPipeline(device, descriptor);
    }

    pub inline fn createRenderPipelineAsync(
        device: *Device,
        descriptor: *const RenderPipeline.Descriptor,
        context: anytype,
        comptime callback: fn (
            ctx: @TypeOf(context),
            status: CreatePipelineAsyncStatus,
            pipeline: *RenderPipeline,
            message: [*:0]const u8,
        ) callconv(.Inline) void,
    ) void {
        const Context = @TypeOf(context);
        const Helper = struct {
            pub fn cCallback(
                status: CreatePipelineAsyncStatus,
                pipeline: *RenderPipeline,
                message: [*:0]const u8,
                userdata: ?*anyopaque,
            ) callconv(.C) void {
                callback(
                    if (Context == void) {} else @as(Context, @ptrCast(@alignCast(userdata))),
                    status,
                    pipeline,
                    message,
                );
            }
        };
        Impl.deviceCreateRenderPipelineAsync(device, descriptor, Helper.cCallback, if (Context == void) null else context);
    }

    pub inline fn createSampler(device: *Device, descriptor: ?*const Sampler.Descriptor) *Sampler {
        return Impl.deviceCreateSampler(device, descriptor);
    }

    pub inline fn createShaderModule(device: *Device, descriptor: *const ShaderModule.Descriptor) *ShaderModule {
        return Impl.deviceCreateShaderModule(device, descriptor);
    }

    /// Helper to make createShaderModule invocations slightly nicer.
    pub inline fn createShaderModuleWGSL(
        device: *Device,
        label: ?[*:0]const u8,
        code: [*:0]const u8,
    ) *ShaderModule {
        return device.createShaderModule(&ShaderModule.Descriptor{
            .next_in_chain = .{ .wgsl_descriptor = &.{ .code = code } },
            .label = label,
        });
    }

    /// Helper to make createShaderModule invocations slightly nicer.
    pub inline fn createShaderModuleSpirV(
        device: *Device,
        label: ?[*:0]const u8,
        code: [*]const u32,
        code_size: u32,
    ) *ShaderModule {
        return device.createShaderModule(&ShaderModule.Descriptor{
            .next_in_chain = .{ .spirv_descriptor = &.{
                .code_size = code_size,
                .code = code,
            } },
            .label = label,
        });
    }

    /// Helper to make createShaderModule invocations slightly nicer.
    pub inline fn createShaderModuleHLSL(
        device: *Device,
        label: ?[*:0]const u8,
        code: []const u8,
    ) *ShaderModule {
        return device.createShaderModule(&ShaderModule.Descriptor{
            .next_in_chain = .{ .hlsl_descriptor = &.{
                .code = code.ptr,
                .code_size = @intCast(code.len),
            } },
            .label = label,
        });
    }

    /// Helper to make createShaderModule invocations slightly nicer.
    pub inline fn createShaderModuleMSL(
        device: *Device,
        label: ?[*:0]const u8,
        code: []const u8,
    ) *ShaderModule {
        return device.createShaderModule(&ShaderModule.Descriptor{
            .next_in_chain = .{ .msl_descriptor = &.{
                .code = code.ptr,
                .code_size = @intCast(code.len),
            } },
            .label = label,
        });
    }

    pub inline fn createSwapChain(device: *Device, surface: ?*Surface, descriptor: *const SwapChain.Descriptor) *SwapChain {
        return Impl.deviceCreateSwapChain(device, surface, descriptor);
    }

    pub inline fn createTexture(device: *Device, descriptor: *const Texture.Descriptor) *Texture {
        return Impl.deviceCreateTexture(device, descriptor);
    }

    pub inline fn destroy(device: *Device) void {
        Impl.deviceDestroy(device);
    }

    /// Call once with null to determine the array length, and again to fetch the feature list.
    ///
    /// Consider using the enumerateFeaturesOwned helper.
    pub inline fn enumerateFeatures(device: *Device, features: ?[*]FeatureName) usize {
        return Impl.deviceEnumerateFeatures(device, features);
    }

    /// Enumerates the device features, storing the result in an allocated slice which is owned by
    /// the caller.
    pub inline fn enumerateFeaturesOwned(device: *Device, allocator: std.mem.Allocator) ![]FeatureName {
        const count = device.enumerateFeatures(null);
        const data = try allocator.alloc(FeatureName, count);
        _ = device.enumerateFeatures(data.ptr);
        return data;
    }

    pub inline fn forceLoss(device: *Device, reason: LostReason, message: [*:0]const u8) void {
        return Impl.deviceForceLoss(device, reason, message);
    }

    pub inline fn getAdapter(device: *Device) *Adapter {
        return Impl.deviceGetAdapter(device);
    }

    pub inline fn getLimits(device: *Device, limits: *SupportedLimits) bool {
        return Impl.deviceGetLimits(device, limits);
    }

    pub inline fn getQueue(device: *Device) *Queue {
        return Impl.deviceGetQueue(device);
    }

    pub inline fn hasFeature(device: *Device, feature: FeatureName) bool {
        return Impl.deviceHasFeature(device, feature);
    }

    pub inline fn injectError(device: *Device, typ: ErrorType, message: [*:0]const u8) void {
        Impl.deviceInjectError(device, typ, message);
    }

    pub inline fn popErrorScope(
        device: *Device,
        context: anytype,
        comptime callback: fn (ctx: @TypeOf(context), typ: ErrorType, message: [*:0]const u8) callconv(.Inline) void,
    ) void {
        const Context = @TypeOf(context);
        const Helper = struct {
            pub fn cCallback(typ: ErrorType, message: [*:0]const u8, userdata: ?*anyopaque) callconv(.C) void {
                callback(if (Context == void) {} else @as(Context, @ptrCast(@alignCast(userdata))), typ, message);
            }
        };
        Impl.devicePopErrorScope(device, Helper.cCallback, if (Context == void) null else context);
    }

    pub inline fn pushErrorScope(device: *Device, filter: ErrorFilter) void {
        Impl.devicePushErrorScope(device, filter);
    }

    pub inline fn setDeviceLostCallback(
        device: *Device,
        context: anytype,
        comptime callback: ?fn (ctx: @TypeOf(context), reason: LostReason, message: [*:0]const u8) callconv(.Inline) void,
    ) void {
        if (callback) |cb| {
            const Context = @TypeOf(context);
            const Helper = struct {
                pub fn cCallback(reason: LostReason, message: [*:0]const u8, userdata: ?*anyopaque) callconv(.C) void {
                    cb(if (Context == void) {} else @as(Context, @ptrCast(@alignCast(userdata))), reason, message);
                }
            };
            Impl.deviceSetDeviceLostCallback(device, Helper.cCallback, if (Context == void) null else context);
        } else {
            Impl.deviceSetDeviceLostCallback(device, null, null);
        }
    }

    pub inline fn setLabel(device: *Device, label: [*:0]const u8) void {
        Impl.deviceSetLabel(device, label);
    }

    pub inline fn setLoggingCallback(
        device: *Device,
        context: anytype,
        comptime callback: ?fn (ctx: @TypeOf(context), typ: LoggingType, message: [*:0]const u8) callconv(.Inline) void,
    ) void {
        if (callback) |cb| {
            const Context = @TypeOf(context);
            const Helper = struct {
                pub fn cCallback(typ: LoggingType, message: [*:0]const u8, userdata: ?*anyopaque) callconv(.C) void {
                    cb(if (Context == void) {} else @as(Context, @ptrCast(@alignCast(userdata))), typ, message);
                }
            };
            Impl.deviceSetLoggingCallback(device, Helper.cCallback, if (Context == void) null else context);
        } else {
            Impl.deviceSetLoggingCallback(device, null, null);
        }
    }

    pub inline fn setUncapturedErrorCallback(
        device: *Device,
        context: anytype,
        comptime callback: ?fn (ctx: @TypeOf(context), typ: ErrorType, message: [*:0]const u8) callconv(.Inline) void,
    ) void {
        if (callback) |cb| {
            const Context = @TypeOf(context);
            const Helper = struct {
                pub fn cCallback(typ: ErrorType, message: [*:0]const u8, userdata: ?*anyopaque) callconv(.C) void {
                    cb(if (Context == void) {} else @as(Context, @ptrCast(@alignCast(userdata))), typ, message);
                }
            };
            Impl.deviceSetUncapturedErrorCallback(device, Helper.cCallback, if (Context == void) null else context);
        } else {
            Impl.deviceSetUncapturedErrorCallback(device, null, null);
        }
    }

    pub inline fn tick(device: *Device) void {
        Impl.deviceTick(device);
    }

    // Mach WebGPU extension. Supported with mach-gpu-dawn.
    //
    // When making Metal interop with other APIs, we need to be careful that QueueSubmit doesn't
    // mean that the operations will be visible to other APIs/Metal devices right away. macOS
    // does have a global queue of graphics operations, but the command buffers are inserted there
    // when they are "scheduled". Submitting other operations before the command buffer is
    // scheduled could lead to races in who gets scheduled first and incorrect rendering.
    pub inline fn machWaitForCommandsToBeScheduled(device: *Device) void {
        Impl.machDeviceWaitForCommandsToBeScheduled(device);
    }

    pub inline fn validateTextureDescriptor(device: *Device, descriptor: *const Texture.Descriptor) void {
        Impl.deviceVlidateTextureDescriptor(device, descriptor);
    }

    pub inline fn reference(device: *Device) void {
        Impl.deviceReference(device);
    }

    pub inline fn release(device: *Device) void {
        Impl.deviceRelease(device);
    }
};
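A usage sketch (not part of the dump) of the createShaderModuleWGSL helper above, which hides the chained WGSLDescriptor setup. The shader source and function name are purely illustrative.

const Device = @import("device.zig").Device;
const ShaderModule = @import("shader_module.zig").ShaderModule;

fn makeNoopShader(device: *Device) *ShaderModule {
    // The multiline string literal coerces to the [*:0]const u8 expected here.
    return device.createShaderModuleWGSL("noop",
        \\@compute @workgroup_size(1)
        \\fn main() {}
    );
}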
0
repos/mach-sysgpu/src
repos/mach-sysgpu/src/sysgpu/shader_module.zig
const ChainedStruct = @import("main.zig").ChainedStruct;
const CompilationInfoCallback = @import("main.zig").CompilationInfoCallback;
const CompilationInfoRequestStatus = @import("main.zig").CompilationInfoRequestStatus;
const CompilationInfo = @import("main.zig").CompilationInfo;
const Impl = @import("interface.zig").Impl;
const dawn = @import("dawn.zig");

pub const ShaderModule = opaque {
    pub const Descriptor = extern struct {
        pub const NextInChain = extern union {
            generic: ?*const ChainedStruct,
            spirv_descriptor: ?*const SPIRVDescriptor,
            wgsl_descriptor: ?*const WGSLDescriptor,
            hlsl_descriptor: ?*const HLSLDescriptor,
            msl_descriptor: ?*const MSLDescriptor,
            dawn_shader_module_spirv_options_descriptor: ?*const dawn.ShaderModuleSPIRVOptionsDescriptor,
        };

        next_in_chain: NextInChain = .{ .generic = null },
        label: ?[*:0]const u8 = null,
    };

    pub const SPIRVDescriptor = extern struct {
        chain: ChainedStruct = .{ .next = null, .s_type = .shader_module_spirv_descriptor },
        code_size: u32,
        code: [*]const u32,
    };

    pub const WGSLDescriptor = extern struct {
        chain: ChainedStruct = .{ .next = null, .s_type = .shader_module_wgsl_descriptor },
        code: [*:0]const u8,
    };

    pub const HLSLDescriptor = extern struct {
        chain: ChainedStruct = .{ .next = null, .s_type = .shader_module_hlsl_descriptor },
        code: [*]const u8,
        code_size: u32,
    };

    pub const MSLDescriptor = extern struct {
        chain: ChainedStruct = .{ .next = null, .s_type = .shader_module_msl_descriptor },
        code: [*]const u8,
        code_size: u32,
        workgroup_size: WorkgroupSize,
    };

    pub const WorkgroupSize = extern struct { x: u32 = 1, y: u32 = 1, z: u32 = 1 };

    pub inline fn getCompilationInfo(
        shader_module: *ShaderModule,
        context: anytype,
        comptime callback: fn (
            ctx: @TypeOf(context),
            status: CompilationInfoRequestStatus,
            compilation_info: *const CompilationInfo,
        ) callconv(.Inline) void,
    ) void {
        const Context = @TypeOf(context);
        const Helper = struct {
            pub fn cCallback(
                status: CompilationInfoRequestStatus,
                compilation_info: *const CompilationInfo,
                userdata: ?*anyopaque,
            ) callconv(.C) void {
                callback(
                    if (Context == void) {} else @as(Context, @ptrCast(@alignCast(userdata))),
                    status,
                    compilation_info,
                );
            }
        };
        Impl.shaderModuleGetCompilationInfo(shader_module, Helper.cCallback, if (Context == void) null else context);
    }

    pub inline fn setLabel(shader_module: *ShaderModule, label: [*:0]const u8) void {
        Impl.shaderModuleSetLabel(shader_module, label);
    }

    pub inline fn reference(shader_module: *ShaderModule) void {
        Impl.shaderModuleReference(shader_module);
    }

    pub inline fn release(shader_module: *ShaderModule) void {
        Impl.shaderModuleRelease(shader_module);
    }
};
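A sketch (not part of the dump) of passing a void context to getCompilationInfo; the CompilationInfo layout lives in main.zig, so only the status is printed here. The function name is hypothetical.

const std = @import("std");
const ShaderModule = @import("shader_module.zig").ShaderModule;
const CompilationInfo = @import("main.zig").CompilationInfo;
const CompilationInfoRequestStatus = @import("main.zig").CompilationInfoRequestStatus;

fn checkShader(module: *ShaderModule) void {
    module.getCompilationInfo({}, struct {
        inline fn cb(_: void, status: CompilationInfoRequestStatus, info: *const CompilationInfo) void {
            _ = info; // inspect CompilationInfo's message list via main.zig as needed
            std.debug.print("compilation info status: {}\n", .{status});
        }
    }.cb);
}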
0
repos/mach-sysgpu/src
repos/mach-sysgpu/src/sysgpu/render_bundle.zig
const ChainedStruct = @import("main.zig").ChainedStruct;
const Impl = @import("interface.zig").Impl;

pub const RenderBundle = opaque {
    pub const Descriptor = extern struct {
        next_in_chain: ?*const ChainedStruct = null,
        label: ?[*:0]const u8 = null,
    };

    pub inline fn setLabel(render_bundle: *RenderBundle, label: [*:0]const u8) void {
        Impl.renderBundleSetLabel(render_bundle, label);
    }

    pub inline fn reference(render_bundle: *RenderBundle) void {
        Impl.renderBundleReference(render_bundle);
    }

    pub inline fn release(render_bundle: *RenderBundle) void {
        Impl.renderBundleRelease(render_bundle);
    }
};
0
repos/mach-sysgpu/src
repos/mach-sysgpu/src/sysgpu/queue.zig
const std = @import("std");
const CommandBuffer = @import("command_buffer.zig").CommandBuffer;
const Buffer = @import("buffer.zig").Buffer;
const Texture = @import("texture.zig").Texture;
const ImageCopyTexture = @import("main.zig").ImageCopyTexture;
const ImageCopyExternalTexture = @import("main.zig").ImageCopyExternalTexture;
const ChainedStruct = @import("main.zig").ChainedStruct;
const Extent3D = @import("main.zig").Extent3D;
const CopyTextureForBrowserOptions = @import("main.zig").CopyTextureForBrowserOptions;
const Impl = @import("interface.zig").Impl;

pub const Queue = opaque {
    pub const WorkDoneCallback = *const fn (
        status: WorkDoneStatus,
        userdata: ?*anyopaque,
    ) callconv(.C) void;

    pub const WorkDoneStatus = enum(u32) {
        success = 0x00000000,
        err = 0x00000001,
        unknown = 0x00000002,
        device_lost = 0x00000003,
    };

    pub const Descriptor = extern struct {
        next_in_chain: ?*const ChainedStruct = null,
        label: ?[*:0]const u8 = null,
    };

    pub inline fn copyExternalTextureForBrowser(queue: *Queue, source: *const ImageCopyExternalTexture, destination: *const ImageCopyTexture, copy_size: *const Extent3D, options: *const CopyTextureForBrowserOptions) void {
        Impl.queueCopyExternalTextureForBrowser(queue, source, destination, copy_size, options);
    }

    pub inline fn copyTextureForBrowser(queue: *Queue, source: *const ImageCopyTexture, destination: *const ImageCopyTexture, copy_size: *const Extent3D, options: *const CopyTextureForBrowserOptions) void {
        Impl.queueCopyTextureForBrowser(queue, source, destination, copy_size, options);
    }

    // TODO: dawn: does not allow unsetting this callback to null
    pub inline fn onSubmittedWorkDone(
        queue: *Queue,
        signal_value: u64,
        context: anytype,
        comptime callback: fn (ctx: @TypeOf(context), status: WorkDoneStatus) callconv(.Inline) void,
    ) void {
        const Context = @TypeOf(context);
        const Helper = struct {
            pub fn cCallback(status: WorkDoneStatus, userdata: ?*anyopaque) callconv(.C) void {
                callback(if (Context == void) {} else @as(Context, @ptrCast(@alignCast(userdata))), status);
            }
        };
        Impl.queueOnSubmittedWorkDone(queue, signal_value, Helper.cCallback, if (Context == void) null else context);
    }

    pub inline fn setLabel(queue: *Queue, label: [*:0]const u8) void {
        Impl.queueSetLabel(queue, label);
    }

    pub inline fn submit(queue: *Queue, commands: []const *const CommandBuffer) void {
        Impl.queueSubmit(queue, commands.len, commands.ptr);
    }

    pub inline fn writeBuffer(
        queue: *Queue,
        buffer: *Buffer,
        buffer_offset_bytes: u64,
        data_slice: anytype,
    ) void {
        Impl.queueWriteBuffer(
            queue,
            buffer,
            buffer_offset_bytes,
            @as(*const anyopaque, @ptrCast(std.mem.sliceAsBytes(data_slice).ptr)),
            data_slice.len * @sizeOf(std.meta.Elem(@TypeOf(data_slice))),
        );
    }

    pub inline fn writeTexture(
        queue: *Queue,
        destination: *const ImageCopyTexture,
        data_layout: *const Texture.DataLayout,
        write_size: *const Extent3D,
        data_slice: anytype,
    ) void {
        Impl.queueWriteTexture(
            queue,
            destination,
            @as(*const anyopaque, @ptrCast(std.mem.sliceAsBytes(data_slice).ptr)),
            @as(usize, @intCast(data_slice.len)) * @sizeOf(std.meta.Elem(@TypeOf(data_slice))),
            data_layout,
            write_size,
        );
    }

    pub inline fn reference(queue: *Queue) void {
        Impl.queueReference(queue);
    }

    pub inline fn release(queue: *Queue) void {
        Impl.queueRelease(queue);
    }
};
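A usage sketch (not part of the dump): writeBuffer above is generic over any slice, deriving the byte pointer and byte size from the element type, so uploads need no manual casting. The function name is hypothetical.

const Queue = @import("queue.zig").Queue;
const Buffer = @import("buffer.zig").Buffer;

fn uploadVertices(queue: *Queue, vbo: *Buffer, vertices: []const f32) void {
    // Byte size = vertices.len * @sizeOf(f32), computed inside writeBuffer.
    queue.writeBuffer(vbo, 0, vertices);
}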
0
repos/mach-sysgpu/src
repos/mach-sysgpu/src/sysgpu/compute_pass_encoder.zig
const Buffer = @import("buffer.zig").Buffer;
const BindGroup = @import("bind_group.zig").BindGroup;
const ComputePipeline = @import("compute_pipeline.zig").ComputePipeline;
const QuerySet = @import("query_set.zig").QuerySet;
const Impl = @import("interface.zig").Impl;

pub const ComputePassEncoder = opaque {
    /// Default `workgroup_count_y`: 1
    /// Default `workgroup_count_z`: 1
    pub inline fn dispatchWorkgroups(compute_pass_encoder: *ComputePassEncoder, workgroup_count_x: u32, workgroup_count_y: u32, workgroup_count_z: u32) void {
        Impl.computePassEncoderDispatchWorkgroups(compute_pass_encoder, workgroup_count_x, workgroup_count_y, workgroup_count_z);
    }

    pub inline fn dispatchWorkgroupsIndirect(compute_pass_encoder: *ComputePassEncoder, indirect_buffer: *Buffer, indirect_offset: u64) void {
        Impl.computePassEncoderDispatchWorkgroupsIndirect(compute_pass_encoder, indirect_buffer, indirect_offset);
    }

    pub inline fn end(compute_pass_encoder: *ComputePassEncoder) void {
        Impl.computePassEncoderEnd(compute_pass_encoder);
    }

    pub inline fn insertDebugMarker(compute_pass_encoder: *ComputePassEncoder, marker_label: [*:0]const u8) void {
        Impl.computePassEncoderInsertDebugMarker(compute_pass_encoder, marker_label);
    }

    pub inline fn popDebugGroup(compute_pass_encoder: *ComputePassEncoder) void {
        Impl.computePassEncoderPopDebugGroup(compute_pass_encoder);
    }

    pub inline fn pushDebugGroup(compute_pass_encoder: *ComputePassEncoder, group_label: [*:0]const u8) void {
        Impl.computePassEncoderPushDebugGroup(compute_pass_encoder, group_label);
    }

    /// Default `dynamic_offsets`: null
    pub inline fn setBindGroup(compute_pass_encoder: *ComputePassEncoder, group_index: u32, group: *BindGroup, dynamic_offsets: ?[]const u32) void {
        Impl.computePassEncoderSetBindGroup(
            compute_pass_encoder,
            group_index,
            group,
            if (dynamic_offsets) |v| v.len else 0,
            if (dynamic_offsets) |v| v.ptr else null,
        );
    }

    pub inline fn setLabel(compute_pass_encoder: *ComputePassEncoder, label: [*:0]const u8) void {
        Impl.computePassEncoderSetLabel(compute_pass_encoder, label);
    }

    pub inline fn setPipeline(compute_pass_encoder: *ComputePassEncoder, pipeline: *ComputePipeline) void {
        Impl.computePassEncoderSetPipeline(compute_pass_encoder, pipeline);
    }

    pub inline fn writeTimestamp(compute_pass_encoder: *ComputePassEncoder, query_set: *QuerySet, query_index: u32) void {
        Impl.computePassEncoderWriteTimestamp(compute_pass_encoder, query_set, query_index);
    }

    pub inline fn reference(compute_pass_encoder: *ComputePassEncoder) void {
        Impl.computePassEncoderReference(compute_pass_encoder);
    }

    pub inline fn release(compute_pass_encoder: *ComputePassEncoder) void {
        Impl.computePassEncoderRelease(compute_pass_encoder);
    }
};
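A sketch (not part of the dump) of the usual compute-pass sequence: begin the pass, bind a pipeline and bind group, dispatch, end. The function name is hypothetical.

const CommandEncoder = @import("command_encoder.zig").CommandEncoder;
const ComputePipeline = @import("compute_pipeline.zig").ComputePipeline;
const BindGroup = @import("bind_group.zig").BindGroup;

fn encodeCompute(encoder: *CommandEncoder, pipeline: *ComputePipeline, group: *BindGroup, workgroups_x: u32) void {
    const pass = encoder.beginComputePass(null);
    defer pass.release(); // runs after end()
    pass.setPipeline(pipeline);
    pass.setBindGroup(0, group, null); // no dynamic offsets
    pass.dispatchWorkgroups(workgroups_x, 1, 1);
    pass.end();
}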
0
repos/mach-sysgpu/src
repos/mach-sysgpu/src/sysgpu/swap_chain.zig
const ChainedStruct = @import("main.zig").ChainedStruct;
const PresentMode = @import("main.zig").PresentMode;
const Texture = @import("texture.zig").Texture;
const TextureView = @import("texture_view.zig").TextureView;
const Impl = @import("interface.zig").Impl;

pub const SwapChain = opaque {
    pub const Descriptor = extern struct {
        next_in_chain: ?*const ChainedStruct = null,
        label: ?[*:0]const u8 = null,
        usage: Texture.UsageFlags,
        format: Texture.Format,
        width: u32,
        height: u32,
        present_mode: PresentMode,
    };

    pub inline fn getCurrentTexture(swap_chain: *SwapChain) ?*Texture {
        return Impl.swapChainGetCurrentTexture(swap_chain);
    }

    pub inline fn getCurrentTextureView(swap_chain: *SwapChain) ?*TextureView {
        return Impl.swapChainGetCurrentTextureView(swap_chain);
    }

    pub inline fn present(swap_chain: *SwapChain) void {
        Impl.swapChainPresent(swap_chain);
    }

    pub inline fn reference(swap_chain: *SwapChain) void {
        Impl.swapChainReference(swap_chain);
    }

    pub inline fn release(swap_chain: *SwapChain) void {
        Impl.swapChainRelease(swap_chain);
    }
};
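A per-frame sketch (not part of the dump). getCurrentTextureView can return null (e.g. on a lost surface), so the optional must be handled; TextureView is assumed to follow the same reference/release pattern as the other objects in this package. The function name is hypothetical.

const SwapChain = @import("swap_chain.zig").SwapChain;

fn presentOneFrame(swap_chain: *SwapChain) bool {
    const view = swap_chain.getCurrentTextureView() orelse return false;
    defer view.release();
    // ... encode and submit a render pass targeting `view` here ...
    swap_chain.present();
    return true;
}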
0
repos/mach-sysgpu/src
repos/mach-sysgpu/src/sysgpu/render_pipeline.zig
const ChainedStruct = @import("main.zig").ChainedStruct;
const DepthStencilState = @import("main.zig").DepthStencilState;
const MultisampleState = @import("main.zig").MultisampleState;
const VertexState = @import("main.zig").VertexState;
const PrimitiveState = @import("main.zig").PrimitiveState;
const FragmentState = @import("main.zig").FragmentState;
const PipelineLayout = @import("pipeline_layout.zig").PipelineLayout;
const BindGroupLayout = @import("bind_group_layout.zig").BindGroupLayout;
const Impl = @import("interface.zig").Impl;

pub const RenderPipeline = opaque {
    pub const Descriptor = extern struct {
        next_in_chain: ?*const ChainedStruct = null,
        label: ?[*:0]const u8 = null,
        layout: ?*PipelineLayout = null,
        vertex: VertexState,
        primitive: PrimitiveState = .{},
        depth_stencil: ?*const DepthStencilState = null,
        multisample: MultisampleState = .{},
        fragment: ?*const FragmentState = null,
    };

    pub inline fn getBindGroupLayout(render_pipeline: *RenderPipeline, group_index: u32) *BindGroupLayout {
        return Impl.renderPipelineGetBindGroupLayout(render_pipeline, group_index);
    }

    pub inline fn setLabel(render_pipeline: *RenderPipeline, label: [*:0]const u8) void {
        Impl.renderPipelineSetLabel(render_pipeline, label);
    }

    pub inline fn reference(render_pipeline: *RenderPipeline) void {
        Impl.renderPipelineReference(render_pipeline);
    }

    pub inline fn release(render_pipeline: *RenderPipeline) void {
        Impl.renderPipelineRelease(render_pipeline);
    }
};
0
repos/mach-sysgpu/src
repos/mach-sysgpu/src/sysgpu/shared_fence.zig
const ChainedStruct = @import("main.zig").ChainedStruct;
const ChainedStructOut = @import("main.zig").ChainedStructOut;

pub const SharedFence = opaque {
    pub const Type = enum(u32) {
        shared_fence_type_undefined = 0x00000000,
        shared_fence_type_vk_semaphore_opaque_fd = 0x00000001,
        shared_fence_type_vk_semaphore_sync_fd = 0x00000002,
        shared_fence_type_vk_semaphore_zircon_handle = 0x00000003,
        shared_fence_type_dxgi_shared_handle = 0x00000004,
        shared_fence_type_mtl_shared_event = 0x00000005,
    };

    pub const Descriptor = extern struct {
        pub const NextInChain = extern union {
            generic: ?*const ChainedStruct,
            vk_semaphore_opaque_fd_descriptor: *const VkSemaphoreOpaqueFDDescriptor,
            vk_semaphore_sync_fd_descriptor: *const VkSemaphoreSyncFDDescriptor,
            vk_semaphore_zircon_handle_descriptor: *const VkSemaphoreZirconHandleDescriptor,
            dxgi_shared_handle_descriptor: *const DXGISharedHandleDescriptor,
            mtl_shared_event_descriptor: *const MTLSharedEventDescriptor,
        };

        next_in_chain: NextInChain = .{ .generic = null },
        label: ?[*]const u8,
    };

    pub const DXGISharedHandleDescriptor = extern struct {
        chain: ChainedStruct,
        handle: *anyopaque,
    };

    pub const DXGISharedHandleExportInfo = extern struct {
        chain: ChainedStructOut,
        handle: *anyopaque,
    };

    pub const ExportInfo = extern struct {
        pub const NextInChain = extern union {
            generic: ?*const ChainedStructOut,
            dxgi_shared_handle_export_info: *const DXGISharedHandleExportInfo,
            mtl_shared_event_export_info: *const MTLSharedEventExportInfo,
            vk_semaphore_opaque_fd_export_info: *const VkSemaphoreOpaqueFDExportInfo,
            vk_semaphore_sync_fd_export_info: *const VkSemaphoreSyncFDExportInfo,
            vk_semaphore_zircon_handle_export_info: *const VkSemaphoreZirconHandleExportInfo,
        };

        next_in_chain: NextInChain = .{ .generic = null },
        type: Type,
    };

    pub const MTLSharedEventDescriptor = extern struct {
        chain: ChainedStruct,
        shared_event: *anyopaque,
    };

    pub const MTLSharedEventExportInfo = extern struct {
        chain: ChainedStructOut,
        shared_event: *anyopaque,
    };

    pub const VkSemaphoreOpaqueFDDescriptor = extern struct {
        chain: ChainedStruct,
        handle: c_int,
    };

    pub const VkSemaphoreOpaqueFDExportInfo = extern struct {
        chain: ChainedStructOut,
        handle: c_int,
    };

    pub const VkSemaphoreSyncFDDescriptor = extern struct {
        chain: ChainedStruct,
        handle: c_int,
    };

    pub const VkSemaphoreSyncFDExportInfo = extern struct {
        chain: ChainedStructOut,
        handle: c_int,
    };

    pub const VkSemaphoreZirconHandleDescriptor = extern struct {
        chain: ChainedStruct,
        handle: u32,
    };

    pub const VkSemaphoreZirconHandleExportInfo = extern struct {
        chain: ChainedStructOut,
        handle: u32,
    };
};
0
repos/mach-sysgpu/src
repos/mach-sysgpu/src/opengl/proc.zig
const std = @import("std"); const c = @import("c.zig"); var libgl: std.DynLib = undefined; fn removeOptional(comptime T: type) type { return switch (@typeInfo(T)) { .Optional => |opt| opt.child, else => T, }; } fn getProcAddress(name_ptr: [*:0]const u8) c.PROC { const name = std.mem.span(name_ptr); return libgl.lookup(removeOptional(c.PROC), name); } pub fn init() !void { libgl = try std.DynLib.openZ("opengl32.dll"); } pub fn deinit() void { libgl.close(); } pub const InstanceWGL = struct { getExtensionsStringARB: removeOptional(c.PFNWGLGETEXTENSIONSSTRINGARBPROC), createContextAttribsARB: removeOptional(c.PFNWGLCREATECONTEXTATTRIBSARBPROC), choosePixelFormatARB: removeOptional(c.PFNWGLCHOOSEPIXELFORMATARBPROC), pub fn load(wgl: *InstanceWGL) void { wgl.getExtensionsStringARB = @ptrCast(c.wglGetProcAddress("wglGetExtensionsStringARB")); wgl.createContextAttribsARB = @ptrCast(c.wglGetProcAddress("wglCreateContextAttribsARB")); wgl.choosePixelFormatARB = @ptrCast(c.wglGetProcAddress("wglChoosePixelFormatARB")); } }; pub const AdapterGL = struct { getString: removeOptional(c.PFNGLGETSTRINGPROC), pub fn load(gl: *AdapterGL) void { gl.getString = @ptrCast(getProcAddress("glGetString")); } }; pub const DeviceGL = struct { // 1.0 cullFace: removeOptional(c.PFNGLCULLFACEPROC), frontFace: removeOptional(c.PFNGLFRONTFACEPROC), hint: removeOptional(c.PFNGLHINTPROC), lineWidth: removeOptional(c.PFNGLLINEWIDTHPROC), pointSize: removeOptional(c.PFNGLPOINTSIZEPROC), polygonMode: removeOptional(c.PFNGLPOLYGONMODEPROC), scissor: removeOptional(c.PFNGLSCISSORPROC), texParameterf: removeOptional(c.PFNGLTEXPARAMETERFPROC), texParameterfv: removeOptional(c.PFNGLTEXPARAMETERFVPROC), texParameteri: removeOptional(c.PFNGLTEXPARAMETERIPROC), texParameteriv: removeOptional(c.PFNGLTEXPARAMETERIVPROC), texImage1D: removeOptional(c.PFNGLTEXIMAGE1DPROC), texImage2D: removeOptional(c.PFNGLTEXIMAGE2DPROC), drawBuffer: removeOptional(c.PFNGLDRAWBUFFERPROC), clear: removeOptional(c.PFNGLCLEARPROC), clearColor: removeOptional(c.PFNGLCLEARCOLORPROC), clearStencil: removeOptional(c.PFNGLCLEARSTENCILPROC), clearDepth: removeOptional(c.PFNGLCLEARDEPTHPROC), stencilMask: removeOptional(c.PFNGLSTENCILMASKPROC), colorMask: removeOptional(c.PFNGLCOLORMASKPROC), depthMask: removeOptional(c.PFNGLDEPTHMASKPROC), disable: removeOptional(c.PFNGLDISABLEPROC), enable: removeOptional(c.PFNGLENABLEPROC), finish: removeOptional(c.PFNGLFINISHPROC), flush: removeOptional(c.PFNGLFLUSHPROC), blendFunc: removeOptional(c.PFNGLBLENDFUNCPROC), logicOp: removeOptional(c.PFNGLLOGICOPPROC), stencilFunc: removeOptional(c.PFNGLSTENCILFUNCPROC), stencilOp: removeOptional(c.PFNGLSTENCILOPPROC), depthFunc: removeOptional(c.PFNGLDEPTHFUNCPROC), pixelStoref: removeOptional(c.PFNGLPIXELSTOREFPROC), pixelStorei: removeOptional(c.PFNGLPIXELSTOREIPROC), readBuffer: removeOptional(c.PFNGLREADBUFFERPROC), readPixels: removeOptional(c.PFNGLREADPIXELSPROC), getBooleanv: removeOptional(c.PFNGLGETBOOLEANVPROC), getDoublev: removeOptional(c.PFNGLGETDOUBLEVPROC), getError: removeOptional(c.PFNGLGETERRORPROC), getFloatv: removeOptional(c.PFNGLGETFLOATVPROC), getIntegerv: removeOptional(c.PFNGLGETINTEGERVPROC), getString: removeOptional(c.PFNGLGETSTRINGPROC), getTexImage: removeOptional(c.PFNGLGETTEXIMAGEPROC), getTexParameterfv: removeOptional(c.PFNGLGETTEXPARAMETERFVPROC), getTexParameteriv: removeOptional(c.PFNGLGETTEXPARAMETERIVPROC), getTexLevelParameterfv: removeOptional(c.PFNGLGETTEXLEVELPARAMETERFVPROC), getTexLevelParameteriv: 
    removeOptional(c.PFNGLGETTEXLEVELPARAMETERIVPROC),
    isEnabled: removeOptional(c.PFNGLISENABLEDPROC),
    depthRange: removeOptional(c.PFNGLDEPTHRANGEPROC),
    viewport: removeOptional(c.PFNGLVIEWPORTPROC),
    // 1.1
    drawArrays: removeOptional(c.PFNGLDRAWARRAYSPROC),
    drawElements: removeOptional(c.PFNGLDRAWELEMENTSPROC),
    getPointerv: removeOptional(c.PFNGLGETPOINTERVPROC),
    polygonOffset: removeOptional(c.PFNGLPOLYGONOFFSETPROC),
    copyTexImage1D: removeOptional(c.PFNGLCOPYTEXIMAGE1DPROC),
    copyTexImage2D: removeOptional(c.PFNGLCOPYTEXIMAGE2DPROC),
    copyTexSubImage1D: removeOptional(c.PFNGLCOPYTEXSUBIMAGE1DPROC),
    copyTexSubImage2D: removeOptional(c.PFNGLCOPYTEXSUBIMAGE2DPROC),
    texSubImage1D: removeOptional(c.PFNGLTEXSUBIMAGE1DPROC),
    texSubImage2D: removeOptional(c.PFNGLTEXSUBIMAGE2DPROC),
    bindTexture: removeOptional(c.PFNGLBINDTEXTUREPROC),
    deleteTextures: removeOptional(c.PFNGLDELETETEXTURESPROC),
    genTextures: removeOptional(c.PFNGLGENTEXTURESPROC),
    isTexture: removeOptional(c.PFNGLISTEXTUREPROC),
    // 1.2
    drawRangeElements: removeOptional(c.PFNGLDRAWRANGEELEMENTSPROC),
    texImage3D: removeOptional(c.PFNGLTEXIMAGE3DPROC),
    texSubImage3D: removeOptional(c.PFNGLTEXSUBIMAGE3DPROC),
    copyTexSubImage3D: removeOptional(c.PFNGLCOPYTEXSUBIMAGE3DPROC),
    // 1.3
    activeTexture: removeOptional(c.PFNGLACTIVETEXTUREPROC),
    sampleCoverage: removeOptional(c.PFNGLSAMPLECOVERAGEPROC),
    compressedTexImage3D: removeOptional(c.PFNGLCOMPRESSEDTEXIMAGE3DPROC),
    compressedTexImage2D: removeOptional(c.PFNGLCOMPRESSEDTEXIMAGE2DPROC),
    compressedTexImage1D: removeOptional(c.PFNGLCOMPRESSEDTEXIMAGE1DPROC),
    compressedTexSubImage3D: removeOptional(c.PFNGLCOMPRESSEDTEXSUBIMAGE3DPROC),
    compressedTexSubImage2D: removeOptional(c.PFNGLCOMPRESSEDTEXSUBIMAGE2DPROC),
    compressedTexSubImage1D: removeOptional(c.PFNGLCOMPRESSEDTEXSUBIMAGE1DPROC),
    getCompressedTexImage: removeOptional(c.PFNGLGETCOMPRESSEDTEXIMAGEPROC),
    // 1.4
    blendFuncSeparate: removeOptional(c.PFNGLBLENDFUNCSEPARATEPROC),
    multiDrawArrays: removeOptional(c.PFNGLMULTIDRAWARRAYSPROC),
    multiDrawElements: removeOptional(c.PFNGLMULTIDRAWELEMENTSPROC),
    pointParameterf: removeOptional(c.PFNGLPOINTPARAMETERFPROC),
    pointParameterfv: removeOptional(c.PFNGLPOINTPARAMETERFVPROC),
    pointParameteri: removeOptional(c.PFNGLPOINTPARAMETERIPROC),
    pointParameteriv: removeOptional(c.PFNGLPOINTPARAMETERIVPROC),
    blendColor: removeOptional(c.PFNGLBLENDCOLORPROC),
    blendEquation: removeOptional(c.PFNGLBLENDEQUATIONPROC),
    // 1.5
    genQueries: removeOptional(c.PFNGLGENQUERIESPROC),
    deleteQueries: removeOptional(c.PFNGLDELETEQUERIESPROC),
    isQuery: removeOptional(c.PFNGLISQUERYPROC),
    beginQuery: removeOptional(c.PFNGLBEGINQUERYPROC),
    endQuery: removeOptional(c.PFNGLENDQUERYPROC),
    getQueryiv: removeOptional(c.PFNGLGETQUERYIVPROC),
    getQueryObjectiv: removeOptional(c.PFNGLGETQUERYOBJECTIVPROC),
    getQueryObjectuiv: removeOptional(c.PFNGLGETQUERYOBJECTUIVPROC),
    bindBuffer: removeOptional(c.PFNGLBINDBUFFERPROC),
    deleteBuffers: removeOptional(c.PFNGLDELETEBUFFERSPROC),
    genBuffers: removeOptional(c.PFNGLGENBUFFERSPROC),
    isBuffer: removeOptional(c.PFNGLISBUFFERPROC),
    bufferData: removeOptional(c.PFNGLBUFFERDATAPROC),
    bufferSubData: removeOptional(c.PFNGLBUFFERSUBDATAPROC),
    getBufferSubData: removeOptional(c.PFNGLGETBUFFERSUBDATAPROC),
    mapBuffer: removeOptional(c.PFNGLMAPBUFFERPROC),
    unmapBuffer: removeOptional(c.PFNGLUNMAPBUFFERPROC),
    getBufferParameteriv: removeOptional(c.PFNGLGETBUFFERPARAMETERIVPROC),
    getBufferPointerv: removeOptional(c.PFNGLGETBUFFERPOINTERVPROC),
    // 2.0
    blendEquationSeparate: removeOptional(c.PFNGLBLENDEQUATIONSEPARATEPROC),
    drawBuffers: removeOptional(c.PFNGLDRAWBUFFERSPROC),
    stencilOpSeparate: removeOptional(c.PFNGLSTENCILOPSEPARATEPROC),
    stencilFuncSeparate: removeOptional(c.PFNGLSTENCILFUNCSEPARATEPROC),
    stencilMaskSeparate: removeOptional(c.PFNGLSTENCILMASKSEPARATEPROC),
    attachShader: removeOptional(c.PFNGLATTACHSHADERPROC),
    bindAttribLocation: removeOptional(c.PFNGLBINDATTRIBLOCATIONPROC),
    compileShader: removeOptional(c.PFNGLCOMPILESHADERPROC),
    createProgram: removeOptional(c.PFNGLCREATEPROGRAMPROC),
    createShader: removeOptional(c.PFNGLCREATESHADERPROC),
    deleteProgram: removeOptional(c.PFNGLDELETEPROGRAMPROC),
    deleteShader: removeOptional(c.PFNGLDELETESHADERPROC),
    detachShader: removeOptional(c.PFNGLDETACHSHADERPROC),
    disableVertexAttribArray: removeOptional(c.PFNGLDISABLEVERTEXATTRIBARRAYPROC),
    enableVertexAttribArray: removeOptional(c.PFNGLENABLEVERTEXATTRIBARRAYPROC),
    getActiveAttrib: removeOptional(c.PFNGLGETACTIVEATTRIBPROC),
    getActiveUniform: removeOptional(c.PFNGLGETACTIVEUNIFORMPROC),
    getAttachedShaders: removeOptional(c.PFNGLGETATTACHEDSHADERSPROC),
    getAttribLocation: removeOptional(c.PFNGLGETATTRIBLOCATIONPROC),
    getProgramiv: removeOptional(c.PFNGLGETPROGRAMIVPROC),
    getProgramInfoLog: removeOptional(c.PFNGLGETPROGRAMINFOLOGPROC),
    getShaderiv: removeOptional(c.PFNGLGETSHADERIVPROC),
    getShaderInfoLog: removeOptional(c.PFNGLGETSHADERINFOLOGPROC),
    getShaderSource: removeOptional(c.PFNGLGETSHADERSOURCEPROC),
    getUniformLocation: removeOptional(c.PFNGLGETUNIFORMLOCATIONPROC),
    getUniformfv: removeOptional(c.PFNGLGETUNIFORMFVPROC),
    getUniformiv: removeOptional(c.PFNGLGETUNIFORMIVPROC),
    getVertexAttribdv: removeOptional(c.PFNGLGETVERTEXATTRIBDVPROC),
    getVertexAttribfv: removeOptional(c.PFNGLGETVERTEXATTRIBFVPROC),
    getVertexAttribiv: removeOptional(c.PFNGLGETVERTEXATTRIBIVPROC),
    getVertexAttribPointerv: removeOptional(c.PFNGLGETVERTEXATTRIBPOINTERVPROC),
    isProgram: removeOptional(c.PFNGLISPROGRAMPROC),
    isShader: removeOptional(c.PFNGLISSHADERPROC),
    linkProgram: removeOptional(c.PFNGLLINKPROGRAMPROC),
    shaderSource: removeOptional(c.PFNGLSHADERSOURCEPROC),
    useProgram: removeOptional(c.PFNGLUSEPROGRAMPROC),
    uniform1f: removeOptional(c.PFNGLUNIFORM1FPROC),
    uniform2f: removeOptional(c.PFNGLUNIFORM2FPROC),
    uniform3f: removeOptional(c.PFNGLUNIFORM3FPROC),
    uniform4f: removeOptional(c.PFNGLUNIFORM4FPROC),
    uniform1i: removeOptional(c.PFNGLUNIFORM1IPROC),
    uniform2i: removeOptional(c.PFNGLUNIFORM2IPROC),
    uniform3i: removeOptional(c.PFNGLUNIFORM3IPROC),
    uniform4i: removeOptional(c.PFNGLUNIFORM4IPROC),
    uniform1fv: removeOptional(c.PFNGLUNIFORM1FVPROC),
    uniform2fv: removeOptional(c.PFNGLUNIFORM2FVPROC),
    uniform3fv: removeOptional(c.PFNGLUNIFORM3FVPROC),
    uniform4fv: removeOptional(c.PFNGLUNIFORM4FVPROC),
    uniform1iv: removeOptional(c.PFNGLUNIFORM1IVPROC),
    uniform2iv: removeOptional(c.PFNGLUNIFORM2IVPROC),
    uniform3iv: removeOptional(c.PFNGLUNIFORM3IVPROC),
    uniform4iv: removeOptional(c.PFNGLUNIFORM4IVPROC),
    uniformMatrix2fv: removeOptional(c.PFNGLUNIFORMMATRIX2FVPROC),
    uniformMatrix3fv: removeOptional(c.PFNGLUNIFORMMATRIX3FVPROC),
    uniformMatrix4fv: removeOptional(c.PFNGLUNIFORMMATRIX4FVPROC),
    validateProgram: removeOptional(c.PFNGLVALIDATEPROGRAMPROC),
    vertexAttrib1d: removeOptional(c.PFNGLVERTEXATTRIB1DPROC),
    vertexAttrib1dv: removeOptional(c.PFNGLVERTEXATTRIB1DVPROC),
    vertexAttrib1f: removeOptional(c.PFNGLVERTEXATTRIB1FPROC),
    vertexAttrib1fv: removeOptional(c.PFNGLVERTEXATTRIB1FVPROC),
    vertexAttrib1s: removeOptional(c.PFNGLVERTEXATTRIB1SPROC),
    vertexAttrib1sv: removeOptional(c.PFNGLVERTEXATTRIB1SVPROC),
    vertexAttrib2d: removeOptional(c.PFNGLVERTEXATTRIB2DPROC),
    vertexAttrib2dv: removeOptional(c.PFNGLVERTEXATTRIB2DVPROC),
    vertexAttrib2f: removeOptional(c.PFNGLVERTEXATTRIB2FPROC),
    vertexAttrib2fv: removeOptional(c.PFNGLVERTEXATTRIB2FVPROC),
    vertexAttrib2s: removeOptional(c.PFNGLVERTEXATTRIB2SPROC),
    vertexAttrib2sv: removeOptional(c.PFNGLVERTEXATTRIB2SVPROC),
    vertexAttrib3d: removeOptional(c.PFNGLVERTEXATTRIB3DPROC),
    vertexAttrib3dv: removeOptional(c.PFNGLVERTEXATTRIB3DVPROC),
    vertexAttrib3f: removeOptional(c.PFNGLVERTEXATTRIB3FPROC),
    vertexAttrib3fv: removeOptional(c.PFNGLVERTEXATTRIB3FVPROC),
    vertexAttrib3s: removeOptional(c.PFNGLVERTEXATTRIB3SPROC),
    vertexAttrib3sv: removeOptional(c.PFNGLVERTEXATTRIB3SVPROC),
    vertexAttrib4Nbv: removeOptional(c.PFNGLVERTEXATTRIB4NBVPROC),
    vertexAttrib4Niv: removeOptional(c.PFNGLVERTEXATTRIB4NIVPROC),
    vertexAttrib4Nsv: removeOptional(c.PFNGLVERTEXATTRIB4NSVPROC),
    vertexAttrib4Nub: removeOptional(c.PFNGLVERTEXATTRIB4NUBPROC),
    vertexAttrib4Nubv: removeOptional(c.PFNGLVERTEXATTRIB4NUBVPROC),
    vertexAttrib4Nuiv: removeOptional(c.PFNGLVERTEXATTRIB4NUIVPROC),
    vertexAttrib4Nusv: removeOptional(c.PFNGLVERTEXATTRIB4NUSVPROC),
    vertexAttrib4bv: removeOptional(c.PFNGLVERTEXATTRIB4BVPROC),
    vertexAttrib4d: removeOptional(c.PFNGLVERTEXATTRIB4DPROC),
    vertexAttrib4dv: removeOptional(c.PFNGLVERTEXATTRIB4DVPROC),
    vertexAttrib4f: removeOptional(c.PFNGLVERTEXATTRIB4FPROC),
    vertexAttrib4fv: removeOptional(c.PFNGLVERTEXATTRIB4FVPROC),
    vertexAttrib4iv: removeOptional(c.PFNGLVERTEXATTRIB4IVPROC),
    vertexAttrib4s: removeOptional(c.PFNGLVERTEXATTRIB4SPROC),
    vertexAttrib4sv: removeOptional(c.PFNGLVERTEXATTRIB4SVPROC),
    vertexAttrib4ubv: removeOptional(c.PFNGLVERTEXATTRIB4UBVPROC),
    vertexAttrib4uiv: removeOptional(c.PFNGLVERTEXATTRIB4UIVPROC),
    vertexAttrib4usv: removeOptional(c.PFNGLVERTEXATTRIB4USVPROC),
    vertexAttribPointer: removeOptional(c.PFNGLVERTEXATTRIBPOINTERPROC),
    // 2.1
    uniformMatrix2x3fv: removeOptional(c.PFNGLUNIFORMMATRIX2X3FVPROC),
    uniformMatrix3x2fv: removeOptional(c.PFNGLUNIFORMMATRIX3X2FVPROC),
    uniformMatrix2x4fv: removeOptional(c.PFNGLUNIFORMMATRIX2X4FVPROC),
    uniformMatrix4x2fv: removeOptional(c.PFNGLUNIFORMMATRIX4X2FVPROC),
    uniformMatrix3x4fv: removeOptional(c.PFNGLUNIFORMMATRIX3X4FVPROC),
    uniformMatrix4x3fv: removeOptional(c.PFNGLUNIFORMMATRIX4X3FVPROC),
    // 3.0
    colorMaski: removeOptional(c.PFNGLCOLORMASKIPROC),
    getBooleani_v: removeOptional(c.PFNGLGETBOOLEANI_VPROC),
    getIntegeri_v: removeOptional(c.PFNGLGETINTEGERI_VPROC),
    enablei: removeOptional(c.PFNGLENABLEIPROC),
    disablei: removeOptional(c.PFNGLDISABLEIPROC),
    isEnabledi: removeOptional(c.PFNGLISENABLEDIPROC),
    beginTransformFeedback: removeOptional(c.PFNGLBEGINTRANSFORMFEEDBACKPROC),
    endTransformFeedback: removeOptional(c.PFNGLENDTRANSFORMFEEDBACKPROC),
    bindBufferRange: removeOptional(c.PFNGLBINDBUFFERRANGEPROC),
    bindBufferBase: removeOptional(c.PFNGLBINDBUFFERBASEPROC),
    transformFeedbackVaryings: removeOptional(c.PFNGLTRANSFORMFEEDBACKVARYINGSPROC),
    getTransformFeedbackVarying: removeOptional(c.PFNGLGETTRANSFORMFEEDBACKVARYINGPROC),
    clampColor: removeOptional(c.PFNGLCLAMPCOLORPROC),
    beginConditionalRender: removeOptional(c.PFNGLBEGINCONDITIONALRENDERPROC),
    endConditionalRender: removeOptional(c.PFNGLENDCONDITIONALRENDERPROC),
    vertexAttribIPointer: removeOptional(c.PFNGLVERTEXATTRIBIPOINTERPROC),
    getVertexAttribIiv: removeOptional(c.PFNGLGETVERTEXATTRIBIIVPROC),
    getVertexAttribIuiv: removeOptional(c.PFNGLGETVERTEXATTRIBIUIVPROC),
    vertexAttribI1i: removeOptional(c.PFNGLVERTEXATTRIBI1IPROC),
    vertexAttribI2i: removeOptional(c.PFNGLVERTEXATTRIBI2IPROC),
    vertexAttribI3i: removeOptional(c.PFNGLVERTEXATTRIBI3IPROC),
    vertexAttribI4i: removeOptional(c.PFNGLVERTEXATTRIBI4IPROC),
    vertexAttribI1ui: removeOptional(c.PFNGLVERTEXATTRIBI1UIPROC),
    vertexAttribI2ui: removeOptional(c.PFNGLVERTEXATTRIBI2UIPROC),
    vertexAttribI3ui: removeOptional(c.PFNGLVERTEXATTRIBI3UIPROC),
    vertexAttribI4ui: removeOptional(c.PFNGLVERTEXATTRIBI4UIPROC),
    vertexAttribI1iv: removeOptional(c.PFNGLVERTEXATTRIBI1IVPROC),
    vertexAttribI2iv: removeOptional(c.PFNGLVERTEXATTRIBI2IVPROC),
    vertexAttribI3iv: removeOptional(c.PFNGLVERTEXATTRIBI3IVPROC),
    vertexAttribI4iv: removeOptional(c.PFNGLVERTEXATTRIBI4IVPROC),
    vertexAttribI1uiv: removeOptional(c.PFNGLVERTEXATTRIBI1UIVPROC),
    vertexAttribI2uiv: removeOptional(c.PFNGLVERTEXATTRIBI2UIVPROC),
    vertexAttribI3uiv: removeOptional(c.PFNGLVERTEXATTRIBI3UIVPROC),
    vertexAttribI4uiv: removeOptional(c.PFNGLVERTEXATTRIBI4UIVPROC),
    vertexAttribI4bv: removeOptional(c.PFNGLVERTEXATTRIBI4BVPROC),
    vertexAttribI4sv: removeOptional(c.PFNGLVERTEXATTRIBI4SVPROC),
    vertexAttribI4ubv: removeOptional(c.PFNGLVERTEXATTRIBI4UBVPROC),
    vertexAttribI4usv: removeOptional(c.PFNGLVERTEXATTRIBI4USVPROC),
    getUniformuiv: removeOptional(c.PFNGLGETUNIFORMUIVPROC),
    bindFragDataLocation: removeOptional(c.PFNGLBINDFRAGDATALOCATIONPROC),
    getFragDataLocation: removeOptional(c.PFNGLGETFRAGDATALOCATIONPROC),
    uniform1ui: removeOptional(c.PFNGLUNIFORM1UIPROC),
    uniform2ui: removeOptional(c.PFNGLUNIFORM2UIPROC),
    uniform3ui: removeOptional(c.PFNGLUNIFORM3UIPROC),
    uniform4ui: removeOptional(c.PFNGLUNIFORM4UIPROC),
    uniform1uiv: removeOptional(c.PFNGLUNIFORM1UIVPROC),
    uniform2uiv: removeOptional(c.PFNGLUNIFORM2UIVPROC),
    uniform3uiv: removeOptional(c.PFNGLUNIFORM3UIVPROC),
    uniform4uiv: removeOptional(c.PFNGLUNIFORM4UIVPROC),
    texParameterIiv: removeOptional(c.PFNGLTEXPARAMETERIIVPROC),
    texParameterIuiv: removeOptional(c.PFNGLTEXPARAMETERIUIVPROC),
    getTexParameterIiv: removeOptional(c.PFNGLGETTEXPARAMETERIIVPROC),
    getTexParameterIuiv: removeOptional(c.PFNGLGETTEXPARAMETERIUIVPROC),
    clearBufferiv: removeOptional(c.PFNGLCLEARBUFFERIVPROC),
    clearBufferuiv: removeOptional(c.PFNGLCLEARBUFFERUIVPROC),
    clearBufferfv: removeOptional(c.PFNGLCLEARBUFFERFVPROC),
    clearBufferfi: removeOptional(c.PFNGLCLEARBUFFERFIPROC),
    getStringi: removeOptional(c.PFNGLGETSTRINGIPROC),
    isRenderbuffer: removeOptional(c.PFNGLISRENDERBUFFERPROC),
    bindRenderbuffer: removeOptional(c.PFNGLBINDRENDERBUFFERPROC),
    deleteRenderbuffers: removeOptional(c.PFNGLDELETERENDERBUFFERSPROC),
    genRenderbuffers: removeOptional(c.PFNGLGENRENDERBUFFERSPROC),
    renderbufferStorage: removeOptional(c.PFNGLRENDERBUFFERSTORAGEPROC),
    getRenderbufferParameteriv: removeOptional(c.PFNGLGETRENDERBUFFERPARAMETERIVPROC),
    isFramebuffer: removeOptional(c.PFNGLISFRAMEBUFFERPROC),
    bindFramebuffer: removeOptional(c.PFNGLBINDFRAMEBUFFERPROC),
    deleteFramebuffers: removeOptional(c.PFNGLDELETEFRAMEBUFFERSPROC),
    genFramebuffers: removeOptional(c.PFNGLGENFRAMEBUFFERSPROC),
    checkFramebufferStatus: removeOptional(c.PFNGLCHECKFRAMEBUFFERSTATUSPROC),
    framebufferTexture1D: removeOptional(c.PFNGLFRAMEBUFFERTEXTURE1DPROC),
    framebufferTexture2D: removeOptional(c.PFNGLFRAMEBUFFERTEXTURE2DPROC),
    framebufferTexture3D: removeOptional(c.PFNGLFRAMEBUFFERTEXTURE3DPROC),
    framebufferRenderbuffer: removeOptional(c.PFNGLFRAMEBUFFERRENDERBUFFERPROC),
    getFramebufferAttachmentParameteriv: removeOptional(c.PFNGLGETFRAMEBUFFERATTACHMENTPARAMETERIVPROC),
    generateMipmap: removeOptional(c.PFNGLGENERATEMIPMAPPROC),
    blitFramebuffer: removeOptional(c.PFNGLBLITFRAMEBUFFERPROC),
    renderbufferStorageMultisample: removeOptional(c.PFNGLRENDERBUFFERSTORAGEMULTISAMPLEPROC),
    framebufferTextureLayer: removeOptional(c.PFNGLFRAMEBUFFERTEXTURELAYERPROC),
    mapBufferRange: removeOptional(c.PFNGLMAPBUFFERRANGEPROC),
    flushMappedBufferRange: removeOptional(c.PFNGLFLUSHMAPPEDBUFFERRANGEPROC),
    bindVertexArray: removeOptional(c.PFNGLBINDVERTEXARRAYPROC),
    deleteVertexArrays: removeOptional(c.PFNGLDELETEVERTEXARRAYSPROC),
    genVertexArrays: removeOptional(c.PFNGLGENVERTEXARRAYSPROC),
    isVertexArray: removeOptional(c.PFNGLISVERTEXARRAYPROC),
    // 3.1
    drawArraysInstanced: removeOptional(c.PFNGLDRAWARRAYSINSTANCEDPROC),
    drawElementsInstanced: removeOptional(c.PFNGLDRAWELEMENTSINSTANCEDPROC),
    texBuffer: removeOptional(c.PFNGLTEXBUFFERPROC),
    primitiveRestartIndex: removeOptional(c.PFNGLPRIMITIVERESTARTINDEXPROC),
    copyBufferSubData: removeOptional(c.PFNGLCOPYBUFFERSUBDATAPROC),
    getUniformIndices: removeOptional(c.PFNGLGETUNIFORMINDICESPROC),
    getActiveUniformsiv: removeOptional(c.PFNGLGETACTIVEUNIFORMSIVPROC),
    getActiveUniformName: removeOptional(c.PFNGLGETACTIVEUNIFORMNAMEPROC),
    getUniformBlockIndex: removeOptional(c.PFNGLGETUNIFORMBLOCKINDEXPROC),
    getActiveUniformBlockiv: removeOptional(c.PFNGLGETACTIVEUNIFORMBLOCKIVPROC),
    getActiveUniformBlockName: removeOptional(c.PFNGLGETACTIVEUNIFORMBLOCKNAMEPROC),
    uniformBlockBinding: removeOptional(c.PFNGLUNIFORMBLOCKBINDINGPROC),
    // 3.2
    drawElementsBaseVertex: removeOptional(c.PFNGLDRAWELEMENTSBASEVERTEXPROC),
    drawRangeElementsBaseVertex: removeOptional(c.PFNGLDRAWRANGEELEMENTSBASEVERTEXPROC),
    drawElementsInstancedBaseVertex: removeOptional(c.PFNGLDRAWELEMENTSINSTANCEDBASEVERTEXPROC),
    multiDrawElementsBaseVertex: removeOptional(c.PFNGLMULTIDRAWELEMENTSBASEVERTEXPROC),
    provokingVertex: removeOptional(c.PFNGLPROVOKINGVERTEXPROC),
    fenceSync: removeOptional(c.PFNGLFENCESYNCPROC),
    isSync: removeOptional(c.PFNGLISSYNCPROC),
    deleteSync: removeOptional(c.PFNGLDELETESYNCPROC),
    clientWaitSync: removeOptional(c.PFNGLCLIENTWAITSYNCPROC),
    waitSync: removeOptional(c.PFNGLWAITSYNCPROC),
    getInteger64v: removeOptional(c.PFNGLGETINTEGER64VPROC),
    getSynciv: removeOptional(c.PFNGLGETSYNCIVPROC),
    getInteger64i_v: removeOptional(c.PFNGLGETINTEGER64I_VPROC),
    getBufferParameteri64v: removeOptional(c.PFNGLGETBUFFERPARAMETERI64VPROC),
    framebufferTexture: removeOptional(c.PFNGLFRAMEBUFFERTEXTUREPROC),
    texImage2DMultisample: removeOptional(c.PFNGLTEXIMAGE2DMULTISAMPLEPROC),
    texImage3DMultisample: removeOptional(c.PFNGLTEXIMAGE3DMULTISAMPLEPROC),
    getMultisamplefv: removeOptional(c.PFNGLGETMULTISAMPLEFVPROC),
    sampleMaski: removeOptional(c.PFNGLSAMPLEMASKIPROC),
    // 3.3
    bindFragDataLocationIndexed: removeOptional(c.PFNGLBINDFRAGDATALOCATIONINDEXEDPROC),
    getFragDataIndex: removeOptional(c.PFNGLGETFRAGDATAINDEXPROC),
    genSamplers: removeOptional(c.PFNGLGENSAMPLERSPROC),
    deleteSamplers: removeOptional(c.PFNGLDELETESAMPLERSPROC),
    isSampler: removeOptional(c.PFNGLISSAMPLERPROC),
    bindSampler: removeOptional(c.PFNGLBINDSAMPLERPROC),
    samplerParameteri: removeOptional(c.PFNGLSAMPLERPARAMETERIPROC),
    samplerParameteriv: removeOptional(c.PFNGLSAMPLERPARAMETERIVPROC),
    samplerParameterf: removeOptional(c.PFNGLSAMPLERPARAMETERFPROC),
    samplerParameterfv: removeOptional(c.PFNGLSAMPLERPARAMETERFVPROC),
    samplerParameterIiv: removeOptional(c.PFNGLSAMPLERPARAMETERIIVPROC),
    samplerParameterIuiv: removeOptional(c.PFNGLSAMPLERPARAMETERIUIVPROC),
    getSamplerParameteriv: removeOptional(c.PFNGLGETSAMPLERPARAMETERIVPROC),
    getSamplerParameterIiv: removeOptional(c.PFNGLGETSAMPLERPARAMETERIIVPROC),
    getSamplerParameterfv: removeOptional(c.PFNGLGETSAMPLERPARAMETERFVPROC),
    getSamplerParameterIuiv: removeOptional(c.PFNGLGETSAMPLERPARAMETERIUIVPROC),
    queryCounter: removeOptional(c.PFNGLQUERYCOUNTERPROC),
    getQueryObjecti64v: removeOptional(c.PFNGLGETQUERYOBJECTI64VPROC),
    getQueryObjectui64v: removeOptional(c.PFNGLGETQUERYOBJECTUI64VPROC),
    vertexAttribDivisor: removeOptional(c.PFNGLVERTEXATTRIBDIVISORPROC),
    vertexAttribP1ui: removeOptional(c.PFNGLVERTEXATTRIBP1UIPROC),
    vertexAttribP1uiv: removeOptional(c.PFNGLVERTEXATTRIBP1UIVPROC),
    vertexAttribP2ui: removeOptional(c.PFNGLVERTEXATTRIBP2UIPROC),
    vertexAttribP2uiv: removeOptional(c.PFNGLVERTEXATTRIBP2UIVPROC),
    vertexAttribP3ui: removeOptional(c.PFNGLVERTEXATTRIBP3UIPROC),
    vertexAttribP3uiv: removeOptional(c.PFNGLVERTEXATTRIBP3UIVPROC),
    vertexAttribP4ui: removeOptional(c.PFNGLVERTEXATTRIBP4UIPROC),
    vertexAttribP4uiv: removeOptional(c.PFNGLVERTEXATTRIBP4UIVPROC),
    // 4.0
    minSampleShading: removeOptional(c.PFNGLMINSAMPLESHADINGPROC),
    blendEquationi: removeOptional(c.PFNGLBLENDEQUATIONIPROC),
    blendEquationSeparatei: removeOptional(c.PFNGLBLENDEQUATIONSEPARATEIPROC),
    blendFunci: removeOptional(c.PFNGLBLENDFUNCIPROC),
    blendFuncSeparatei: removeOptional(c.PFNGLBLENDFUNCSEPARATEIPROC),
    drawArraysIndirect: removeOptional(c.PFNGLDRAWARRAYSINDIRECTPROC),
    drawElementsIndirect: removeOptional(c.PFNGLDRAWELEMENTSINDIRECTPROC),
    uniform1d: removeOptional(c.PFNGLUNIFORM1DPROC),
    uniform2d: removeOptional(c.PFNGLUNIFORM2DPROC),
    uniform3d: removeOptional(c.PFNGLUNIFORM3DPROC),
    uniform4d: removeOptional(c.PFNGLUNIFORM4DPROC),
    uniform1dv: removeOptional(c.PFNGLUNIFORM1DVPROC),
    uniform2dv: removeOptional(c.PFNGLUNIFORM2DVPROC),
    uniform3dv: removeOptional(c.PFNGLUNIFORM3DVPROC),
    uniform4dv: removeOptional(c.PFNGLUNIFORM4DVPROC),
    uniformMatrix2dv: removeOptional(c.PFNGLUNIFORMMATRIX2DVPROC),
    uniformMatrix3dv: removeOptional(c.PFNGLUNIFORMMATRIX3DVPROC),
    uniformMatrix4dv: removeOptional(c.PFNGLUNIFORMMATRIX4DVPROC),
    uniformMatrix2x3dv: removeOptional(c.PFNGLUNIFORMMATRIX2X3DVPROC),
    uniformMatrix2x4dv: removeOptional(c.PFNGLUNIFORMMATRIX2X4DVPROC),
    uniformMatrix3x2dv: removeOptional(c.PFNGLUNIFORMMATRIX3X2DVPROC),
    uniformMatrix3x4dv: removeOptional(c.PFNGLUNIFORMMATRIX3X4DVPROC),
    uniformMatrix4x2dv: removeOptional(c.PFNGLUNIFORMMATRIX4X2DVPROC),
    uniformMatrix4x3dv: removeOptional(c.PFNGLUNIFORMMATRIX4X3DVPROC),
    getUniformdv: removeOptional(c.PFNGLGETUNIFORMDVPROC),
    getSubroutineUniformLocation: removeOptional(c.PFNGLGETSUBROUTINEUNIFORMLOCATIONPROC),
    getSubroutineIndex: removeOptional(c.PFNGLGETSUBROUTINEINDEXPROC),
    getActiveSubroutineUniformiv: removeOptional(c.PFNGLGETACTIVESUBROUTINEUNIFORMIVPROC),
    getActiveSubroutineUniformName: removeOptional(c.PFNGLGETACTIVESUBROUTINEUNIFORMNAMEPROC),
    getActiveSubroutineName: removeOptional(c.PFNGLGETACTIVESUBROUTINENAMEPROC),
    uniformSubroutinesuiv: removeOptional(c.PFNGLUNIFORMSUBROUTINESUIVPROC),
    getUniformSubroutineuiv: removeOptional(c.PFNGLGETUNIFORMSUBROUTINEUIVPROC),
    getProgramStageiv: removeOptional(c.PFNGLGETPROGRAMSTAGEIVPROC),
    patchParameteri: removeOptional(c.PFNGLPATCHPARAMETERIPROC),
    patchParameterfv: removeOptional(c.PFNGLPATCHPARAMETERFVPROC),
    bindTransformFeedback: removeOptional(c.PFNGLBINDTRANSFORMFEEDBACKPROC),
    deleteTransformFeedbacks: removeOptional(c.PFNGLDELETETRANSFORMFEEDBACKSPROC),
    genTransformFeedbacks: removeOptional(c.PFNGLGENTRANSFORMFEEDBACKSPROC),
    isTransformFeedback: removeOptional(c.PFNGLISTRANSFORMFEEDBACKPROC),
    pauseTransformFeedback: removeOptional(c.PFNGLPAUSETRANSFORMFEEDBACKPROC),
    resumeTransformFeedback: removeOptional(c.PFNGLRESUMETRANSFORMFEEDBACKPROC),
    drawTransformFeedback: removeOptional(c.PFNGLDRAWTRANSFORMFEEDBACKPROC),
    drawTransformFeedbackStream: removeOptional(c.PFNGLDRAWTRANSFORMFEEDBACKSTREAMPROC),
    beginQueryIndexed: removeOptional(c.PFNGLBEGINQUERYINDEXEDPROC),
    endQueryIndexed: removeOptional(c.PFNGLENDQUERYINDEXEDPROC),
    getQueryIndexediv: removeOptional(c.PFNGLGETQUERYINDEXEDIVPROC),
    // 4.1
    releaseShaderCompiler: removeOptional(c.PFNGLRELEASESHADERCOMPILERPROC),
    shaderBinary: removeOptional(c.PFNGLSHADERBINARYPROC),
    getShaderPrecisionFormat: removeOptional(c.PFNGLGETSHADERPRECISIONFORMATPROC),
    depthRangef: removeOptional(c.PFNGLDEPTHRANGEFPROC),
    clearDepthf: removeOptional(c.PFNGLCLEARDEPTHFPROC),
    getProgramBinary: removeOptional(c.PFNGLGETPROGRAMBINARYPROC),
    programBinary: removeOptional(c.PFNGLPROGRAMBINARYPROC),
    programParameteri: removeOptional(c.PFNGLPROGRAMPARAMETERIPROC),
    useProgramStages: removeOptional(c.PFNGLUSEPROGRAMSTAGESPROC),
    activeShaderProgram: removeOptional(c.PFNGLACTIVESHADERPROGRAMPROC),
    createShaderProgramv: removeOptional(c.PFNGLCREATESHADERPROGRAMVPROC),
    bindProgramPipeline: removeOptional(c.PFNGLBINDPROGRAMPIPELINEPROC),
    deleteProgramPipelines: removeOptional(c.PFNGLDELETEPROGRAMPIPELINESPROC),
    genProgramPipelines: removeOptional(c.PFNGLGENPROGRAMPIPELINESPROC),
    isProgramPipeline: removeOptional(c.PFNGLISPROGRAMPIPELINEPROC),
    getProgramPipelineiv: removeOptional(c.PFNGLGETPROGRAMPIPELINEIVPROC),
    programUniform1i: removeOptional(c.PFNGLPROGRAMUNIFORM1IPROC),
    programUniform1iv: removeOptional(c.PFNGLPROGRAMUNIFORM1IVPROC),
    programUniform1f: removeOptional(c.PFNGLPROGRAMUNIFORM1FPROC),
    programUniform1fv: removeOptional(c.PFNGLPROGRAMUNIFORM1FVPROC),
    programUniform1d: removeOptional(c.PFNGLPROGRAMUNIFORM1DPROC),
    programUniform1dv: removeOptional(c.PFNGLPROGRAMUNIFORM1DVPROC),
    programUniform1ui: removeOptional(c.PFNGLPROGRAMUNIFORM1UIPROC),
    programUniform1uiv: removeOptional(c.PFNGLPROGRAMUNIFORM1UIVPROC),
    programUniform2i: removeOptional(c.PFNGLPROGRAMUNIFORM2IPROC),
    programUniform2iv: removeOptional(c.PFNGLPROGRAMUNIFORM2IVPROC),
    programUniform2f: removeOptional(c.PFNGLPROGRAMUNIFORM2FPROC),
    programUniform2fv: removeOptional(c.PFNGLPROGRAMUNIFORM2FVPROC),
    programUniform2d: removeOptional(c.PFNGLPROGRAMUNIFORM2DPROC),
    programUniform2dv: removeOptional(c.PFNGLPROGRAMUNIFORM2DVPROC),
    programUniform2ui: removeOptional(c.PFNGLPROGRAMUNIFORM2UIPROC),
    programUniform2uiv: removeOptional(c.PFNGLPROGRAMUNIFORM2UIVPROC),
    programUniform3i: removeOptional(c.PFNGLPROGRAMUNIFORM3IPROC),
    programUniform3iv: removeOptional(c.PFNGLPROGRAMUNIFORM3IVPROC),
    programUniform3f: removeOptional(c.PFNGLPROGRAMUNIFORM3FPROC),
    programUniform3fv: removeOptional(c.PFNGLPROGRAMUNIFORM3FVPROC),
    programUniform3d: removeOptional(c.PFNGLPROGRAMUNIFORM3DPROC),
    programUniform3dv: removeOptional(c.PFNGLPROGRAMUNIFORM3DVPROC),
    programUniform3ui: removeOptional(c.PFNGLPROGRAMUNIFORM3UIPROC),
    programUniform3uiv: removeOptional(c.PFNGLPROGRAMUNIFORM3UIVPROC),
    programUniform4i: removeOptional(c.PFNGLPROGRAMUNIFORM4IPROC),
    programUniform4iv: removeOptional(c.PFNGLPROGRAMUNIFORM4IVPROC),
    programUniform4f: removeOptional(c.PFNGLPROGRAMUNIFORM4FPROC),
    programUniform4fv: removeOptional(c.PFNGLPROGRAMUNIFORM4FVPROC),
    programUniform4d: removeOptional(c.PFNGLPROGRAMUNIFORM4DPROC),
    programUniform4dv: removeOptional(c.PFNGLPROGRAMUNIFORM4DVPROC),
    programUniform4ui: removeOptional(c.PFNGLPROGRAMUNIFORM4UIPROC),
    programUniform4uiv: removeOptional(c.PFNGLPROGRAMUNIFORM4UIVPROC),
    programUniformMatrix2fv: removeOptional(c.PFNGLPROGRAMUNIFORMMATRIX2FVPROC),
    programUniformMatrix3fv: removeOptional(c.PFNGLPROGRAMUNIFORMMATRIX3FVPROC),
    programUniformMatrix4fv: removeOptional(c.PFNGLPROGRAMUNIFORMMATRIX4FVPROC),
    programUniformMatrix2dv: removeOptional(c.PFNGLPROGRAMUNIFORMMATRIX2DVPROC),
    programUniformMatrix3dv: removeOptional(c.PFNGLPROGRAMUNIFORMMATRIX3DVPROC),
    programUniformMatrix4dv: removeOptional(c.PFNGLPROGRAMUNIFORMMATRIX4DVPROC),
    programUniformMatrix2x3fv: removeOptional(c.PFNGLPROGRAMUNIFORMMATRIX2X3FVPROC),
    programUniformMatrix3x2fv: removeOptional(c.PFNGLPROGRAMUNIFORMMATRIX3X2FVPROC),
    programUniformMatrix2x4fv: removeOptional(c.PFNGLPROGRAMUNIFORMMATRIX2X4FVPROC),
    programUniformMatrix4x2fv: removeOptional(c.PFNGLPROGRAMUNIFORMMATRIX4X2FVPROC),
    programUniformMatrix3x4fv: removeOptional(c.PFNGLPROGRAMUNIFORMMATRIX3X4FVPROC),
    programUniformMatrix4x3fv: removeOptional(c.PFNGLPROGRAMUNIFORMMATRIX4X3FVPROC),
    programUniformMatrix2x3dv: removeOptional(c.PFNGLPROGRAMUNIFORMMATRIX2X3DVPROC),
    programUniformMatrix3x2dv: removeOptional(c.PFNGLPROGRAMUNIFORMMATRIX3X2DVPROC),
    programUniformMatrix2x4dv: removeOptional(c.PFNGLPROGRAMUNIFORMMATRIX2X4DVPROC),
    programUniformMatrix4x2dv: removeOptional(c.PFNGLPROGRAMUNIFORMMATRIX4X2DVPROC),
    programUniformMatrix3x4dv: removeOptional(c.PFNGLPROGRAMUNIFORMMATRIX3X4DVPROC),
    programUniformMatrix4x3dv: removeOptional(c.PFNGLPROGRAMUNIFORMMATRIX4X3DVPROC),
    validateProgramPipeline: removeOptional(c.PFNGLVALIDATEPROGRAMPIPELINEPROC),
    getProgramPipelineInfoLog: removeOptional(c.PFNGLGETPROGRAMPIPELINEINFOLOGPROC),
    vertexAttribL1d: removeOptional(c.PFNGLVERTEXATTRIBL1DPROC),
    vertexAttribL2d: removeOptional(c.PFNGLVERTEXATTRIBL2DPROC),
    vertexAttribL3d: removeOptional(c.PFNGLVERTEXATTRIBL3DPROC),
    vertexAttribL4d: removeOptional(c.PFNGLVERTEXATTRIBL4DPROC),
    vertexAttribL1dv: removeOptional(c.PFNGLVERTEXATTRIBL1DVPROC),
    vertexAttribL2dv: removeOptional(c.PFNGLVERTEXATTRIBL2DVPROC),
    vertexAttribL3dv: removeOptional(c.PFNGLVERTEXATTRIBL3DVPROC),
    vertexAttribL4dv: removeOptional(c.PFNGLVERTEXATTRIBL4DVPROC),
    vertexAttribLPointer: removeOptional(c.PFNGLVERTEXATTRIBLPOINTERPROC),
    getVertexAttribLdv: removeOptional(c.PFNGLGETVERTEXATTRIBLDVPROC),
    viewportArrayv: removeOptional(c.PFNGLVIEWPORTARRAYVPROC),
    viewportIndexedf: removeOptional(c.PFNGLVIEWPORTINDEXEDFPROC),
    viewportIndexedfv: removeOptional(c.PFNGLVIEWPORTINDEXEDFVPROC),
    scissorArrayv: removeOptional(c.PFNGLSCISSORARRAYVPROC),
    scissorIndexed: removeOptional(c.PFNGLSCISSORINDEXEDPROC),
    scissorIndexedv: removeOptional(c.PFNGLSCISSORINDEXEDVPROC),
    depthRangeArrayv: removeOptional(c.PFNGLDEPTHRANGEARRAYVPROC),
    depthRangeIndexed: removeOptional(c.PFNGLDEPTHRANGEINDEXEDPROC),
    getFloati_v: removeOptional(c.PFNGLGETFLOATI_VPROC),
    getDoublei_v: removeOptional(c.PFNGLGETDOUBLEI_VPROC),
    // 4.2
    drawArraysInstancedBaseInstance: removeOptional(c.PFNGLDRAWARRAYSINSTANCEDBASEINSTANCEPROC),
    drawElementsInstancedBaseInstance: removeOptional(c.PFNGLDRAWELEMENTSINSTANCEDBASEINSTANCEPROC),
    drawElementsInstancedBaseVertexBaseInstance: removeOptional(c.PFNGLDRAWELEMENTSINSTANCEDBASEVERTEXBASEINSTANCEPROC),
    getInternalformativ: removeOptional(c.PFNGLGETINTERNALFORMATIVPROC),
    getActiveAtomicCounterBufferiv: removeOptional(c.PFNGLGETACTIVEATOMICCOUNTERBUFFERIVPROC),
    bindImageTexture: removeOptional(c.PFNGLBINDIMAGETEXTUREPROC),
    memoryBarrier: removeOptional(c.PFNGLMEMORYBARRIERPROC),
    texStorage1D: removeOptional(c.PFNGLTEXSTORAGE1DPROC),
    texStorage2D: removeOptional(c.PFNGLTEXSTORAGE2DPROC),
    texStorage3D: removeOptional(c.PFNGLTEXSTORAGE3DPROC),
    drawTransformFeedbackInstanced: removeOptional(c.PFNGLDRAWTRANSFORMFEEDBACKINSTANCEDPROC),
    drawTransformFeedbackStreamInstanced: removeOptional(c.PFNGLDRAWTRANSFORMFEEDBACKSTREAMINSTANCEDPROC),
    // 4.3
    clearBufferData: removeOptional(c.PFNGLCLEARBUFFERDATAPROC),
    clearBufferSubData: removeOptional(c.PFNGLCLEARBUFFERSUBDATAPROC),
    dispatchCompute: removeOptional(c.PFNGLDISPATCHCOMPUTEPROC),
    dispatchComputeIndirect: removeOptional(c.PFNGLDISPATCHCOMPUTEINDIRECTPROC),
    copyImageSubData: removeOptional(c.PFNGLCOPYIMAGESUBDATAPROC),
    framebufferParameteri: removeOptional(c.PFNGLFRAMEBUFFERPARAMETERIPROC),
    getFramebufferParameteriv: removeOptional(c.PFNGLGETFRAMEBUFFERPARAMETERIVPROC),
    getInternalformati64v: removeOptional(c.PFNGLGETINTERNALFORMATI64VPROC),
    invalidateTexSubImage: removeOptional(c.PFNGLINVALIDATETEXSUBIMAGEPROC),
    invalidateTexImage: removeOptional(c.PFNGLINVALIDATETEXIMAGEPROC),
    invalidateBufferSubData: removeOptional(c.PFNGLINVALIDATEBUFFERSUBDATAPROC),
    invalidateBufferData: removeOptional(c.PFNGLINVALIDATEBUFFERDATAPROC),
    invalidateFramebuffer: removeOptional(c.PFNGLINVALIDATEFRAMEBUFFERPROC),
    invalidateSubFramebuffer: removeOptional(c.PFNGLINVALIDATESUBFRAMEBUFFERPROC),
    multiDrawArraysIndirect: removeOptional(c.PFNGLMULTIDRAWARRAYSINDIRECTPROC),
    multiDrawElementsIndirect: removeOptional(c.PFNGLMULTIDRAWELEMENTSINDIRECTPROC),
    getProgramInterfaceiv: removeOptional(c.PFNGLGETPROGRAMINTERFACEIVPROC),
    getProgramResourceIndex: removeOptional(c.PFNGLGETPROGRAMRESOURCEINDEXPROC),
    getProgramResourceName: removeOptional(c.PFNGLGETPROGRAMRESOURCENAMEPROC),
    getProgramResourceiv: removeOptional(c.PFNGLGETPROGRAMRESOURCEIVPROC),
    getProgramResourceLocation: removeOptional(c.PFNGLGETPROGRAMRESOURCELOCATIONPROC),
    getProgramResourceLocationIndex: removeOptional(c.PFNGLGETPROGRAMRESOURCELOCATIONINDEXPROC),
    shaderStorageBlockBinding: removeOptional(c.PFNGLSHADERSTORAGEBLOCKBINDINGPROC),
    texBufferRange: removeOptional(c.PFNGLTEXBUFFERRANGEPROC),
    texStorage2DMultisample: removeOptional(c.PFNGLTEXSTORAGE2DMULTISAMPLEPROC),
    texStorage3DMultisample: removeOptional(c.PFNGLTEXSTORAGE3DMULTISAMPLEPROC),
    textureView: removeOptional(c.PFNGLTEXTUREVIEWPROC),
    bindVertexBuffer: removeOptional(c.PFNGLBINDVERTEXBUFFERPROC),
    vertexAttribFormat: removeOptional(c.PFNGLVERTEXATTRIBFORMATPROC),
    vertexAttribIFormat: removeOptional(c.PFNGLVERTEXATTRIBIFORMATPROC),
    vertexAttribLFormat: removeOptional(c.PFNGLVERTEXATTRIBLFORMATPROC),
    vertexAttribBinding: removeOptional(c.PFNGLVERTEXATTRIBBINDINGPROC),
    vertexBindingDivisor: removeOptional(c.PFNGLVERTEXBINDINGDIVISORPROC),
    debugMessageControl: removeOptional(c.PFNGLDEBUGMESSAGECONTROLPROC),
    debugMessageInsert: removeOptional(c.PFNGLDEBUGMESSAGEINSERTPROC),
    debugMessageCallback: removeOptional(c.PFNGLDEBUGMESSAGECALLBACKPROC),
    getDebugMessageLog: removeOptional(c.PFNGLGETDEBUGMESSAGELOGPROC),
    pushDebugGroup: removeOptional(c.PFNGLPUSHDEBUGGROUPPROC),
    popDebugGroup: removeOptional(c.PFNGLPOPDEBUGGROUPPROC),
    objectLabel: removeOptional(c.PFNGLOBJECTLABELPROC),
    getObjectLabel: removeOptional(c.PFNGLGETOBJECTLABELPROC),
    objectPtrLabel: removeOptional(c.PFNGLOBJECTPTRLABELPROC),
    getObjectPtrLabel: removeOptional(c.PFNGLGETOBJECTPTRLABELPROC),
    // 4.4
    bufferStorage: removeOptional(c.PFNGLBUFFERSTORAGEPROC),
    clearTexImage: removeOptional(c.PFNGLCLEARTEXIMAGEPROC),
    clearTexSubImage: removeOptional(c.PFNGLCLEARTEXSUBIMAGEPROC),
    bindBuffersBase: removeOptional(c.PFNGLBINDBUFFERSBASEPROC),
    bindBuffersRange: removeOptional(c.PFNGLBINDBUFFERSRANGEPROC),
    bindTextures: removeOptional(c.PFNGLBINDTEXTURESPROC),
    bindSamplers: removeOptional(c.PFNGLBINDSAMPLERSPROC),
    bindImageTextures: removeOptional(c.PFNGLBINDIMAGETEXTURESPROC),
    bindVertexBuffers: removeOptional(c.PFNGLBINDVERTEXBUFFERSPROC),
    // 4.5
    clipControl: removeOptional(c.PFNGLCLIPCONTROLPROC),
    createTransformFeedbacks: removeOptional(c.PFNGLCREATETRANSFORMFEEDBACKSPROC),
    transformFeedbackBufferBase: removeOptional(c.PFNGLTRANSFORMFEEDBACKBUFFERBASEPROC),
    transformFeedbackBufferRange: removeOptional(c.PFNGLTRANSFORMFEEDBACKBUFFERRANGEPROC),
    getTransformFeedbackiv: removeOptional(c.PFNGLGETTRANSFORMFEEDBACKIVPROC),
    getTransformFeedbacki_v: removeOptional(c.PFNGLGETTRANSFORMFEEDBACKI_VPROC),
    getTransformFeedbacki64_v: removeOptional(c.PFNGLGETTRANSFORMFEEDBACKI64_VPROC),
    createBuffers: removeOptional(c.PFNGLCREATEBUFFERSPROC),
    namedBufferStorage: removeOptional(c.PFNGLNAMEDBUFFERSTORAGEPROC),
    namedBufferData: removeOptional(c.PFNGLNAMEDBUFFERDATAPROC),
    namedBufferSubData: removeOptional(c.PFNGLNAMEDBUFFERSUBDATAPROC),
    copyNamedBufferSubData: removeOptional(c.PFNGLCOPYNAMEDBUFFERSUBDATAPROC),
    clearNamedBufferData: removeOptional(c.PFNGLCLEARNAMEDBUFFERDATAPROC),
    clearNamedBufferSubData: removeOptional(c.PFNGLCLEARNAMEDBUFFERSUBDATAPROC),
    mapNamedBuffer: removeOptional(c.PFNGLMAPNAMEDBUFFERPROC),
    mapNamedBufferRange: removeOptional(c.PFNGLMAPNAMEDBUFFERRANGEPROC),
    unmapNamedBuffer: removeOptional(c.PFNGLUNMAPNAMEDBUFFERPROC),
    flushMappedNamedBufferRange: removeOptional(c.PFNGLFLUSHMAPPEDNAMEDBUFFERRANGEPROC),
    getNamedBufferParameteriv: removeOptional(c.PFNGLGETNAMEDBUFFERPARAMETERIVPROC),
    getNamedBufferParameteri64v: removeOptional(c.PFNGLGETNAMEDBUFFERPARAMETERI64VPROC),
    getNamedBufferPointerv: removeOptional(c.PFNGLGETNAMEDBUFFERPOINTERVPROC),
    getNamedBufferSubData: removeOptional(c.PFNGLGETNAMEDBUFFERSUBDATAPROC),
    createFramebuffers: removeOptional(c.PFNGLCREATEFRAMEBUFFERSPROC),
    namedFramebufferRenderbuffer: removeOptional(c.PFNGLNAMEDFRAMEBUFFERRENDERBUFFERPROC),
    namedFramebufferParameteri: removeOptional(c.PFNGLNAMEDFRAMEBUFFERPARAMETERIPROC),
    namedFramebufferTexture: removeOptional(c.PFNGLNAMEDFRAMEBUFFERTEXTUREPROC),
    namedFramebufferTextureLayer: removeOptional(c.PFNGLNAMEDFRAMEBUFFERTEXTURELAYERPROC),
    namedFramebufferDrawBuffer: removeOptional(c.PFNGLNAMEDFRAMEBUFFERDRAWBUFFERPROC),
    namedFramebufferDrawBuffers: removeOptional(c.PFNGLNAMEDFRAMEBUFFERDRAWBUFFERSPROC),
    namedFramebufferReadBuffer: removeOptional(c.PFNGLNAMEDFRAMEBUFFERREADBUFFERPROC),
    invalidateNamedFramebufferData: removeOptional(c.PFNGLINVALIDATENAMEDFRAMEBUFFERDATAPROC),
    invalidateNamedFramebufferSubData: removeOptional(c.PFNGLINVALIDATENAMEDFRAMEBUFFERSUBDATAPROC),
    clearNamedFramebufferiv: removeOptional(c.PFNGLCLEARNAMEDFRAMEBUFFERIVPROC),
    clearNamedFramebufferuiv: removeOptional(c.PFNGLCLEARNAMEDFRAMEBUFFERUIVPROC),
    clearNamedFramebufferfv: removeOptional(c.PFNGLCLEARNAMEDFRAMEBUFFERFVPROC),
    clearNamedFramebufferfi: removeOptional(c.PFNGLCLEARNAMEDFRAMEBUFFERFIPROC),
    blitNamedFramebuffer: removeOptional(c.PFNGLBLITNAMEDFRAMEBUFFERPROC),
    checkNamedFramebufferStatus: removeOptional(c.PFNGLCHECKNAMEDFRAMEBUFFERSTATUSPROC),
    getNamedFramebufferParameteriv: removeOptional(c.PFNGLGETNAMEDFRAMEBUFFERPARAMETERIVPROC),
    getNamedFramebufferAttachmentParameteriv: removeOptional(c.PFNGLGETNAMEDFRAMEBUFFERATTACHMENTPARAMETERIVPROC),
    createRenderbuffers: removeOptional(c.PFNGLCREATERENDERBUFFERSPROC),
    namedRenderbufferStorage: removeOptional(c.PFNGLNAMEDRENDERBUFFERSTORAGEPROC),
    namedRenderbufferStorageMultisample: removeOptional(c.PFNGLNAMEDRENDERBUFFERSTORAGEMULTISAMPLEPROC),
    getNamedRenderbufferParameteriv: removeOptional(c.PFNGLGETNAMEDRENDERBUFFERPARAMETERIVPROC),
    createTextures: removeOptional(c.PFNGLCREATETEXTURESPROC),
    textureBuffer: removeOptional(c.PFNGLTEXTUREBUFFERPROC),
    textureBufferRange: removeOptional(c.PFNGLTEXTUREBUFFERRANGEPROC),
    textureStorage1D: removeOptional(c.PFNGLTEXTURESTORAGE1DPROC),
    textureStorage2D: removeOptional(c.PFNGLTEXTURESTORAGE2DPROC),
    textureStorage3D: removeOptional(c.PFNGLTEXTURESTORAGE3DPROC),
    textureStorage2DMultisample: removeOptional(c.PFNGLTEXTURESTORAGE2DMULTISAMPLEPROC),
    textureStorage3DMultisample: removeOptional(c.PFNGLTEXTURESTORAGE3DMULTISAMPLEPROC),
    textureSubImage1D: removeOptional(c.PFNGLTEXTURESUBIMAGE1DPROC),
    textureSubImage2D: removeOptional(c.PFNGLTEXTURESUBIMAGE2DPROC),
    textureSubImage3D: removeOptional(c.PFNGLTEXTURESUBIMAGE3DPROC),
    compressedTextureSubImage1D: removeOptional(c.PFNGLCOMPRESSEDTEXTURESUBIMAGE1DPROC),
    compressedTextureSubImage2D: removeOptional(c.PFNGLCOMPRESSEDTEXTURESUBIMAGE2DPROC),
    compressedTextureSubImage3D: removeOptional(c.PFNGLCOMPRESSEDTEXTURESUBIMAGE3DPROC),
    copyTextureSubImage1D: removeOptional(c.PFNGLCOPYTEXTURESUBIMAGE1DPROC),
    copyTextureSubImage2D: removeOptional(c.PFNGLCOPYTEXTURESUBIMAGE2DPROC),
    copyTextureSubImage3D: removeOptional(c.PFNGLCOPYTEXTURESUBIMAGE3DPROC),
    textureParameterf: removeOptional(c.PFNGLTEXTUREPARAMETERFPROC),
    textureParameterfv: removeOptional(c.PFNGLTEXTUREPARAMETERFVPROC),
    textureParameteri: removeOptional(c.PFNGLTEXTUREPARAMETERIPROC),
    textureParameterIiv: removeOptional(c.PFNGLTEXTUREPARAMETERIIVPROC),
    textureParameterIuiv: removeOptional(c.PFNGLTEXTUREPARAMETERIUIVPROC),
    textureParameteriv: removeOptional(c.PFNGLTEXTUREPARAMETERIVPROC),
    generateTextureMipmap: removeOptional(c.PFNGLGENERATETEXTUREMIPMAPPROC),
    bindTextureUnit: removeOptional(c.PFNGLBINDTEXTUREUNITPROC),
    getTextureImage: removeOptional(c.PFNGLGETTEXTUREIMAGEPROC),
    getCompressedTextureImage: removeOptional(c.PFNGLGETCOMPRESSEDTEXTUREIMAGEPROC),
    getTextureLevelParameterfv: removeOptional(c.PFNGLGETTEXTURELEVELPARAMETERFVPROC),
    getTextureLevelParameteriv: removeOptional(c.PFNGLGETTEXTURELEVELPARAMETERIVPROC),
    getTextureParameterfv: removeOptional(c.PFNGLGETTEXTUREPARAMETERFVPROC),
    getTextureParameterIiv: removeOptional(c.PFNGLGETTEXTUREPARAMETERIIVPROC),
    getTextureParameterIuiv: removeOptional(c.PFNGLGETTEXTUREPARAMETERIUIVPROC),
    getTextureParameteriv: removeOptional(c.PFNGLGETTEXTUREPARAMETERIVPROC),
    createVertexArrays: removeOptional(c.PFNGLCREATEVERTEXARRAYSPROC),
    disableVertexArrayAttrib: removeOptional(c.PFNGLDISABLEVERTEXARRAYATTRIBPROC),
    enableVertexArrayAttrib: removeOptional(c.PFNGLENABLEVERTEXARRAYATTRIBPROC),
    vertexArrayElementBuffer: removeOptional(c.PFNGLVERTEXARRAYELEMENTBUFFERPROC),
    vertexArrayVertexBuffer: removeOptional(c.PFNGLVERTEXARRAYVERTEXBUFFERPROC),
    vertexArrayVertexBuffers: removeOptional(c.PFNGLVERTEXARRAYVERTEXBUFFERSPROC),
    vertexArrayAttribBinding: removeOptional(c.PFNGLVERTEXARRAYATTRIBBINDINGPROC),
    vertexArrayAttribFormat: removeOptional(c.PFNGLVERTEXARRAYATTRIBFORMATPROC),
    vertexArrayAttribIFormat: removeOptional(c.PFNGLVERTEXARRAYATTRIBIFORMATPROC),
    vertexArrayAttribLFormat: removeOptional(c.PFNGLVERTEXARRAYATTRIBLFORMATPROC),
    vertexArrayBindingDivisor: removeOptional(c.PFNGLVERTEXARRAYBINDINGDIVISORPROC),
    getVertexArrayiv: removeOptional(c.PFNGLGETVERTEXARRAYIVPROC),
    getVertexArrayIndexediv: removeOptional(c.PFNGLGETVERTEXARRAYINDEXEDIVPROC),
    getVertexArrayIndexed64iv: removeOptional(c.PFNGLGETVERTEXARRAYINDEXED64IVPROC),
    createSamplers: removeOptional(c.PFNGLCREATESAMPLERSPROC),
    createProgramPipelines: removeOptional(c.PFNGLCREATEPROGRAMPIPELINESPROC),
    createQueries: removeOptional(c.PFNGLCREATEQUERIESPROC),
    getQueryBufferObjecti64v: removeOptional(c.PFNGLGETQUERYBUFFEROBJECTI64VPROC),
    getQueryBufferObjectiv: removeOptional(c.PFNGLGETQUERYBUFFEROBJECTIVPROC),
    getQueryBufferObjectui64v: removeOptional(c.PFNGLGETQUERYBUFFEROBJECTUI64VPROC),
    getQueryBufferObjectuiv: removeOptional(c.PFNGLGETQUERYBUFFEROBJECTUIVPROC),
    memoryBarrierByRegion: removeOptional(c.PFNGLMEMORYBARRIERBYREGIONPROC),
    getTextureSubImage: removeOptional(c.PFNGLGETTEXTURESUBIMAGEPROC),
    getCompressedTextureSubImage: removeOptional(c.PFNGLGETCOMPRESSEDTEXTURESUBIMAGEPROC),
    getGraphicsResetStatus: removeOptional(c.PFNGLGETGRAPHICSRESETSTATUSPROC),
    getnCompressedTexImage: removeOptional(c.PFNGLGETNCOMPRESSEDTEXIMAGEPROC),
    getnTexImage: removeOptional(c.PFNGLGETNTEXIMAGEPROC),
    getnUniformdv: removeOptional(c.PFNGLGETNUNIFORMDVPROC),
    getnUniformfv: removeOptional(c.PFNGLGETNUNIFORMFVPROC),
    getnUniformiv: removeOptional(c.PFNGLGETNUNIFORMIVPROC),
    getnUniformuiv: removeOptional(c.PFNGLGETNUNIFORMUIVPROC),
    readnPixels: removeOptional(c.PFNGLREADNPIXELSPROC),
    textureBarrier: removeOptional(c.PFNGLTEXTUREBARRIERPROC),
    // 4.6
    specializeShader: removeOptional(c.PFNGLSPECIALIZESHADERPROC),
    multiDrawArraysIndirectCount: removeOptional(c.PFNGLMULTIDRAWARRAYSINDIRECTCOUNTPROC),
    multiDrawElementsIndirectCount: removeOptional(c.PFNGLMULTIDRAWELEMENTSINDIRECTCOUNTPROC),
    polygonOffsetClamp: removeOptional(c.PFNGLPOLYGONOFFSETCLAMPPROC),

    /// Resolves the OpenGL entry points for the requested core version.
    /// Each `if (version >= ...)` block below loads the functions introduced
    /// with that GL release, so requesting e.g. 3.3 also loads everything
    /// from 1.0 through 3.2.
    pub fn loadVersion(gl: *DeviceGL, major_version: u32, minor_version: u32) void {
        // Encode the version as a single comparable integer, e.g. 4.6 -> 460, 1.1 -> 110.
        const version = major_version * 100 + minor_version * 10;
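        // Note (assumption, helper defined elsewhere in this file): the 1.0/1.1
        // blocks below resolve through the local getProcAddress helper, while
        // everything from 1.2 onward uses c.wglGetProcAddress. On Windows,
        // wglGetProcAddress only resolves entry points newer than GL 1.1 (the
        // older ones are exported directly by opengl32.dll), and it must be
        // called with a current GL context, so the pointers loaded here are
        // only valid for compatible contexts.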
@ptrCast(getProcAddress("glStencilOp")); gl.depthFunc = @ptrCast(getProcAddress("glDepthFunc")); gl.pixelStoref = @ptrCast(getProcAddress("glPixelStoref")); gl.pixelStorei = @ptrCast(getProcAddress("glPixelStorei")); gl.readBuffer = @ptrCast(getProcAddress("glReadBuffer")); gl.readPixels = @ptrCast(getProcAddress("glReadPixels")); gl.getBooleanv = @ptrCast(getProcAddress("glGetBooleanv")); gl.getDoublev = @ptrCast(getProcAddress("glGetDoublev")); gl.getError = @ptrCast(getProcAddress("glGetError")); gl.getFloatv = @ptrCast(getProcAddress("glGetFloatv")); gl.getIntegerv = @ptrCast(getProcAddress("glGetIntegerv")); gl.getString = @ptrCast(getProcAddress("glGetString")); gl.getTexImage = @ptrCast(getProcAddress("glGetTexImage")); gl.getTexParameterfv = @ptrCast(getProcAddress("glGetTexParameterfv")); gl.getTexParameteriv = @ptrCast(getProcAddress("glGetTexParameteriv")); gl.getTexLevelParameterfv = @ptrCast(getProcAddress("glGetTexLevelParameterfv")); gl.getTexLevelParameteriv = @ptrCast(getProcAddress("glGetTexLevelParameteriv")); gl.isEnabled = @ptrCast(getProcAddress("glIsEnabled")); gl.depthRange = @ptrCast(getProcAddress("glDepthRange")); gl.viewport = @ptrCast(getProcAddress("glViewport")); } if (version >= 110) { gl.drawArrays = @ptrCast(getProcAddress("glDrawArrays")); gl.drawElements = @ptrCast(getProcAddress("glDrawElements")); gl.getPointerv = @ptrCast(getProcAddress("glGetPointerv")); gl.polygonOffset = @ptrCast(getProcAddress("glPolygonOffset")); gl.copyTexImage1D = @ptrCast(getProcAddress("glCopyTexImage1D")); gl.copyTexImage2D = @ptrCast(getProcAddress("glCopyTexImage2D")); gl.copyTexSubImage1D = @ptrCast(getProcAddress("glCopyTexSubImage1D")); gl.copyTexSubImage2D = @ptrCast(getProcAddress("glCopyTexSubImage2D")); gl.texSubImage1D = @ptrCast(getProcAddress("glTexSubImage1D")); gl.texSubImage2D = @ptrCast(getProcAddress("glTexSubImage2D")); gl.bindTexture = @ptrCast(getProcAddress("glBindTexture")); gl.deleteTextures = @ptrCast(getProcAddress("glDeleteTextures")); gl.genTextures = @ptrCast(getProcAddress("glGenTextures")); gl.isTexture = @ptrCast(getProcAddress("glIsTexture")); } if (version >= 120) { gl.drawRangeElements = @ptrCast(c.wglGetProcAddress("glDrawRangeElements")); gl.texImage3D = @ptrCast(c.wglGetProcAddress("glTexImage3D")); gl.texSubImage3D = @ptrCast(c.wglGetProcAddress("glTexSubImage3D")); gl.copyTexSubImage3D = @ptrCast(c.wglGetProcAddress("glCopyTexSubImage3D")); } if (version >= 130) { gl.activeTexture = @ptrCast(c.wglGetProcAddress("glActiveTexture")); gl.sampleCoverage = @ptrCast(c.wglGetProcAddress("glSampleCoverage")); gl.compressedTexImage3D = @ptrCast(c.wglGetProcAddress("glCompressedTexImage3D")); gl.compressedTexImage2D = @ptrCast(c.wglGetProcAddress("glCompressedTexImage2D")); gl.compressedTexImage1D = @ptrCast(c.wglGetProcAddress("glCompressedTexImage1D")); gl.compressedTexSubImage3D = @ptrCast(c.wglGetProcAddress("glCompressedTexSubImage3D")); gl.compressedTexSubImage2D = @ptrCast(c.wglGetProcAddress("glCompressedTexSubImage2D")); gl.compressedTexSubImage1D = @ptrCast(c.wglGetProcAddress("glCompressedTexSubImage1D")); gl.getCompressedTexImage = @ptrCast(c.wglGetProcAddress("glGetCompressedTexImage")); } if (version >= 140) { gl.blendFuncSeparate = @ptrCast(c.wglGetProcAddress("glBlendFuncSeparate")); gl.multiDrawArrays = @ptrCast(c.wglGetProcAddress("glMultiDrawArrays")); gl.multiDrawElements = @ptrCast(c.wglGetProcAddress("glMultiDrawElements")); gl.pointParameterf = @ptrCast(c.wglGetProcAddress("glPointParameterf")); gl.pointParameterfv = 
@ptrCast(c.wglGetProcAddress("glPointParameterfv")); gl.pointParameteri = @ptrCast(c.wglGetProcAddress("glPointParameteri")); gl.pointParameteriv = @ptrCast(c.wglGetProcAddress("glPointParameteriv")); gl.blendColor = @ptrCast(c.wglGetProcAddress("glBlendColor")); gl.blendEquation = @ptrCast(c.wglGetProcAddress("glBlendEquation")); } if (version >= 150) { gl.genQueries = @ptrCast(c.wglGetProcAddress("glGenQueries")); gl.deleteQueries = @ptrCast(c.wglGetProcAddress("glDeleteQueries")); gl.isQuery = @ptrCast(c.wglGetProcAddress("glIsQuery")); gl.beginQuery = @ptrCast(c.wglGetProcAddress("glBeginQuery")); gl.endQuery = @ptrCast(c.wglGetProcAddress("glEndQuery")); gl.getQueryiv = @ptrCast(c.wglGetProcAddress("glGetQueryiv")); gl.getQueryObjectiv = @ptrCast(c.wglGetProcAddress("glGetQueryObjectiv")); gl.getQueryObjectuiv = @ptrCast(c.wglGetProcAddress("glGetQueryObjectuiv")); gl.bindBuffer = @ptrCast(c.wglGetProcAddress("glBindBuffer")); gl.deleteBuffers = @ptrCast(c.wglGetProcAddress("glDeleteBuffers")); gl.genBuffers = @ptrCast(c.wglGetProcAddress("glGenBuffers")); gl.isBuffer = @ptrCast(c.wglGetProcAddress("glIsBuffer")); gl.bufferData = @ptrCast(c.wglGetProcAddress("glBufferData")); gl.bufferSubData = @ptrCast(c.wglGetProcAddress("glBufferSubData")); gl.getBufferSubData = @ptrCast(c.wglGetProcAddress("glGetBufferSubData")); gl.mapBuffer = @ptrCast(c.wglGetProcAddress("glMapBuffer")); gl.unmapBuffer = @ptrCast(c.wglGetProcAddress("glUnmapBuffer")); gl.getBufferParameteriv = @ptrCast(c.wglGetProcAddress("glGetBufferParameteriv")); gl.getBufferPointerv = @ptrCast(c.wglGetProcAddress("glGetBufferPointerv")); } if (version >= 200) { gl.blendEquationSeparate = @ptrCast(c.wglGetProcAddress("glBlendEquationSeparate")); gl.drawBuffers = @ptrCast(c.wglGetProcAddress("glDrawBuffers")); gl.stencilOpSeparate = @ptrCast(c.wglGetProcAddress("glStencilOpSeparate")); gl.stencilFuncSeparate = @ptrCast(c.wglGetProcAddress("glStencilFuncSeparate")); gl.stencilMaskSeparate = @ptrCast(c.wglGetProcAddress("glStencilMaskSeparate")); gl.attachShader = @ptrCast(c.wglGetProcAddress("glAttachShader")); gl.bindAttribLocation = @ptrCast(c.wglGetProcAddress("glBindAttribLocation")); gl.compileShader = @ptrCast(c.wglGetProcAddress("glCompileShader")); gl.createProgram = @ptrCast(c.wglGetProcAddress("glCreateProgram")); gl.createShader = @ptrCast(c.wglGetProcAddress("glCreateShader")); gl.deleteProgram = @ptrCast(c.wglGetProcAddress("glDeleteProgram")); gl.deleteShader = @ptrCast(c.wglGetProcAddress("glDeleteShader")); gl.detachShader = @ptrCast(c.wglGetProcAddress("glDetachShader")); gl.disableVertexAttribArray = @ptrCast(c.wglGetProcAddress("glDisableVertexAttribArray")); gl.enableVertexAttribArray = @ptrCast(c.wglGetProcAddress("glEnableVertexAttribArray")); gl.getActiveAttrib = @ptrCast(c.wglGetProcAddress("glGetActiveAttrib")); gl.getActiveUniform = @ptrCast(c.wglGetProcAddress("glGetActiveUniform")); gl.getAttachedShaders = @ptrCast(c.wglGetProcAddress("glGetAttachedShaders")); gl.getAttribLocation = @ptrCast(c.wglGetProcAddress("glGetAttribLocation")); gl.getProgramiv = @ptrCast(c.wglGetProcAddress("glGetProgramiv")); gl.getProgramInfoLog = @ptrCast(c.wglGetProcAddress("glGetProgramInfoLog")); gl.getShaderiv = @ptrCast(c.wglGetProcAddress("glGetShaderiv")); gl.getShaderInfoLog = @ptrCast(c.wglGetProcAddress("glGetShaderInfoLog")); gl.getShaderSource = @ptrCast(c.wglGetProcAddress("glGetShaderSource")); gl.getUniformLocation = @ptrCast(c.wglGetProcAddress("glGetUniformLocation")); gl.getUniformfv = 
@ptrCast(c.wglGetProcAddress("glGetUniformfv")); gl.getUniformiv = @ptrCast(c.wglGetProcAddress("glGetUniformiv")); gl.getVertexAttribdv = @ptrCast(c.wglGetProcAddress("glGetVertexAttribdv")); gl.getVertexAttribfv = @ptrCast(c.wglGetProcAddress("glGetVertexAttribfv")); gl.getVertexAttribiv = @ptrCast(c.wglGetProcAddress("glGetVertexAttribiv")); gl.getVertexAttribPointerv = @ptrCast(c.wglGetProcAddress("glGetVertexAttribPointerv")); gl.isProgram = @ptrCast(c.wglGetProcAddress("glIsProgram")); gl.isShader = @ptrCast(c.wglGetProcAddress("glIsShader")); gl.linkProgram = @ptrCast(c.wglGetProcAddress("glLinkProgram")); gl.shaderSource = @ptrCast(c.wglGetProcAddress("glShaderSource")); gl.useProgram = @ptrCast(c.wglGetProcAddress("glUseProgram")); gl.uniform1f = @ptrCast(c.wglGetProcAddress("glUniform1f")); gl.uniform2f = @ptrCast(c.wglGetProcAddress("glUniform2f")); gl.uniform3f = @ptrCast(c.wglGetProcAddress("glUniform3f")); gl.uniform4f = @ptrCast(c.wglGetProcAddress("glUniform4f")); gl.uniform1i = @ptrCast(c.wglGetProcAddress("glUniform1i")); gl.uniform2i = @ptrCast(c.wglGetProcAddress("glUniform2i")); gl.uniform3i = @ptrCast(c.wglGetProcAddress("glUniform3i")); gl.uniform4i = @ptrCast(c.wglGetProcAddress("glUniform4i")); gl.uniform1fv = @ptrCast(c.wglGetProcAddress("glUniform1fv")); gl.uniform2fv = @ptrCast(c.wglGetProcAddress("glUniform2fv")); gl.uniform3fv = @ptrCast(c.wglGetProcAddress("glUniform3fv")); gl.uniform4fv = @ptrCast(c.wglGetProcAddress("glUniform4fv")); gl.uniform1iv = @ptrCast(c.wglGetProcAddress("glUniform1iv")); gl.uniform2iv = @ptrCast(c.wglGetProcAddress("glUniform2iv")); gl.uniform3iv = @ptrCast(c.wglGetProcAddress("glUniform3iv")); gl.uniform4iv = @ptrCast(c.wglGetProcAddress("glUniform4iv")); gl.uniformMatrix2fv = @ptrCast(c.wglGetProcAddress("glUniformMatrix2fv")); gl.uniformMatrix3fv = @ptrCast(c.wglGetProcAddress("glUniformMatrix3fv")); gl.uniformMatrix4fv = @ptrCast(c.wglGetProcAddress("glUniformMatrix4fv")); gl.validateProgram = @ptrCast(c.wglGetProcAddress("glValidateProgram")); gl.vertexAttrib1d = @ptrCast(c.wglGetProcAddress("glVertexAttrib1d")); gl.vertexAttrib1dv = @ptrCast(c.wglGetProcAddress("glVertexAttrib1dv")); gl.vertexAttrib1f = @ptrCast(c.wglGetProcAddress("glVertexAttrib1f")); gl.vertexAttrib1fv = @ptrCast(c.wglGetProcAddress("glVertexAttrib1fv")); gl.vertexAttrib1s = @ptrCast(c.wglGetProcAddress("glVertexAttrib1s")); gl.vertexAttrib1sv = @ptrCast(c.wglGetProcAddress("glVertexAttrib1sv")); gl.vertexAttrib2d = @ptrCast(c.wglGetProcAddress("glVertexAttrib2d")); gl.vertexAttrib2dv = @ptrCast(c.wglGetProcAddress("glVertexAttrib2dv")); gl.vertexAttrib2f = @ptrCast(c.wglGetProcAddress("glVertexAttrib2f")); gl.vertexAttrib2fv = @ptrCast(c.wglGetProcAddress("glVertexAttrib2fv")); gl.vertexAttrib2s = @ptrCast(c.wglGetProcAddress("glVertexAttrib2s")); gl.vertexAttrib2sv = @ptrCast(c.wglGetProcAddress("glVertexAttrib2sv")); gl.vertexAttrib3d = @ptrCast(c.wglGetProcAddress("glVertexAttrib3d")); gl.vertexAttrib3dv = @ptrCast(c.wglGetProcAddress("glVertexAttrib3dv")); gl.vertexAttrib3f = @ptrCast(c.wglGetProcAddress("glVertexAttrib3f")); gl.vertexAttrib3fv = @ptrCast(c.wglGetProcAddress("glVertexAttrib3fv")); gl.vertexAttrib3s = @ptrCast(c.wglGetProcAddress("glVertexAttrib3s")); gl.vertexAttrib3sv = @ptrCast(c.wglGetProcAddress("glVertexAttrib3sv")); gl.vertexAttrib4Nbv = @ptrCast(c.wglGetProcAddress("glVertexAttrib4Nbv")); gl.vertexAttrib4Niv = @ptrCast(c.wglGetProcAddress("glVertexAttrib4Niv")); gl.vertexAttrib4Nsv = 
@ptrCast(c.wglGetProcAddress("glVertexAttrib4Nsv")); gl.vertexAttrib4Nub = @ptrCast(c.wglGetProcAddress("glVertexAttrib4Nub")); gl.vertexAttrib4Nubv = @ptrCast(c.wglGetProcAddress("glVertexAttrib4Nubv")); gl.vertexAttrib4Nuiv = @ptrCast(c.wglGetProcAddress("glVertexAttrib4Nuiv")); gl.vertexAttrib4Nusv = @ptrCast(c.wglGetProcAddress("glVertexAttrib4Nusv")); gl.vertexAttrib4bv = @ptrCast(c.wglGetProcAddress("glVertexAttrib4bv")); gl.vertexAttrib4d = @ptrCast(c.wglGetProcAddress("glVertexAttrib4d")); gl.vertexAttrib4dv = @ptrCast(c.wglGetProcAddress("glVertexAttrib4dv")); gl.vertexAttrib4f = @ptrCast(c.wglGetProcAddress("glVertexAttrib4f")); gl.vertexAttrib4fv = @ptrCast(c.wglGetProcAddress("glVertexAttrib4fv")); gl.vertexAttrib4iv = @ptrCast(c.wglGetProcAddress("glVertexAttrib4iv")); gl.vertexAttrib4s = @ptrCast(c.wglGetProcAddress("glVertexAttrib4s")); gl.vertexAttrib4sv = @ptrCast(c.wglGetProcAddress("glVertexAttrib4sv")); gl.vertexAttrib4ubv = @ptrCast(c.wglGetProcAddress("glVertexAttrib4ubv")); gl.vertexAttrib4uiv = @ptrCast(c.wglGetProcAddress("glVertexAttrib4uiv")); gl.vertexAttrib4usv = @ptrCast(c.wglGetProcAddress("glVertexAttrib4usv")); gl.vertexAttribPointer = @ptrCast(c.wglGetProcAddress("glVertexAttribPointer")); } if (version >= 210) { gl.uniformMatrix2x3fv = @ptrCast(c.wglGetProcAddress("glUniformMatrix2x3fv")); gl.uniformMatrix3x2fv = @ptrCast(c.wglGetProcAddress("glUniformMatrix3x2fv")); gl.uniformMatrix2x4fv = @ptrCast(c.wglGetProcAddress("glUniformMatrix2x4fv")); gl.uniformMatrix4x2fv = @ptrCast(c.wglGetProcAddress("glUniformMatrix4x2fv")); gl.uniformMatrix3x4fv = @ptrCast(c.wglGetProcAddress("glUniformMatrix3x4fv")); gl.uniformMatrix4x3fv = @ptrCast(c.wglGetProcAddress("glUniformMatrix4x3fv")); } if (version >= 300) { gl.colorMaski = @ptrCast(c.wglGetProcAddress("glColorMaski")); gl.getBooleani_v = @ptrCast(c.wglGetProcAddress("glGetBooleani_v")); gl.getIntegeri_v = @ptrCast(c.wglGetProcAddress("glGetIntegeri_v")); gl.enablei = @ptrCast(c.wglGetProcAddress("glEnablei")); gl.disablei = @ptrCast(c.wglGetProcAddress("glDisablei")); gl.isEnabledi = @ptrCast(c.wglGetProcAddress("glIsEnabledi")); gl.beginTransformFeedback = @ptrCast(c.wglGetProcAddress("glBeginTransformFeedback")); gl.endTransformFeedback = @ptrCast(c.wglGetProcAddress("glEndTransformFeedback")); gl.bindBufferRange = @ptrCast(c.wglGetProcAddress("glBindBufferRange")); gl.bindBufferBase = @ptrCast(c.wglGetProcAddress("glBindBufferBase")); gl.transformFeedbackVaryings = @ptrCast(c.wglGetProcAddress("glTransformFeedbackVaryings")); gl.getTransformFeedbackVarying = @ptrCast(c.wglGetProcAddress("glGetTransformFeedbackVarying")); gl.clampColor = @ptrCast(c.wglGetProcAddress("glClampColor")); gl.beginConditionalRender = @ptrCast(c.wglGetProcAddress("glBeginConditionalRender")); gl.endConditionalRender = @ptrCast(c.wglGetProcAddress("glEndConditionalRender")); gl.vertexAttribIPointer = @ptrCast(c.wglGetProcAddress("glVertexAttribIPointer")); gl.getVertexAttribIiv = @ptrCast(c.wglGetProcAddress("glGetVertexAttribIiv")); gl.getVertexAttribIuiv = @ptrCast(c.wglGetProcAddress("glGetVertexAttribIuiv")); gl.vertexAttribI1i = @ptrCast(c.wglGetProcAddress("glVertexAttribI1i")); gl.vertexAttribI2i = @ptrCast(c.wglGetProcAddress("glVertexAttribI2i")); gl.vertexAttribI3i = @ptrCast(c.wglGetProcAddress("glVertexAttribI3i")); gl.vertexAttribI4i = @ptrCast(c.wglGetProcAddress("glVertexAttribI4i")); gl.vertexAttribI1ui = @ptrCast(c.wglGetProcAddress("glVertexAttribI1ui")); gl.vertexAttribI2ui = 
@ptrCast(c.wglGetProcAddress("glVertexAttribI2ui")); gl.vertexAttribI3ui = @ptrCast(c.wglGetProcAddress("glVertexAttribI3ui")); gl.vertexAttribI4ui = @ptrCast(c.wglGetProcAddress("glVertexAttribI4ui")); gl.vertexAttribI1iv = @ptrCast(c.wglGetProcAddress("glVertexAttribI1iv")); gl.vertexAttribI2iv = @ptrCast(c.wglGetProcAddress("glVertexAttribI2iv")); gl.vertexAttribI3iv = @ptrCast(c.wglGetProcAddress("glVertexAttribI3iv")); gl.vertexAttribI4iv = @ptrCast(c.wglGetProcAddress("glVertexAttribI4iv")); gl.vertexAttribI1uiv = @ptrCast(c.wglGetProcAddress("glVertexAttribI1uiv")); gl.vertexAttribI2uiv = @ptrCast(c.wglGetProcAddress("glVertexAttribI2uiv")); gl.vertexAttribI3uiv = @ptrCast(c.wglGetProcAddress("glVertexAttribI3uiv")); gl.vertexAttribI4uiv = @ptrCast(c.wglGetProcAddress("glVertexAttribI4uiv")); gl.vertexAttribI4bv = @ptrCast(c.wglGetProcAddress("glVertexAttribI4bv")); gl.vertexAttribI4sv = @ptrCast(c.wglGetProcAddress("glVertexAttribI4sv")); gl.vertexAttribI4ubv = @ptrCast(c.wglGetProcAddress("glVertexAttribI4ubv")); gl.vertexAttribI4usv = @ptrCast(c.wglGetProcAddress("glVertexAttribI4usv")); gl.getUniformuiv = @ptrCast(c.wglGetProcAddress("glGetUniformuiv")); gl.bindFragDataLocation = @ptrCast(c.wglGetProcAddress("glBindFragDataLocation")); gl.getFragDataLocation = @ptrCast(c.wglGetProcAddress("glGetFragDataLocation")); gl.uniform1ui = @ptrCast(c.wglGetProcAddress("glUniform1ui")); gl.uniform2ui = @ptrCast(c.wglGetProcAddress("glUniform2ui")); gl.uniform3ui = @ptrCast(c.wglGetProcAddress("glUniform3ui")); gl.uniform4ui = @ptrCast(c.wglGetProcAddress("glUniform4ui")); gl.uniform1uiv = @ptrCast(c.wglGetProcAddress("glUniform1uiv")); gl.uniform2uiv = @ptrCast(c.wglGetProcAddress("glUniform2uiv")); gl.uniform3uiv = @ptrCast(c.wglGetProcAddress("glUniform3uiv")); gl.uniform4uiv = @ptrCast(c.wglGetProcAddress("glUniform4uiv")); gl.texParameterIiv = @ptrCast(c.wglGetProcAddress("glTexParameterIiv")); gl.texParameterIuiv = @ptrCast(c.wglGetProcAddress("glTexParameterIuiv")); gl.getTexParameterIiv = @ptrCast(c.wglGetProcAddress("glGetTexParameterIiv")); gl.getTexParameterIuiv = @ptrCast(c.wglGetProcAddress("glGetTexParameterIuiv")); gl.clearBufferiv = @ptrCast(c.wglGetProcAddress("glClearBufferiv")); gl.clearBufferuiv = @ptrCast(c.wglGetProcAddress("glClearBufferuiv")); gl.clearBufferfv = @ptrCast(c.wglGetProcAddress("glClearBufferfv")); gl.clearBufferfi = @ptrCast(c.wglGetProcAddress("glClearBufferfi")); gl.getStringi = @ptrCast(c.wglGetProcAddress("glGetStringi")); gl.isRenderbuffer = @ptrCast(c.wglGetProcAddress("glIsRenderbuffer")); gl.bindRenderbuffer = @ptrCast(c.wglGetProcAddress("glBindRenderbuffer")); gl.deleteRenderbuffers = @ptrCast(c.wglGetProcAddress("glDeleteRenderbuffers")); gl.genRenderbuffers = @ptrCast(c.wglGetProcAddress("glGenRenderbuffers")); gl.renderbufferStorage = @ptrCast(c.wglGetProcAddress("glRenderbufferStorage")); gl.getRenderbufferParameteriv = @ptrCast(c.wglGetProcAddress("glGetRenderbufferParameteriv")); gl.isFramebuffer = @ptrCast(c.wglGetProcAddress("glIsFramebuffer")); gl.bindFramebuffer = @ptrCast(c.wglGetProcAddress("glBindFramebuffer")); gl.deleteFramebuffers = @ptrCast(c.wglGetProcAddress("glDeleteFramebuffers")); gl.genFramebuffers = @ptrCast(c.wglGetProcAddress("glGenFramebuffers")); gl.checkFramebufferStatus = @ptrCast(c.wglGetProcAddress("glCheckFramebufferStatus")); gl.framebufferTexture1D = @ptrCast(c.wglGetProcAddress("glFramebufferTexture1D")); gl.framebufferTexture2D = @ptrCast(c.wglGetProcAddress("glFramebufferTexture2D")); 
            gl.framebufferTexture3D = @ptrCast(c.wglGetProcAddress("glFramebufferTexture3D"));
            gl.framebufferRenderbuffer = @ptrCast(c.wglGetProcAddress("glFramebufferRenderbuffer"));
            gl.getFramebufferAttachmentParameteriv = @ptrCast(c.wglGetProcAddress("glGetFramebufferAttachmentParameteriv"));
            gl.generateMipmap = @ptrCast(c.wglGetProcAddress("glGenerateMipmap"));
            gl.blitFramebuffer = @ptrCast(c.wglGetProcAddress("glBlitFramebuffer"));
            gl.renderbufferStorageMultisample = @ptrCast(c.wglGetProcAddress("glRenderbufferStorageMultisample"));
            gl.framebufferTextureLayer = @ptrCast(c.wglGetProcAddress("glFramebufferTextureLayer"));
            gl.mapBufferRange = @ptrCast(c.wglGetProcAddress("glMapBufferRange"));
            gl.flushMappedBufferRange = @ptrCast(c.wglGetProcAddress("glFlushMappedBufferRange"));
            gl.bindVertexArray = @ptrCast(c.wglGetProcAddress("glBindVertexArray"));
            gl.deleteVertexArrays = @ptrCast(c.wglGetProcAddress("glDeleteVertexArrays"));
            gl.genVertexArrays = @ptrCast(c.wglGetProcAddress("glGenVertexArrays"));
            gl.isVertexArray = @ptrCast(c.wglGetProcAddress("glIsVertexArray"));
        }
        if (version >= 310) {
            gl.drawArraysInstanced = @ptrCast(c.wglGetProcAddress("glDrawArraysInstanced"));
            gl.drawElementsInstanced = @ptrCast(c.wglGetProcAddress("glDrawElementsInstanced"));
            gl.texBuffer = @ptrCast(c.wglGetProcAddress("glTexBuffer"));
            gl.primitiveRestartIndex = @ptrCast(c.wglGetProcAddress("glPrimitiveRestartIndex"));
            gl.copyBufferSubData = @ptrCast(c.wglGetProcAddress("glCopyBufferSubData"));
            gl.getUniformIndices = @ptrCast(c.wglGetProcAddress("glGetUniformIndices"));
            gl.getActiveUniformsiv = @ptrCast(c.wglGetProcAddress("glGetActiveUniformsiv"));
            gl.getActiveUniformName = @ptrCast(c.wglGetProcAddress("glGetActiveUniformName"));
            gl.getUniformBlockIndex = @ptrCast(c.wglGetProcAddress("glGetUniformBlockIndex"));
            gl.getActiveUniformBlockiv = @ptrCast(c.wglGetProcAddress("glGetActiveUniformBlockiv"));
            gl.getActiveUniformBlockName = @ptrCast(c.wglGetProcAddress("glGetActiveUniformBlockName"));
            gl.uniformBlockBinding = @ptrCast(c.wglGetProcAddress("glUniformBlockBinding"));
        }
        if (version >= 320) {
            gl.drawElementsBaseVertex = @ptrCast(c.wglGetProcAddress("glDrawElementsBaseVertex"));
            gl.drawRangeElementsBaseVertex = @ptrCast(c.wglGetProcAddress("glDrawRangeElementsBaseVertex"));
            gl.drawElementsInstancedBaseVertex = @ptrCast(c.wglGetProcAddress("glDrawElementsInstancedBaseVertex"));
            gl.multiDrawElementsBaseVertex = @ptrCast(c.wglGetProcAddress("glMultiDrawElementsBaseVertex"));
            gl.provokingVertex = @ptrCast(c.wglGetProcAddress("glProvokingVertex"));
            gl.fenceSync = @ptrCast(c.wglGetProcAddress("glFenceSync"));
            gl.isSync = @ptrCast(c.wglGetProcAddress("glIsSync"));
            gl.deleteSync = @ptrCast(c.wglGetProcAddress("glDeleteSync"));
            gl.clientWaitSync = @ptrCast(c.wglGetProcAddress("glClientWaitSync"));
            gl.waitSync = @ptrCast(c.wglGetProcAddress("glWaitSync"));
            gl.getInteger64v = @ptrCast(c.wglGetProcAddress("glGetInteger64v"));
            gl.getSynciv = @ptrCast(c.wglGetProcAddress("glGetSynciv"));
            gl.getInteger64i_v = @ptrCast(c.wglGetProcAddress("glGetInteger64i_v"));
            gl.getBufferParameteri64v = @ptrCast(c.wglGetProcAddress("glGetBufferParameteri64v"));
            gl.framebufferTexture = @ptrCast(c.wglGetProcAddress("glFramebufferTexture"));
            gl.texImage2DMultisample = @ptrCast(c.wglGetProcAddress("glTexImage2DMultisample"));
            gl.texImage3DMultisample = @ptrCast(c.wglGetProcAddress("glTexImage3DMultisample"));
            gl.getMultisamplefv = @ptrCast(c.wglGetProcAddress("glGetMultisamplefv"));
            gl.sampleMaski = @ptrCast(c.wglGetProcAddress("glSampleMaski"));
        }
(version >= 330) { gl.bindFragDataLocationIndexed = @ptrCast(c.wglGetProcAddress("glBindFragDataLocationIndexed")); gl.getFragDataIndex = @ptrCast(c.wglGetProcAddress("glGetFragDataIndex")); gl.genSamplers = @ptrCast(c.wglGetProcAddress("glGenSamplers")); gl.deleteSamplers = @ptrCast(c.wglGetProcAddress("glDeleteSamplers")); gl.isSampler = @ptrCast(c.wglGetProcAddress("glIsSampler")); gl.bindSampler = @ptrCast(c.wglGetProcAddress("glBindSampler")); gl.samplerParameteri = @ptrCast(c.wglGetProcAddress("glSamplerParameteri")); gl.samplerParameteriv = @ptrCast(c.wglGetProcAddress("glSamplerParameteriv")); gl.samplerParameterf = @ptrCast(c.wglGetProcAddress("glSamplerParameterf")); gl.samplerParameterfv = @ptrCast(c.wglGetProcAddress("glSamplerParameterfv")); gl.samplerParameterIiv = @ptrCast(c.wglGetProcAddress("glSamplerParameterIiv")); gl.samplerParameterIuiv = @ptrCast(c.wglGetProcAddress("glSamplerParameterIuiv")); gl.getSamplerParameteriv = @ptrCast(c.wglGetProcAddress("glGetSamplerParameteriv")); gl.getSamplerParameterIiv = @ptrCast(c.wglGetProcAddress("glGetSamplerParameterIiv")); gl.getSamplerParameterfv = @ptrCast(c.wglGetProcAddress("glGetSamplerParameterfv")); gl.getSamplerParameterIuiv = @ptrCast(c.wglGetProcAddress("glGetSamplerParameterIuiv")); gl.queryCounter = @ptrCast(c.wglGetProcAddress("glQueryCounter")); gl.getQueryObjecti64v = @ptrCast(c.wglGetProcAddress("glGetQueryObjecti64v")); gl.getQueryObjectui64v = @ptrCast(c.wglGetProcAddress("glGetQueryObjectui64v")); gl.vertexAttribDivisor = @ptrCast(c.wglGetProcAddress("glVertexAttribDivisor")); gl.vertexAttribP1ui = @ptrCast(c.wglGetProcAddress("glVertexAttribP1ui")); gl.vertexAttribP1uiv = @ptrCast(c.wglGetProcAddress("glVertexAttribP1uiv")); gl.vertexAttribP2ui = @ptrCast(c.wglGetProcAddress("glVertexAttribP2ui")); gl.vertexAttribP2uiv = @ptrCast(c.wglGetProcAddress("glVertexAttribP2uiv")); gl.vertexAttribP3ui = @ptrCast(c.wglGetProcAddress("glVertexAttribP3ui")); gl.vertexAttribP3uiv = @ptrCast(c.wglGetProcAddress("glVertexAttribP3uiv")); gl.vertexAttribP4ui = @ptrCast(c.wglGetProcAddress("glVertexAttribP4ui")); gl.vertexAttribP4uiv = @ptrCast(c.wglGetProcAddress("glVertexAttribP4uiv")); } if (version >= 400) { gl.minSampleShading = @ptrCast(c.wglGetProcAddress("glMinSampleShading")); gl.blendEquationi = @ptrCast(c.wglGetProcAddress("glBlendEquationi")); gl.blendEquationSeparatei = @ptrCast(c.wglGetProcAddress("glBlendEquationSeparatei")); gl.blendFunci = @ptrCast(c.wglGetProcAddress("glBlendFunci")); gl.blendFuncSeparatei = @ptrCast(c.wglGetProcAddress("glBlendFuncSeparatei")); gl.drawArraysIndirect = @ptrCast(c.wglGetProcAddress("glDrawArraysIndirect")); gl.drawElementsIndirect = @ptrCast(c.wglGetProcAddress("glDrawElementsIndirect")); gl.uniform1d = @ptrCast(c.wglGetProcAddress("glUniform1d")); gl.uniform2d = @ptrCast(c.wglGetProcAddress("glUniform2d")); gl.uniform3d = @ptrCast(c.wglGetProcAddress("glUniform3d")); gl.uniform4d = @ptrCast(c.wglGetProcAddress("glUniform4d")); gl.uniform1dv = @ptrCast(c.wglGetProcAddress("glUniform1dv")); gl.uniform2dv = @ptrCast(c.wglGetProcAddress("glUniform2dv")); gl.uniform3dv = @ptrCast(c.wglGetProcAddress("glUniform3dv")); gl.uniform4dv = @ptrCast(c.wglGetProcAddress("glUniform4dv")); gl.uniformMatrix2dv = @ptrCast(c.wglGetProcAddress("glUniformMatrix2dv")); gl.uniformMatrix3dv = @ptrCast(c.wglGetProcAddress("glUniformMatrix3dv")); gl.uniformMatrix4dv = @ptrCast(c.wglGetProcAddress("glUniformMatrix4dv")); gl.uniformMatrix2x3dv = @ptrCast(c.wglGetProcAddress("glUniformMatrix2x3dv")); 
gl.uniformMatrix2x4dv = @ptrCast(c.wglGetProcAddress("glUniformMatrix2x4dv")); gl.uniformMatrix3x2dv = @ptrCast(c.wglGetProcAddress("glUniformMatrix3x2dv")); gl.uniformMatrix3x4dv = @ptrCast(c.wglGetProcAddress("glUniformMatrix3x4dv")); gl.uniformMatrix4x2dv = @ptrCast(c.wglGetProcAddress("glUniformMatrix4x2dv")); gl.uniformMatrix4x3dv = @ptrCast(c.wglGetProcAddress("glUniformMatrix4x3dv")); gl.getUniformdv = @ptrCast(c.wglGetProcAddress("glGetUniformdv")); gl.getSubroutineUniformLocation = @ptrCast(c.wglGetProcAddress("glGetSubroutineUniformLocation")); gl.getSubroutineIndex = @ptrCast(c.wglGetProcAddress("glGetSubroutineIndex")); gl.getActiveSubroutineUniformiv = @ptrCast(c.wglGetProcAddress("glGetActiveSubroutineUniformiv")); gl.getActiveSubroutineUniformName = @ptrCast(c.wglGetProcAddress("glGetActiveSubroutineUniformName")); gl.getActiveSubroutineName = @ptrCast(c.wglGetProcAddress("glGetActiveSubroutineName")); gl.uniformSubroutinesuiv = @ptrCast(c.wglGetProcAddress("glUniformSubroutinesuiv")); gl.getUniformSubroutineuiv = @ptrCast(c.wglGetProcAddress("glGetUniformSubroutineuiv")); gl.getProgramStageiv = @ptrCast(c.wglGetProcAddress("glGetProgramStageiv")); gl.patchParameteri = @ptrCast(c.wglGetProcAddress("glPatchParameteri")); gl.patchParameterfv = @ptrCast(c.wglGetProcAddress("glPatchParameterfv")); gl.bindTransformFeedback = @ptrCast(c.wglGetProcAddress("glBindTransformFeedback")); gl.deleteTransformFeedbacks = @ptrCast(c.wglGetProcAddress("glDeleteTransformFeedbacks")); gl.genTransformFeedbacks = @ptrCast(c.wglGetProcAddress("glGenTransformFeedbacks")); gl.isTransformFeedback = @ptrCast(c.wglGetProcAddress("glIsTransformFeedback")); gl.pauseTransformFeedback = @ptrCast(c.wglGetProcAddress("glPauseTransformFeedback")); gl.resumeTransformFeedback = @ptrCast(c.wglGetProcAddress("glResumeTransformFeedback")); gl.drawTransformFeedback = @ptrCast(c.wglGetProcAddress("glDrawTransformFeedback")); gl.drawTransformFeedbackStream = @ptrCast(c.wglGetProcAddress("glDrawTransformFeedbackStream")); gl.beginQueryIndexed = @ptrCast(c.wglGetProcAddress("glBeginQueryIndexed")); gl.endQueryIndexed = @ptrCast(c.wglGetProcAddress("glEndQueryIndexed")); gl.getQueryIndexediv = @ptrCast(c.wglGetProcAddress("glGetQueryIndexediv")); } if (version >= 410) { gl.releaseShaderCompiler = @ptrCast(c.wglGetProcAddress("glReleaseShaderCompiler")); gl.shaderBinary = @ptrCast(c.wglGetProcAddress("glShaderBinary")); gl.getShaderPrecisionFormat = @ptrCast(c.wglGetProcAddress("glGetShaderPrecisionFormat")); gl.depthRangef = @ptrCast(c.wglGetProcAddress("glDepthRangef")); gl.clearDepthf = @ptrCast(c.wglGetProcAddress("glClearDepthf")); gl.getProgramBinary = @ptrCast(c.wglGetProcAddress("glGetProgramBinary")); gl.programBinary = @ptrCast(c.wglGetProcAddress("glProgramBinary")); gl.programParameteri = @ptrCast(c.wglGetProcAddress("glProgramParameteri")); gl.useProgramStages = @ptrCast(c.wglGetProcAddress("glUseProgramStages")); gl.activeShaderProgram = @ptrCast(c.wglGetProcAddress("glActiveShaderProgram")); gl.createShaderProgramv = @ptrCast(c.wglGetProcAddress("glCreateShaderProgramv")); gl.bindProgramPipeline = @ptrCast(c.wglGetProcAddress("glBindProgramPipeline")); gl.deleteProgramPipelines = @ptrCast(c.wglGetProcAddress("glDeleteProgramPipelines")); gl.genProgramPipelines = @ptrCast(c.wglGetProcAddress("glGenProgramPipelines")); gl.isProgramPipeline = @ptrCast(c.wglGetProcAddress("glIsProgramPipeline")); gl.getProgramPipelineiv = @ptrCast(c.wglGetProcAddress("glGetProgramPipelineiv")); gl.programUniform1i = 
@ptrCast(c.wglGetProcAddress("glProgramUniform1i")); gl.programUniform1iv = @ptrCast(c.wglGetProcAddress("glProgramUniform1iv")); gl.programUniform1f = @ptrCast(c.wglGetProcAddress("glProgramUniform1f")); gl.programUniform1fv = @ptrCast(c.wglGetProcAddress("glProgramUniform1fv")); gl.programUniform1d = @ptrCast(c.wglGetProcAddress("glProgramUniform1d")); gl.programUniform1dv = @ptrCast(c.wglGetProcAddress("glProgramUniform1dv")); gl.programUniform1ui = @ptrCast(c.wglGetProcAddress("glProgramUniform1ui")); gl.programUniform1uiv = @ptrCast(c.wglGetProcAddress("glProgramUniform1uiv")); gl.programUniform2i = @ptrCast(c.wglGetProcAddress("glProgramUniform2i")); gl.programUniform2iv = @ptrCast(c.wglGetProcAddress("glProgramUniform2iv")); gl.programUniform2f = @ptrCast(c.wglGetProcAddress("glProgramUniform2f")); gl.programUniform2fv = @ptrCast(c.wglGetProcAddress("glProgramUniform2fv")); gl.programUniform2d = @ptrCast(c.wglGetProcAddress("glProgramUniform2d")); gl.programUniform2dv = @ptrCast(c.wglGetProcAddress("glProgramUniform2dv")); gl.programUniform2ui = @ptrCast(c.wglGetProcAddress("glProgramUniform2ui")); gl.programUniform2uiv = @ptrCast(c.wglGetProcAddress("glProgramUniform2uiv")); gl.programUniform3i = @ptrCast(c.wglGetProcAddress("glProgramUniform3i")); gl.programUniform3iv = @ptrCast(c.wglGetProcAddress("glProgramUniform3iv")); gl.programUniform3f = @ptrCast(c.wglGetProcAddress("glProgramUniform3f")); gl.programUniform3fv = @ptrCast(c.wglGetProcAddress("glProgramUniform3fv")); gl.programUniform3d = @ptrCast(c.wglGetProcAddress("glProgramUniform3d")); gl.programUniform3dv = @ptrCast(c.wglGetProcAddress("glProgramUniform3dv")); gl.programUniform3ui = @ptrCast(c.wglGetProcAddress("glProgramUniform3ui")); gl.programUniform3uiv = @ptrCast(c.wglGetProcAddress("glProgramUniform3uiv")); gl.programUniform4i = @ptrCast(c.wglGetProcAddress("glProgramUniform4i")); gl.programUniform4iv = @ptrCast(c.wglGetProcAddress("glProgramUniform4iv")); gl.programUniform4f = @ptrCast(c.wglGetProcAddress("glProgramUniform4f")); gl.programUniform4fv = @ptrCast(c.wglGetProcAddress("glProgramUniform4fv")); gl.programUniform4d = @ptrCast(c.wglGetProcAddress("glProgramUniform4d")); gl.programUniform4dv = @ptrCast(c.wglGetProcAddress("glProgramUniform4dv")); gl.programUniform4ui = @ptrCast(c.wglGetProcAddress("glProgramUniform4ui")); gl.programUniform4uiv = @ptrCast(c.wglGetProcAddress("glProgramUniform4uiv")); gl.programUniformMatrix2fv = @ptrCast(c.wglGetProcAddress("glProgramUniformMatrix2fv")); gl.programUniformMatrix3fv = @ptrCast(c.wglGetProcAddress("glProgramUniformMatrix3fv")); gl.programUniformMatrix4fv = @ptrCast(c.wglGetProcAddress("glProgramUniformMatrix4fv")); gl.programUniformMatrix2dv = @ptrCast(c.wglGetProcAddress("glProgramUniformMatrix2dv")); gl.programUniformMatrix3dv = @ptrCast(c.wglGetProcAddress("glProgramUniformMatrix3dv")); gl.programUniformMatrix4dv = @ptrCast(c.wglGetProcAddress("glProgramUniformMatrix4dv")); gl.programUniformMatrix2x3fv = @ptrCast(c.wglGetProcAddress("glProgramUniformMatrix2x3fv")); gl.programUniformMatrix3x2fv = @ptrCast(c.wglGetProcAddress("glProgramUniformMatrix3x2fv")); gl.programUniformMatrix2x4fv = @ptrCast(c.wglGetProcAddress("glProgramUniformMatrix2x4fv")); gl.programUniformMatrix4x2fv = @ptrCast(c.wglGetProcAddress("glProgramUniformMatrix4x2fv")); gl.programUniformMatrix3x4fv = @ptrCast(c.wglGetProcAddress("glProgramUniformMatrix3x4fv")); gl.programUniformMatrix4x3fv = @ptrCast(c.wglGetProcAddress("glProgramUniformMatrix4x3fv")); gl.programUniformMatrix2x3dv = 
@ptrCast(c.wglGetProcAddress("glProgramUniformMatrix2x3dv")); gl.programUniformMatrix3x2dv = @ptrCast(c.wglGetProcAddress("glProgramUniformMatrix3x2dv")); gl.programUniformMatrix2x4dv = @ptrCast(c.wglGetProcAddress("glProgramUniformMatrix2x4dv")); gl.programUniformMatrix4x2dv = @ptrCast(c.wglGetProcAddress("glProgramUniformMatrix4x2dv")); gl.programUniformMatrix3x4dv = @ptrCast(c.wglGetProcAddress("glProgramUniformMatrix3x4dv")); gl.programUniformMatrix4x3dv = @ptrCast(c.wglGetProcAddress("glProgramUniformMatrix4x3dv")); gl.validateProgramPipeline = @ptrCast(c.wglGetProcAddress("glValidateProgramPipeline")); gl.getProgramPipelineInfoLog = @ptrCast(c.wglGetProcAddress("glGetProgramPipelineInfoLog")); gl.vertexAttribL1d = @ptrCast(c.wglGetProcAddress("glVertexAttribL1d")); gl.vertexAttribL2d = @ptrCast(c.wglGetProcAddress("glVertexAttribL2d")); gl.vertexAttribL3d = @ptrCast(c.wglGetProcAddress("glVertexAttribL3d")); gl.vertexAttribL4d = @ptrCast(c.wglGetProcAddress("glVertexAttribL4d")); gl.vertexAttribL1dv = @ptrCast(c.wglGetProcAddress("glVertexAttribL1dv")); gl.vertexAttribL2dv = @ptrCast(c.wglGetProcAddress("glVertexAttribL2dv")); gl.vertexAttribL3dv = @ptrCast(c.wglGetProcAddress("glVertexAttribL3dv")); gl.vertexAttribL4dv = @ptrCast(c.wglGetProcAddress("glVertexAttribL4dv")); gl.vertexAttribLPointer = @ptrCast(c.wglGetProcAddress("glVertexAttribLPointer")); gl.getVertexAttribLdv = @ptrCast(c.wglGetProcAddress("glGetVertexAttribLdv")); gl.viewportArrayv = @ptrCast(c.wglGetProcAddress("glViewportArrayv")); gl.viewportIndexedf = @ptrCast(c.wglGetProcAddress("glViewportIndexedf")); gl.viewportIndexedfv = @ptrCast(c.wglGetProcAddress("glViewportIndexedfv")); gl.scissorArrayv = @ptrCast(c.wglGetProcAddress("glScissorArrayv")); gl.scissorIndexed = @ptrCast(c.wglGetProcAddress("glScissorIndexed")); gl.scissorIndexedv = @ptrCast(c.wglGetProcAddress("glScissorIndexedv")); gl.depthRangeArrayv = @ptrCast(c.wglGetProcAddress("glDepthRangeArrayv")); gl.depthRangeIndexed = @ptrCast(c.wglGetProcAddress("glDepthRangeIndexed")); gl.getFloati_v = @ptrCast(c.wglGetProcAddress("glGetFloati_v")); gl.getDoublei_v = @ptrCast(c.wglGetProcAddress("glGetDoublei_v")); } if (version >= 420) { gl.drawArraysInstancedBaseInstance = @ptrCast(c.wglGetProcAddress("glDrawArraysInstancedBaseInstance")); gl.drawElementsInstancedBaseInstance = @ptrCast(c.wglGetProcAddress("glDrawElementsInstancedBaseInstance")); gl.drawElementsInstancedBaseVertexBaseInstance = @ptrCast(c.wglGetProcAddress("glDrawElementsInstancedBaseVertexBaseInstance")); gl.getInternalformativ = @ptrCast(c.wglGetProcAddress("glGetInternalformativ")); gl.getActiveAtomicCounterBufferiv = @ptrCast(c.wglGetProcAddress("glGetActiveAtomicCounterBufferiv")); gl.bindImageTexture = @ptrCast(c.wglGetProcAddress("glBindImageTexture")); gl.memoryBarrier = @ptrCast(c.wglGetProcAddress("glMemoryBarrier")); gl.texStorage1D = @ptrCast(c.wglGetProcAddress("glTexStorage1D")); gl.texStorage2D = @ptrCast(c.wglGetProcAddress("glTexStorage2D")); gl.texStorage3D = @ptrCast(c.wglGetProcAddress("glTexStorage3D")); gl.drawTransformFeedbackInstanced = @ptrCast(c.wglGetProcAddress("glDrawTransformFeedbackInstanced")); gl.drawTransformFeedbackStreamInstanced = @ptrCast(c.wglGetProcAddress("glDrawTransformFeedbackStreamInstanced")); } if (version >= 430) { gl.clearBufferData = @ptrCast(c.wglGetProcAddress("glClearBufferData")); gl.clearBufferSubData = @ptrCast(c.wglGetProcAddress("glClearBufferSubData")); gl.dispatchCompute = @ptrCast(c.wglGetProcAddress("glDispatchCompute")); 
gl.dispatchComputeIndirect = @ptrCast(c.wglGetProcAddress("glDispatchComputeIndirect")); gl.copyImageSubData = @ptrCast(c.wglGetProcAddress("glCopyImageSubData")); gl.framebufferParameteri = @ptrCast(c.wglGetProcAddress("glFramebufferParameteri")); gl.getFramebufferParameteriv = @ptrCast(c.wglGetProcAddress("glGetFramebufferParameteriv")); gl.getInternalformati64v = @ptrCast(c.wglGetProcAddress("glGetInternalformati64v")); gl.invalidateTexSubImage = @ptrCast(c.wglGetProcAddress("glInvalidateTexSubImage")); gl.invalidateTexImage = @ptrCast(c.wglGetProcAddress("glInvalidateTexImage")); gl.invalidateBufferSubData = @ptrCast(c.wglGetProcAddress("glInvalidateBufferSubData")); gl.invalidateBufferData = @ptrCast(c.wglGetProcAddress("glInvalidateBufferData")); gl.invalidateFramebuffer = @ptrCast(c.wglGetProcAddress("glInvalidateFramebuffer")); gl.invalidateSubFramebuffer = @ptrCast(c.wglGetProcAddress("glInvalidateSubFramebuffer")); gl.multiDrawArraysIndirect = @ptrCast(c.wglGetProcAddress("glMultiDrawArraysIndirect")); gl.multiDrawElementsIndirect = @ptrCast(c.wglGetProcAddress("glMultiDrawElementsIndirect")); gl.getProgramInterfaceiv = @ptrCast(c.wglGetProcAddress("glGetProgramInterfaceiv")); gl.getProgramResourceIndex = @ptrCast(c.wglGetProcAddress("glGetProgramResourceIndex")); gl.getProgramResourceName = @ptrCast(c.wglGetProcAddress("glGetProgramResourceName")); gl.getProgramResourceiv = @ptrCast(c.wglGetProcAddress("glGetProgramResourceiv")); gl.getProgramResourceLocation = @ptrCast(c.wglGetProcAddress("glGetProgramResourceLocation")); gl.getProgramResourceLocationIndex = @ptrCast(c.wglGetProcAddress("glGetProgramResourceLocationIndex")); gl.shaderStorageBlockBinding = @ptrCast(c.wglGetProcAddress("glShaderStorageBlockBinding")); gl.texBufferRange = @ptrCast(c.wglGetProcAddress("glTexBufferRange")); gl.texStorage2DMultisample = @ptrCast(c.wglGetProcAddress("glTexStorage2DMultisample")); gl.texStorage3DMultisample = @ptrCast(c.wglGetProcAddress("glTexStorage3DMultisample")); gl.textureView = @ptrCast(c.wglGetProcAddress("glTextureView")); gl.bindVertexBuffer = @ptrCast(c.wglGetProcAddress("glBindVertexBuffer")); gl.vertexAttribFormat = @ptrCast(c.wglGetProcAddress("glVertexAttribFormat")); gl.vertexAttribIFormat = @ptrCast(c.wglGetProcAddress("glVertexAttribIFormat")); gl.vertexAttribLFormat = @ptrCast(c.wglGetProcAddress("glVertexAttribLFormat")); gl.vertexAttribBinding = @ptrCast(c.wglGetProcAddress("glVertexAttribBinding")); gl.vertexBindingDivisor = @ptrCast(c.wglGetProcAddress("glVertexBindingDivisor")); gl.debugMessageControl = @ptrCast(c.wglGetProcAddress("glDebugMessageControl")); gl.debugMessageInsert = @ptrCast(c.wglGetProcAddress("glDebugMessageInsert")); gl.debugMessageCallback = @ptrCast(c.wglGetProcAddress("glDebugMessageCallback")); gl.getDebugMessageLog = @ptrCast(c.wglGetProcAddress("glGetDebugMessageLog")); gl.pushDebugGroup = @ptrCast(c.wglGetProcAddress("glPushDebugGroup")); gl.popDebugGroup = @ptrCast(c.wglGetProcAddress("glPopDebugGroup")); gl.objectLabel = @ptrCast(c.wglGetProcAddress("glObjectLabel")); gl.getObjectLabel = @ptrCast(c.wglGetProcAddress("glGetObjectLabel")); gl.objectPtrLabel = @ptrCast(c.wglGetProcAddress("glObjectPtrLabel")); gl.getObjectPtrLabel = @ptrCast(c.wglGetProcAddress("glGetObjectPtrLabel")); } if (version >= 440) { gl.bufferStorage = @ptrCast(c.wglGetProcAddress("glBufferStorage")); gl.clearTexImage = @ptrCast(c.wglGetProcAddress("glClearTexImage")); gl.clearTexSubImage = @ptrCast(c.wglGetProcAddress("glClearTexSubImage")); gl.bindBuffersBase 
= @ptrCast(c.wglGetProcAddress("glBindBuffersBase")); gl.bindBuffersRange = @ptrCast(c.wglGetProcAddress("glBindBuffersRange")); gl.bindTextures = @ptrCast(c.wglGetProcAddress("glBindTextures")); gl.bindSamplers = @ptrCast(c.wglGetProcAddress("glBindSamplers")); gl.bindImageTextures = @ptrCast(c.wglGetProcAddress("glBindImageTextures")); gl.bindVertexBuffers = @ptrCast(c.wglGetProcAddress("glBindVertexBuffers")); } if (version >= 450) { gl.clipControl = @ptrCast(c.wglGetProcAddress("glClipControl")); gl.createTransformFeedbacks = @ptrCast(c.wglGetProcAddress("glCreateTransformFeedbacks")); gl.transformFeedbackBufferBase = @ptrCast(c.wglGetProcAddress("glTransformFeedbackBufferBase")); gl.transformFeedbackBufferRange = @ptrCast(c.wglGetProcAddress("glTransformFeedbackBufferRange")); gl.getTransformFeedbackiv = @ptrCast(c.wglGetProcAddress("glGetTransformFeedbackiv")); gl.getTransformFeedbacki_v = @ptrCast(c.wglGetProcAddress("glGetTransformFeedbacki_v")); gl.getTransformFeedbacki64_v = @ptrCast(c.wglGetProcAddress("glGetTransformFeedbacki64_v")); gl.createBuffers = @ptrCast(c.wglGetProcAddress("glCreateBuffers")); gl.namedBufferStorage = @ptrCast(c.wglGetProcAddress("glNamedBufferStorage")); gl.namedBufferData = @ptrCast(c.wglGetProcAddress("glNamedBufferData")); gl.namedBufferSubData = @ptrCast(c.wglGetProcAddress("glNamedBufferSubData")); gl.copyNamedBufferSubData = @ptrCast(c.wglGetProcAddress("glCopyNamedBufferSubData")); gl.clearNamedBufferData = @ptrCast(c.wglGetProcAddress("glClearNamedBufferData")); gl.clearNamedBufferSubData = @ptrCast(c.wglGetProcAddress("glClearNamedBufferSubData")); gl.mapNamedBuffer = @ptrCast(c.wglGetProcAddress("glMapNamedBuffer")); gl.mapNamedBufferRange = @ptrCast(c.wglGetProcAddress("glMapNamedBufferRange")); gl.unmapNamedBuffer = @ptrCast(c.wglGetProcAddress("glUnmapNamedBuffer")); gl.flushMappedNamedBufferRange = @ptrCast(c.wglGetProcAddress("glFlushMappedNamedBufferRange")); gl.getNamedBufferParameteriv = @ptrCast(c.wglGetProcAddress("glGetNamedBufferParameteriv")); gl.getNamedBufferParameteri64v = @ptrCast(c.wglGetProcAddress("glGetNamedBufferParameteri64v")); gl.getNamedBufferPointerv = @ptrCast(c.wglGetProcAddress("glGetNamedBufferPointerv")); gl.getNamedBufferSubData = @ptrCast(c.wglGetProcAddress("glGetNamedBufferSubData")); gl.createFramebuffers = @ptrCast(c.wglGetProcAddress("glCreateFramebuffers")); gl.namedFramebufferRenderbuffer = @ptrCast(c.wglGetProcAddress("glNamedFramebufferRenderbuffer")); gl.namedFramebufferParameteri = @ptrCast(c.wglGetProcAddress("glNamedFramebufferParameteri")); gl.namedFramebufferTexture = @ptrCast(c.wglGetProcAddress("glNamedFramebufferTexture")); gl.namedFramebufferTextureLayer = @ptrCast(c.wglGetProcAddress("glNamedFramebufferTextureLayer")); gl.namedFramebufferDrawBuffer = @ptrCast(c.wglGetProcAddress("glNamedFramebufferDrawBuffer")); gl.namedFramebufferDrawBuffers = @ptrCast(c.wglGetProcAddress("glNamedFramebufferDrawBuffers")); gl.namedFramebufferReadBuffer = @ptrCast(c.wglGetProcAddress("glNamedFramebufferReadBuffer")); gl.invalidateNamedFramebufferData = @ptrCast(c.wglGetProcAddress("glInvalidateNamedFramebufferData")); gl.invalidateNamedFramebufferSubData = @ptrCast(c.wglGetProcAddress("glInvalidateNamedFramebufferSubData")); gl.clearNamedFramebufferiv = @ptrCast(c.wglGetProcAddress("glClearNamedFramebufferiv")); gl.clearNamedFramebufferuiv = @ptrCast(c.wglGetProcAddress("glClearNamedFramebufferuiv")); gl.clearNamedFramebufferfv = @ptrCast(c.wglGetProcAddress("glClearNamedFramebufferfv")); 
gl.clearNamedFramebufferfi = @ptrCast(c.wglGetProcAddress("glClearNamedFramebufferfi")); gl.blitNamedFramebuffer = @ptrCast(c.wglGetProcAddress("glBlitNamedFramebuffer")); gl.checkNamedFramebufferStatus = @ptrCast(c.wglGetProcAddress("glCheckNamedFramebufferStatus")); gl.getNamedFramebufferParameteriv = @ptrCast(c.wglGetProcAddress("glGetNamedFramebufferParameteriv")); gl.getNamedFramebufferAttachmentParameteriv = @ptrCast(c.wglGetProcAddress("glGetNamedFramebufferAttachmentParameteriv")); gl.createRenderbuffers = @ptrCast(c.wglGetProcAddress("glCreateRenderbuffers")); gl.namedRenderbufferStorage = @ptrCast(c.wglGetProcAddress("glNamedRenderbufferStorage")); gl.namedRenderbufferStorageMultisample = @ptrCast(c.wglGetProcAddress("glNamedRenderbufferStorageMultisample")); gl.getNamedRenderbufferParameteriv = @ptrCast(c.wglGetProcAddress("glGetNamedRenderbufferParameteriv")); gl.createTextures = @ptrCast(c.wglGetProcAddress("glCreateTextures")); gl.textureBuffer = @ptrCast(c.wglGetProcAddress("glTextureBuffer")); gl.textureBufferRange = @ptrCast(c.wglGetProcAddress("glTextureBufferRange")); gl.textureStorage1D = @ptrCast(c.wglGetProcAddress("glTextureStorage1D")); gl.textureStorage2D = @ptrCast(c.wglGetProcAddress("glTextureStorage2D")); gl.textureStorage3D = @ptrCast(c.wglGetProcAddress("glTextureStorage3D")); gl.textureStorage2DMultisample = @ptrCast(c.wglGetProcAddress("glTextureStorage2DMultisample")); gl.textureStorage3DMultisample = @ptrCast(c.wglGetProcAddress("glTextureStorage3DMultisample")); gl.textureSubImage1D = @ptrCast(c.wglGetProcAddress("glTextureSubImage1D")); gl.textureSubImage2D = @ptrCast(c.wglGetProcAddress("glTextureSubImage2D")); gl.textureSubImage3D = @ptrCast(c.wglGetProcAddress("glTextureSubImage3D")); gl.compressedTextureSubImage1D = @ptrCast(c.wglGetProcAddress("glCompressedTextureSubImage1D")); gl.compressedTextureSubImage2D = @ptrCast(c.wglGetProcAddress("glCompressedTextureSubImage2D")); gl.compressedTextureSubImage3D = @ptrCast(c.wglGetProcAddress("glCompressedTextureSubImage3D")); gl.copyTextureSubImage1D = @ptrCast(c.wglGetProcAddress("glCopyTextureSubImage1D")); gl.copyTextureSubImage2D = @ptrCast(c.wglGetProcAddress("glCopyTextureSubImage2D")); gl.copyTextureSubImage3D = @ptrCast(c.wglGetProcAddress("glCopyTextureSubImage3D")); gl.textureParameterf = @ptrCast(c.wglGetProcAddress("glTextureParameterf")); gl.textureParameterfv = @ptrCast(c.wglGetProcAddress("glTextureParameterfv")); gl.textureParameteri = @ptrCast(c.wglGetProcAddress("glTextureParameteri")); gl.textureParameterIiv = @ptrCast(c.wglGetProcAddress("glTextureParameterIiv")); gl.textureParameterIuiv = @ptrCast(c.wglGetProcAddress("glTextureParameterIuiv")); gl.textureParameteriv = @ptrCast(c.wglGetProcAddress("glTextureParameteriv")); gl.generateTextureMipmap = @ptrCast(c.wglGetProcAddress("glGenerateTextureMipmap")); gl.bindTextureUnit = @ptrCast(c.wglGetProcAddress("glBindTextureUnit")); gl.getTextureImage = @ptrCast(c.wglGetProcAddress("glGetTextureImage")); gl.getCompressedTextureImage = @ptrCast(c.wglGetProcAddress("glGetCompressedTextureImage")); gl.getTextureLevelParameterfv = @ptrCast(c.wglGetProcAddress("glGetTextureLevelParameterfv")); gl.getTextureLevelParameteriv = @ptrCast(c.wglGetProcAddress("glGetTextureLevelParameteriv")); gl.getTextureParameterfv = @ptrCast(c.wglGetProcAddress("glGetTextureParameterfv")); gl.getTextureParameterIiv = @ptrCast(c.wglGetProcAddress("glGetTextureParameterIiv")); gl.getTextureParameterIuiv = @ptrCast(c.wglGetProcAddress("glGetTextureParameterIuiv")); 
gl.getTextureParameteriv = @ptrCast(c.wglGetProcAddress("glGetTextureParameteriv")); gl.createVertexArrays = @ptrCast(c.wglGetProcAddress("glCreateVertexArrays")); gl.disableVertexArrayAttrib = @ptrCast(c.wglGetProcAddress("glDisableVertexArrayAttrib")); gl.enableVertexArrayAttrib = @ptrCast(c.wglGetProcAddress("glEnableVertexArrayAttrib")); gl.vertexArrayElementBuffer = @ptrCast(c.wglGetProcAddress("glVertexArrayElementBuffer")); gl.vertexArrayVertexBuffer = @ptrCast(c.wglGetProcAddress("glVertexArrayVertexBuffer")); gl.vertexArrayVertexBuffers = @ptrCast(c.wglGetProcAddress("glVertexArrayVertexBuffers")); gl.vertexArrayAttribBinding = @ptrCast(c.wglGetProcAddress("glVertexArrayAttribBinding")); gl.vertexArrayAttribFormat = @ptrCast(c.wglGetProcAddress("glVertexArrayAttribFormat")); gl.vertexArrayAttribIFormat = @ptrCast(c.wglGetProcAddress("glVertexArrayAttribIFormat")); gl.vertexArrayAttribLFormat = @ptrCast(c.wglGetProcAddress("glVertexArrayAttribLFormat")); gl.vertexArrayBindingDivisor = @ptrCast(c.wglGetProcAddress("glVertexArrayBindingDivisor")); gl.getVertexArrayiv = @ptrCast(c.wglGetProcAddress("glGetVertexArrayiv")); gl.getVertexArrayIndexediv = @ptrCast(c.wglGetProcAddress("glGetVertexArrayIndexediv")); gl.getVertexArrayIndexed64iv = @ptrCast(c.wglGetProcAddress("glGetVertexArrayIndexed64iv")); gl.createSamplers = @ptrCast(c.wglGetProcAddress("glCreateSamplers")); gl.createProgramPipelines = @ptrCast(c.wglGetProcAddress("glCreateProgramPipelines")); gl.createQueries = @ptrCast(c.wglGetProcAddress("glCreateQueries")); gl.getQueryBufferObjecti64v = @ptrCast(c.wglGetProcAddress("glGetQueryBufferObjecti64v")); gl.getQueryBufferObjectiv = @ptrCast(c.wglGetProcAddress("glGetQueryBufferObjectiv")); gl.getQueryBufferObjectui64v = @ptrCast(c.wglGetProcAddress("glGetQueryBufferObjectui64v")); gl.getQueryBufferObjectuiv = @ptrCast(c.wglGetProcAddress("glGetQueryBufferObjectuiv")); gl.memoryBarrierByRegion = @ptrCast(c.wglGetProcAddress("glMemoryBarrierByRegion")); gl.getTextureSubImage = @ptrCast(c.wglGetProcAddress("glGetTextureSubImage")); gl.getCompressedTextureSubImage = @ptrCast(c.wglGetProcAddress("glGetCompressedTextureSubImage")); gl.getGraphicsResetStatus = @ptrCast(c.wglGetProcAddress("glGetGraphicsResetStatus")); gl.getnCompressedTexImage = @ptrCast(c.wglGetProcAddress("glGetnCompressedTexImage")); gl.getnTexImage = @ptrCast(c.wglGetProcAddress("glGetnTexImage")); gl.getnUniformdv = @ptrCast(c.wglGetProcAddress("glGetnUniformdv")); gl.getnUniformfv = @ptrCast(c.wglGetProcAddress("glGetnUniformfv")); gl.getnUniformiv = @ptrCast(c.wglGetProcAddress("glGetnUniformiv")); gl.getnUniformuiv = @ptrCast(c.wglGetProcAddress("glGetnUniformuiv")); gl.readnPixels = @ptrCast(c.wglGetProcAddress("glReadnPixels")); gl.textureBarrier = @ptrCast(c.wglGetProcAddress("glTextureBarrier")); } if (version >= 460) { gl.specializeShader = @ptrCast(c.wglGetProcAddress("glSpecializeShader")); gl.multiDrawArraysIndirectCount = @ptrCast(c.wglGetProcAddress("glMultiDrawArraysIndirectCount")); gl.multiDrawElementsIndirectCount = @ptrCast(c.wglGetProcAddress("glMultiDrawElementsIndirectCount")); gl.polygonOffsetClamp = @ptrCast(c.wglGetProcAddress("glPolygonOffsetClamp")); } } };
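A caveat worth noting about the loader above: wglGetProcAddress only resolves entry points beyond OpenGL 1.1 for the currently bound context, and some drivers report failure with the sentinel values 1, 2, 3 or -1 rather than null; the 1.1 core functions are instead exported directly by opengl32.dll. A minimal sketch of a loader that covers both cases, assuming the module handle is obtained once via GetModuleHandleA (the `getProcAddress` helper itself is an assumption, not part of this file):

// Sketch: `opengl32` would come from c.GetModuleHandleA("opengl32.dll")
// (or c.LoadLibraryA) before any lookups happen.
fn getProcAddress(opengl32: c.HMODULE, name: [*:0]const u8) c.PROC {
    const proc = c.wglGetProcAddress(name);
    const addr = @intFromPtr(proc);
    // Treat null and the commonly observed driver sentinels as "not found".
    if (addr >= 4 and addr != @as(usize, @bitCast(@as(isize, -1)))) return proc;
    // OpenGL 1.1 entry points are plain exports of opengl32.dll.
    return c.GetProcAddress(opengl32, name);
}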
0
repos/mach-sysgpu/src
repos/mach-sysgpu/src/opengl/c.zig
pub usingnamespace @cImport({
    @cInclude("windows.h");
    @cInclude("GL/glcorearb.h");
    @cInclude("GL/glext.h");
    @cInclude("GL/wglext.h");
});
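The include order here is load-bearing: windows.h has to come before the GL headers so that APIENTRY and the related calling-convention macros are defined when glcorearb.h is parsed. The import also exposes the PFNGL*PROC typedefs from glcorearb.h, so a dispatch table like the `gl` struct populated above can be declared with fully typed function pointers; a hypothetical excerpt (not the actual table definition):

const c = @import("c.zig");

// Hypothetical excerpt: field names mirror the assignments in the loader above,
// and each PFNGL*PROC typedef is an optional function pointer after translation.
const DispatchTable = struct {
    bindVertexArray: c.PFNGLBINDVERTEXARRAYPROC = null,
    createBuffers: c.PFNGLCREATEBUFFERSPROC = null,
    namedBufferStorage: c.PFNGLNAMEDBUFFERSTORAGEPROC = null,
};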
0
repos/mach-sysgpu/src
repos/mach-sysgpu/src/opengl/conv.zig
const sysgpu = @import("../sysgpu/main.zig"); const utils = @import("../utils.zig"); const c = @import("c.zig"); fn stencilEnable(stencil: sysgpu.StencilFaceState) bool { return stencil.compare != .always or stencil.fail_op != .keep or stencil.depth_fail_op != .keep or stencil.pass_op != .keep; } pub fn glAttributeCount(format: sysgpu.VertexFormat) c.GLint { return switch (format) { .undefined => unreachable, .uint8x2 => 2, .uint8x4 => 4, .sint8x2 => 2, .sint8x4 => 4, .unorm8x2 => 2, .unorm8x4 => 4, .snorm8x2 => 2, .snorm8x4 => 4, .uint16x2 => 2, .uint16x4 => 4, .sint16x2 => 2, .sint16x4 => 4, .unorm16x2 => 2, .unorm16x4 => 4, .snorm16x2 => 2, .snorm16x4 => 4, .float16x2 => 2, .float16x4 => 4, .float32 => 1, .float32x2 => 2, .float32x3 => 3, .float32x4 => 4, .uint32 => 1, .uint32x2 => 2, .uint32x3 => 3, .uint32x4 => 4, .sint32 => 1, .sint32x2 => 2, .sint32x3 => 3, .sint32x4 => 4, }; } pub fn glAttributeIsNormalized(format_type: utils.FormatType) c.GLboolean { return switch (format_type) { .unorm, .unorm_srgb, .snorm => c.GL_TRUE, else => c.GL_FALSE, }; } pub fn glAttributeIsInt(format_type: utils.FormatType) bool { return switch (format_type) { .uint, .sint => true, else => false, }; } pub fn glAttributeType(format: sysgpu.VertexFormat) c.GLenum { return switch (format) { .undefined => unreachable, .uint8x2 => c.GL_UNSIGNED_BYTE, .uint8x4 => c.GL_UNSIGNED_BYTE, .sint8x2 => c.GL_BYTE, .sint8x4 => c.GL_BYTE, .unorm8x2 => c.GL_UNSIGNED_BYTE, .unorm8x4 => c.GL_UNSIGNED_BYTE, .snorm8x2 => c.GL_BYTE, .snorm8x4 => c.GL_BYTE, .uint16x2 => c.GL_UNSIGNED_SHORT, .uint16x4 => c.GL_UNSIGNED_SHORT, .sint16x2 => c.GL_SHORT, .sint16x4 => c.GL_SHORT, .unorm16x2 => c.GL_UNSIGNED_SHORT, .unorm16x4 => c.GL_UNSIGNED_SHORT, .snorm16x2 => c.GL_SHORT, .snorm16x4 => c.GL_SHORT, .float16x2 => c.GL_HALF_FLOAT, .float16x4 => c.GL_HALF_FLOAT, .float32 => c.GL_FLOAT, .float32x2 => c.GL_FLOAT, .float32x3 => c.GL_FLOAT, .float32x4 => c.GL_FLOAT, .uint32 => c.GL_UNSIGNED_INT, .uint32x2 => c.GL_UNSIGNED_INT, .uint32x3 => c.GL_UNSIGNED_INT, .uint32x4 => c.GL_UNSIGNED_INT, .sint32 => c.GL_INT, .sint32x2 => c.GL_INT, .sint32x3 => c.GL_INT, .sint32x4 => c.GL_INT, }; } pub fn glBlendFactor(factor: sysgpu.BlendFactor, color: bool) c.GLenum { return switch (factor) { .zero => c.GL_ZERO, .one => c.GL_ONE, .src => c.GL_SRC_COLOR, .one_minus_src => c.GL_ONE_MINUS_SRC_COLOR, .src_alpha => c.GL_SRC_ALPHA, .one_minus_src_alpha => c.GL_ONE_MINUS_SRC_ALPHA, .dst => c.GL_DST_COLOR, .one_minus_dst => c.GL_ONE_MINUS_DST_COLOR, .dst_alpha => c.GL_DST_ALPHA, .one_minus_dst_alpha => c.GL_ONE_MINUS_DST_ALPHA, .src_alpha_saturated => c.GL_SRC_ALPHA_SATURATE, .constant => if (color) c.GL_CONSTANT_COLOR else c.GL_CONSTANT_ALPHA, .one_minus_constant => if (color) c.GL_ONE_MINUS_CONSTANT_COLOR else c.GL_ONE_MINUS_CONSTANT_ALPHA, .src1 => c.GL_SRC1_COLOR, .one_minus_src1 => c.GL_ONE_MINUS_SRC1_COLOR, .src1_alpha => c.GL_SRC1_ALPHA, .one_minus_src1_alpha => c.GL_ONE_MINUS_SRC1_ALPHA, }; } pub fn glBlendOp(op: sysgpu.BlendOperation) c.GLenum { return switch (op) { .add => c.GL_FUNC_ADD, .subtract => c.GL_FUNC_SUBTRACT, .reverse_subtract => c.GL_FUNC_REVERSE_SUBTRACT, .min => c.GL_MIN, .max => c.GL_MAX, }; } //pub fn glBufferDataUsage(usage: sysgpu.Buffer.UsageFlags, mapped_at_creation: sysgpu.Bool32) c.GLenum {} pub fn glBufferStorageFlags(usage: sysgpu.Buffer.UsageFlags, mapped_at_creation: sysgpu.Bool32) c.GLbitfield { var flags: c.GLbitfield = 0; if (mapped_at_creation == .true) flags |= c.GL_MAP_WRITE_BIT; if (usage.map_read) flags |= 
c.GL_MAP_PERSISTENT_BIT | c.GL_MAP_READ_BIT; if (usage.map_write) flags |= c.GL_MAP_PERSISTENT_BIT | c.GL_MAP_COHERENT_BIT | c.GL_MAP_WRITE_BIT; return flags; } pub fn glCompareFunc(func: sysgpu.CompareFunction) c.GLenum { return switch (func) { .undefined => unreachable, .never => c.GL_NEVER, .less => c.GL_LESS, .less_equal => c.GL_LEQUAL, .greater => c.GL_GREATER, .greater_equal => c.GL_GEQUAL, .equal => c.GL_EQUAL, .not_equal => c.GL_NOTEQUAL, .always => c.GL_ALWAYS, }; } pub fn glCullEnabled(cull_mode: sysgpu.CullMode) bool { return switch (cull_mode) { .none => false, else => true, }; } pub fn glCullFace(cull_mode: sysgpu.CullMode) c.GLenum { return switch (cull_mode) { .none => c.GL_BACK, .front => c.GL_FRONT, .back => c.GL_BACK, }; } pub fn glDepthMask(ds: *const sysgpu.DepthStencilState) c.GLboolean { return if (ds.depth_write_enabled == .true) c.GL_TRUE else c.GL_FALSE; } pub fn glDepthTestEnabled(ds: *const sysgpu.DepthStencilState) bool { return ds.depth_compare != .always or ds.depth_write_enabled == .true; } pub fn glFrontFace(front_face: sysgpu.FrontFace) c.GLenum { return switch (front_face) { .ccw => c.GL_CCW, .cw => c.GL_CW, }; } pub fn glIndexType(format: sysgpu.IndexFormat) c.GLenum { return switch (format) { .undefined => unreachable, .uint16 => c.GL_UNSIGNED_SHORT, .uint32 => c.GL_UNSIGNED_INT, }; } pub fn glIndexElementSize(format: sysgpu.IndexFormat) usize { return switch (format) { .undefined => unreachable, .uint16 => 2, .uint32 => 4, }; } pub fn glMapAccess(usage: sysgpu.Buffer.UsageFlags, mapped_at_creation: sysgpu.Bool32) c.GLbitfield { var flags: c.GLbitfield = 0; if (mapped_at_creation == .true) flags |= c.GL_MAP_WRITE_BIT; if (usage.map_read) flags |= c.GL_MAP_PERSISTENT_BIT | c.GL_MAP_READ_BIT; if (usage.map_write) flags |= c.GL_MAP_PERSISTENT_BIT | c.GL_MAP_WRITE_BIT; return flags; } pub fn glPrimitiveMode(topology: sysgpu.PrimitiveTopology) c.GLenum { return switch (topology) { .point_list => c.GL_POINTS, .line_list => c.GL_LINES, .line_strip => c.GL_LINE_STRIP, .triangle_list => c.GL_TRIANGLES, .triangle_strip => c.GL_TRIANGLE_STRIP, }; } pub fn glStencilOp(op: sysgpu.StencilOperation) c.GLenum { return switch (op) { .keep => c.GL_KEEP, .zero => c.GL_ZERO, .replace => c.GL_REPLACE, .invert => c.GL_INVERT, .increment_clamp => c.GL_INCR, .decrement_clamp => c.GL_DECR, .increment_wrap => c.GL_INCR_WRAP, .decrement_wrap => c.GL_DECR_WRAP, }; } pub fn glStencilTestEnabled(ds: *const sysgpu.DepthStencilState) bool { return stencilEnable(ds.stencil_front) or stencilEnable(ds.stencil_back); } pub fn glTargetForBuffer(usage: sysgpu.Buffer.UsageFlags) c.GLenum { // Not sure if this matters anymore - only get to pick one anyway if (usage.index) return c.GL_ELEMENT_ARRAY_BUFFER; if (usage.vertex) return c.GL_ARRAY_BUFFER; if (usage.uniform) return c.GL_UNIFORM_BUFFER; if (usage.storage) return c.GL_SHADER_STORAGE_BUFFER; if (usage.indirect) return c.GL_DRAW_INDIRECT_BUFFER; if (usage.query_resolve) return c.GL_QUERY_BUFFER; return c.GL_ARRAY_BUFFER; } pub fn glTargetForBufferBinding(binding_type: sysgpu.Buffer.BindingType) c.GLenum { return switch (binding_type) { .undefined => unreachable, .uniform => c.GL_UNIFORM_BUFFER, .storage => c.GL_SHADER_STORAGE_BUFFER, .read_only_storage => c.GL_SHADER_STORAGE_BUFFER, }; }
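These helpers are designed to be combined when translating a sysgpu vertex layout into vertex-array state: integer attribute formats must be routed through glVertexAttribIFormat (which takes no normalized flag), while float and normalized formats go through glVertexAttribFormat. A sketch of the intended call pattern, assuming this module is imported as `conv`, a populated `gl` dispatch table, a `sysgpu.VertexAttribute` value `attr`, and a hypothetical `utils.vertexFormatType` helper that maps a vertex format to its `utils.FormatType`:

const format_type = utils.vertexFormatType(attr.format); // assumed helper
const count = conv.glAttributeCount(attr.format);
const gl_type = conv.glAttributeType(attr.format);
if (conv.glAttributeIsInt(format_type)) {
    // Integer formats stay integral in the shader; no normalized flag exists.
    gl.vertexAttribIFormat(attr.shader_location, count, gl_type, @intCast(attr.offset));
} else {
    gl.vertexAttribFormat(
        attr.shader_location,
        count,
        gl_type,
        conv.glAttributeIsNormalized(format_type),
        @intCast(attr.offset),
    );
}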
0
repos/mach-sysgpu/src
repos/mach-sysgpu/src/metal/conv.zig
const mtl = @import("objc").metal.mtl; const sysgpu = @import("../sysgpu/main.zig"); pub fn metalBlendFactor(factor: sysgpu.BlendFactor, color: bool) mtl.BlendFactor { return switch (factor) { .zero => mtl.BlendFactorZero, .one => mtl.BlendFactorOne, .src => mtl.BlendFactorSourceColor, .one_minus_src => mtl.BlendFactorOneMinusSourceColor, .src_alpha => mtl.BlendFactorSourceAlpha, .one_minus_src_alpha => mtl.BlendFactorOneMinusSourceAlpha, .dst => mtl.BlendFactorDestinationColor, .one_minus_dst => mtl.BlendFactorOneMinusDestinationColor, .dst_alpha => mtl.BlendFactorDestinationAlpha, .one_minus_dst_alpha => mtl.BlendFactorOneMinusDestinationAlpha, .src_alpha_saturated => mtl.BlendFactorSourceAlphaSaturated, .constant => if (color) mtl.BlendFactorBlendColor else mtl.BlendFactorBlendAlpha, .one_minus_constant => if (color) mtl.BlendFactorOneMinusBlendColor else mtl.BlendFactorOneMinusBlendAlpha, .src1 => mtl.BlendFactorSource1Color, .one_minus_src1 => mtl.BlendFactorOneMinusSource1Color, .src1_alpha => mtl.BlendFactorSource1Alpha, .one_minus_src1_alpha => mtl.BlendFactorOneMinusSource1Alpha, }; } pub fn metalBlendOperation(op: sysgpu.BlendOperation) mtl.BlendOperation { return switch (op) { .add => mtl.BlendOperationAdd, .subtract => mtl.BlendOperationSubtract, .reverse_subtract => mtl.BlendOperationReverseSubtract, .min => mtl.BlendOperationMin, .max => mtl.BlendOperationMax, }; } pub fn metalColorWriteMask(mask: sysgpu.ColorWriteMaskFlags) mtl.ColorWriteMask { var writeMask = mtl.ColorWriteMaskNone; if (mask.red) writeMask |= mtl.ColorWriteMaskRed; if (mask.green) writeMask |= mtl.ColorWriteMaskGreen; if (mask.blue) writeMask |= mtl.ColorWriteMaskBlue; if (mask.alpha) writeMask |= mtl.ColorWriteMaskAlpha; return writeMask; } pub fn metalCommonCounter(name: sysgpu.PipelineStatisticName) mtl.CommonCounter { return switch (name) { .vertex_shader_invocations => mtl.CommonCounterVertexInvocations, .cliiper_invocations => mtl.CommonCounterClipperInvocations, .clipper_primitives_out => mtl.CommonCounterClipperPrimitivesOut, .fragment_shader_invocations => mtl.CommonCounterFragmentInvocations, .compute_shader_invocations => mtl.CommonCounterComputeKernelInvocations, }; } pub fn metalCompareFunction(func: sysgpu.CompareFunction) mtl.CompareFunction { return switch (func) { .undefined => unreachable, .never => mtl.CompareFunctionNever, .less => mtl.CompareFunctionLess, .less_equal => mtl.CompareFunctionLessEqual, .greater => mtl.CompareFunctionGreater, .greater_equal => mtl.CompareFunctionGreaterEqual, .equal => mtl.CompareFunctionEqual, .not_equal => mtl.CompareFunctionNotEqual, .always => mtl.CompareFunctionAlways, }; } pub fn metalCullMode(mode: sysgpu.CullMode) mtl.CullMode { return switch (mode) { .none => mtl.CullModeNone, .front => mtl.CullModeFront, .back => mtl.CullModeBack, }; } pub fn metalIndexType(format: sysgpu.IndexFormat) mtl.IndexType { return switch (format) { .undefined => unreachable, .uint16 => mtl.IndexTypeUInt16, .uint32 => mtl.IndexTypeUInt32, }; } pub fn metalIndexElementSize(format: sysgpu.IndexFormat) usize { return switch (format) { .undefined => unreachable, .uint16 => 2, .uint32 => 4, }; } pub fn metalLoadAction(op: sysgpu.LoadOp) mtl.LoadAction { return switch (op) { .undefined => unreachable, .load => mtl.LoadActionLoad, .clear => mtl.LoadActionClear, }; } pub fn metalPixelFormat(format: sysgpu.Texture.Format) mtl.PixelFormat { return switch (format) { .undefined => mtl.PixelFormatInvalid, .r8_unorm => mtl.PixelFormatR8Unorm, .r8_snorm => mtl.PixelFormatR8Snorm, 
.r8_uint => mtl.PixelFormatR8Uint, .r8_sint => mtl.PixelFormatR8Sint, .r16_uint => mtl.PixelFormatR16Uint, .r16_sint => mtl.PixelFormatR16Sint, .r16_float => mtl.PixelFormatR16Float, .rg8_unorm => mtl.PixelFormatRG8Unorm, .rg8_snorm => mtl.PixelFormatRG8Snorm, .rg8_uint => mtl.PixelFormatRG8Uint, .rg8_sint => mtl.PixelFormatRG8Sint, .r32_float => mtl.PixelFormatR32Float, .r32_uint => mtl.PixelFormatR32Uint, .r32_sint => mtl.PixelFormatR32Sint, .rg16_uint => mtl.PixelFormatRG16Uint, .rg16_sint => mtl.PixelFormatRG16Sint, .rg16_float => mtl.PixelFormatRG16Float, .rgba8_unorm => mtl.PixelFormatRGBA8Unorm, .rgba8_unorm_srgb => mtl.PixelFormatRGBA8Unorm_sRGB, .rgba8_snorm => mtl.PixelFormatRGBA8Snorm, .rgba8_uint => mtl.PixelFormatRGBA8Uint, .rgba8_sint => mtl.PixelFormatRGBA8Sint, .bgra8_unorm => mtl.PixelFormatBGRA8Unorm, .bgra8_unorm_srgb => mtl.PixelFormatBGRA8Unorm_sRGB, .rgb10_a2_unorm => mtl.PixelFormatRGB10A2Unorm, .rg11_b10_ufloat => mtl.PixelFormatRG11B10Float, .rgb9_e5_ufloat => mtl.PixelFormatRGB9E5Float, .rg32_float => mtl.PixelFormatRG32Float, .rg32_uint => mtl.PixelFormatRG32Uint, .rg32_sint => mtl.PixelFormatRG32Sint, .rgba16_uint => mtl.PixelFormatRGBA16Uint, .rgba16_sint => mtl.PixelFormatRGBA16Sint, .rgba16_float => mtl.PixelFormatRGBA16Float, .rgba32_float => mtl.PixelFormatRGBA32Float, .rgba32_uint => mtl.PixelFormatRGBA32Uint, .rgba32_sint => mtl.PixelFormatRGBA32Sint, .stencil8 => mtl.PixelFormatStencil8, .depth16_unorm => mtl.PixelFormatDepth16Unorm, .depth24_plus => mtl.PixelFormatDepth32Float, // mtl.PixelFormatDepth24Unorm_Stencil8 only for non-Apple Silicon .depth24_plus_stencil8 => mtl.PixelFormatDepth32Float_Stencil8, // mtl.PixelFormatDepth24Unorm_Stencil8 only for non-Apple Silicon .depth32_float => mtl.PixelFormatDepth32Float, .depth32_float_stencil8 => mtl.PixelFormatDepth32Float_Stencil8, .bc1_rgba_unorm => mtl.PixelFormatBC1_RGBA, .bc1_rgba_unorm_srgb => mtl.PixelFormatBC1_RGBA_sRGB, .bc2_rgba_unorm => mtl.PixelFormatBC2_RGBA, .bc2_rgba_unorm_srgb => mtl.PixelFormatBC2_RGBA_sRGB, .bc3_rgba_unorm => mtl.PixelFormatBC3_RGBA, .bc3_rgba_unorm_srgb => mtl.PixelFormatBC3_RGBA_sRGB, .bc4_runorm => mtl.PixelFormatBC4_RUnorm, .bc4_rsnorm => mtl.PixelFormatBC4_RSnorm, .bc5_rg_unorm => mtl.PixelFormatBC5_RGUnorm, .bc5_rg_snorm => mtl.PixelFormatBC5_RGSnorm, .bc6_hrgb_ufloat => mtl.PixelFormatBC6H_RGBUfloat, .bc6_hrgb_float => mtl.PixelFormatBC6H_RGBFloat, .bc7_rgba_unorm => mtl.PixelFormatBC7_RGBAUnorm, .bc7_rgba_unorm_srgb => mtl.PixelFormatBC7_RGBAUnorm_sRGB, .etc2_rgb8_unorm => mtl.PixelFormatETC2_RGB8, .etc2_rgb8_unorm_srgb => mtl.PixelFormatETC2_RGB8_sRGB, .etc2_rgb8_a1_unorm => mtl.PixelFormatETC2_RGB8A1, .etc2_rgb8_a1_unorm_srgb => mtl.PixelFormatETC2_RGB8A1_sRGB, .etc2_rgba8_unorm => mtl.PixelFormatEAC_RGBA8, .etc2_rgba8_unorm_srgb => mtl.PixelFormatEAC_RGBA8_sRGB, .eacr11_unorm => mtl.PixelFormatEAC_R11Unorm, .eacr11_snorm => mtl.PixelFormatEAC_R11Snorm, .eacrg11_unorm => mtl.PixelFormatEAC_RG11Unorm, .eacrg11_snorm => mtl.PixelFormatEAC_RG11Snorm, .astc4x4_unorm => mtl.PixelFormatASTC_4x4_LDR, .astc4x4_unorm_srgb => mtl.PixelFormatASTC_4x4_sRGB, .astc5x4_unorm => mtl.PixelFormatASTC_5x4_LDR, .astc5x4_unorm_srgb => mtl.PixelFormatASTC_5x4_sRGB, .astc5x5_unorm => mtl.PixelFormatASTC_5x5_LDR, .astc5x5_unorm_srgb => mtl.PixelFormatASTC_5x5_sRGB, .astc6x5_unorm => mtl.PixelFormatASTC_6x5_LDR, .astc6x5_unorm_srgb => mtl.PixelFormatASTC_6x5_sRGB, .astc6x6_unorm => mtl.PixelFormatASTC_6x6_LDR, .astc6x6_unorm_srgb => mtl.PixelFormatASTC_6x6_sRGB, .astc8x5_unorm => 
mtl.PixelFormatASTC_8x5_LDR, .astc8x5_unorm_srgb => mtl.PixelFormatASTC_8x5_sRGB, .astc8x6_unorm => mtl.PixelFormatASTC_8x6_LDR, .astc8x6_unorm_srgb => mtl.PixelFormatASTC_8x6_sRGB, .astc8x8_unorm => mtl.PixelFormatASTC_8x8_LDR, .astc8x8_unorm_srgb => mtl.PixelFormatASTC_8x8_sRGB, .astc10x5_unorm => mtl.PixelFormatASTC_10x5_LDR, .astc10x5_unorm_srgb => mtl.PixelFormatASTC_10x5_sRGB, .astc10x6_unorm => mtl.PixelFormatASTC_10x6_LDR, .astc10x6_unorm_srgb => mtl.PixelFormatASTC_10x6_sRGB, .astc10x8_unorm => mtl.PixelFormatASTC_10x8_LDR, .astc10x8_unorm_srgb => mtl.PixelFormatASTC_10x8_sRGB, .astc10x10_unorm => mtl.PixelFormatASTC_10x10_LDR, .astc10x10_unorm_srgb => mtl.PixelFormatASTC_10x10_sRGB, .astc12x10_unorm => mtl.PixelFormatASTC_12x10_LDR, .astc12x10_unorm_srgb => mtl.PixelFormatASTC_12x10_sRGB, .astc12x12_unorm => mtl.PixelFormatASTC_12x12_LDR, .astc12x12_unorm_srgb => mtl.PixelFormatASTC_12x12_sRGB, .r8_bg8_biplanar420_unorm => unreachable, }; } pub fn metalPixelFormatForView(viewFormat: sysgpu.Texture.Format, textureFormat: mtl.PixelFormat, aspect: sysgpu.Texture.Aspect) mtl.PixelFormat { // TODO - depth/stencil only views _ = aspect; _ = textureFormat; return metalPixelFormat(viewFormat); } pub fn metalPrimitiveTopologyClass(topology: sysgpu.PrimitiveTopology) mtl.PrimitiveTopologyClass { return switch (topology) { .point_list => mtl.PrimitiveTopologyClassPoint, .line_list => mtl.PrimitiveTopologyClassLine, .line_strip => mtl.PrimitiveTopologyClassLine, .triangle_list => mtl.PrimitiveTopologyClassTriangle, .triangle_strip => mtl.PrimitiveTopologyClassTriangle, }; } pub fn metalPrimitiveType(topology: sysgpu.PrimitiveTopology) mtl.PrimitiveType { return switch (topology) { .point_list => mtl.PrimitiveTypePoint, .line_list => mtl.PrimitiveTypeLine, .line_strip => mtl.PrimitiveTypeLineStrip, .triangle_list => mtl.PrimitiveTypeTriangle, .triangle_strip => mtl.PrimitiveTypeTriangleStrip, }; } pub fn metalResourceOptionsForBuffer(usage: sysgpu.Buffer.UsageFlags) mtl.ResourceOptions { const cpu_cache_mode = if (usage.map_write and !usage.map_read) mtl.ResourceCPUCacheModeWriteCombined else mtl.ResourceCPUCacheModeDefaultCache; const storage_mode = mtl.ResourceStorageModeShared; // optimizing for UMA only const hazard_tracking_mode = mtl.ResourceHazardTrackingModeDefault; return cpu_cache_mode | storage_mode | hazard_tracking_mode; } pub fn metalSamplerAddressMode(mode: sysgpu.Sampler.AddressMode) mtl.SamplerAddressMode { return switch (mode) { .repeat => mtl.SamplerAddressModeRepeat, .mirror_repeat => mtl.SamplerAddressModeMirrorRepeat, .clamp_to_edge => mtl.SamplerAddressModeClampToEdge, }; } pub fn metalSamplerMinMagFilter(mode: sysgpu.FilterMode) mtl.SamplerMinMagFilter { return switch (mode) { .nearest => mtl.SamplerMinMagFilterNearest, .linear => mtl.SamplerMinMagFilterLinear, }; } pub fn metalSamplerMipFilter(mode: sysgpu.MipmapFilterMode) mtl.SamplerMipFilter { return switch (mode) { .nearest => mtl.SamplerMipFilterNearest, .linear => mtl.SamplerMipFilterLinear, }; } pub fn metalStencilOperation(op: sysgpu.StencilOperation) mtl.StencilOperation { return switch (op) { .keep => mtl.StencilOperationKeep, .zero => mtl.StencilOperationZero, .replace => mtl.StencilOperationReplace, .invert => mtl.StencilOperationInvert, .increment_clamp => mtl.StencilOperationIncrementClamp, .decrement_clamp => mtl.StencilOperationDecrementClamp, .increment_wrap => mtl.StencilOperationIncrementWrap, .decrement_wrap => mtl.StencilOperationDecrementWrap, }; } pub fn metalStorageModeForTexture(usage: 
sysgpu.Texture.UsageFlags) mtl.StorageMode { if (usage.transient_attachment) { return mtl.StorageModeMemoryless; } else { return mtl.StorageModePrivate; } } pub fn metalStoreAction(op: sysgpu.StoreOp, has_resolve_target: bool) mtl.StoreAction { return switch (op) { .undefined => unreachable, .store => if (has_resolve_target) mtl.StoreActionStoreAndMultisampleResolve else mtl.StoreActionStore, .discard => if (has_resolve_target) mtl.StoreActionMultisampleResolve else mtl.StoreActionDontCare, }; } pub fn metalTextureType(dimension: sysgpu.Texture.Dimension, size: sysgpu.Extent3D, sample_count: u32) mtl.TextureType { return switch (dimension) { .dimension_1d => if (size.depth_or_array_layers > 1) mtl.TextureType1DArray else mtl.TextureType1D, .dimension_2d => if (sample_count > 1) if (size.depth_or_array_layers > 1) mtl.TextureType2DMultisampleArray else mtl.TextureType2DMultisample else if (size.depth_or_array_layers > 1) mtl.TextureType2DArray else mtl.TextureType2D, .dimension_3d => mtl.TextureType3D, }; } pub fn metalTextureTypeForView(dimension: sysgpu.TextureView.Dimension) mtl.TextureType { return switch (dimension) { .dimension_undefined => unreachable, .dimension_1d => mtl.TextureType1D, .dimension_2d => mtl.TextureType2D, .dimension_2d_array => mtl.TextureType2DArray, .dimension_cube => mtl.TextureTypeCube, .dimension_cube_array => mtl.TextureTypeCubeArray, .dimension_3d => mtl.TextureType3D, }; } pub fn metalTextureUsage(usage: sysgpu.Texture.UsageFlags, view_format_count: usize) mtl.TextureUsage { var mtl_usage = mtl.TextureUsageUnknown; if (usage.texture_binding) mtl_usage |= mtl.TextureUsageShaderRead; if (usage.storage_binding) mtl_usage |= mtl.TextureUsageShaderWrite; if (usage.render_attachment) mtl_usage |= mtl.TextureUsageRenderTarget; if (view_format_count > 0) mtl_usage |= mtl.TextureUsagePixelFormatView; return mtl_usage; } pub fn metalVertexFormat(format: sysgpu.VertexFormat) mtl.VertexFormat { return switch (format) { .undefined => mtl.VertexFormatInvalid, .uint8x2 => mtl.VertexFormatUChar2, .uint8x4 => mtl.VertexFormatUChar4, .sint8x2 => mtl.VertexFormatChar2, .sint8x4 => mtl.VertexFormatChar4, .unorm8x2 => mtl.VertexFormatUChar2Normalized, .unorm8x4 => mtl.VertexFormatUChar4Normalized, .snorm8x2 => mtl.VertexFormatChar2Normalized, .snorm8x4 => mtl.VertexFormatChar4Normalized, .uint16x2 => mtl.VertexFormatUShort2, .uint16x4 => mtl.VertexFormatUShort4, .sint16x2 => mtl.VertexFormatShort2, .sint16x4 => mtl.VertexFormatShort4, .unorm16x2 => mtl.VertexFormatUShort2Normalized, .unorm16x4 => mtl.VertexFormatUShort4Normalized, .snorm16x2 => mtl.VertexFormatShort2Normalized, .snorm16x4 => mtl.VertexFormatShort4Normalized, .float16x2 => mtl.VertexFormatHalf2, .float16x4 => mtl.VertexFormatHalf4, .float32 => mtl.VertexFormatFloat, .float32x2 => mtl.VertexFormatFloat2, .float32x3 => mtl.VertexFormatFloat3, .float32x4 => mtl.VertexFormatFloat4, .uint32 => mtl.VertexFormatUInt, .uint32x2 => mtl.VertexFormatUInt2, .uint32x3 => mtl.VertexFormatUInt3, .uint32x4 => mtl.VertexFormatUInt4, .sint32 => mtl.VertexFormatInt, .sint32x2 => mtl.VertexFormatInt2, .sint32x3 => mtl.VertexFormatInt3, .sint32x4 => mtl.VertexFormatInt4, }; } pub fn metalVertexStepFunction(mode: sysgpu.VertexStepMode) mtl.VertexStepFunction { return switch (mode) { .vertex => mtl.VertexStepFunctionPerVertex, .instance => mtl.VertexStepFunctionPerInstance, .vertex_buffer_not_used => undefined, }; } pub fn metalWinding(face: sysgpu.FrontFace) mtl.Winding { return switch (face) { .ccw => mtl.WindingCounterClockwise, .cw 
=> mtl.WindingClockwise, }; }
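One detail worth calling out: the `color` flag on metalBlendFactor exists because Metal splits the constant blend factors by channel, so the same sysgpu `constant` factor becomes BlendFactorBlendColor on the RGB path and BlendFactorBlendAlpha on the alpha path. Schematically, a color-attachment blend setup would call it twice per factor; the `attachment` descriptor and `blend: sysgpu.BlendState` are assumptions, and the property assignments stand in for the actual objc setter calls:

// RGB stage: color == true
attachment.sourceRGBBlendFactor = conv.metalBlendFactor(blend.color.src_factor, true);
attachment.destinationRGBBlendFactor = conv.metalBlendFactor(blend.color.dst_factor, true);
attachment.rgbBlendOperation = conv.metalBlendOperation(blend.color.operation);
// Alpha stage: color == false
attachment.sourceAlphaBlendFactor = conv.metalBlendFactor(blend.alpha.src_factor, false);
attachment.destinationAlphaBlendFactor = conv.metalBlendFactor(blend.alpha.dst_factor, false);
attachment.alphaBlendOperation = conv.metalBlendOperation(blend.alpha.operation);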
0
repos/mach-sysgpu/src
repos/mach-sysgpu/src/vulkan/proc.zig
const std = @import("std");
const builtin = @import("builtin");
const vk = @import("vulkan");

pub const BaseFunctions = vk.BaseWrapper(.{
    .createInstance = true,
    .enumerateInstanceExtensionProperties = true,
    .enumerateInstanceLayerProperties = true,
    .getInstanceProcAddr = true,
});

pub const InstanceFunctions = vk.InstanceWrapper(.{
    .createDevice = true,
    // TODO: renderdoc will not work with wayland
    // .createWaylandSurfaceKHR = builtin.target.os.tag == .linux,
    .createWin32SurfaceKHR = builtin.target.os.tag == .windows,
    .createXlibSurfaceKHR = builtin.target.os.tag == .linux,
    .destroyInstance = true,
    .destroySurfaceKHR = true,
    .enumerateDeviceExtensionProperties = true,
    .enumerateDeviceLayerProperties = true,
    .enumeratePhysicalDevices = true,
    .getDeviceProcAddr = true,
    .getPhysicalDeviceFeatures = true,
    .getPhysicalDeviceFormatProperties = true,
    .getPhysicalDeviceProperties = true,
    .getPhysicalDeviceMemoryProperties = true,
    .getPhysicalDeviceQueueFamilyProperties = true,
    .getPhysicalDeviceSurfaceCapabilitiesKHR = true,
    .getPhysicalDeviceSurfaceFormatsKHR = true,
});

pub const DeviceFunctions = vk.DeviceWrapper(.{
    .acquireNextImageKHR = true,
    .allocateCommandBuffers = true,
    .allocateDescriptorSets = true,
    .allocateMemory = true,
    .beginCommandBuffer = true,
    .bindBufferMemory = true,
    .bindImageMemory = true,
    .cmdBeginRenderPass = true,
    .cmdBindDescriptorSets = true,
    .cmdBindIndexBuffer = true,
    .cmdBindPipeline = true,
    .cmdBindVertexBuffers = true,
    .cmdCopyBuffer = true,
    .cmdCopyBufferToImage = true,
    .cmdCopyImage = true,
    .cmdDispatch = true,
    .cmdDraw = true,
    .cmdDrawIndexed = true,
    .cmdEndRenderPass = true,
    .cmdPipelineBarrier = true,
    .cmdSetScissor = true,
    .cmdSetStencilReference = true,
    .cmdSetViewport = true,
    .createBuffer = true,
    .createCommandPool = true,
    .createComputePipelines = true,
    .createDescriptorPool = true,
    .createDescriptorSetLayout = true,
    .createFence = true,
    .createFramebuffer = true,
    .createGraphicsPipelines = true,
    .createImage = true,
    .createImageView = true,
    .createPipelineLayout = true,
    .createRenderPass = true,
    .createSampler = true,
    .createSemaphore = true,
    .createShaderModule = true,
    .createSwapchainKHR = true,
    .destroyBuffer = true,
    .destroyCommandPool = true,
    .destroyDescriptorPool = true,
    .destroyDescriptorSetLayout = true,
    .destroyDevice = true,
    .destroyFence = true,
    .destroyFramebuffer = true,
    .destroyImage = true,
    .destroyImageView = true,
    .destroyPipeline = true,
    .destroyPipelineLayout = true,
    .destroyRenderPass = true,
    .destroySampler = true,
    .destroySemaphore = true,
    .destroyShaderModule = true,
    .destroySwapchainKHR = true,
    .deviceWaitIdle = true,
    .endCommandBuffer = true,
    .freeCommandBuffers = true,
    .freeDescriptorSets = true,
    .freeMemory = true,
    .getBufferMemoryRequirements = true,
    .getDeviceQueue = true,
    .getFenceStatus = true,
    .getImageMemoryRequirements = true,
    .getSwapchainImagesKHR = true,
    .mapMemory = true,
    .queuePresentKHR = true,
    .queueSubmit = true,
    .queueWaitIdle = true,
    .resetCommandBuffer = true,
    .resetFences = true,
    .unmapMemory = true,
    .updateDescriptorSets = true,
    .waitForFences = true,
});

pub const BaseLoader = *const fn (vk.Instance, [*:0]const u8) vk.PfnVoidFunction;

pub fn loadBase(baseLoader: BaseLoader) !BaseFunctions {
    return BaseFunctions.load(baseLoader) catch return error.ProcLoadingFailed;
}

pub fn loadInstance(instance: vk.Instance, instanceLoader: vk.PfnGetInstanceProcAddr) !InstanceFunctions {
    return InstanceFunctions.load(instance, instanceLoader) catch return error.ProcLoadingFailed;
}

pub fn loadDevice(device: vk.Device, deviceLoader: vk.PfnGetDeviceProcAddr) !DeviceFunctions {
    return DeviceFunctions.load(device, deviceLoader) catch return error.ProcLoadingFailed;
}
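The three wrapper levels load in a fixed order: base functions through the loader's vkGetInstanceProcAddr, instance functions once an instance exists, and device functions once a device exists. A sketch of the bootstrap chain, assuming the library is opened with std.DynLib and that `instance_create_info`, `physical_device`, and `device_create_info` are filled in elsewhere:

var lib = try std.DynLib.open("libvulkan.so.1"); // "vulkan-1.dll" on Windows
const get_instance_proc = lib.lookup(vk.PfnGetInstanceProcAddr, "vkGetInstanceProcAddr") orelse
    return error.ProcLoadingFailed;

const vkb = try loadBase(@ptrCast(get_instance_proc));
const instance = try vkb.createInstance(&instance_create_info, null);

const vki = try loadInstance(instance, get_instance_proc);
const device = try vki.createDevice(physical_device, &device_create_info, null);

// The device-level loader is fetched through the instance dispatch table.
const vkd = try loadDevice(device, vki.dispatch.vkGetDeviceProcAddr);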
0
repos/mach-sysgpu/src
repos/mach-sysgpu/src/vulkan/conv.zig
const vk = @import("vulkan"); const sysgpu = @import("../sysgpu/main.zig"); const vulkan = @import("../vulkan.zig"); const utils = @import("../utils.zig"); pub fn stencilEnable(stencil: sysgpu.StencilFaceState) bool { return stencil.compare != .always or stencil.fail_op != .keep or stencil.depth_fail_op != .keep or stencil.pass_op != .keep; } pub fn sysgpuAdapterType(device_type: vk.PhysicalDeviceType) sysgpu.Adapter.Type { return switch (device_type) { .integrated_gpu => .integrated_gpu, .discrete_gpu => .discrete_gpu, .cpu => .cpu, else => .unknown, }; } pub fn vulkanAccessFlagsForBufferRead(usage: sysgpu.Buffer.UsageFlags) vk.AccessFlags { return .{ .indirect_command_read_bit = usage.indirect, .index_read_bit = usage.index, .vertex_attribute_read_bit = usage.vertex, .uniform_read_bit = usage.uniform, .shader_read_bit = usage.storage, .host_read_bit = usage.map_read, }; } pub fn vulkanAccessFlagsForImageRead(usage: sysgpu.Texture.UsageFlags, format: sysgpu.Texture.Format) vk.AccessFlags { return .{ .shader_read_bit = usage.texture_binding or usage.storage_binding, .color_attachment_read_bit = usage.render_attachment and !utils.formatHasDepthOrStencil(format), .depth_stencil_attachment_read_bit = usage.render_attachment and utils.formatHasDepthOrStencil(format), }; } pub fn vulkanBlendOp(op: sysgpu.BlendOperation) vk.BlendOp { return switch (op) { .add => .add, .subtract => .subtract, .reverse_subtract => .reverse_subtract, .min => .min, .max => .max, }; } pub fn vulkanBlendFactor(op: sysgpu.BlendFactor, color: bool) vk.BlendFactor { return switch (op) { .zero => .zero, .one => .one, .src => .src_color, .one_minus_src => .one_minus_src_color, .src_alpha => .src_alpha, .one_minus_src_alpha => .one_minus_src_alpha, .dst => .dst_color, .one_minus_dst => .one_minus_dst_color, .dst_alpha => .dst_alpha, .one_minus_dst_alpha => .one_minus_dst_alpha, .src_alpha_saturated => .src_alpha_saturate, .constant => if (color) .constant_color else .constant_alpha, .one_minus_constant => if (color) .one_minus_constant_color else .one_minus_constant_alpha, .src1 => .src1_color, .one_minus_src1 => .one_minus_src1_color, .src1_alpha => .src1_alpha, .one_minus_src1_alpha => .one_minus_src1_alpha, }; } pub fn vulkanBufferUsageFlags(flags: sysgpu.Buffer.UsageFlags) vk.BufferUsageFlags { return .{ .transfer_src_bit = flags.copy_src, .transfer_dst_bit = flags.copy_dst or flags.query_resolve, .uniform_buffer_bit = flags.uniform, .storage_buffer_bit = flags.storage, .index_buffer_bit = flags.index, .vertex_buffer_bit = flags.vertex, .indirect_buffer_bit = flags.indirect, }; } pub fn vulkanCompareOp(op: sysgpu.CompareFunction) vk.CompareOp { return switch (op) { .never => .never, .less => .less, .less_equal => .less_or_equal, .greater => .greater, .greater_equal => .greater_or_equal, .equal => .equal, .not_equal => .not_equal, .always => .always, .undefined => unreachable, }; } pub fn vulkanCullMode(cull_mode: sysgpu.CullMode) vk.CullModeFlags { return .{ .front_bit = cull_mode == .front, .back_bit = cull_mode == .back, }; } pub fn vulkanDepthBias(ds: ?*const sysgpu.DepthStencilState) f32 { if (ds == null) return 0; return @floatFromInt(ds.?.depth_bias); } pub fn vulkanDepthBiasClamp(ds: ?*const sysgpu.DepthStencilState) f32 { if (ds == null) return 0; return ds.?.depth_bias_clamp; } pub fn vulkanDepthBiasSlopeScale(ds: ?*const sysgpu.DepthStencilState) f32 { if (ds == null) return 0; return ds.?.depth_bias_slope_scale; } pub fn vulkanDescriptorType(entry: sysgpu.BindGroupLayout.Entry) vk.DescriptorType { switch 
(entry.buffer.type) {
        .undefined => {},
        .uniform => if (entry.buffer.has_dynamic_offset == .true) {
            return .uniform_buffer_dynamic;
        } else {
            return .uniform_buffer;
        },
        .storage,
        .read_only_storage,
        => if (entry.buffer.has_dynamic_offset == .true) {
            return .storage_buffer_dynamic;
        } else {
            return .storage_buffer;
        },
    }

    switch (entry.sampler.type) {
        .undefined => {},
        else => return .sampler,
    }

    switch (entry.texture.sample_type) {
        .undefined => {},
        else => return .sampled_image,
    }

    switch (entry.storage_texture.format) {
        .undefined => {},
        else => return .storage_image,
    }

    unreachable;
}

pub fn vulkanFilter(filter: sysgpu.FilterMode) vk.Filter {
    return switch (filter) {
        .nearest => .nearest,
        .linear => .linear,
    };
}

pub fn vulkanFormat(device: *const vulkan.Device, format: sysgpu.Texture.Format) vk.Format {
    return switch (format) {
        .r8_unorm => .r8_unorm,
        .r8_snorm => .r8_snorm,
        .r8_uint => .r8_uint,
        .r8_sint => .r8_sint,
        .r16_uint => .r16_uint,
        .r16_sint => .r16_sint,
        .r16_float => .r16_sfloat,
        .rg8_unorm => .r8g8_unorm,
        .rg8_snorm => .r8g8_snorm,
        .rg8_uint => .r8g8_uint,
        .rg8_sint => .r8g8_sint,
        .r32_float => .r32_sfloat,
        .r32_uint => .r32_uint,
        .r32_sint => .r32_sint,
        .rg16_uint => .r16g16_uint,
        .rg16_sint => .r16g16_sint,
        .rg16_float => .r16g16_sfloat,
        .rgba8_unorm => .r8g8b8a8_unorm,
        .rgba8_unorm_srgb => .r8g8b8a8_srgb,
        .rgba8_snorm => .r8g8b8a8_snorm,
        .rgba8_uint => .r8g8b8a8_uint,
        .rgba8_sint => .r8g8b8a8_sint,
        .bgra8_unorm => .b8g8r8a8_unorm,
        .bgra8_unorm_srgb => .b8g8r8a8_srgb,
        .rgb10_a2_unorm => .a2r10g10b10_unorm_pack32,
        .rg11_b10_ufloat => .b10g11r11_ufloat_pack32,
        .rgb9_e5_ufloat => .e5b9g9r9_ufloat_pack32,
        .rg32_float => .r32g32_sfloat,
        .rg32_uint => .r32g32_uint,
        .rg32_sint => .r32g32_sint,
        .rgba16_uint => .r16g16b16a16_uint,
        .rgba16_sint => .r16g16b16a16_sint,
        .rgba16_float => .r16g16b16a16_sfloat,
        .rgba32_float => .r32g32b32a32_sfloat,
        .rgba32_uint => .r32g32b32a32_uint,
        .rgba32_sint => .r32g32b32a32_sint,
        .stencil8 => if (device.supported_ds_formats.get(.s8_uint) != null) {
            return .s8_uint;
        } else {
            return vulkanFormat(device, .depth24_plus_stencil8);
        },
        .depth16_unorm => .d16_unorm,
        .depth24_plus => .d32_sfloat,
        .depth24_plus_stencil8 => if (device.supported_ds_formats.get(.d24_unorm_s8_uint) != null) {
            return .d24_unorm_s8_uint;
        } else {
            return .d32_sfloat_s8_uint;
        },
        .depth32_float => .d32_sfloat,
        .depth32_float_stencil8 => .d32_sfloat_s8_uint,
        .bc1_rgba_unorm => .bc1_rgba_unorm_block,
        .bc1_rgba_unorm_srgb => .bc1_rgba_srgb_block,
        .bc2_rgba_unorm => .bc2_unorm_block,
        .bc2_rgba_unorm_srgb => .bc2_srgb_block,
        .bc3_rgba_unorm => .bc3_unorm_block,
        .bc3_rgba_unorm_srgb => .bc3_srgb_block,
        .bc4_runorm => .bc4_unorm_block,
        .bc4_rsnorm => .bc4_snorm_block,
        .bc5_rg_unorm => .bc5_unorm_block,
        .bc5_rg_snorm => .bc5_snorm_block,
        .bc6_hrgb_ufloat => .bc6h_ufloat_block,
        .bc6_hrgb_float => .bc6h_sfloat_block,
        .bc7_rgba_unorm => .bc7_unorm_block,
        .bc7_rgba_unorm_srgb => .bc7_srgb_block,
        .etc2_rgb8_unorm => .etc2_r8g8b8_unorm_block,
        .etc2_rgb8_unorm_srgb => .etc2_r8g8b8_srgb_block,
        .etc2_rgb8_a1_unorm => .etc2_r8g8b8a1_unorm_block,
        .etc2_rgb8_a1_unorm_srgb => .etc2_r8g8b8a1_srgb_block,
        .etc2_rgba8_unorm => .etc2_r8g8b8a8_unorm_block,
        .etc2_rgba8_unorm_srgb => .etc2_r8g8b8a8_srgb_block,
        .eacr11_unorm => .eac_r11_unorm_block,
        .eacr11_snorm => .eac_r11_snorm_block,
        .eacrg11_unorm => .eac_r11g11_unorm_block,
        .eacrg11_snorm => .eac_r11g11_snorm_block,
        .astc4x4_unorm => .astc_4x_4_unorm_block,
        .astc4x4_unorm_srgb => .astc_4x_4_srgb_block,
        .astc5x4_unorm => .astc_5x_4_unorm_block,
        .astc5x4_unorm_srgb => .astc_5x_4_srgb_block,
        .astc5x5_unorm => .astc_5x_5_unorm_block,
        .astc5x5_unorm_srgb => .astc_5x_5_srgb_block,
        .astc6x5_unorm => .astc_6x_5_unorm_block,
        .astc6x5_unorm_srgb => .astc_6x_5_srgb_block,
        .astc6x6_unorm => .astc_6x_6_unorm_block,
        .astc6x6_unorm_srgb => .astc_6x_6_srgb_block,
        .astc8x5_unorm => .astc_8x_5_unorm_block,
        .astc8x5_unorm_srgb => .astc_8x_5_srgb_block,
        .astc8x6_unorm => .astc_8x_6_unorm_block,
        .astc8x6_unorm_srgb => .astc_8x_6_srgb_block,
        .astc8x8_unorm => .astc_8x_8_unorm_block,
        .astc8x8_unorm_srgb => .astc_8x_8_srgb_block,
        .astc10x5_unorm => .astc_1_0x_5_unorm_block,
        .astc10x5_unorm_srgb => .astc_1_0x_5_srgb_block,
        .astc10x6_unorm => .astc_1_0x_6_unorm_block,
        .astc10x6_unorm_srgb => .astc_1_0x_6_srgb_block,
        .astc10x8_unorm => .astc_1_0x_8_unorm_block,
        .astc10x8_unorm_srgb => .astc_1_0x_8_srgb_block,
        .astc10x10_unorm => .astc_1_0x_10_unorm_block,
        .astc10x10_unorm_srgb => .astc_1_0x_10_srgb_block,
        .astc12x10_unorm => .astc_1_2x_10_unorm_block,
        .astc12x10_unorm_srgb => .astc_1_2x_10_srgb_block,
        .astc12x12_unorm => .astc_1_2x_12_unorm_block,
        .astc12x12_unorm_srgb => .astc_1_2x_12_srgb_block,
        .r8_bg8_biplanar420_unorm => .g8_b8r8_2plane_420_unorm,
        .undefined => unreachable,
    };
}

pub fn vulkanFrontFace(front_face: sysgpu.FrontFace) vk.FrontFace {
    return switch (front_face) {
        .ccw => vk.FrontFace.counter_clockwise,
        .cw => vk.FrontFace.clockwise,
    };
}

pub fn vulkanImageAspectFlags(aspect: sysgpu.Texture.Aspect, format: sysgpu.Texture.Format) vk.ImageAspectFlags {
    return switch (aspect) {
        .all => vulkanImageAspectFlagsForFormat(format),
        .stencil_only => .{ .stencil_bit = true },
        .depth_only => .{ .depth_bit = true },
        .plane0_only => .{ .plane_0_bit = true },
        .plane1_only => .{ .plane_1_bit = true },
    };
}

pub fn vulkanImageAspectFlagsForFormat(format: sysgpu.Texture.Format) vk.ImageAspectFlags {
    return switch (format) {
        .stencil8 => .{ .stencil_bit = true },
        .depth16_unorm, .depth24_plus, .depth32_float => .{ .depth_bit = true },
        .depth24_plus_stencil8, .depth32_float_stencil8 => .{ .depth_bit = true, .stencil_bit = true },
        .r8_bg8_biplanar420_unorm => .{ .plane_0_bit = true, .plane_1_bit = true },
        else => .{ .color_bit = true },
    };
}

pub fn vulkanImageCreateFlags(cube_compatible: bool, view_format_count: usize) vk.ImageCreateFlags {
    return .{
        .mutable_format_bit = view_format_count > 0,
        .cube_compatible_bit = cube_compatible,
    };
}

pub fn vulkanImageLayoutForRead(usage: sysgpu.Texture.UsageFlags, format: sysgpu.Texture.Format) vk.ImageLayout {
    // In case where we do not read, use an appropriate write state to avoid unnecessary layout changes
    return if (usage.texture_binding)
        .shader_read_only_optimal
    else if (usage.render_attachment and utils.formatHasDepthOrStencil(format))
        .depth_stencil_attachment_optimal
    else if (usage.render_attachment)
        .color_attachment_optimal
    else
        .general;
}

pub fn vulkanImageLayoutForTextureBinding(sample_type: sysgpu.Texture.SampleType) vk.ImageLayout {
    return switch (sample_type) {
        .undefined => .general,
        else => .shader_read_only_optimal,
    };
}

pub fn vulkanImageType(dimension: sysgpu.Texture.Dimension) vk.ImageType {
    return switch (dimension) {
        .dimension_1d => .@"1d",
        .dimension_2d => .@"2d",
        .dimension_3d => .@"3d",
    };
}

pub fn vulkanImageUsageFlags(usage: sysgpu.Texture.UsageFlags, format: sysgpu.Texture.Format) vk.ImageUsageFlags {
    return .{
        .transfer_src_bit = usage.copy_src,
        .transfer_dst_bit = usage.copy_dst,
        .sampled_bit = usage.texture_binding,
        .storage_bit = usage.storage_binding,
        .color_attachment_bit = usage.render_attachment and !utils.formatHasDepthOrStencil(format),
        .transient_attachment_bit = usage.transient_attachment,
        .depth_stencil_attachment_bit = usage.render_attachment and utils.formatHasDepthOrStencil(format),
    };
}

pub fn vulkanImageViewType(dimension: sysgpu.TextureView.Dimension) vk.ImageViewType {
    return switch (dimension) {
        .dimension_undefined => unreachable,
        .dimension_1d => .@"1d",
        .dimension_2d => .@"2d",
        .dimension_2d_array => .@"2d_array",
        .dimension_cube => .cube,
        .dimension_cube_array => .cube_array,
        .dimension_3d => .@"3d",
    };
}

pub fn vulkanIndexType(format: sysgpu.IndexFormat) vk.IndexType {
    return switch (format) {
        .undefined => unreachable,
        .uint16 => .uint16,
        .uint32 => .uint32,
    };
}

pub fn vulkanLoadOp(op: sysgpu.LoadOp) vk.AttachmentLoadOp {
    return switch (op) {
        .load => .load,
        .clear => .clear,
        .undefined => .dont_care,
    };
}

pub fn vulkanPipelineStageFlagsForBufferRead(usage: sysgpu.Buffer.UsageFlags) vk.PipelineStageFlags {
    return .{
        .draw_indirect_bit = usage.indirect,
        .vertex_input_bit = usage.index or usage.vertex,
        .vertex_shader_bit = usage.uniform or usage.storage,
        .fragment_shader_bit = usage.uniform or usage.storage,
        .compute_shader_bit = usage.uniform or usage.storage,
        .host_bit = usage.map_read,
    };
}

pub fn vulkanPipelineStageFlagsForImageRead(usage: sysgpu.Texture.UsageFlags, format: sysgpu.Texture.Format) vk.PipelineStageFlags {
    return .{
        .vertex_shader_bit = usage.texture_binding or usage.storage_binding,
        .fragment_shader_bit = usage.texture_binding or usage.storage_binding,
        .early_fragment_tests_bit = usage.render_attachment and utils.formatHasDepthOrStencil(format),
        .late_fragment_tests_bit = usage.render_attachment and utils.formatHasDepthOrStencil(format),
        .color_attachment_output_bit = usage.render_attachment and !utils.formatHasDepthOrStencil(format),
        .compute_shader_bit = usage.texture_binding or usage.storage_binding,
    };
}

pub fn vulkanPrimitiveTopology(topology: sysgpu.PrimitiveTopology) vk.PrimitiveTopology {
    return switch (topology) {
        .point_list => .point_list,
        .line_list => .line_list,
        .line_strip => .line_strip,
        .triangle_list => .triangle_list,
        .triangle_strip => .triangle_strip,
    };
}

pub fn vulkanPresentMode(present_mode: sysgpu.PresentMode) vk.PresentModeKHR {
    return switch (present_mode) {
        .immediate => .immediate_khr,
        .fifo => .fifo_khr,
        .mailbox => .mailbox_khr,
    };
}

pub fn vulkanSampleCount(samples: u32) vk.SampleCountFlags {
    // TODO: https://github.com/Snektron/vulkan-zig/issues/27
    return switch (samples) {
        1 => .{ .@"1_bit" = true },
        2 => .{ .@"2_bit" = true },
        4 => .{ .@"4_bit" = true },
        8 => .{ .@"8_bit" = true },
        16 => .{ .@"16_bit" = true },
        32 => .{ .@"32_bit" = true },
        else => unreachable,
    };
}

pub fn vulkanSamplerAddressMode(address_mode: sysgpu.Sampler.AddressMode) vk.SamplerAddressMode {
    return switch (address_mode) {
        .repeat => .repeat,
        .mirror_repeat => .mirrored_repeat,
        .clamp_to_edge => .clamp_to_edge,
    };
}

pub fn vulkanSamplerMipmapMode(filter: sysgpu.MipmapFilterMode) vk.SamplerMipmapMode {
    return switch (filter) {
        .nearest => .nearest,
        .linear => .linear,
    };
}

pub fn vulkanShaderStageFlags(flags: sysgpu.ShaderStageFlags) vk.ShaderStageFlags {
    return .{
        .vertex_bit = flags.vertex,
        .fragment_bit = flags.fragment,
        .compute_bit = flags.compute,
    };
}

pub fn vulkanStencilOp(op: sysgpu.StencilOperation) vk.StencilOp {
    return switch (op) {
        .keep => .keep,
        .zero => .zero,
        .replace => .replace,
        .invert => .invert,
        .increment_clamp => .increment_and_clamp,
        .decrement_clamp => .decrement_and_clamp,
        .increment_wrap => .increment_and_wrap,
        .decrement_wrap => .decrement_and_wrap,
    };
}

pub fn vulkanStoreOp(op: sysgpu.StoreOp) vk.AttachmentStoreOp {
    return switch (op) {
        .store => .store,
        .discard => .dont_care,
        .undefined => .dont_care,
    };
}

pub fn vulkanVertexFormat(format: sysgpu.VertexFormat) vk.Format {
    return switch (format) {
        .uint8x2 => .r8g8_uint,
        .uint8x4 => .r8g8b8a8_uint,
        .sint8x2 => .r8g8_sint,
        .sint8x4 => .r8g8b8a8_sint,
        .unorm8x2 => .r8g8_unorm,
        .unorm8x4 => .r8g8b8a8_unorm,
        .snorm8x2 => .r8g8_snorm,
        .snorm8x4 => .r8g8b8a8_snorm,
        .uint16x2 => .r16g16_uint,
        .uint16x4 => .r16g16b16a16_uint,
        .sint16x2 => .r16g16_sint,
        .sint16x4 => .r16g16b16a16_sint,
        .unorm16x2 => .r16g16_unorm,
        .unorm16x4 => .r16g16b16a16_unorm,
        .snorm16x2 => .r16g16_snorm,
        .snorm16x4 => .r16g16b16a16_snorm,
        .float16x2 => .r16g16_sfloat,
        .float16x4 => .r16g16b16a16_sfloat,
        .float32 => .r32_sfloat,
        .float32x2 => .r32g32_sfloat,
        .float32x3 => .r32g32b32_sfloat,
        .float32x4 => .r32g32b32a32_sfloat,
        .uint32 => .r32_uint,
        .uint32x2 => .r32g32_uint,
        .uint32x3 => .r32g32b32_uint,
        .uint32x4 => .r32g32b32a32_uint,
        .sint32 => .r32_sint,
        .sint32x2 => .r32g32_sint,
        .sint32x3 => .r32g32b32_sint,
        .sint32x4 => .r32g32b32a32_sint,
        .undefined => unreachable,
    };
}

pub fn vulkanVertexInputRate(step_mode: sysgpu.VertexStepMode) vk.VertexInputRate {
    return switch (step_mode) {
        .vertex => .vertex,
        .instance => .instance,
        .vertex_buffer_not_used => unreachable,
    };
}
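Every helper in this file follows the same pattern: an exhaustive switch from a sysgpu enum to its Vulkan counterpart, so that adding a member to either enum becomes a compile error rather than a silent mismapping. The following stand-alone sketch (not part of the repository; FilterMode and VkFilter are illustrative stand-ins for the real sysgpu.FilterMode and vk.Filter types) demonstrates that property:

const std = @import("std");

// Stand-in enums, assumed for illustration only; the real types live in
// sysgpu and vulkan-zig.
const FilterMode = enum { nearest, linear };
const VkFilter = enum { nearest, linear };

// Exhaustive switch: if FilterMode later gains a member, this function
// no longer compiles, which is exactly the safety net the conversion
// helpers above rely on.
fn toVkFilter(f: FilterMode) VkFilter {
    return switch (f) {
        .nearest => .nearest,
        .linear => .linear,
    };
}

test "exhaustive enum-to-enum conversion" {
    try std.testing.expectEqual(VkFilter.linear, toVkFilter(.linear));
}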
0
repos/mach-sysgpu/src
repos/mach-sysgpu/src/shader/Token.zig
const std = @import("std");

tag: Tag,
loc: Loc,

pub const Loc = struct {
    start: u32,
    end: u32,

    const Extra = struct {
        line: u32,
        col: u32,
        line_start: u32,
        line_end: u32,
    };

    pub fn slice(self: Loc, source: []const u8) []const u8 {
        return source[self.start..self.end];
    }

    pub fn extraInfo(self: Loc, source: []const u8) Extra {
        var result = Extra{
            .line = 1,
            .col = 1,
            .line_start = 0,
            .line_end = @intCast(source.len),
        };
        for (source[0..self.start], 0..) |c, i| {
            if (c == '\n') {
                result.line += 1;
                result.line_start = @as(u32, @intCast(i)) + 1;
            }
        }
        for (source[self.end..], 0..) |c, i| {
            if (c == '\n') {
                result.line_end = self.end + @as(u32, @intCast(i));
                break;
            }
        }
        result.col += self.start - result.line_start;
        return result;
    }
};

pub const Tag = enum {
    eof, invalid, ident, number,
    paren_left, paren_right, brace_left, brace_right, bracket_left, bracket_right,
    dot, comma, colon, semicolon, arrow, attr,
    equal, equal_equal, bang, bang_equal,
    ampersand, ampersand_equal, ampersand_ampersand,
    pipe, pipe_equal, pipe_pipe, tilde,
    plus, plus_equal, plus_plus, minus, minus_equal, minus_minus,
    asterisk, asterisk_equal, slash, slash_equal, percent, percent_equal,
    xor, xor_equal,
    angle_bracket_left, angle_bracket_left_equal,
    angle_bracket_angle_bracket_left, angle_bracket_angle_bracket_left_equal,
    angle_bracket_right, angle_bracket_right_equal,
    angle_bracket_angle_bracket_right, angle_bracket_angle_bracket_right_equal,
    underscore,
    k_enable, k_requires, k_fn, k_var, k_let, k_const, k_override, k_type,
    k_if, k_else, k_loop, k_while, k_for, k_break, k_continue, k_continuing,
    k_discard, k_switch, k_case, k_default, k_return, k_const_assert, k_bitcast,
    k_bool, k_u32, k_i32, k_f16, k_f32,
    k_vec2, k_vec3, k_vec4,
    k_mat2x2, k_mat2x3, k_mat2x4, k_mat3x2, k_mat3x3, k_mat3x4, k_mat4x2, k_mat4x3, k_mat4x4,
    k_ptr, k_array, k_atomic, k_struct, k_sampler, k_sampler_comparison,
    k_texture_depth_2d, k_texture_depth_2d_array, k_texture_depth_cube, k_texture_depth_cube_array,
    k_texture_depth_multisampled_2d, k_texture_external, k_texture_multisampled_2d,
    k_texture_1d, k_texture_2d, k_texture_2d_array, k_texture_3d, k_texture_cube, k_texture_cube_array,
    k_texture_storage_1d, k_texture_storage_2d, k_texture_storage_2d_array, k_texture_storage_3d,
    k_false, k_true,
    template_left, template_right,

    pub fn symbol(self: Tag) []const u8 {
        return switch (self) {
            .eof => "EOF",
            .invalid => "invalid bytes",
            .ident => "an identifier",
            .number => "a number literal",
            .paren_left => "(",
            .paren_right => ")",
            .brace_left => "{",
            .brace_right => "}",
            .bracket_left => "[",
            .bracket_right => "]",
            .dot => ".",
            .comma => ",",
            .colon => ":",
            .semicolon => ";",
            .arrow => "->",
            .attr => "@",
            .equal => "=",
            .equal_equal => "==",
            .bang => "!",
            .bang_equal => "!=",
            .ampersand => "&",
            .ampersand_equal => "&=",
            .ampersand_ampersand => "&&",
            .pipe => "|",
            .pipe_equal => "|=",
            .pipe_pipe => "||",
            .tilde => "~",
            .plus => "+",
            .plus_equal => "+=",
            .plus_plus => "++",
            .minus => "-",
            .minus_equal => "-=",
            .minus_minus => "--",
            .asterisk => "*",
            .asterisk_equal => "*=",
            .slash => "/",
            .slash_equal => "/=",
            .percent => "%",
            .percent_equal => "%=",
            .xor => "^",
            .xor_equal => "^=",
            .angle_bracket_left => "<",
            .angle_bracket_left_equal => "<=",
            .angle_bracket_angle_bracket_left => "<<",
            .angle_bracket_angle_bracket_left_equal => "<<=",
            .angle_bracket_right => ">",
            .angle_bracket_right_equal => ">=",
            .angle_bracket_angle_bracket_right => ">>",
            .angle_bracket_angle_bracket_right_equal => ">>=",
            .underscore => "_",
            .k_enable => "enable",
            .k_requires => "requires",
            .k_fn => "fn",
            .k_var => "var",
            .k_let => "let",
            .k_const => "const",
            .k_override => "override",
            .k_type => "type",
            .k_if => "if",
            .k_else => "else",
            .k_loop => "loop",
            .k_while => "while",
            .k_for => "for",
            .k_break => "break",
            .k_continue => "continue",
            .k_continuing => "continuing",
            .k_discard => "discard",
            .k_switch => "switch",
            .k_case => "case",
            .k_default => "default",
            .k_return => "return",
            .k_const_assert => "const_assert",
            .k_bitcast => "bitcast",
            .k_bool => "bool",
            .k_u32 => "u32",
            .k_i32 => "i32",
            .k_f16 => "f16",
            .k_f32 => "f32",
            .k_vec2 => "vec2",
            .k_vec3 => "vec3",
            .k_vec4 => "vec4",
            .k_mat2x2 => "mat2x2",
            .k_mat2x3 => "mat2x3",
            .k_mat2x4 => "mat2x4",
            .k_mat3x2 => "mat3x2",
            .k_mat3x3 => "mat3x3",
            .k_mat3x4 => "mat3x4",
            .k_mat4x2 => "mat4x2",
            .k_mat4x3 => "mat4x3",
            .k_mat4x4 => "mat4x4",
            .k_ptr => "ptr",
            .k_array => "array",
            .k_atomic => "atomic",
            .k_struct => "struct",
            .k_sampler => "sampler",
            .k_sampler_comparison => "sampler_comparison",
            .k_texture_depth_2d => "texture_depth_2d",
            .k_texture_depth_2d_array => "texture_depth_2d_array",
            .k_texture_depth_cube => "texture_depth_cube",
            .k_texture_depth_cube_array => "texture_depth_cube_array",
            .k_texture_depth_multisampled_2d => "texture_depth_multisampled_2d",
            .k_texture_external => "texture_external",
            .k_texture_multisampled_2d => "texture_multisampled_2d",
            .k_texture_1d => "texture_1d",
            .k_texture_2d => "texture_2d",
            .k_texture_2d_array => "texture_2d_array",
            .k_texture_3d => "texture_3d",
            .k_texture_cube => "texture_cube",
            .k_texture_cube_array => "texture_cube_array",
            .k_texture_storage_1d => "texture_storage_1d",
            .k_texture_storage_2d => "texture_storage_2d",
            .k_texture_storage_2d_array => "texture_storage_2d_array",
            .k_texture_storage_3d => "texture_storage_3d",
            .k_false => "false",
            .k_true => "true",
            .template_left => "<",
            .template_right => ">",
        };
    }
};

pub const keywords = std.ComptimeStringMap(Tag, .{
    .{ "enable", .k_enable }, .{ "requires", .k_requires }, .{ "fn", .k_fn },
    .{ "var", .k_var }, .{ "let", .k_let }, .{ "const", .k_const },
    .{ "override", .k_override }, .{ "type", .k_type }, .{ "if", .k_if },
    .{ "else", .k_else }, .{ "loop", .k_loop }, .{ "while", .k_while },
    .{ "for", .k_for }, .{ "break", .k_break }, .{ "continue", .k_continue },
    .{ "continuing", .k_continuing }, .{ "discard", .k_discard }, .{ "switch", .k_switch },
    .{ "case", .k_case }, .{ "default", .k_default }, .{ "return", .k_return },
    .{ "const_assert", .k_const_assert }, .{ "bitcast", .k_bitcast }, .{ "bool", .k_bool },
    .{ "u32", .k_u32 }, .{ "i32", .k_i32 }, .{ "f16", .k_f16 }, .{ "f32", .k_f32 },
    .{ "vec2", .k_vec2 }, .{ "vec3", .k_vec3 }, .{ "vec4", .k_vec4 },
    .{ "mat2x2", .k_mat2x2 }, .{ "mat2x3", .k_mat2x3 }, .{ "mat2x4", .k_mat2x4 },
    .{ "mat3x2", .k_mat3x2 }, .{ "mat3x3", .k_mat3x3 }, .{ "mat3x4", .k_mat3x4 },
    .{ "mat4x2", .k_mat4x2 }, .{ "mat4x3", .k_mat4x3 }, .{ "mat4x4", .k_mat4x4 },
    .{ "ptr", .k_ptr }, .{ "array", .k_array }, .{ "atomic", .k_atomic },
    .{ "struct", .k_struct }, .{ "sampler", .k_sampler },
    .{ "sampler_comparison", .k_sampler_comparison },
    .{ "texture_depth_2d", .k_texture_depth_2d },
    .{ "texture_depth_2d_array", .k_texture_depth_2d_array },
    .{ "texture_depth_cube", .k_texture_depth_cube },
    .{ "texture_depth_cube_array", .k_texture_depth_cube_array },
    .{ "texture_depth_multisampled_2d", .k_texture_depth_multisampled_2d },
    .{ "texture_external", .k_texture_external },
    .{ "texture_multisampled_2d", .k_texture_multisampled_2d },
    .{ "texture_1d", .k_texture_1d }, .{ "texture_2d", .k_texture_2d },
    .{ "texture_2d_array", .k_texture_2d_array }, .{ "texture_3d", .k_texture_3d },
    .{ "texture_cube", .k_texture_cube }, .{ "texture_cube_array", .k_texture_cube_array },
    .{ "texture_storage_1d", .k_texture_storage_1d }, .{ "texture_storage_2d", .k_texture_storage_2d },
    .{ "texture_storage_2d_array", .k_texture_storage_2d_array },
    .{ "texture_storage_3d", .k_texture_storage_3d },
    .{ "false", .k_false }, .{ "true", .k_true },
});

pub const reserved = blk: {
    @setEvalBranchQuota(3000);
    break :blk std.ComptimeStringMap(void, .{
        .{ "NULL", {} }, .{ "Self", {} }, .{ "abstract", {} }, .{ "active", {} }, .{ "alignas", {} },
        .{ "alignof", {} }, .{ "as", {} }, .{ "asm", {} }, .{ "asm_fragment", {} }, .{ "async", {} },
        .{ "attribute", {} }, .{ "auto", {} }, .{ "await", {} }, .{ "become", {} }, .{ "binding_array", {} },
        .{ "cast", {} }, .{ "catch", {} }, .{ "class", {} }, .{ "co_await", {} }, .{ "co_return", {} },
        .{ "co_yield", {} }, .{ "coherent", {} }, .{ "column_major", {} }, .{ "common", {} }, .{ "compile", {} },
        .{ "compile_fragment", {} }, .{ "concept", {} }, .{ "const_cast", {} }, .{ "consteval", {} }, .{ "constexpr", {} },
        .{ "constinit", {} }, .{ "crate", {} }, .{ "debugger", {} }, .{ "decltype", {} }, .{ "delete", {} },
        .{ "demote", {} }, .{ "demote_to_helper", {} }, .{ "do", {} }, .{ "dynamic_cast", {} }, .{ "enum", {} },
        .{ "explicit", {} }, .{ "export", {} }, .{ "extends", {} }, .{ "extern", {} }, .{ "external", {} },
        .{ "fallthrough", {} }, .{ "filter", {} }, .{ "final", {} }, .{ "finally", {} }, .{ "friend", {} },
        .{ "from", {} }, .{ "fxgroup", {} }, .{ "get", {} }, .{ "goto", {} }, .{ "groupshared", {} },
        .{ "highp", {} }, .{ "impl", {} }, .{ "implements", {} }, .{ "import", {} }, .{ "inline", {} },
        .{ "instanceof", {} }, .{ "interface", {} }, .{ "layout", {} }, .{ "lowp", {} }, .{ "macro", {} },
        .{ "macro_rules", {} }, .{ "match", {} }, .{ "mediump", {} }, .{ "meta", {} }, .{ "mod", {} },
        .{ "module", {} }, .{ "move", {} }, .{ "mut", {} }, .{ "mutable", {} }, .{ "namespace", {} },
        .{ "new", {} }, .{ "nil", {} }, .{ "noexcept", {} }, .{ "noinline", {} }, .{ "nointerpolation", {} },
        .{ "noperspective", {} }, .{ "null", {} }, .{ "nullptr", {} }, .{ "of", {} }, .{ "operator", {} },
        .{ "package", {} }, .{ "packoffset", {} }, .{ "partition", {} }, .{ "pass", {} }, .{ "patch", {} },
        .{ "pixelfragment", {} }, .{ "precise", {} }, .{ "precision", {} }, .{ "premerge", {} }, .{ "priv", {} },
        .{ "protected", {} }, .{ "pub", {} }, .{ "public", {} }, .{ "readonly", {} }, .{ "ref", {} },
        .{ "regardless", {} }, .{ "register", {} }, .{ "reinterpret_cast", {} }, .{ "require", {} }, .{ "resource", {} },
        .{ "restrict", {} }, .{ "self", {} }, .{ "set", {} }, .{ "shared", {} }, .{ "sizeof", {} },
        .{ "smooth", {} }, .{ "snorm", {} }, .{ "static", {} }, .{ "static_assert", {} }, .{ "static_cast", {} },
        .{ "std", {} }, .{ "subroutine", {} }, .{ "super", {} }, .{ "target", {} }, .{ "template", {} },
        .{ "this", {} }, .{ "thread_local", {} }, .{ "throw", {} }, .{ "trait", {} }, .{ "try", {} },
        .{ "type", {} }, .{ "typedef", {} }, .{ "typeid", {} }, .{ "typename", {} }, .{ "typeof", {} },
        .{ "union", {} }, .{ "unless", {} }, .{ "unorm", {} }, .{ "unsafe", {} }, .{ "unsized", {} },
        .{ "use", {} }, .{ "using", {} }, .{ "varying", {} }, .{ "virtual", {} }, .{ "volatile", {} },
        .{ "wgsl", {} }, .{ "where", {} }, .{ "with", {} }, .{ "writeonly", {} }, .{ "yield", {} },
    });
};
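Loc stores only byte offsets into the source; human-readable line and column numbers are recomputed on demand by extraInfo, which scans the source up to the token. A minimal sketch of a caller (hypothetical, not part of the repository) that formats a diagnostic from a token location:

const std = @import("std");
const Token = @import("Token.zig");

// Hypothetical diagnostic printer: the byte offsets in `loc` are resolved to
// 1-based line/column via Loc.extraInfo, and the offending text is recovered
// with Loc.slice.
fn report(loc: Token.Loc, source: []const u8, msg: []const u8) void {
    const info = loc.extraInfo(source);
    std.debug.print("{d}:{d}: {s}: '{s}'\n", .{ info.line, info.col, msg, loc.slice(source) });
}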
0
repos/mach-sysgpu/src
repos/mach-sysgpu/src/shader/Ast.zig
const std = @import("std");
const Parser = @import("Parser.zig");
const Token = @import("Token.zig");
const Tokenizer = @import("Tokenizer.zig");
const ErrorList = @import("ErrorList.zig");
const Extensions = @import("wgsl.zig").Extensions;
const Ast = @This();

pub const NodeList = std.MultiArrayList(Node);
pub const TokenList = std.MultiArrayList(Token);

source: []const u8,
tokens: TokenList.Slice,
nodes: NodeList.Slice,
extra: []const u32,
extensions: Extensions,

pub fn deinit(tree: *Ast, allocator: std.mem.Allocator) void {
    tree.tokens.deinit(allocator);
    tree.nodes.deinit(allocator);
    allocator.free(tree.extra);
    tree.* = undefined;
}

/// Parses a translation unit (WGSL program).
pub fn parse(allocator: std.mem.Allocator, errors: *ErrorList, source: [:0]const u8) error{ OutOfMemory, Parsing }!Ast {
    var p = Parser{
        .allocator = allocator,
        .source = source,
        .tokens = blk: {
            const estimated_tokens = source.len / 8;

            var tokens = std.MultiArrayList(Token){};
            errdefer tokens.deinit(allocator);
            try tokens.ensureTotalCapacity(allocator, estimated_tokens);

            var tokenizer = Tokenizer.init(source);
            while (true) {
                const tok = tokenizer.next();
                try tokens.append(allocator, tok);
                if (tok.tag == .eof) break;
            }

            break :blk tokens;
        },
        .errors = errors,
    };
    defer p.scratch.deinit(allocator);
    errdefer {
        p.tokens.deinit(allocator);
        p.nodes.deinit(allocator);
        p.extra.deinit(allocator);
    }

    const estimated_nodes = p.tokens.len / 2 + 1;
    try p.nodes.ensureTotalCapacity(allocator, estimated_nodes);

    try p.translationUnit();

    return .{
        .source = source,
        .tokens = p.tokens.toOwnedSlice(),
        .nodes = p.nodes.toOwnedSlice(),
        .extra = try p.extra.toOwnedSlice(allocator),
        .extensions = p.extensions,
    };
}

pub fn spanToList(tree: Ast, span: NodeIndex) []const NodeIndex {
    std.debug.assert(tree.nodeTag(span) == .span);
    return @ptrCast(tree.extra[@intFromEnum(tree.nodeLHS(span))..@intFromEnum(tree.nodeRHS(span))]);
}

pub fn extraData(tree: Ast, comptime T: type, index: NodeIndex) T {
    const fields = std.meta.fields(T);
    var result: T = undefined;
    inline for (fields, 0..) |field, i| {
        @field(result, field.name) = @enumFromInt(tree.extra[@intFromEnum(index) + i]);
    }
    return result;
}

pub fn tokenTag(tree: Ast, i: TokenIndex) Token.Tag {
    return tree.tokens.items(.tag)[@intFromEnum(i)];
}

pub fn tokenLoc(tree: Ast, i: TokenIndex) Token.Loc {
    return tree.tokens.items(.loc)[@intFromEnum(i)];
}

pub fn nodeTag(tree: Ast, i: NodeIndex) Node.Tag {
    return tree.nodes.items(.tag)[@intFromEnum(i)];
}

pub fn nodeToken(tree: Ast, i: NodeIndex) TokenIndex {
    return tree.nodes.items(.main_token)[@intFromEnum(i)];
}

pub fn nodeLHS(tree: Ast, i: NodeIndex) NodeIndex {
    return tree.nodes.items(.lhs)[@intFromEnum(i)];
}

pub fn nodeRHS(tree: Ast, i: NodeIndex) NodeIndex {
    return tree.nodes.items(.rhs)[@intFromEnum(i)];
}

pub fn nodeLoc(tree: Ast, i: NodeIndex) Token.Loc {
    var loc = tree.tokenLoc(tree.nodeToken(i));
    switch (tree.nodeTag(i)) {
        .deref, .addr_of => {
            const lhs_loc = tree.tokenLoc(tree.nodeToken(tree.nodeLHS(i)));
            loc.end = lhs_loc.end;
        },
        .field_access => {
            const component_loc = tree.tokenLoc(@enumFromInt(@intFromEnum(tree.nodeToken(i)) + 1));
            loc.end = component_loc.end;
        },
        else => {},
    }
    return loc;
}

pub fn declNameLoc(tree: Ast, node: NodeIndex) ?Token.Loc {
    const token: TokenIndex = switch (tree.nodeTag(node)) {
        .global_var => tree.extraData(Node.GlobalVar, tree.nodeLHS(node)).name,
        .@"var" => tree.extraData(Node.Var, tree.nodeLHS(node)).name,
        .@"struct",
        .@"fn",
        .@"const",
        .let,
        .override,
        .type_alias,
        => @enumFromInt(@intFromEnum(tree.nodeToken(node)) + 1),
        .struct_member, .fn_param => tree.nodeToken(node),
        else => return null,
    };
    return tree.tokenLoc(token);
}

pub const NodeIndex = enum(u32) {
    none = std.math.maxInt(u32),
    globals = 0,
    _,

    pub fn asTokenIndex(self: NodeIndex) TokenIndex {
        return @enumFromInt(@intFromEnum(self));
    }
};

pub const TokenIndex = enum(u32) {
    none = std.math.maxInt(u32),
    _,

    pub fn asNodeIndex(self: TokenIndex) NodeIndex {
        return @enumFromInt(@intFromEnum(self));
    }
};

pub const Node = struct {
    tag: Tag,
    main_token: TokenIndex,
    lhs: NodeIndex = .none,
    rhs: NodeIndex = .none,

    pub const Tag = enum {
        /// a slice of NodeIndex in extra [LHS..RHS]
        /// TOK : undefined
        /// LHS : NodeIndex
        /// RHS : NodeIndex
        span,
        /// TOK : k_var
        /// LHS : GlobalVar
        /// RHS : Expr?
        global_var,
        /// TOK : k_override
        /// LHS : Override
        /// RHS : Expr
        override,
        /// TOK : k_type
        /// LHS : Type
        /// RHS : --
        type_alias,
        /// TOK : k_const_assert
        /// LHS : Expr
        /// RHS : --
        const_assert,
        /// TOK : k_struct
        /// LHS : span(struct_member)
        /// RHS : --
        @"struct",
        /// TOK : ident
        /// LHS : span(Attribute)
        /// RHS : Type
        struct_member,
        /// TOK : k_fn
        /// LHS : FnProto
        /// RHS : block
        @"fn",
        /// TOK : ident
        /// LHS : ?span(Attribute)
        /// RHS : type
        fn_param,
        /// TOK : brace_left
        /// LHS : span(Statement)?
        /// RHS : --
        block,
        /// TOK : k_return
        /// LHS : Expr?
        /// RHS : --
        @"return",
        /// TOK : k_discard
        /// LHS : --
        /// RHS : --
        discard,
        /// TOK : k_loop
        /// LHS : block
        /// RHS : --
        loop,
        /// TOK : k_continuing
        /// LHS : block
        /// RHS : --
        continuing,
        /// TOK : k_break
        /// LHS : Expr
        /// RHS : --
        break_if,
        /// TOK : k_break
        /// LHS : --
        /// RHS : --
        @"break",
        /// TOK : k_continue
        /// LHS : --
        /// RHS : --
        @"continue",
        /// TOK : k_if
        /// LHS : Expr
        /// RHS : block
        @"if",
        /// RHS is else body
        /// TOK : k_if
        /// LHS : if
        /// RHS : block
        if_else,
        /// TOK : k_if
        /// LHS : if
        /// RHS : if, if_else, if_else_if
        if_else_if,
        /// TOK : k_switch
        /// LHS : Expr
        /// RHS : span(switch_case, switch_default, switch_case_default)
        @"switch",
        /// TOK : k_case
        /// LHS : span(Expr)
        /// RHS : block
        switch_case,
        /// TOK : k_default
        /// LHS : --
        /// RHS : block
        switch_default,
        /// switch_case with default (`case 1, 2, default {}`)
        /// TOK : k_case
        /// LHS : span(Expr)
        /// RHS : block
        switch_case_default,
        /// TOK : k_var
        /// LHS : Var
        /// RHS : Expr?
        @"var",
        /// TOK : k_const
        /// LHS : Type?
        /// RHS : Expr
        @"const",
        /// TOK : k_let
        /// LHS : Type?
        /// RHS : Expr
        let,
        /// TOK : k_while
        /// LHS : Expr
        /// RHS : block
        @"while",
        /// TOK : k_for
        /// LHS : ForHeader
        /// RHS : block
        @"for",
        /// TOK : plus_plus
        /// LHS : Expr
        increase,
        /// TOK : minus_minus
        /// LHS : Expr
        decrease,
        /// TOK : plus_equal, minus_equal,
        ///       times_equal, division_equal,
        ///       modulo_equal, and_equal,
        ///       or_equal, xor_equal,
        ///       shl_equal, shr_equal
        /// LHS : Expr
        /// RHS : Expr
        compound_assign,
        /// TOK : equal
        /// LHS : Expr
        /// RHS : --
        phony_assign,
        /// TOK : k_i32, k_u32, k_f32, k_f16, k_bool
        /// LHS : --
        /// RHS : --
        number_type,
        /// TOK : k_bool
        /// LHS : --
        /// RHS : --
        bool_type,
        /// TOK : k_sampler, k_sampler_comparison
        /// LHS : --
        /// RHS : --
        sampler_type,
        /// TOK : k_vec2, k_vec3, k_vec4
        /// LHS : Type?
        /// RHS : --
        vector_type,
        /// TOK : k_mat2x2, k_mat2x3, k_mat2x4,
        ///       k_mat3x2, k_mat3x3, k_mat3x4,
        ///       k_mat4x2, k_mat4x3, k_mat4x4
        /// LHS : Type?
        /// RHS : --
        matrix_type,
        /// TOK : k_atomic
        /// LHS : Type
        /// RHS : --
        atomic_type,
        /// TOK : k_array
        /// LHS : Type?
        /// RHS : Expr?
        array_type,
        /// TOK : k_ptr
        /// LHS : Type
        /// RHS : PtrType
        ptr_type,
        /// TOK : k_texture_1d, k_texture_2d, k_texture_2d_array,
        ///       k_texture_3d, k_texture_cube, k_texture_cube_array
        /// LHS : Type
        /// RHS : --
        sampled_texture_type,
        /// TOK : k_texture_multisampled_2d, k_texture_depth_multisampled_2d
        /// LHS : Type?
        /// RHS : --
        multisampled_texture_type,
        /// TOK : k_texture_external
        /// LHS : Type
        /// RHS : --
        external_texture_type,
        /// TOK : k_texture_storage_1d, k_texture_storage_2d,
        ///       k_texture_storage_2d_array, k_texture_storage_3d
        /// LHS : Token(TexelFormat)
        /// RHS : Token(AccessMode)
        storage_texture_type,
        /// TOK : k_texture_depth_2d, k_texture_depth_2d_array
        ///       k_texture_depth_cube, k_texture_depth_cube_array
        /// LHS : --
        /// RHS : --
        depth_texture_type,
        /// TOK : attr
        attr_const,
        /// TOK : attr
        attr_invariant,
        /// TOK : attr
        attr_must_use,
        /// TOK : attr
        attr_vertex,
        /// TOK : attr
        attr_fragment,
        /// TOK : attr
        attr_compute,
        /// TOK : attr
        /// LHS : Expr
        /// RHS : --
        attr_align,
        /// TOK : attr
        /// LHS : Expr
        /// RHS : --
        attr_binding,
        /// TOK : attr
        /// LHS : Expr
        /// RHS : --
        attr_group,
        /// TOK : attr
        /// LHS : Expr
        /// RHS : --
        attr_id,
        /// TOK : attr
        /// LHS : Expr
        /// RHS : --
        attr_location,
        /// TOK : attr
        /// LHS : Expr
        /// RHS : --
        attr_size,
        /// TOK : attr
        /// LHS : Token(Builtin)
        /// RHS : --
        attr_builtin,
        /// TOK : attr
        /// LHS : WorkgroupSize
        /// RHS : --
        attr_workgroup_size,
        /// TOK : attr
        /// LHS : Token(InterpolationType)
        /// RHS : Token(InterpolationSample)
        attr_interpolate,
        /// TOK : *
        /// LHS : Expr
        /// RHS : Expr
        mul,
        /// TOK : /
        /// LHS : Expr
        /// RHS : Expr
        div,
        /// TOK : %
        /// LHS : Expr
        /// RHS : Expr
        mod,
        /// TOK : +
        /// LHS : Expr
        /// RHS : Expr
        add,
        /// TOK : -
        /// LHS : Expr
        /// RHS : Expr
        sub,
        /// TOK : <<
        /// LHS : Expr
        /// RHS : Expr
        shl,
        /// TOK : >>
        /// LHS : Expr
        /// RHS : Expr
        shr,
        /// TOK : &
        /// LHS : Expr
        /// RHS : Expr
        @"and",
        /// TOK : |
        /// LHS : Expr
        /// RHS : Expr
        @"or",
        /// TOK : ^
        /// LHS : Expr
        /// RHS : Expr
        xor,
        /// TOK : &&
        /// LHS : Expr
        /// RHS : Expr
        logical_and,
        /// TOK : ||
        /// LHS : Expr
        /// RHS : Expr
        logical_or,
        /// TOK : !
        /// LHS : Expr
        /// RHS : --
        not,
        /// TOK : -
        /// LHS : Expr
        /// RHS : --
        negate,
        /// TOK : *
        /// LHS : Expr
        /// RHS : --
        deref,
        /// TOK : &
        /// LHS : Expr
        /// RHS : --
        addr_of,
        /// TOK : ==
        /// LHS : Expr
        /// RHS : Expr
        equal,
        /// TOK : !=
        /// LHS : Expr
        /// RHS : Expr
        not_equal,
        /// TOK : <
        /// LHS : Expr
        /// RHS : Expr
        less_than,
        /// TOK : <=
        /// LHS : Expr
        /// RHS : Expr
        less_than_equal,
        /// TOK : >
        /// LHS : Expr
        /// RHS : Expr
        greater_than,
        /// TOK : >=
        /// LHS : Expr
        /// RHS : Expr
        greater_than_equal,
        /// for identifier, array without element type specified,
        /// vector prefix (e.g. vec2) and matrix prefix (e.g. mat2x2) RHS is null
        /// see callExpr in Parser.zig if you don't understand this
        ///
        /// TOK : ident, k_array, k_bool, 'number type keywords', 'vector keywords', 'matrix keywords'
        /// LHS : span(Arguments Expr)
        /// RHS : (number_type, bool_type, vector_type, matrix_type, array_type)?
        call,
        /// TOK : k_bitcast
        /// LHS : Type
        /// RHS : Expr
        bitcast,
        /// TOK : ident
        /// LHS : --
        /// RHS : --
        ident,
        /// LHS is prefix expression
        /// TOK : ident
        /// LHS : Expr
        /// RHS : Token(NodeIndex(ident))
        field_access,
        /// LHS is prefix expression
        /// TOK : bracket_left
        /// LHS : Expr
        /// RHS : Expr
        index_access,
        /// TOK : k_true
        /// LHS : --
        /// RHS : --
        true,
        /// TOK : k_false
        /// LHS : --
        /// RHS : --
        false,
        /// TOK : number
        /// LHS : --
        /// RHS : --
        number,
    };

    pub const GlobalVar = struct {
        /// span(Attr)?
        attrs: NodeIndex = .none,
        /// Token(ident)
        name: TokenIndex,
        /// Token(AddressSpace)?
        addr_space: TokenIndex = .none,
        /// Token(AccessMode)?
        access_mode: TokenIndex = .none,
        /// Type?
        type: NodeIndex = .none,
    };

    pub const Var = struct {
        /// Token(ident)
        name: TokenIndex,
        /// Token(AddressSpace)?
        addr_space: TokenIndex = .none,
        /// Token(AccessMode)?
        access_mode: TokenIndex = .none,
        /// Type?
        type: NodeIndex = .none,
    };

    pub const Override = struct {
        /// span(Attr)?
        attrs: NodeIndex = .none,
        /// Type?
        type: NodeIndex = .none,
    };

    pub const PtrType = struct {
        /// Token(AddressSpace)
        addr_space: TokenIndex,
        /// Token(AccessMode)
        access_mode: TokenIndex,
    };

    pub const WorkgroupSize = struct {
        /// Expr
        x: NodeIndex,
        /// Expr?
        y: NodeIndex = .none,
        /// Expr?
        z: NodeIndex = .none,
    };

    pub const FnProto = struct {
        /// span(Attr)?
        attrs: NodeIndex = .none,
        /// span(fn_param)?
        params: NodeIndex = .none,
        /// span(Attr)?
        return_attrs: NodeIndex = .none,
        /// Type?
        return_type: NodeIndex = .none,
    };

    pub const ForHeader = struct {
        /// var, const, let, phony_assign, compound_assign
        init: NodeIndex = .none,
        /// Expr
        cond: NodeIndex = .none,
        /// call, phony_assign, compound_assign
        update: NodeIndex = .none,
    };
};

pub const Builtin = enum {
    vertex_index, instance_index, position, front_facing, frag_depth,
    local_invocation_id, local_invocation_index, global_invocation_id,
    workgroup_id, num_workgroups, sample_index, sample_mask,
};

pub const InterpolationType = enum { perspective, linear, flat };

pub const InterpolationSample = enum { center, centroid, sample };

pub const AddressSpace = enum { function, private, workgroup, uniform, storage };

pub const AccessMode = enum { read, write, read_write };

pub const Attribute = enum {
    invariant, @"const", must_use, vertex, fragment, compute,
    @"align", binding, group, id, location, size,
    builtin, workgroup_size, interpolate,
};

pub const TexelFormat = enum {
    rgba8unorm, rgba8snorm, rgba8uint, rgba8sint,
    rgba16uint, rgba16sint, rgba16float,
    r32uint, r32sint, r32float,
    rg32uint, rg32sint, rg32float,
    rgba32uint, rgba32sint, rgba32float,
    bgra8unorm,
};
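A sketch of driving the parser from outside (not part of the repository; it assumes compilation alongside the repo's ErrorList.zig, and the use of NodeIndex.globals as the root span node is inferred from `globals = 0` above rather than confirmed by the source):

const std = @import("std");
const Ast = @import("Ast.zig");
const ErrorList = @import("ErrorList.zig");

// Parse a WGSL source string and print the tag of every top-level declaration.
fn dumpGlobals(allocator: std.mem.Allocator) !void {
    var errors = try ErrorList.init(allocator);
    defer errors.deinit();

    var tree = try Ast.parse(allocator, &errors, "const answer = 42;");
    defer tree.deinit(allocator);

    // Assumption: `.globals` (index 0) is the root span node produced by
    // translationUnit; spanToList expands a span into its child nodes.
    for (tree.spanToList(.globals)) |decl| {
        std.debug.print("{s}\n", .{@tagName(tree.nodeTag(decl))});
    }
}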
0
repos/mach-sysgpu/src
repos/mach-sysgpu/src/shader/test.zig
const std = @import("std");
const ErrorList = @import("ErrorList.zig");
const Ast = @import("Ast.zig");
const Air = @import("Air.zig");
const CodeGen = @import("CodeGen.zig");
const printAir = @import("print_air.zig").printAir;
const expect = std.testing.expect;
const expectEqual = std.testing.expectEqual;
const allocator = std.testing.allocator;

test "builtins" {
    const builtins = @embedFile("test/builtins.wgsl");
    try expectCodegen(builtins, "builtins.spv", .spirv, false);
    try expectCodegen(builtins, "builtins.hlsl", .hlsl, false);
    try expectCodegen(builtins, "builtins.msl", .msl, false);
    try expectCodegen(builtins, "builtins-spirvcross.glsl", .glsl, true);
    // try expectCodegen(builtins, "builtins.glsl", .glsl, false);
}

test "if-else" {
    const if_else = @embedFile("test/if-else.wgsl");
    try expectCodegen(if_else, "if-else.spv", .spirv, false);
    try expectCodegen(if_else, "if-else.hlsl", .hlsl, false);
    try expectCodegen(if_else, "if-else.msl", .msl, false);
    try expectCodegen(if_else, "if-else-spirvcross.glsl", .glsl, true);
    // try expectCodegen(if_else, "if-else.glsl", .glsl, false);
}

test "boids-sprite" {
    const boids_sprite = @embedFile("test/boids-sprite.wgsl");
    try expectCodegen(boids_sprite, "boids-sprite.spv", .spirv, false);
    try expectCodegen(boids_sprite, "boids-sprite.hlsl", .hlsl, false);
    try expectCodegen(boids_sprite, "boids-sprite.msl", .msl, false);
    try expectCodegen(boids_sprite, "boids-sprite-spirvcross.glsl", .glsl, true);
    // try expectCodegen(boids_sprite, "boids-sprite.glsl", .glsl, false);
}

test "boids-sprite-update" {
    const boids_sprite_update = @embedFile("test/boids-sprite-update.wgsl");
    try expectCodegen(boids_sprite_update, "boids-sprite-update.spv", .spirv, false);
    try expectCodegen(boids_sprite_update, "boids-sprite-update.hlsl", .hlsl, false);
    try expectCodegen(boids_sprite_update, "boids-sprite-update.msl", .msl, false);
    try expectCodegen(boids_sprite_update, "boids-sprite-update-spirvcross.glsl", .glsl, true);
    // try expectCodegen(boids_sprite_update, "boids-sprite-update.glsl", .glsl, false);
}

test "cube-map" {
    const cube_map = @embedFile("test/cube-map.wgsl");
    try expectCodegen(cube_map, "cube-map.spv", .spirv, false);
    try expectCodegen(cube_map, "cube-map.hlsl", .hlsl, false);
    try expectCodegen(cube_map, "cube-map.msl", .msl, false);
    try expectCodegen(cube_map, "cube-map-spirvcross.glsl", .glsl, true);
    // try expectCodegen(cube_map, "cube-map.glsl", .glsl, false);
}

test "fractal-cube" {
    const fractal_cube = @embedFile("test/fractal-cube.wgsl");
    try expectCodegen(fractal_cube, "fractal-cube.spv", .spirv, false);
    try expectCodegen(fractal_cube, "fractal-cube.hlsl", .hlsl, false);
    try expectCodegen(fractal_cube, "fractal-cube.msl", .msl, false);
    try expectCodegen(fractal_cube, "fractal-cube-spirvcross.glsl", .glsl, true);
    // try expectCodegen(fractal_cube, "fractal-cube.glsl", .glsl, false);
}

test "gen-texture-light" {
    const gen_texture_light = @embedFile("test/gen-texture-light.wgsl");
    try expectCodegen(gen_texture_light, "gen-texture-light.spv", .spirv, false);
    try expectCodegen(gen_texture_light, "gen-texture-light.hlsl", .hlsl, false);
    try expectCodegen(gen_texture_light, "gen-texture-light.msl", .msl, false);
    try expectCodegen(gen_texture_light, "gen-texture-light-spirvcross.glsl", .glsl, true);
    // try expectCodegen(gen_texture_light, "gen-texture-light.glsl", .glsl, false);
}

test "gen-texture-light-cube" {
    const gen_texture_light_cube = @embedFile("test/gen-texture-light-cube.wgsl");
    try expectCodegen(gen_texture_light_cube, "gen-texture-light-cube.spv", .spirv, false);
    try expectCodegen(gen_texture_light_cube, "gen-texture-light-cube.hlsl", .hlsl, false);
    try expectCodegen(gen_texture_light_cube, "gen-texture-light-cube.msl", .msl, false);
    try expectCodegen(gen_texture_light_cube, "gen-texture-light-cube-spirvcross.glsl", .glsl, true);
    // try expectCodegen(gen_texture_light_cube, "gen-texture-light-cube.glsl", .glsl, false);
}

test "sprite2d" {
    const sprite2d = @embedFile("test/sprite2d.wgsl");
    try expectCodegen(sprite2d, "sprite2d.spv", .spirv, false);
    try expectCodegen(sprite2d, "sprite2d.hlsl", .hlsl, false);
    try expectCodegen(sprite2d, "sprite2d.msl", .msl, false);
    try expectCodegen(sprite2d, "sprite2d-spirvcross.glsl", .glsl, true);
    // try expectCodegen(sprite2d, "sprite2d.glsl", .glsl, false);
}

test "two-cubes" {
    const two_cubes = @embedFile("test/two-cubes.wgsl");
    try expectCodegen(two_cubes, "two-cubes.spv", .spirv, false);
    try expectCodegen(two_cubes, "two-cubes.hlsl", .hlsl, false);
    try expectCodegen(two_cubes, "two-cubes.msl", .msl, false);
    try expectCodegen(two_cubes, "two-cubes-spirvcross.glsl", .glsl, true);
    // try expectCodegen(two_cubes, "two-cubes.glsl", .glsl, false);
}

test "fullscreen-textured-quad" {
    const fullscreen_textured_quad = @embedFile("test/fullscreen-textured-quad.wgsl");
    try expectCodegen(fullscreen_textured_quad, "fullscreen-textured-quad.spv", .spirv, false);
    try expectCodegen(fullscreen_textured_quad, "fullscreen-textured-quad.hlsl", .hlsl, false);
    try expectCodegen(fullscreen_textured_quad, "fullscreen-textured-quad.msl", .msl, false);
    try expectCodegen(fullscreen_textured_quad, "fullscreen-textured-quad-spirvcross.glsl", .glsl, true);
    // try expectCodegen(fullscreen_textured_quad, "fullscreen-textured-quad.glsl", .glsl, false);
}

test "image-blur" {
    const image_blur = @embedFile("test/image-blur.wgsl");
    try expectCodegen(image_blur, "image-blur.spv", .spirv, false);
    try expectCodegen(image_blur, "image-blur.hlsl", .hlsl, false);
    try expectCodegen(image_blur, "image-blur.msl", .msl, false);
    try expectCodegen(image_blur, "image-blur-spirvcross.glsl", .glsl, true);
    // try expectCodegen(image_blur, "image-blur.glsl", .glsl, false);
}

test "instanced-cube" {
    const instanced_cube = @embedFile("test/instanced-cube.wgsl");
    try expectCodegen(instanced_cube, "instanced-cube.spv", .spirv, false);
    try expectCodegen(instanced_cube, "instanced-cube.hlsl", .hlsl, false);
    try expectCodegen(instanced_cube, "instanced-cube.msl", .msl, false);
    // TODO
    // try expectCodegen(instanced_cube, "instanced-cube-spirvcross.glsl", .glsl, true);
    // try expectCodegen(instanced_cube, "instanced-cube.glsl", .glsl, false);
}

test "map-async" {
    const map_async = @embedFile("test/map-async.wgsl");
    try expectCodegen(map_async, "map-async.spv", .spirv, false);
    try expectCodegen(map_async, "map-async.hlsl", .hlsl, false);
    try expectCodegen(map_async, "map-async.msl", .msl, false);
    try expectCodegen(map_async, "map-async-spirvcross.glsl", .glsl, true);
    // try expectCodegen(map_async, "map-async.glsl", .glsl, false);
}

test "pbr-basic" {
    const pbr_basic = @embedFile("test/pbr-basic.wgsl");
    try expectCodegen(pbr_basic, "pbr-basic.spv", .spirv, false);
    try expectCodegen(pbr_basic, "pbr-basic.hlsl", .hlsl, false);
    try expectCodegen(pbr_basic, "pbr-basic.msl", .msl, false);
    try expectCodegen(pbr_basic, "pbr-basic-spirvcross.glsl", .glsl, true);
    // try expectCodegen(pbr_basic, "pbr-basic.glsl", .glsl, false);
}

test "pixel-post-process-normal-frag" {
    const pixel_post_process_normal_frag = @embedFile("test/pixel-post-process-normal-frag.wgsl");
    try expectCodegen(pixel_post_process_normal_frag, "pixel-post-process-normal-frag.spv", .spirv, false);
    try expectCodegen(pixel_post_process_normal_frag, "pixel-post-process-normal-frag.hlsl", .hlsl, false);
    try expectCodegen(pixel_post_process_normal_frag, "pixel-post-process-normal-frag.msl", .msl, false);
    try expectCodegen(pixel_post_process_normal_frag, "pixel-post-process-normal-frag-spirvcross.glsl", .glsl, true);
    // try expectCodegen(pixel_post_process_normal_frag, "pixel-post-process-normal-frag.glsl", .glsl, false);
}

test "pixel-post-process-pixel-vert" {
    const pixel_post_process_pixel_vert = @embedFile("test/pixel-post-process-pixel-vert.wgsl");
    try expectCodegen(pixel_post_process_pixel_vert, "pixel-post-process-pixel-vert.spv", .spirv, false);
    try expectCodegen(pixel_post_process_pixel_vert, "pixel-post-process-pixel-vert.hlsl", .hlsl, false);
    try expectCodegen(pixel_post_process_pixel_vert, "pixel-post-process-pixel-vert.msl", .msl, false);
    try expectCodegen(pixel_post_process_pixel_vert, "pixel-post-process-pixel-vert-spirvcross.glsl", .glsl, true);
    // try expectCodegen(pixel_post_process_pixel_vert, "pixel-post-process-pixel-vert.glsl", .glsl, false);
}

test "pixel-post-process-pixel-frag" {
    const pixel_post_process_pixel_frag = @embedFile("test/pixel-post-process-pixel-frag.wgsl");
    try expectCodegen(pixel_post_process_pixel_frag, "pixel-post-process-pixel-frag.spv", .spirv, false);
    try expectCodegen(pixel_post_process_pixel_frag, "pixel-post-process-pixel-frag.hlsl", .hlsl, false);
    try expectCodegen(pixel_post_process_pixel_frag, "pixel-post-process-pixel-frag.msl", .msl, false);
    try expectCodegen(pixel_post_process_pixel_frag, "pixel-post-process-pixel-frag-spirvcross.glsl", .glsl, true);
    // try expectCodegen(pixel_post_process_pixel_frag, "pixel-post-process-pixel-frag.glsl", .glsl, false);
}

test "pixel-post-process" {
    const pixel_post_process = @embedFile("test/pixel-post-process.wgsl");
    try expectCodegen(pixel_post_process, "pixel-post-process.spv", .spirv, false);
    try expectCodegen(pixel_post_process, "pixel-post-process.hlsl", .hlsl, false);
    try expectCodegen(pixel_post_process, "pixel-post-process.msl", .msl, false);
    try expectCodegen(pixel_post_process, "pixel-post-process-spirvcross.glsl", .glsl, true);
    // try expectCodegen(pixel_post_process, "pixel-post-process.glsl", .glsl, false);
}

test "procedural-primitives" {
    const procedural_primitives = @embedFile("test/procedural-primitives.wgsl");
    try expectCodegen(procedural_primitives, "procedural-primitives.spv", .spirv, false);
    try expectCodegen(procedural_primitives, "procedural-primitives.hlsl", .hlsl, false);
    try expectCodegen(procedural_primitives, "procedural-primitives.msl", .msl, false);
    try expectCodegen(procedural_primitives, "procedural-primitives-spirvcross.glsl", .glsl, true);
    // try expectCodegen(procedural_primitives, "procedural-primitives.glsl", .glsl, false);
}

test "rotating-cube" {
    const rotating_cube = @embedFile("test/rotating-cube.wgsl");
    try expectCodegen(rotating_cube, "rotating-cube.spv", .spirv, false);
    try expectCodegen(rotating_cube, "rotating-cube.hlsl", .hlsl, false);
    try expectCodegen(rotating_cube, "rotating-cube.msl", .msl, false);
    try expectCodegen(rotating_cube, "rotating-cube-spirvcross.glsl", .glsl, true);
    // try expectCodegen(rotating_cube, "rotating-cube.glsl", .glsl, false);
}

test "triangle" {
    const triangle = @embedFile("test/triangle.wgsl");
    try expectCodegen(triangle, "triangle.spv", .spirv, false);
    try expectCodegen(triangle, "triangle.hlsl", .hlsl, false);
    try expectCodegen(triangle, "triangle.msl", .msl, false);
    try expectCodegen(triangle, "triangle-spirvcross.glsl", .glsl, true);
    // try expectCodegen(triangle, "triangle.glsl", .glsl, false);
}

test "fragmentDeferredRendering" {
    const fragmentDeferredRendering = @embedFile("test/fragmentDeferredRendering.wgsl");
    try expectCodegen(fragmentDeferredRendering, "fragmentDeferredRendering.spv", .spirv, false);
    try expectCodegen(fragmentDeferredRendering, "fragmentDeferredRendering.hlsl", .hlsl, false);
    try expectCodegen(fragmentDeferredRendering, "fragmentDeferredRendering.msl", .msl, false);
    try expectCodegen(fragmentDeferredRendering, "fragmentDeferredRendering-spirvcross.glsl", .glsl, true);
    // try expectCodegen(fragmentDeferredRendering, "fragmentDeferredRendering.glsl", .glsl, false);
}

test "fragmentGBuffersDebugView" {
    const fragmentGBuffersDebugView = @embedFile("test/fragmentGBuffersDebugView.wgsl");
    try expectCodegen(fragmentGBuffersDebugView, "fragmentGBuffersDebugView.spv", .spirv, false);
    try expectCodegen(fragmentGBuffersDebugView, "fragmentGBuffersDebugView.hlsl", .hlsl, false);
    try expectCodegen(fragmentGBuffersDebugView, "fragmentGBuffersDebugView.msl", .msl, false);
    try expectCodegen(fragmentGBuffersDebugView, "fragmentGBuffersDebugView-spirvcross.glsl", .glsl, true);
    // try expectCodegen(fragmentGBuffersDebugView, "fragmentGBuffersDebugView.glsl", .glsl, false);
}

test "fragmentWriteGBuffers" {
    const fragmentWriteGBuffers = @embedFile("test/fragmentWriteGBuffers.wgsl");
    try expectCodegen(fragmentWriteGBuffers, "fragmentWriteGBuffers.spv", .spirv, false);
    try expectCodegen(fragmentWriteGBuffers, "fragmentWriteGBuffers.hlsl", .hlsl, false);
    try expectCodegen(fragmentWriteGBuffers, "fragmentWriteGBuffers.msl", .msl, false);
    try expectCodegen(fragmentWriteGBuffers, "fragmentWriteGBuffers-spirvcross.glsl", .glsl, true);
    // try expectCodegen(fragmentWriteGBuffers, "fragmentWriteGBuffers.glsl", .glsl, false);
}

test "lightUpdate" {
    const lightUpdate = @embedFile("test/lightUpdate.wgsl");
    try expectCodegen(lightUpdate, "lightUpdate.spv", .spirv, false);
    try expectCodegen(lightUpdate, "lightUpdate.hlsl", .hlsl, false);
    try expectCodegen(lightUpdate, "lightUpdate.msl", .msl, false);
    try expectCodegen(lightUpdate, "lightUpdate-spirvcross.glsl", .glsl, true);
    // try expectCodegen(lightUpdate, "lightUpdate.glsl", .glsl, false);
}

test "vertexTextureQuad" {
    const vertexTextureQuad = @embedFile("test/vertexTextureQuad.wgsl");
    try expectCodegen(vertexTextureQuad, "vertexTextureQuad.spv", .spirv, false);
    try expectCodegen(vertexTextureQuad, "vertexTextureQuad.hlsl", .hlsl, false);
    try expectCodegen(vertexTextureQuad, "vertexTextureQuad.msl", .msl, false);
    try expectCodegen(vertexTextureQuad, "vertexTextureQuad-spirvcross.glsl", .glsl, true);
    // try expectCodegen(vertexTextureQuad, "vertexTextureQuad.glsl", .glsl, false);
}

test "vertexWriteGBuffers" {
    const vertexWriteGBuffers = @embedFile("test/vertexWriteGBuffers.wgsl");
    try expectCodegen(vertexWriteGBuffers, "vertexWriteGBuffers.spv", .spirv, false);
    try expectCodegen(vertexWriteGBuffers, "vertexWriteGBuffers.hlsl", .hlsl, false);
    try expectCodegen(vertexWriteGBuffers, "vertexWriteGBuffers.msl", .msl, false);
    try expectCodegen(vertexWriteGBuffers, "vertexWriteGBuffers-spirvcross.glsl", .glsl, true);
    // try expectCodegen(vertexWriteGBuffers, "vertexWriteGBuffers.glsl", .glsl, false);
}

fn expectCodegen(
    source: [:0]const u8,
    comptime file_name: []const u8,
    lang: CodeGen.Language,
    use_spirv_cross: bool,
) !void {
    var errors = try ErrorList.init(allocator);
    defer errors.deinit();

    var tree = Ast.parse(allocator, &errors, source) catch |err| {
        if (err == error.Parsing) {
            try errors.print(source, null);
        }
        return err;
    };
    defer tree.deinit(allocator);

    var ir = Air.generate(allocator, &tree, &errors, null) catch |err| {
        if (err == error.AnalysisFail) {
            try errors.print(source, null);
        }
        return err;
    };
    defer ir.deinit(allocator);

    const out = try CodeGen.generate(allocator, &ir, lang, use_spirv_cross, .{}, null, null, null);
    defer allocator.free(out);

    try std.fs.cwd().makePath("zig-out/shader/");
    try std.fs.cwd().writeFile("zig-out/shader/" ++ file_name, out);
}
0
repos/mach-sysgpu/src
repos/mach-sysgpu/src/shader/Air.zig
//! Analyzed Intermediate Representation. //! This data is produced by AstGen and consumed by CodeGen. const std = @import("std"); const AstGen = @import("AstGen.zig"); const Ast = @import("Ast.zig"); const ErrorList = @import("ErrorList.zig"); const Extensions = @import("wgsl.zig").Extensions; const Air = @This(); tree: *const Ast, globals_index: RefIndex, compute_stage: InstIndex, vertex_stage: InstIndex, fragment_stage: InstIndex, instructions: []const Inst, refs: []const InstIndex, strings: []const u8, values: []const u8, extensions: Extensions, pub fn deinit(air: *Air, allocator: std.mem.Allocator) void { allocator.free(air.instructions); allocator.free(air.refs); allocator.free(air.strings); allocator.free(air.values); air.* = undefined; } pub fn generate( allocator: std.mem.Allocator, tree: *const Ast, errors: *ErrorList, entry_point: ?[]const u8, ) error{ OutOfMemory, AnalysisFail }!Air { var astgen = AstGen{ .allocator = allocator, .tree = tree, .scope_pool = std.heap.MemoryPool(AstGen.Scope).init(allocator), .inst_arena = std.heap.ArenaAllocator.init(allocator), .entry_point_name = entry_point, .errors = errors, }; defer { astgen.instructions.deinit(allocator); astgen.scratch.deinit(allocator); astgen.globals.deinit(allocator); astgen.global_var_refs.deinit(allocator); astgen.scope_pool.deinit(); astgen.inst_arena.deinit(); } errdefer { astgen.refs.deinit(allocator); astgen.strings.deinit(allocator); astgen.values.deinit(allocator); } const globals_index = try astgen.genTranslationUnit(); return .{ .tree = tree, .globals_index = globals_index, .compute_stage = astgen.compute_stage, .vertex_stage = astgen.vertex_stage, .fragment_stage = astgen.fragment_stage, .instructions = try allocator.dupe(Inst, astgen.instructions.keys()), .refs = try astgen.refs.toOwnedSlice(allocator), .strings = try astgen.strings.toOwnedSlice(allocator), .values = try astgen.values.toOwnedSlice(allocator), .extensions = tree.extensions, }; } pub fn refToList(air: Air, ref: RefIndex) []const InstIndex { return std.mem.sliceTo(air.refs[@intFromEnum(ref)..], .none); } pub fn getInst(air: Air, index: InstIndex) Inst { return air.instructions[@intFromEnum(index)]; } pub fn getStr(air: Air, index: StringIndex) []const u8 { return std.mem.sliceTo(air.strings[@intFromEnum(index)..], 0); } pub fn getValue(air: Air, comptime T: type, value: ValueIndex) T { return std.mem.bytesAsValue(T, air.values[@intFromEnum(value)..][0..@sizeOf(T)]).*; } pub fn typeSize(air: Air, index: InstIndex) ?u32 { return switch (air.getInst(index)) { inline .int, .float => |num| num.type.size(), .vector => |vec| @as(u32, @intFromEnum(vec.size)), .matrix => |mat| @as(u32, @intFromEnum(mat.cols)) * @as(u32, @intFromEnum(mat.rows)), .array => |arr| { if (arr.len == .none) return null; return @intCast(air.resolveInt(arr.len) orelse return null); }, else => unreachable, }; } pub const ConstExpr = union(enum) { guaranteed, bool: bool, int: i64, float: f32, fn negate(unary: *ConstExpr) void { switch (unary.*) { .int => unary.int = -unary.int, .float => unary.float = -unary.float, else => unreachable, } } fn not(unary: *ConstExpr) void { switch (unary.*) { .bool => unary.bool = !unary.bool, else => unreachable, } } fn mul(lhs: *ConstExpr, rhs: ConstExpr) void { switch (lhs.*) { .int => lhs.int *= rhs.int, .float => lhs.float *= rhs.float, else => unreachable, } } fn div(lhs: *ConstExpr, rhs: ConstExpr) void { switch (lhs.*) { .int => lhs.int = @divExact(lhs.int, rhs.int), .float => lhs.float /= rhs.float, else => unreachable, } } fn mod(lhs: 
*ConstExpr, rhs: ConstExpr) void { switch (lhs.*) { .int => lhs.int = @mod(lhs.int, rhs.int), .float => lhs.float = @mod(lhs.float, rhs.float), else => unreachable, } } fn add(lhs: *ConstExpr, rhs: ConstExpr) void { switch (lhs.*) { .int => lhs.int += rhs.int, .float => lhs.float += rhs.float, else => unreachable, } } fn sub(lhs: *ConstExpr, rhs: ConstExpr) void { switch (lhs.*) { .int => lhs.int -= rhs.int, .float => lhs.float -= rhs.float, else => unreachable, } } fn shiftLeft(lhs: *ConstExpr, rhs: ConstExpr) void { switch (lhs.*) { .int => lhs.int <<= @intCast(rhs.int), else => unreachable, } } fn shiftRight(lhs: *ConstExpr, rhs: ConstExpr) void { switch (lhs.*) { .int => lhs.int >>= @intCast(rhs.int), else => unreachable, } } fn bitwiseAnd(lhs: *ConstExpr, rhs: ConstExpr) void { switch (lhs.*) { .int => lhs.int &= rhs.int, else => unreachable, } } fn bitwiseOr(lhs: *ConstExpr, rhs: ConstExpr) void { switch (lhs.*) { .int => lhs.int |= rhs.int, else => unreachable, } } fn bitwiseXor(lhs: *ConstExpr, rhs: ConstExpr) void { switch (lhs.*) { .int => lhs.int ^= rhs.int, else => unreachable, } } fn equal(lhs: ConstExpr, rhs: ConstExpr) ConstExpr { return switch (lhs) { .bool => .{ .bool = lhs.bool == rhs.bool }, .int => .{ .bool = lhs.int == rhs.int }, .float => .{ .bool = lhs.float == rhs.float }, .guaranteed => unreachable, }; } fn notEqual(lhs: ConstExpr, rhs: ConstExpr) ConstExpr { return switch (lhs) { .int => .{ .bool = lhs.int != rhs.int }, .float => .{ .bool = lhs.float != rhs.float }, else => unreachable, }; } fn lessThan(lhs: ConstExpr, rhs: ConstExpr) ConstExpr { return switch (lhs) { .int => .{ .bool = lhs.int < rhs.int }, .float => .{ .bool = lhs.float < rhs.float }, else => unreachable, }; } fn greaterThan(lhs: ConstExpr, rhs: ConstExpr) ConstExpr { return switch (lhs) { .int => .{ .bool = lhs.int > rhs.int }, .float => .{ .bool = lhs.float > rhs.float }, else => unreachable, }; } fn lessThanEqual(lhs: ConstExpr, rhs: ConstExpr) ConstExpr { return switch (lhs) { .int => .{ .bool = lhs.int <= rhs.int }, .float => .{ .bool = lhs.float <= rhs.float }, else => unreachable, }; } fn greaterThanEqual(lhs: ConstExpr, rhs: ConstExpr) ConstExpr { return switch (lhs) { .int => .{ .bool = lhs.int >= rhs.int }, .float => .{ .bool = lhs.float >= rhs.float }, else => unreachable, }; } fn logicalAnd(lhs: ConstExpr, rhs: ConstExpr) ConstExpr { return .{ .bool = lhs.bool and rhs.bool }; } fn logicalOr(lhs: ConstExpr, rhs: ConstExpr) ConstExpr { return .{ .bool = lhs.bool or rhs.bool }; } }; pub fn resolveConstExpr(air: Air, inst_idx: InstIndex) ?ConstExpr { const inst = air.getInst(inst_idx); switch (inst) { .bool => |data| { if (data.value) |value| { switch (value) { .literal => |literal| return .{ .bool = literal }, .cast => return null, } } else { return null; } }, .int => |data| { if (data.value) |value| { switch (air.getValue(Inst.Int.Value, value)) { .literal => |literal| return .{ .int = literal }, .cast => return null, } } else { return null; } }, .float => |data| { if (data.value) |value| { switch (air.getValue(Inst.Float.Value, value)) { .literal => |literal| return .{ .float = literal }, .cast => return null, } } else { return null; } }, .vector => |vec| { if (vec.value.? == .none) return .guaranteed; switch (air.getValue(Inst.Vector.Value, vec.value.?)) { .literal => |literal| for (literal[0..@intFromEnum(vec.size)]) |elem_val| { if (air.resolveConstExpr(elem_val) == null) { return null; } }, .cast => return null, } return .guaranteed; }, .matrix => |mat| { if (mat.value.? 
== .none) return .guaranteed; for (air.getValue(Inst.Matrix.Value, mat.value.?)[0..@intFromEnum(mat.cols)]) |elem_val| { if (air.resolveConstExpr(elem_val) == null) { return null; } } return .guaranteed; }, .array => |arr| { for (air.refToList(arr.value.?)) |elem_val| { if (air.resolveConstExpr(elem_val) == null) { return null; } } return .guaranteed; }, .struct_construct => |sc| { for (air.refToList(sc.members)) |elem_val| { if (air.resolveConstExpr(elem_val) == null) { return null; } } return .guaranteed; }, .unary => |un| { var value = air.resolveConstExpr(un.expr) orelse return null; switch (un.op) { .negate => value.negate(), .not => value.not(), else => unreachable, } return value; }, .binary => |bin| { var lhs = air.resolveConstExpr(bin.lhs) orelse return null; const rhs = air.resolveConstExpr(bin.rhs) orelse return null; switch (bin.op) { .mul => lhs.mul(rhs), .div => lhs.div(rhs), .mod => lhs.mod(rhs), .add => lhs.add(rhs), .sub => lhs.sub(rhs), .shl => lhs.shiftLeft(rhs), .shr => lhs.shiftRight(rhs), .@"and" => lhs.bitwiseAnd(rhs), .@"or" => lhs.bitwiseOr(rhs), .xor => lhs.bitwiseXor(rhs), .equal => return lhs.equal(rhs), .not_equal => return lhs.notEqual(rhs), .less_than => return lhs.lessThan(rhs), .greater_than => return lhs.greaterThan(rhs), .less_than_equal => return lhs.lessThanEqual(rhs), .greater_than_equal => return lhs.greaterThanEqual(rhs), .logical_and => return lhs.logicalAnd(rhs), .logical_or => return lhs.logicalOr(rhs), } return lhs; }, .var_ref => |var_ref| return air.resolveConstExpr(var_ref), .@"const" => |@"const"| return air.resolveConstExpr(@"const".init).?, inline .index_access, .field_access, .swizzle_access => |access| return air.resolveConstExpr(access.base), else => return null, } } pub fn resolveInt(air: Air, inst_idx: InstIndex) ?i64 { if (inst_idx != .none) { if (air.resolveConstExpr(inst_idx)) |const_expr| { switch (const_expr) { .int => |x| return x, else => {}, } } } return null; } pub fn findFunction(air: Air, name: []const u8) ?Inst.Fn { for (air.refToList(air.globals_index)) |global_inst_idx| { switch (air.getInst(global_inst_idx)) { .@"fn" => |inst| { if (std.mem.eql(u8, air.getStr(inst.name), name)) { return inst; } }, else => {}, } } return null; } pub const InstIndex = enum(u32) { none = std.math.maxInt(u32), _ }; pub const RefIndex = enum(u32) { none = std.math.maxInt(u32), _ }; pub const ValueIndex = enum(u32) { none = std.math.maxInt(u32), _ }; pub const StringIndex = enum(u32) { _ }; pub const Inst = union(enum) { @"var": Var, @"const": Const, var_ref: InstIndex, @"fn": Fn, fn_param: FnParam, @"struct": Struct, struct_member: StructMember, bool: Bool, int: Int, float: Float, vector: Vector, matrix: Matrix, array: Array, atomic_type: AtomicType, ptr_type: PointerType, texture_type: TextureType, sampler_type, comparison_sampler_type, external_texture_type, nil_intrinsic: NilIntrinsic, unary: Unary, unary_intrinsic: UnaryIntrinsic, binary: Binary, binary_intrinsic: BinaryIntrinsic, triple_intrinsic: TripleIntrinsic, block: RefIndex, loop: InstIndex, continuing: InstIndex, @"return": InstIndex, break_if: InstIndex, @"if": If, @"while": While, @"for": For, @"switch": Switch, switch_case: SwitchCase, assign: Assign, discard, @"break", @"continue", field_access: FieldAccess, swizzle_access: SwizzleAccess, index_access: IndexAccess, call: FnCall, struct_construct: StructConstruct, bitcast: Bitcast, select: BuiltinSelect, texture_sample: TextureSample, texture_dimension: TextureDimension, texture_load: TextureLoad, texture_store: TextureStore, 
pub const Var = struct { name: StringIndex, type: InstIndex, init: InstIndex, addr_space: PointerType.AddressSpace, access_mode: PointerType.AccessMode, binding: InstIndex = .none, group: InstIndex = .none, id: InstIndex = .none, }; pub const Const = struct { name: StringIndex, type: InstIndex, init: InstIndex, }; pub const Fn = struct { name: StringIndex, stage: Stage, is_const: bool, params: RefIndex, return_type: InstIndex, return_attrs: ReturnAttrs, block: InstIndex, global_var_refs: RefIndex, has_array_length: bool, pub const Stage = union(enum) { none, vertex, fragment, compute: WorkgroupSize, pub const WorkgroupSize = struct { x: InstIndex, y: InstIndex, z: InstIndex, }; }; pub const ReturnAttrs = struct { builtin: ?Builtin, location: ?u16, interpolate: ?Interpolate, invariant: bool, }; }; pub const FnParam = struct { name: StringIndex, type: InstIndex, builtin: ?Builtin, location: ?u16, interpolate: ?Interpolate, invariant: bool, }; pub const Builtin = Ast.Builtin; pub const Interpolate = struct { type: Type, sample: Sample, pub const Type = enum { perspective, linear, flat, }; pub const Sample = enum { none, center, centroid, sample, }; }; pub const Struct = struct { name: StringIndex, members: RefIndex, }; pub const StructMember = struct { name: StringIndex, index: u32, type: InstIndex, @"align": ?u29, size: ?u32, location: ?u16, builtin: ?Builtin, interpolate: ?Interpolate, }; pub const Bool = struct { value: ?Value, pub const Value = union(enum) { literal: bool, cast: ScalarCast, }; }; pub const Int = struct { type: Type, value: ?ValueIndex, pub const Type = enum { u32, i32, pub fn size(int: Type) u8 { _ = int; return 4; } pub fn sizeBits(int: Type) u8 { _ = int; return 32; } pub fn signedness(int: Type) bool { return switch (int) { .u32 => false, .i32 => true, }; } }; pub const Value = union(enum) { literal: i33, cast: ScalarCast, }; }; pub const Float = struct { type: Type, value: ?ValueIndex, pub const Type = enum { f32, f16, pub fn size(float: Type) u8 { return switch (float) { .f32 => 4, .f16 => 2, }; } pub fn sizeBits(float: Type) u8 { return switch (float) { .f32 => 32, .f16 => 16, }; } }; pub const Value = union(enum) { literal: f32, cast: ScalarCast, }; }; pub const ScalarCast = struct { type: InstIndex, value: InstIndex, }; pub const Vector = struct { elem_type: InstIndex, size: Size, value: ?ValueIndex, pub const Size = enum(u3) { two = 2, three = 3, four = 4 }; pub const Value = union(enum) { literal: [4]InstIndex, cast: Cast, }; pub const Cast = struct { type: InstIndex, value: [4]InstIndex, }; }; pub const Matrix = struct { elem_type: InstIndex, cols: Vector.Size, rows: Vector.Size, value: ?ValueIndex, pub const Value = [4]InstIndex; }; pub const Array = struct { elem_type: InstIndex, len: InstIndex, value: ?RefIndex, }; pub const AtomicType = struct { elem_type: InstIndex }; pub const PointerType = struct { elem_type: InstIndex, addr_space: AddressSpace, access_mode: AccessMode, pub const AddressSpace = enum { uniform_constant, function, private, workgroup, uniform, storage, }; pub const AccessMode = enum { read, write, read_write, }; }; pub const TextureType = struct { kind: Kind, elem_type: InstIndex = .none, texel_format: TexelFormat = .none, access_mode: AccessMode = .write, pub const Kind = enum { sampled_1d, sampled_2d, sampled_2d_array, sampled_3d, sampled_cube, sampled_cube_array, multisampled_2d, multisampled_depth_2d, storage_1d, storage_2d, storage_2d_array, storage_3d, depth_2d, depth_2d_array, depth_cube, depth_cube_array, pub const Dimension = enum 
{ @"1d", @"2d", @"3d", cube, }; pub fn dimension(k: Kind) Dimension { return switch (k) { .sampled_1d, .storage_1d => .@"1d", .sampled_2d, .sampled_2d_array, .multisampled_2d, .multisampled_depth_2d, .storage_2d, .storage_2d_array, .depth_2d, .depth_2d_array, => .@"2d", .sampled_3d, .storage_3d => .@"3d", .sampled_cube, .sampled_cube_array, .depth_cube, .depth_cube_array => .cube, }; } }; pub const TexelFormat = enum { none, rgba8unorm, rgba8snorm, rgba8uint, rgba8sint, rgba16uint, rgba16sint, rgba16float, r32uint, r32sint, r32float, rg32uint, rg32sint, rg32float, rgba32uint, rgba32sint, rgba32float, bgra8unorm, }; pub const AccessMode = enum { write }; }; pub const Unary = struct { result_type: InstIndex, expr: InstIndex, op: Op, pub const Op = enum { not, negate, deref, addr_of, }; }; pub const NilIntrinsic = enum { storage_barrier, workgroup_barrier, }; pub const UnaryIntrinsic = struct { result_type: InstIndex, expr: InstIndex, op: Op, pub const Op = enum { all, any, abs, acos, acosh, asin, asinh, atan, atanh, ceil, cos, cosh, count_leading_zeros, count_one_bits, count_trailing_zeros, degrees, exp, exp2, first_leading_bit, first_trailing_bit, floor, fract, inverse_sqrt, length, log, log2, quantize_to_F16, radians, reverseBits, round, saturate, sign, sin, sinh, sqrt, tan, tanh, trunc, dpdx, dpdx_coarse, dpdx_fine, dpdy, dpdy_coarse, dpdy_fine, fwidth, fwidth_coarse, fwidth_fine, array_length, normalize, }; }; pub const Binary = struct { op: Op, result_type: InstIndex, lhs_type: InstIndex, rhs_type: InstIndex, lhs: InstIndex, rhs: InstIndex, pub const Op = enum { mul, div, mod, add, sub, shl, shr, @"and", @"or", xor, logical_and, logical_or, equal, not_equal, less_than, less_than_equal, greater_than, greater_than_equal, }; }; pub const BinaryIntrinsic = struct { op: Op, result_type: InstIndex, lhs_type: InstIndex, rhs_type: InstIndex, lhs: InstIndex, rhs: InstIndex, pub const Op = enum { min, max, atan2, distance, dot, pow, step, }; }; pub const TripleIntrinsic = struct { op: Op, result_type: InstIndex, a1_type: InstIndex, a2_type: InstIndex, a3_type: InstIndex, a1: InstIndex, a2: InstIndex, a3: InstIndex, pub const Op = enum { smoothstep, clamp, mix, }; }; pub const Assign = struct { mod: Modifier, type: InstIndex, lhs: InstIndex, rhs: InstIndex, pub const Modifier = enum { none, add, sub, mul, div, mod, @"and", @"or", xor, shl, shr, }; }; pub const FieldAccess = struct { base: InstIndex, field: InstIndex, name: StringIndex, }; pub const SwizzleAccess = struct { base: InstIndex, type: InstIndex, size: Size, pattern: [4]Component, pub const Size = enum(u3) { one = 1, two = 2, three = 3, four = 4, }; pub const Component = enum(u3) { x, y, z, w }; }; pub const IndexAccess = struct { base: InstIndex, type: InstIndex, index: InstIndex, }; pub const FnCall = struct { @"fn": InstIndex, args: RefIndex, }; pub const StructConstruct = struct { @"struct": InstIndex, members: RefIndex, }; pub const Bitcast = struct { type: InstIndex, expr: InstIndex, result_type: InstIndex, }; pub const BuiltinSelect = struct { type: InstIndex, true: InstIndex, false: InstIndex, cond: InstIndex, }; pub const TextureSample = struct { kind: TextureType.Kind, texture_type: InstIndex, texture: InstIndex, sampler: InstIndex, coords: InstIndex, result_type: InstIndex, offset: InstIndex = .none, array_index: InstIndex = .none, operands: Operands = .none, pub const Operands = union(enum) { none, level: InstIndex, grad: struct { dpdx: InstIndex, dpdy: InstIndex }, }; }; pub const TextureDimension = struct { kind: 
TextureType.Kind, texture: InstIndex, level: InstIndex, result_type: InstIndex, }; pub const TextureLoad = struct { kind: TextureType.Kind, texture: InstIndex, coords: InstIndex, level: InstIndex, result_type: InstIndex, }; pub const TextureStore = struct { kind: TextureType.Kind, texture: InstIndex, coords: InstIndex, value: InstIndex, }; pub const If = struct { cond: InstIndex, body: InstIndex, /// `if` or `block` @"else": InstIndex, }; pub const Switch = struct { switch_on: InstIndex, cases_list: RefIndex, }; pub const SwitchCase = struct { cases: RefIndex, body: InstIndex, default: bool, }; pub const While = struct { cond: InstIndex, body: InstIndex, }; pub const For = struct { init: InstIndex, cond: InstIndex, update: InstIndex, body: InstIndex, }; comptime { std.debug.assert(@sizeOf(Inst) <= 64); } };
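// Hedged usage sketch (appended here; not part of the original file): a tiny
// test exercising only helper methods declared above (`Kind.dimension`,
// `Int.Type.size`/`signedness`, `Float.Type.sizeBits`). It assumes `Inst` is
// the enclosing file-scope declaration, as the comptime size assert suggests.
test "Inst helper methods" {
    const testing = @import("std").testing;
    try testing.expectEqual(Inst.TextureType.Kind.Dimension.@"2d", Inst.TextureType.Kind.sampled_2d_array.dimension());
    try testing.expectEqual(@as(u8, 4), Inst.Int.Type.u32.size());
    try testing.expect(Inst.Int.Type.i32.signedness());
    try testing.expectEqual(@as(u8, 16), Inst.Float.Type.f16.sizeBits());
}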
0
repos/mach-sysgpu/src
repos/mach-sysgpu/src/shader/CodeGen.zig
const std = @import("std"); const Air = @import("Air.zig"); const c = @cImport({ @cInclude("spirv-cross/spirv_cross_c.h"); @cInclude("spirv-tools/libspirv.h"); }); const genGlsl = @import("codegen/glsl.zig").gen; const genHlsl = @import("codegen/hlsl.zig").gen; const genMsl = @import("codegen/msl.zig").gen; const genSpirv = @import("codegen/spirv.zig").gen; pub const Language = enum { glsl, hlsl, msl, spirv, }; pub const DebugInfo = struct { emit_source_file: ?[]const u8 = null, emit_names: bool = true, }; pub const Stage = enum { vertex, fragment, compute, }; pub const Entrypoint = struct { stage: Stage, name: [*:0]const u8, }; pub const BindingPoint = struct { group: u32, binding: u32 }; pub const BindingTable = std.AutoHashMapUnmanaged(BindingPoint, u32); pub fn generate( allocator: std.mem.Allocator, air: *const Air, out_lang: Language, use_spirv_cross: bool, debug_info: DebugInfo, entrypoint: ?Entrypoint, bindings: ?*const BindingTable, label: ?[*:0]const u8, ) ![]const u8 { _ = use_spirv_cross; // if (!use_spirv_cross) { // const spirv_data = try genSpirv(allocator, air, .{}); // const spirv_data_z = try allocator.dupeZ(u8, spirv_data); // defer allocator.free(spirv_data_z); // allocator.free(spirv_data); // const spirv_words_ptr = @as([*]const u32, @ptrCast(@alignCast(spirv_data_z.ptr))); // const spirv_words = spirv_words_ptr[0 .. spirv_data_z.len / @sizeOf(u32)]; // // Optimize // var optimized_spirv: c.spv_binary = undefined; // const target_env = spvTargetEnv(out_lang); // const optimizer = c.spvOptimizerCreate(target_env); // defer c.spvOptimizerDestroy(optimizer); // c.spvOptimizerSetMessageConsumer(optimizer, spvMessageConsumer); // c.spvOptimizerRegisterPerformancePasses(optimizer); // c.spvOptimizerRegisterLegalizationPasses(optimizer); // const opt_options = c.spvOptimizerOptionsCreate(); // defer c.spvOptimizerOptionsDestroy(opt_options); // c.spvOptimizerOptionsSetRunValidator(opt_options, false); // var res = c.spvOptimizerRun( // optimizer, // spirv_words.ptr, // spirv_words.len, // &optimized_spirv, // opt_options, // ); // switch (res) { // c.SPV_SUCCESS => {}, // else => return error.SpvOptimizerFailed, // } // if (out_lang == .spirv) { // const code_bytes_ptr = @as([*]const u8, @ptrCast(optimized_spirv.*.code)); // const code_bytes = code_bytes_ptr[0 .. 
optimized_spirv.*.wordCount * @sizeOf(u32)]; // return allocator.dupe(u8, code_bytes); // } // // Translate // var context: c.spvc_context = undefined; // _ = c.spvc_context_create(&context); // defer c.spvc_context_destroy(context); // c.spvc_context_set_error_callback(context, spvcErrorCallback, null); // var ir: c.spvc_parsed_ir = undefined; // _ = c.spvc_context_parse_spirv(context, optimized_spirv.*.code, optimized_spirv.*.wordCount, &ir); // var compiler: c.spvc_compiler = undefined; // _ = c.spvc_context_create_compiler( // context, // spvcBackend(out_lang), // ir, // c.SPVC_CAPTURE_MODE_TAKE_OWNERSHIP, // &compiler, // ); // var resources: c.spvc_resources = undefined; // _ = c.spvc_compiler_create_shader_resources(compiler, &resources); // var options: c.spvc_compiler_options = undefined; // _ = c.spvc_compiler_create_compiler_options(compiler, &options); // switch (out_lang) { // .glsl => { // const resource_types = [_]c.spvc_resource_type{ // c.SPVC_RESOURCE_TYPE_UNIFORM_BUFFER, // c.SPVC_RESOURCE_TYPE_STORAGE_BUFFER, // c.SPVC_RESOURCE_TYPE_STORAGE_IMAGE, // c.SPVC_RESOURCE_TYPE_SAMPLED_IMAGE, // c.SPVC_RESOURCE_TYPE_SEPARATE_IMAGE, // c.SPVC_RESOURCE_TYPE_SEPARATE_SAMPLERS, // }; // for (resource_types) |resource_type| { // glslRemapResources(compiler, resources, resource_type, bindings orelse &.{}); // } // _ = c.spvc_compiler_options_set_uint(options, c.SPVC_COMPILER_OPTION_GLSL_VERSION, 450); // _ = c.spvc_compiler_options_set_bool(options, c.SPVC_COMPILER_OPTION_GLSL_ES, c.SPVC_FALSE); // if (entrypoint) |e| { // _ = c.spvc_compiler_set_entry_point(compiler, e.name, spvExecutionModel(e.stage)); // } // // combiner samplers/textures // var id: c.spvc_variable_id = undefined; // res = c.spvc_compiler_build_dummy_sampler_for_combined_images(compiler, &id); // if (res == c.SPVC_SUCCESS) { // c.spvc_compiler_set_decoration(compiler, id, c.SpvDecorationDescriptorSet, 0); // c.spvc_compiler_set_decoration(compiler, id, c.SpvDecorationBinding, 0); // } // _ = c.spvc_compiler_build_combined_image_samplers(compiler); // }, // else => @panic("TODO"), // } // _ = c.spvc_compiler_install_compiler_options(compiler, options); // var source: [*c]const u8 = undefined; // _ = c.spvc_compiler_compile(compiler, &source); // return allocator.dupe(u8, std.mem.span(source)); // } // Direct translation return switch (out_lang) { .spirv => try genSpirv(allocator, air, debug_info), .hlsl => try genHlsl(allocator, air, debug_info), .msl => try genMsl(allocator, air, debug_info, entrypoint, bindings, label orelse "<ShaderModule label not specified>"), .glsl => try genGlsl(allocator, air, debug_info, entrypoint, bindings), }; } fn spvMessageConsumer( level: c.spv_message_level_t, src: [*c]const u8, pos: [*c]const c.spv_position_t, msg: [*c]const u8, ) callconv(.C) void { switch (level) { c.SPV_MSG_FATAL, c.SPV_MSG_INTERNAL_ERROR, c.SPV_MSG_ERROR, => { // TODO - don't panic std.debug.panic("{s} at :{d}:{d}\n{s}", .{ std.mem.span(msg), pos.*.line, pos.*.column, std.mem.span(src), }); }, else => {}, } } fn spvTargetEnv(language: Language) c.spv_target_env { return switch (language) { .glsl => c.SPV_ENV_OPENGL_4_5, .spirv => c.SPV_ENV_VULKAN_1_0, else => unreachable, }; } fn spvExecutionModel(stage: Stage) c.SpvExecutionModel { return switch (stage) { .vertex => c.SpvExecutionModelVertex, .fragment => c.SpvExecutionModelFragment, .compute => c.SpvExecutionModelGLCompute, }; } fn spvcErrorCallback(userdata: ?*anyopaque, err: [*c]const u8) callconv(.C) void { _ = userdata; // TODO - don't panic 
@panic(std.mem.span(err)); } fn spvcBackend(language: Language) c_uint { return switch (language) { .glsl => c.SPVC_BACKEND_GLSL, .hlsl => c.SPVC_BACKEND_HLSL, .msl => c.SPVC_BACKEND_MSL, .spirv => unreachable, }; } fn glslRemapResources( compiler: c.spvc_compiler, resources: c.spvc_resources, resource_type: c.spvc_resource_type, bindings: *const BindingTable, ) void { var resource_list: [*c]c.spvc_reflected_resource = undefined; var resource_size: usize = undefined; _ = c.spvc_resources_get_resource_list_for_type(resources, resource_type, &resource_list, &resource_size); for (resource_list[0..resource_size]) |resource| { const key = BindingPoint{ .group = c.spvc_compiler_get_decoration(compiler, resource.id, c.SpvDecorationDescriptorSet), .binding = c.spvc_compiler_get_decoration(compiler, resource.id, c.SpvDecorationBinding), }; if (bindings.get(key)) |slot| { _ = c.spvc_compiler_unset_decoration(compiler, resource.id, c.SpvDecorationDescriptorSet); _ = c.spvc_compiler_set_decoration(compiler, resource.id, c.SpvDecorationBinding, slot); } } }
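// Hedged usage sketch (appended here; not part of the original file): one way
// a caller might drive `generate` above. The `air` argument is assumed to come
// from this repo's Air analysis; everything else uses only declarations in
// this file. The direct-translation path is taken (use_spirv_cross = false).
fn exampleEmitMsl(allocator: std.mem.Allocator, air: *const Air) ![]const u8 {
    return generate(
        allocator,
        air,
        .msl, // Language
        false, // use_spirv_cross: skip the (commented-out) SPIRV-Cross path
        .{}, // DebugInfo: defaults (emit names, no source file)
        .{ .stage = .vertex, .name = "main" }, // Entrypoint (name is illustrative)
        null, // bindings: no BindingPoint remapping
        null, // label: falls back to "<ShaderModule label not specified>"
    );
}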
0
repos/mach-sysgpu/src
repos/mach-sysgpu/src/shader/Tokenizer.zig
const std = @import("std"); const Token = @import("Token.zig"); const Tokenizer = @This(); source: [:0]const u8, index: u32 = 0, const State = union(enum) { start, ident, underscore, number: struct { is_hex: bool = false, allow_leading_sign: bool = false, has_dot: bool = false, }, block_comment, ampersand, bang, equal, angle_bracket_left, angle_bracket_angle_bracket_left, angle_bracket_right, angle_bracket_angle_bracket_right, minus, percent, dot, pipe, plus, slash, asterisk, xor, }; pub fn init(source: [:0]const u8) Tokenizer { // skip the UTF-8 BOM if present const src_start: u32 = if (std.mem.startsWith(u8, source, "\xEF\xBB\xBF")) 3 else 0; return Tokenizer{ .source = source[src_start..] }; } pub fn peek(tokenizer: *Tokenizer) Token { var index = tokenizer.index; var state: State = .start; var result = Token{ .tag = .eof, .loc = .{ .start = index, .end = undefined, }, }; while (true) : (index += 1) { const c = tokenizer.source[index]; switch (state) { .start => switch (c) { 0 => { if (index != tokenizer.source.len) { result.tag = .invalid; index += 1; } break; }, ' ', '\n', '\t', '\r' => result.loc.start = index + 1, 'a'...'z', 'A'...'Z' => state = .ident, '0'...'9' => state = .{ .number = .{} }, '&' => state = .ampersand, '!' => state = .bang, '=' => state = .equal, '<' => state = .angle_bracket_left, '>' => state = .angle_bracket_right, '-' => state = .minus, '%' => state = .percent, '.' => state = .dot, '|' => state = .pipe, '+' => state = .plus, '/' => state = .slash, '*' => state = .asterisk, '_' => state = .underscore, '^' => state = .xor, '@' => { result.tag = .attr; index += 1; break; }, '[' => { result.tag = .bracket_left; index += 1; break; }, ']' => { result.tag = .bracket_right; index += 1; break; }, '{' => { result.tag = .brace_left; index += 1; break; }, '}' => { result.tag = .brace_right; index += 1; break; }, ':' => { result.tag = .colon; index += 1; break; }, ',' => { result.tag = .comma; index += 1; break; }, '(' => { result.tag = .paren_left; index += 1; break; }, ')' => { result.tag = .paren_right; index += 1; break; }, ';' => { result.tag = .semicolon; index += 1; break; }, '~' => { result.tag = .tilde; index += 1; break; }, else => { result.tag = .invalid; index += 1; break; }, }, .ident => switch (c) { 'a'...'z', 'A'...'Z', '0'...'9', '_' => {}, else => { result.tag = .ident; if (Token.keywords.get(tokenizer.source[result.loc.start..index])) |tag| { result.tag = tag; } else if (Token.reserved.get(tokenizer.source[result.loc.start..index])) |_| { result.tag = .invalid; } break; }, }, .underscore => switch (c) { // TODO: two underscore `__` https://www.w3.org/TR/WGSL/#identifiers 'a'...'z', 'A'...'Z', '_', '0'...'9' => state = .ident, else => { result.tag = .underscore; break; }, }, .number => |*number| { result.tag = .number; switch (c) { '0'...'9' => {}, 'a'...'d', 'A'...'D' => if (!number.is_hex) break, 'x', 'X' => number.is_hex = true, '.' 
=> { if (number.has_dot) break; number.has_dot = true; }, '+', '-' => { if (!number.allow_leading_sign) break; number.allow_leading_sign = false; number.is_hex = false; }, 'e', 'E' => if (!number.is_hex) { number.allow_leading_sign = true; }, 'p', 'P' => if (number.is_hex) { number.allow_leading_sign = true; }, 'i', 'u' => { index += 1; break; }, 'f', 'h' => if (!number.is_hex) { index += 1; break; }, else => break, } }, .block_comment => switch (c) { 0 => break, '\n' => { state = .start; result.loc.start = index + 1; }, else => {}, }, .ampersand => switch (c) { '&' => { result.tag = .ampersand_ampersand; index += 1; break; }, '=' => { result.tag = .ampersand_equal; index += 1; break; }, else => { result.tag = .ampersand; break; }, }, .bang => switch (c) { '=' => { result.tag = .bang_equal; index += 1; break; }, else => { result.tag = .bang; break; }, }, .equal => switch (c) { '=' => { result.tag = .equal_equal; index += 1; break; }, else => { result.tag = .equal; break; }, }, .angle_bracket_left => switch (c) { '<' => state = .angle_bracket_angle_bracket_left, '=' => { result.tag = .angle_bracket_left_equal; index += 1; break; }, else => { result.tag = .angle_bracket_left; break; }, }, .angle_bracket_angle_bracket_left => switch (c) { '=' => { result.tag = .angle_bracket_angle_bracket_left_equal; index += 1; break; }, else => { result.tag = .angle_bracket_angle_bracket_left; break; }, }, .angle_bracket_right => switch (c) { '>' => state = .angle_bracket_angle_bracket_right, '=' => { result.tag = .angle_bracket_right_equal; index += 1; break; }, else => { result.tag = .angle_bracket_right; break; }, }, .angle_bracket_angle_bracket_right => switch (c) { '=' => { result.tag = .angle_bracket_angle_bracket_right_equal; index += 1; break; }, else => { result.tag = .angle_bracket_angle_bracket_right; break; }, }, .minus => switch (c) { '-' => { result.tag = .minus_minus; index += 1; break; }, '=' => { result.tag = .minus_equal; index += 1; break; }, '>' => { result.tag = .arrow; index += 1; break; }, '0'...'9' => { // workaround for x-1 being tokenized as [x] [-1] // TODO: maybe it's user fault? 
:^) // duplicated at .plus too if (index >= 2 and std.ascii.isAlphabetic(tokenizer.source[index - 2])) { result.tag = .minus; break; } state = .{ .number = .{} }; }, else => { result.tag = .minus; break; }, }, .percent => switch (c) { '=' => { result.tag = .percent_equal; index += 1; break; }, else => { result.tag = .percent; break; }, }, .pipe => switch (c) { '|' => { result.tag = .pipe_pipe; index += 1; break; }, '=' => { result.tag = .pipe_equal; index += 1; break; }, else => { result.tag = .pipe; break; }, }, .dot => switch (c) { '0'...'9' => state = .{ .number = .{} }, else => { result.tag = .dot; break; }, }, .plus => switch (c) { '+' => { result.tag = .plus_plus; index += 1; break; }, '=' => { result.tag = .plus_equal; index += 1; break; }, '0'...'9' => { if (index >= 2 and std.ascii.isAlphabetic(tokenizer.source[index - 2])) { result.tag = .plus; break; } state = .{ .number = .{} }; }, else => { result.tag = .plus; break; }, }, .slash => switch (c) { '/' => state = .block_comment, '=' => { result.tag = .slash_equal; index += 1; break; }, else => { result.tag = .slash; break; }, }, .asterisk => switch (c) { '=' => { result.tag = .asterisk_equal; index += 1; break; }, else => { result.tag = .asterisk; break; }, }, .xor => switch (c) { '=' => { result.tag = .xor_equal; index += 1; break; }, else => { result.tag = .xor; break; }, }, } } result.loc.end = index; return result; } pub fn next(tokenizer: *Tokenizer) Token { const tok = tokenizer.peek(); tokenizer.index = tok.loc.end; return tok; } // test "tokenize identifier and numbers" { // const str = // \\_ __ _iden iden -100i 100.8i // cc // \\// comment // \\ // ; // var tokenizer = Tokenizer.init(str); // try std.testing.expect(tokenizer.next().tag == .underscore); // try std.testing.expect(tokenizer.next().tag == .ident); // try std.testing.expect(tokenizer.next().tag == .ident); // try std.testing.expect(tokenizer.next().tag == .ident); // try std.testing.expectEqualStrings("-100i", tokenizer.next().loc.slice(str)); // try std.testing.expect(tokenizer.next().tag == .number); // try std.testing.expect(tokenizer.next().tag == .eof); // }
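// Hedged usage sketch (appended here; not part of the original file): feeds a
// small WGSL fragment through the tokenizer above. Tag names such as `.k_var`
// are the keyword tags defined in Token.zig (not shown in this file).
test "tokenize a simple declaration" {
    var tokenizer = Tokenizer.init("var x = 1;");
    try std.testing.expect(tokenizer.next().tag == .k_var);
    try std.testing.expect(tokenizer.next().tag == .ident);
    try std.testing.expect(tokenizer.next().tag == .equal);
    try std.testing.expect(tokenizer.next().tag == .number);
    try std.testing.expect(tokenizer.next().tag == .semicolon);
    try std.testing.expect(tokenizer.next().tag == .eof);
}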
0
repos/mach-sysgpu/src
repos/mach-sysgpu/src/shader/wgsl.zig
pub const Extensions = struct { f16: bool = false, };
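// Hedged note (appended here; not part of the original file): Parser.zig sets
// `extensions.f16 = true` when it parses an `enable f16;` directive, and
// rejects the `f16` type specifier while the flag is false.
test "f16 extension is off by default" {
    try @import("std").testing.expect(!(Extensions{}).f16);
}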
0
repos/mach-sysgpu/src
repos/mach-sysgpu/src/shader/ErrorList.zig
const std = @import("std"); const Token = @import("Token.zig"); pub const ErrorList = @This(); pub const ErrorMsg = struct { loc: Token.Loc, msg: []const u8, note: ?Note = null, pub const Note = struct { loc: ?Token.Loc = null, msg: []const u8, }; }; arena: std.heap.ArenaAllocator, list: std.ArrayListUnmanaged(ErrorMsg) = .{}, pub fn init(allocator: std.mem.Allocator) !ErrorList { return .{ .arena = std.heap.ArenaAllocator.init(allocator), }; } pub fn deinit(self: *ErrorList) void { self.arena.deinit(); self.* = undefined; } pub fn add( self: *ErrorList, loc: Token.Loc, comptime format: []const u8, args: anytype, note: ?ErrorMsg.Note, ) !void { const err_msg = .{ .loc = loc, .msg = try std.fmt.allocPrint(self.arena.allocator(), comptime format, args), .note = note, }; try self.list.append(self.arena.allocator(), err_msg); } pub fn createNote( self: *ErrorList, loc: ?Token.Loc, comptime format: []const u8, args: anytype, ) !ErrorMsg.Note { return .{ .loc = loc, .msg = try std.fmt.allocPrint(self.arena.allocator(), comptime format, args), }; } pub fn print(self: ErrorList, source: []const u8, file_path: ?[]const u8) !void { const stderr = std.io.getStdErr(); var bw = std.io.bufferedWriter(stderr.writer()); const b = bw.writer(); const term = std.io.tty.detectConfig(stderr); for (self.list.items) |*err| { const loc_extra = err.loc.extraInfo(source); // 'file:line:column error: MSG' try term.setColor(b, .bold); try b.print("{?s}:{d}:{d} ", .{ file_path, loc_extra.line, loc_extra.col }); try term.setColor(b, .bright_red); try b.writeAll("error: "); try term.setColor(b, .reset); try term.setColor(b, .bold); try b.writeAll(err.msg); try b.writeByte('\n'); try printCode(b, term, source, err.loc); // note if (err.note) |note| { if (note.loc) |note_loc| { const note_loc_extra = note_loc.extraInfo(source); try term.setColor(b, .reset); try term.setColor(b, .bold); try b.print("{?s}:{d}:{d} ", .{ file_path, note_loc_extra.line, note_loc_extra.col }); } try term.setColor(b, .cyan); try b.writeAll("note: "); try term.setColor(b, .reset); try term.setColor(b, .bold); try b.writeAll(note.msg); try b.writeByte('\n'); if (note.loc) |note_loc| { try printCode(b, term, source, note_loc); } } try term.setColor(b, .reset); } try bw.flush(); } fn printCode(writer: anytype, term: std.io.tty.Config, source: []const u8, loc: Token.Loc) !void { const loc_extra = loc.extraInfo(source); try term.setColor(writer, .dim); try writer.print("{d} │ ", .{loc_extra.line}); try term.setColor(writer, .reset); try writer.writeAll(source[loc_extra.line_start..loc.start]); try term.setColor(writer, .green); try writer.writeAll(source[loc.start..loc.end]); try term.setColor(writer, .reset); try writer.writeAll(source[loc.end..loc_extra.line_end]); try writer.writeByte('\n'); // location pointer const line_number_len = (std.math.log10(loc_extra.line) + 1) + 3; try writer.writeByteNTimes( ' ', line_number_len + (loc_extra.col - 1), ); try term.setColor(writer, .bold); try term.setColor(writer, .green); try writer.writeByte('^'); try writer.writeByteNTimes('~', loc.end - loc.start - 1); try writer.writeByte('\n'); }
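// Hedged usage sketch (appended here; not part of the original file): records
// one diagnostic and renders it to stderr. Only `init`, `add`, `print`, and
// `deinit` from this file are used; the `Token.Loc` value is invented for
// illustration and must point inside `source`.
fn exampleReport(allocator: std.mem.Allocator, source: []const u8) !void {
    var errors = try ErrorList.init(allocator);
    defer errors.deinit();
    try errors.add(
        .{ .start = 0, .end = 3 },
        "expected type specifier, found '{s}'",
        .{"var"},
        null,
    );
    try errors.print(source, "shader.wgsl");
}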
0
repos/mach-sysgpu/src
repos/mach-sysgpu/src/shader/Parser.zig
//! Based on cb76461a088a2b554f0248e7cf94d5a12b77e28e const std = @import("std"); const Ast = @import("Ast.zig"); const Token = @import("Token.zig"); const Extensions = @import("wgsl.zig").Extensions; const ErrorList = @import("ErrorList.zig"); const Node = Ast.Node; const NodeIndex = Ast.NodeIndex; const TokenIndex = Ast.TokenIndex; const fieldNames = std.meta.fieldNames; const Parser = @This(); allocator: std.mem.Allocator, source: []const u8, tok_i: TokenIndex = @enumFromInt(0), tokens: std.MultiArrayList(Token), nodes: std.MultiArrayList(Node) = .{}, extra: std.ArrayListUnmanaged(u32) = .{}, scratch: std.ArrayListUnmanaged(NodeIndex) = .{}, extensions: Extensions = .{}, errors: *ErrorList, pub fn translationUnit(p: *Parser) !void { p.parameterizeTemplates() catch |err| switch (err) { error.Parsing => return, error.OutOfMemory => return error.OutOfMemory, }; const root = try p.addNode(.{ .tag = .span, .main_token = undefined }); while (try p.globalDirectiveRecoverable()) {} while (p.peekToken(.tag, 0) != .eof) { const decl = try p.expectGlobalDeclRecoverable() orelse continue; try p.scratch.append(p.allocator, decl); } if (p.errors.list.items.len > 0) return error.Parsing; try p.extra.appendSlice(p.allocator, @ptrCast(p.scratch.items)); p.nodes.items(.lhs)[@intFromEnum(root)] = @enumFromInt(p.extra.items.len - p.scratch.items.len); p.nodes.items(.rhs)[@intFromEnum(root)] = @enumFromInt(p.extra.items.len); } // Based on https://gpuweb.github.io/gpuweb/wgsl/#template-lists-sec fn parameterizeTemplates(p: *Parser) !void { const UnclosedCandidate = struct { token_tag: *Token.Tag, depth: u32, }; var discovered_tmpls = std.BoundedArray(UnclosedCandidate, 16).init(0) catch unreachable; var depth: u32 = 0; var i: u32 = 0; while (i < p.tokens.len) : (i += 1) { switch (p.tokens.items(.tag)[i]) { .ident, .k_var, .k_bitcast, .k_array, .k_atomic, .k_ptr, .k_vec2, .k_vec3, .k_vec4, .k_mat2x2, .k_mat2x3, .k_mat2x4, .k_mat3x2, .k_mat3x3, .k_mat3x4, .k_mat4x2, .k_mat4x3, .k_mat4x4, .k_texture_1d, .k_texture_2d, .k_texture_2d_array, .k_texture_3d, .k_texture_cube, .k_texture_cube_array, .k_texture_storage_1d, .k_texture_storage_2d, .k_texture_storage_2d_array, .k_texture_storage_3d, => if (p.tokens.items(.tag)[i + 1] == .angle_bracket_left) { discovered_tmpls.append(.{ .token_tag = &p.tokens.items(.tag)[i + 1], .depth = depth, }) catch { try p.errors.add(p.tokens.items(.loc)[i + 1], "too deep template", .{}, null); return error.Parsing; }; i += 1; }, .angle_bracket_right => { if (discovered_tmpls.len > 0 and discovered_tmpls.get(discovered_tmpls.len - 1).depth == depth) { discovered_tmpls.pop().token_tag.* = .template_left; p.tokens.items(.tag)[i] = .template_right; } }, .angle_bracket_angle_bracket_right => { if (discovered_tmpls.len > 0 and discovered_tmpls.get(discovered_tmpls.len - 1).depth == depth) { discovered_tmpls.pop().token_tag.* = .template_left; discovered_tmpls.pop().token_tag.* = .template_left; p.tokens.items(.tag)[i] = .template_right; try p.tokens.insert(p.allocator, i, Token{ .tag = .template_right, .loc = .{ .start = p.tokens.items(.loc)[i].start + 1, .end = p.tokens.items(.loc)[i].end + 1, }, }); } }, .paren_left, .bracket_left => { depth += 1; }, .paren_right, .bracket_right => { while (discovered_tmpls.len > 0 and discovered_tmpls.get(discovered_tmpls.len - 1).depth == depth) { _ = discovered_tmpls.pop(); } if (depth > 0) { depth -= 1; } }, .semicolon, .colon, .brace_left => { depth = 0; discovered_tmpls.resize(0) catch unreachable; }, .pipe_pipe, .ampersand_ampersand => { while 
(discovered_tmpls.len > 0 and discovered_tmpls.get(discovered_tmpls.len - 1).depth == depth) { _ = discovered_tmpls.pop(); } }, else => {}, } } } fn globalDirectiveRecoverable(p: *Parser) !bool { return p.globalDirective() catch |err| switch (err) { error.Parsing => { p.findNextGlobalDirective(); return false; }, error.OutOfMemory => error.OutOfMemory, }; } fn globalDirective(p: *Parser) !bool { _ = p.eatToken(.k_enable) orelse return false; const ext_token = try p.expectToken(.ident); const directive = p.getToken(.loc, ext_token).slice(p.source); if (std.mem.eql(u8, directive, "f16")) { p.extensions.f16 = true; } else { try p.errors.add(p.getToken(.loc, ext_token), "invalid extension", .{}, null); return error.Parsing; } return true; } fn expectGlobalDeclRecoverable(p: *Parser) !?NodeIndex { return p.expectGlobalDecl() catch |err| switch (err) { error.Parsing => { p.findNextGlobalDecl(); return null; }, error.OutOfMemory => error.OutOfMemory, }; } fn expectGlobalDecl(p: *Parser) !NodeIndex { while (p.eatToken(.semicolon)) |_| {} const attrs = try p.attributeList(); if (try p.structDecl() orelse try p.fnDecl(attrs)) |node| { return node; } if (try p.constDecl() orelse try p.typeAliasDecl() orelse try p.constAssert() orelse try p.globalVar(attrs) orelse try p.globalOverrideDecl(attrs)) |node| { _ = try p.expectToken(.semicolon); return node; } try p.errors.add( p.peekToken(.loc, 0), "expected global declaration, found '{s}'", .{p.peekToken(.tag, 0).symbol()}, null, ); return error.Parsing; } fn attributeList(p: *Parser) !?NodeIndex { const scratch_top = p.scratch.items.len; defer p.scratch.shrinkRetainingCapacity(scratch_top); while (true) { const attr = try p.attribute() orelse break; try p.scratch.append(p.allocator, attr); } const attrs = p.scratch.items[scratch_top..]; if (attrs.len == 0) return null; return try p.listToSpan(attrs); } fn attribute(p: *Parser) !?NodeIndex { const attr_token = p.eatToken(.attr) orelse return null; const ident_token = try p.expectToken(.ident); const str = p.getToken(.loc, ident_token).slice(p.source); const tag = std.meta.stringToEnum(Ast.Attribute, str) orelse { try p.errors.add( p.getToken(.loc, ident_token), "unknown attribute '{s}'", .{p.getToken(.loc, ident_token).slice(p.source)}, null, ); return error.Parsing; }; var node = Node{ .tag = undefined, .main_token = attr_token, }; switch (tag) { .invariant => node.tag = .attr_invariant, .@"const" => node.tag = .attr_const, .must_use => node.tag = .attr_must_use, .vertex => node.tag = .attr_vertex, .fragment => node.tag = .attr_fragment, .compute => node.tag = .attr_compute, .@"align" => { _ = try p.expectToken(.paren_left); node.tag = .attr_align; node.lhs = try p.expression() orelse { try p.errors.add( p.peekToken(.loc, 0), "expected expression, but found '{s}'", .{p.peekToken(.tag, 0).symbol()}, null, ); return error.Parsing; }; _ = p.eatToken(.comma); _ = try p.expectToken(.paren_right); }, .binding => { _ = try p.expectToken(.paren_left); node.tag = .attr_binding; node.lhs = try p.expression() orelse { try p.errors.add( p.peekToken(.loc, 0), "expected expression, but found '{s}'", .{p.peekToken(.tag, 0).symbol()}, null, ); return error.Parsing; }; _ = p.eatToken(.comma); _ = try p.expectToken(.paren_right); }, .group => { _ = try p.expectToken(.paren_left); node.tag = .attr_group; node.lhs = try p.expression() orelse { try p.errors.add( p.peekToken(.loc, 0), "expected expression, but found '{s}'", .{p.peekToken(.tag, 0).symbol()}, null, ); return error.Parsing; }; _ = p.eatToken(.comma); _ = try 
p.expectToken(.paren_right); }, .id => { _ = try p.expectToken(.paren_left); node.tag = .attr_id; node.lhs = try p.expression() orelse { try p.errors.add( p.peekToken(.loc, 0), "expected expression, but found '{s}'", .{p.peekToken(.tag, 0).symbol()}, null, ); return error.Parsing; }; _ = p.eatToken(.comma); _ = try p.expectToken(.paren_right); }, .location => { _ = try p.expectToken(.paren_left); node.tag = .attr_location; node.lhs = try p.expression() orelse { try p.errors.add( p.peekToken(.loc, 0), "expected expression, but found '{s}'", .{p.peekToken(.tag, 0).symbol()}, null, ); return error.Parsing; }; _ = p.eatToken(.comma); _ = try p.expectToken(.paren_right); }, .size => { _ = try p.expectToken(.paren_left); node.tag = .attr_size; node.lhs = try p.expression() orelse { try p.errors.add( p.peekToken(.loc, 0), "expected expression, but found '{s}'", .{p.peekToken(.tag, 0).symbol()}, null, ); return error.Parsing; }; _ = p.eatToken(.comma); _ = try p.expectToken(.paren_right); }, .builtin => { _ = try p.expectToken(.paren_left); node.tag = .attr_builtin; node.lhs = (try p.expectBuiltin()).asNodeIndex(); _ = p.eatToken(.comma); _ = try p.expectToken(.paren_right); }, .workgroup_size => { _ = try p.expectToken(.paren_left); node.tag = .attr_workgroup_size; var workgroup_size = Node.WorkgroupSize{ .x = try p.expression() orelse { try p.errors.add(p.peekToken(.loc, 0), "expected workgroup_size x parameter", .{}, null); return error.Parsing; }, }; if (p.eatToken(.comma) != null and p.peekToken(.tag, 0) != .paren_right) { workgroup_size.y = try p.expression() orelse { try p.errors.add(p.peekToken(.loc, 0), "expected workgroup_size y parameter", .{}, null); return error.Parsing; }; if (p.eatToken(.comma) != null and p.peekToken(.tag, 0) != .paren_right) { workgroup_size.z = try p.expression() orelse { try p.errors.add(p.peekToken(.loc, 0), "expected workgroup_size z parameter", .{}, null); return error.Parsing; }; _ = p.eatToken(.comma); } } _ = try p.expectToken(.paren_right); node.lhs = try p.addExtra(workgroup_size); }, .interpolate => { _ = try p.expectToken(.paren_left); node.tag = .attr_interpolate; node.lhs = (try p.expectInterpolationType()).asNodeIndex(); if (p.eatToken(.comma) != null and p.peekToken(.tag, 0) != .paren_right) { node.rhs = (try p.expectInterpolationSample()).asNodeIndex(); _ = p.eatToken(.comma); } _ = try p.expectToken(.paren_right); }, } return try p.addNode(node); } fn expectBuiltin(p: *Parser) !TokenIndex { const token = p.advanceToken(); if (p.getToken(.tag, token) == .ident) { const str = p.getToken(.loc, token).slice(p.source); if (std.meta.stringToEnum(Ast.Builtin, str)) |_| return token; } try p.errors.add( p.getToken(.loc, token), "unknown builtin value name '{s}'", .{p.getToken(.loc, token).slice(p.source)}, null, ); return error.Parsing; } fn expectInterpolationType(p: *Parser) !TokenIndex { const token = p.advanceToken(); if (p.getToken(.tag, token) == .ident) { const str = p.getToken(.loc, token).slice(p.source); if (std.meta.stringToEnum(Ast.InterpolationType, str)) |_| return token; } try p.errors.add( p.getToken(.loc, token), "unknown interpolation type name '{s}'", .{p.getToken(.loc, token).slice(p.source)}, null, ); return error.Parsing; } fn expectInterpolationSample(p: *Parser) !TokenIndex { const token = p.advanceToken(); if (p.getToken(.tag, token) == .ident) { const str = p.getToken(.loc, token).slice(p.source); if (std.meta.stringToEnum(Ast.InterpolationSample, str)) |_| return token; } try p.errors.add( p.getToken(.loc, token), "unknown 
interpolation sample name '{s}'", .{p.getToken(.loc, token).slice(p.source)}, null, ); return error.Parsing; } fn globalVar(p: *Parser, attrs: ?NodeIndex) !?NodeIndex { const var_token = p.eatToken(.k_var) orelse return null; // qualifier var addr_space = TokenIndex.none; var access_mode = TokenIndex.none; if (p.eatToken(.template_left)) |_| { addr_space = try p.expectAddressSpace(); if (p.eatToken(.comma)) |_| access_mode = try p.expectAccessMode(); _ = try p.expectToken(.template_right); } // name, type const name_token = try p.expectToken(.ident); var var_type = NodeIndex.none; if (p.eatToken(.colon)) |_| { var_type = try p.expectTypeSpecifier(); } var initializer = NodeIndex.none; if (p.eatToken(.equal)) |_| { initializer = try p.expression() orelse { try p.errors.add( p.peekToken(.loc, 0), "expected initializer expression, found '{s}'", .{p.peekToken(.tag, 0).symbol()}, null, ); return error.Parsing; }; } if (initializer == .none and var_type == .none) { try p.errors.add( p.getToken(.loc, var_token), "initializer expression is required while type is unknown", .{}, null, ); return error.Parsing; } const extra = try p.addExtra(Node.GlobalVar{ .attrs = attrs orelse .none, .name = name_token, .addr_space = addr_space, .access_mode = access_mode, .type = var_type, }); return try p.addNode(.{ .tag = .global_var, .main_token = var_token, .lhs = extra, .rhs = initializer, }); } fn globalOverrideDecl(p: *Parser, attrs: ?NodeIndex) !?NodeIndex { const override_token = p.eatToken(.k_override) orelse return null; // name, type _ = try p.expectToken(.ident); var override_type = NodeIndex.none; if (p.eatToken(.colon)) |_| { override_type = try p.expectTypeSpecifier(); } var initializer = NodeIndex.none; if (p.eatToken(.equal)) |_| { initializer = try p.expression() orelse { try p.errors.add( p.peekToken(.loc, 0), "expected initializer expression, found '{s}'", .{p.peekToken(.tag, 0).symbol()}, null, ); return error.Parsing; }; } const extra = try p.addExtra(Node.Override{ .attrs = attrs orelse .none, .type = override_type, }); return try p.addNode(.{ .tag = .override, .main_token = override_token, .lhs = extra, .rhs = initializer, }); } fn typeAliasDecl(p: *Parser) !?NodeIndex { const type_token = p.eatToken(.k_type) orelse return null; _ = try p.expectToken(.ident); _ = try p.expectToken(.equal); const value = try p.expectTypeSpecifier(); return try p.addNode(.{ .tag = .type_alias, .main_token = type_token, .lhs = value, }); } fn structDecl(p: *Parser) !?NodeIndex { const main_token = p.eatToken(.k_struct) orelse return null; const name_token = try p.expectToken(.ident); _ = try p.expectToken(.brace_left); const scratch_top = p.scratch.items.len; defer p.scratch.shrinkRetainingCapacity(scratch_top); while (true) { const attrs = try p.attributeList(); const member = try p.structMember(attrs) orelse { if (attrs != null) { try p.errors.add( p.peekToken(.loc, 0), "expected struct member, found '{s}'", .{p.peekToken(.tag, 0).symbol()}, null, ); return error.Parsing; } break; }; try p.scratch.append(p.allocator, member); _ = p.eatToken(.comma); } _ = try p.expectToken(.brace_right); const members = p.scratch.items[scratch_top..]; if (members.len == 0) { try p.errors.add( p.getToken(.loc, name_token), "struct '{s}' has no member", .{p.getToken(.loc, name_token).slice(p.source)}, null, ); return error.Parsing; } return try p.addNode(.{ .tag = .@"struct", .main_token = main_token, .lhs = try p.listToSpan(members), }); } fn structMember(p: *Parser, attrs: ?NodeIndex) !?NodeIndex { const name_token = 
p.eatToken(.ident) orelse return null; _ = try p.expectToken(.colon); const member_type = try p.expectTypeSpecifier(); return try p.addNode(.{ .tag = .struct_member, .main_token = name_token, .lhs = attrs orelse .none, .rhs = member_type, }); } fn constAssert(p: *Parser) !?NodeIndex { const main_token = p.eatToken(.k_const_assert) orelse return null; const expr = try p.expression() orelse { try p.errors.add( p.peekToken(.loc, 0), "expected expression, found '{s}'", .{p.peekToken(.tag, 0).symbol()}, null, ); return error.Parsing; }; return try p.addNode(.{ .tag = .const_assert, .main_token = main_token, .lhs = expr, }); } fn fnDecl(p: *Parser, attrs: ?NodeIndex) !?NodeIndex { const fn_token = p.eatToken(.k_fn) orelse return null; _ = try p.expectToken(.ident); _ = try p.expectToken(.paren_left); const params = try p.parameterList() orelse .none; _ = try p.expectToken(.paren_right); var return_attrs = NodeIndex.none; var return_type = NodeIndex.none; if (p.eatToken(.arrow)) |_| { return_attrs = try p.attributeList() orelse .none; return_type = try p.expectTypeSpecifier(); } const body = try p.block() orelse { try p.errors.add( p.peekToken(.loc, 0), "expected function body, found '{s}'", .{p.peekToken(.tag, 0).symbol()}, null, ); return error.Parsing; }; const fn_proto = try p.addExtra(Node.FnProto{ .attrs = attrs orelse .none, .params = params, .return_attrs = return_attrs, .return_type = return_type, }); return try p.addNode(.{ .tag = .@"fn", .main_token = fn_token, .lhs = fn_proto, .rhs = body, }); } fn parameterList(p: *Parser) !?NodeIndex { const scratch_top = p.scratch.items.len; defer p.scratch.shrinkRetainingCapacity(scratch_top); while (true) { const attrs = try p.attributeList(); const param = try p.parameter(attrs) orelse { if (attrs != null) { try p.errors.add( p.peekToken(.loc, 0), "expected function parameter, found '{s}'", .{p.peekToken(.tag, 0).symbol()}, null, ); return error.Parsing; } break; }; try p.scratch.append(p.allocator, param); if (p.eatToken(.comma) == null) break; } const list = p.scratch.items[scratch_top..]; if (list.len == 0) return null; return try p.listToSpan(list); } fn parameter(p: *Parser, attrs: ?NodeIndex) !?NodeIndex { const main_token = p.eatToken(.ident) orelse return null; _ = try p.expectToken(.colon); const param_type = try p.expectTypeSpecifier(); return try p.addNode(.{ .tag = .fn_param, .main_token = main_token, .lhs = attrs orelse .none, .rhs = param_type, }); } fn statementRecoverable(p: *Parser) !?NodeIndex { while (true) { return p.statement() catch |err| switch (err) { error.Parsing => { p.findNextStmt(); switch (p.peekToken(.tag, 0)) { .brace_right => return null, .eof => return error.Parsing, else => continue, } }, error.OutOfMemory => error.OutOfMemory, }; } } fn statement(p: *Parser) !?NodeIndex { while (p.eatToken(.semicolon)) |_| {} if (try p.breakStatement() orelse try p.breakIfStatement() orelse try p.callExpr() orelse try p.constAssert() orelse try p.continueStatement() orelse try p.discardStatement() orelse try p.returnStatement() orelse try p.varDecl() orelse try p.constDecl() orelse try p.letDecl() orelse try p.varUpdateStatement()) |node| { _ = try p.expectToken(.semicolon); return node; } if (try p.block() orelse try p.continuingStatement() orelse try p.forStatement() orelse try p.ifStatement() orelse try p.loopStatement() orelse try p.switchStatement() orelse try p.whileStatement()) |node| { return node; } return null; } fn expectBlock(p: *Parser) error{ OutOfMemory, Parsing }!NodeIndex { return try p.block() orelse { try 
p.errors.add( p.peekToken(.loc, 0), "expected block statement, found '{s}'", .{p.peekToken(.tag, 0).symbol()}, null, ); return error.Parsing; }; } fn block(p: *Parser) error{ OutOfMemory, Parsing }!?NodeIndex { const main_token = p.eatToken(.brace_left) orelse return null; const statements = try p.statementList() orelse .none; _ = try p.expectToken(.brace_right); return try p.addNode(.{ .tag = .block, .main_token = main_token, .lhs = statements, }); } fn statementList(p: *Parser) error{ OutOfMemory, Parsing }!?NodeIndex { const scratch_top = p.scratch.items.len; defer p.scratch.shrinkRetainingCapacity(scratch_top); while (true) { const stmt = try p.statement() orelse { if (p.peekToken(.tag, 0) == .brace_right) break; try p.errors.add( p.peekToken(.loc, 0), "expected statement, found '{s}'", .{p.peekToken(.tag, 0).symbol()}, null, ); return error.Parsing; }; try p.scratch.append(p.allocator, stmt); } const statements = p.scratch.items[scratch_top..]; if (statements.len == 0) return null; return try p.listToSpan(statements); } fn breakStatement(p: *Parser) !?NodeIndex { const main_token = p.eatToken(.k_break) orelse return null; return try p.addNode(.{ .tag = .@"break", .main_token = main_token }); } fn breakIfStatement(p: *Parser) !?NodeIndex { if (p.peekToken(.tag, 0) == .k_break and p.peekToken(.tag, 1) == .k_if) { const main_token = p.advanceToken(); _ = p.advanceToken(); const cond = try p.expression() orelse { try p.errors.add( p.peekToken(.loc, 0), "expected condition expression, found '{s}'", .{p.peekToken(.tag, 0).symbol()}, null, ); return error.Parsing; }; return try p.addNode(.{ .tag = .break_if, .main_token = main_token, .lhs = cond, }); } return null; } fn continueStatement(p: *Parser) !?NodeIndex { const main_token = p.eatToken(.k_continue) orelse return null; return try p.addNode(.{ .tag = .@"continue", .main_token = main_token }); } fn continuingStatement(p: *Parser) !?NodeIndex { const main_token = p.eatToken(.k_continuing) orelse return null; const body = try p.expectBlock(); return try p.addNode(.{ .tag = .continuing, .main_token = main_token, .lhs = body, }); } fn discardStatement(p: *Parser) !?NodeIndex { const main_token = p.eatToken(.k_discard) orelse return null; return try p.addNode(.{ .tag = .discard, .main_token = main_token }); } fn forStatement(p: *Parser) !?NodeIndex { const main_token = p.eatToken(.k_for) orelse return null; _ = try p.expectToken(.paren_left); // for init const for_init = try p.callExpr() orelse try p.varDecl() orelse try p.constDecl() orelse try p.letDecl() orelse try p.varUpdateStatement() orelse .none; _ = try p.expectToken(.semicolon); const for_cond = try p.expression() orelse .none; _ = try p.expectToken(.semicolon); // for update const for_update = try p.callExpr() orelse try p.varUpdateStatement() orelse .none; _ = try p.expectToken(.paren_right); const body = try p.expectBlock(); const extra = try p.addExtra(Node.ForHeader{ .init = for_init, .cond = for_cond, .update = for_update, }); return try p.addNode(.{ .tag = .@"for", .main_token = main_token, .lhs = extra, .rhs = body, }); } fn ifStatement(p: *Parser) !?NodeIndex { const if_token = p.eatToken(.k_if) orelse return null; const cond = try p.expression() orelse { try p.errors.add( p.peekToken(.loc, 0), "expected condition expression, found '{s}'", .{p.peekToken(.tag, 0).symbol()}, null, ); return error.Parsing; }; const body = try p.block() orelse { try p.errors.add( p.peekToken(.loc, 0), "expected if body block, found '{s}'", .{p.peekToken(.tag, 0).symbol()}, null, ); return 
error.Parsing; }; if (p.eatToken(.k_else)) |else_token| { const if_node = try p.addNode(.{ .tag = .@"if", .main_token = if_token, .lhs = cond, .rhs = body, }); if (p.peekToken(.tag, 0) == .k_if) { const else_if = try p.ifStatement() orelse unreachable; return try p.addNode(.{ .tag = .if_else_if, .main_token = else_token, .lhs = if_node, .rhs = else_if, }); } const else_body = try p.block() orelse { try p.errors.add( p.peekToken(.loc, 0), "expected else body block, found '{s}'", .{p.peekToken(.tag, 0).symbol()}, null, ); return error.Parsing; }; return try p.addNode(.{ .tag = .if_else, .main_token = else_token, .lhs = if_node, .rhs = else_body, }); } return try p.addNode(.{ .tag = .@"if", .main_token = if_token, .lhs = cond, .rhs = body, }); } fn loopStatement(p: *Parser) !?NodeIndex { const main_token = p.eatToken(.k_loop) orelse return null; const body = try p.expectBlock(); return try p.addNode(.{ .tag = .loop, .main_token = main_token, .lhs = body, }); } fn returnStatement(p: *Parser) !?NodeIndex { const main_token = p.eatToken(.k_return) orelse return null; const expr = try p.expression() orelse .none; return try p.addNode(.{ .tag = .@"return", .main_token = main_token, .lhs = expr, }); } fn switchStatement(p: *Parser) !?NodeIndex { const main_token = p.eatToken(.k_switch) orelse return null; const expr = try p.expression() orelse { try p.errors.add( p.peekToken(.loc, 0), "expected condition expression, found '{s}'", .{p.peekToken(.tag, 0).symbol()}, null, ); return error.Parsing; }; _ = try p.expectToken(.brace_left); const scratch_top = p.scratch.items.len; defer p.scratch.shrinkRetainingCapacity(scratch_top); while (true) { if (p.eatToken(.k_default)) |default_token| { _ = p.eatToken(.colon); const default_body = try p.expectBlock(); try p.scratch.append(p.allocator, try p.addNode(.{ .tag = .switch_default, .main_token = default_token, .rhs = default_body, })); } else if (p.eatToken(.k_case)) |case_token| { const cases_scratch_top = p.scratch.items.len; var has_default = false; while (true) { const case_expr = try p.expression() orelse { if (p.eatToken(.k_default)) |_| { has_default = true; continue; } break; }; _ = p.eatToken(.comma); try p.scratch.append(p.allocator, case_expr); } const case_expr_list = p.scratch.items[cases_scratch_top..]; _ = p.eatToken(.colon); const case_body = try p.expectBlock(); const case_node = try p.addNode(.{ .tag = if (has_default) .switch_case_default else .switch_case, .main_token = case_token, .lhs = if (case_expr_list.len == 0) .none else try p.listToSpan(case_expr_list), .rhs = case_body, }); p.scratch.shrinkRetainingCapacity(cases_scratch_top); try p.scratch.append(p.allocator, case_node); } else { break; } } _ = try p.expectToken(.brace_right); const case_list = p.scratch.items[scratch_top..]; return try p.addNode(.{ .tag = .@"switch", .main_token = main_token, .lhs = expr, .rhs = if (case_list.len == 0) .none else try p.listToSpan(case_list), }); } fn varDecl(p: *Parser) !?NodeIndex { const main_token = p.eatToken(.k_var) orelse return null; var addr_space = TokenIndex.none; var access_mode = TokenIndex.none; if (p.eatToken(.template_left)) |_| { addr_space = try p.expectAddressSpace(); if (p.eatToken(.comma)) |_| access_mode = try p.expectAccessMode(); _ = try p.expectToken(.template_right); } const name_token = try p.expectToken(.ident); var var_type = NodeIndex.none; if (p.eatToken(.colon)) |_| { var_type = try p.expectTypeSpecifier(); } var initializer = NodeIndex.none; if (p.eatToken(.equal)) |_| { initializer = try p.expression() orelse { try p.errors.add( p.peekToken(.loc, 0),
"expected initializer expression, found '{s}'", .{p.peekToken(.tag, 0).symbol()}, null, ); return error.Parsing; }; } const extra = try p.addExtra(Node.Var{ .name = name_token, .addr_space = addr_space, .access_mode = access_mode, .type = var_type, }); return try p.addNode(.{ .tag = .@"var", .main_token = main_token, .lhs = extra, .rhs = initializer, }); } fn constDecl(p: *Parser) !?NodeIndex { const const_token = p.eatToken(.k_const) orelse return null; _ = try p.expectToken(.ident); var const_type = NodeIndex.none; if (p.eatToken(.colon)) |_| { const_type = try p.expectTypeSpecifier(); } _ = try p.expectToken(.equal); const initializer = try p.expression() orelse { try p.errors.add( p.peekToken(.loc, 0), "expected initializer expression, found '{s}'", .{p.peekToken(.tag, 0).symbol()}, null, ); return error.Parsing; }; return try p.addNode(.{ .tag = .@"const", .main_token = const_token, .lhs = const_type, .rhs = initializer, }); } fn letDecl(p: *Parser) !?NodeIndex { const const_token = p.eatToken(.k_let) orelse return null; _ = try p.expectToken(.ident); var const_type = NodeIndex.none; if (p.eatToken(.colon)) |_| { const_type = try p.expectTypeSpecifier(); } _ = try p.expectToken(.equal); const initializer = try p.expression() orelse { try p.errors.add( p.peekToken(.loc, 0), "expected initializer expression, found '{s}'", .{p.peekToken(.tag, 0).symbol()}, null, ); return error.Parsing; }; return try p.addNode(.{ .tag = .let, .main_token = const_token, .lhs = const_type, .rhs = initializer, }); } fn varUpdateStatement(p: *Parser) !?NodeIndex { if (p.eatToken(.underscore)) |_| { const equal_token = try p.expectToken(.equal); const expr = try p.expression() orelse { try p.errors.add( p.peekToken(.loc, 0), "expected expression, found '{s}'", .{p.peekToken(.tag, 0).symbol()}, null, ); return error.Parsing; }; return try p.addNode(.{ .tag = .phony_assign, .main_token = equal_token, .lhs = expr, }); } else if (try p.lhsExpression()) |lhs| { const op_token = p.advanceToken(); switch (p.getToken(.tag, op_token)) { .plus_plus => { return try p.addNode(.{ .tag = .increase, .main_token = op_token, .lhs = lhs, }); }, .minus_minus => { return try p.addNode(.{ .tag = .decrease, .main_token = op_token, .lhs = lhs, }); }, .equal, .plus_equal, .minus_equal, .asterisk_equal, .slash_equal, .percent_equal, .ampersand_equal, .pipe_equal, .xor_equal, .angle_bracket_angle_bracket_left_equal, .angle_bracket_angle_bracket_right_equal, => { const expr = try p.expression() orelse { try p.errors.add( p.peekToken(.loc, 0), "expected expression, found '{s}'", .{p.peekToken(.tag, 0).symbol()}, null, ); return error.Parsing; }; return try p.addNode(.{ .tag = .compound_assign, .main_token = op_token, .lhs = lhs, .rhs = expr, }); }, else => { try p.errors.add( p.getToken(.loc, op_token), "invalid assignment operator '{s}'", .{p.getToken(.tag, op_token).symbol()}, null, ); return error.Parsing; }, } } return null; } fn whileStatement(p: *Parser) !?NodeIndex { const main_token = p.eatToken(.k_while) orelse return null; const cond = try p.expression() orelse { try p.errors.add( p.peekToken(.loc, 0), "expected condition expression, found '{s}'", .{p.peekToken(.tag, 0).symbol()}, null, ); return error.Parsing; }; const body = try p.expectBlock(); return try p.addNode(.{ .tag = .@"while", .main_token = main_token, .lhs = cond, .rhs = body, }); } fn expectTypeSpecifier(p: *Parser) error{ OutOfMemory, Parsing }!NodeIndex { return try p.typeSpecifier() orelse { try p.errors.add( p.peekToken(.loc, 0), "expected type specifier, 
found '{s}'", .{p.peekToken(.tag, 0).symbol()}, null, ); return error.Parsing; }; } fn typeSpecifier(p: *Parser) !?NodeIndex { if (p.peekToken(.tag, 0) == .ident) { const main_token = p.advanceToken(); return try p.addNode(.{ .tag = .ident, .main_token = main_token }); } return p.typeSpecifierWithoutIdent(); } fn typeSpecifierWithoutIdent(p: *Parser) !?NodeIndex { const main_token = p.advanceToken(); switch (p.getToken(.tag, main_token)) { .k_bool => return try p.addNode(.{ .tag = .bool_type, .main_token = main_token }), .k_i32, .k_u32, .k_f32, => return try p.addNode(.{ .tag = .number_type, .main_token = main_token }), .k_f16 => { if (p.extensions.f16) { return try p.addNode(.{ .tag = .number_type, .main_token = main_token }); } try p.errors.add(p.getToken(.loc, main_token), "f16 extension required", .{}, null); return error.Parsing; }, .k_vec2, .k_vec3, .k_vec4 => { var elem_type = NodeIndex.none; if (p.eatToken(.template_left)) |_| { elem_type = try p.expectTypeSpecifier(); _ = try p.expectToken(.template_right); } return try p.addNode(.{ .tag = .vector_type, .main_token = main_token, .lhs = elem_type, }); }, .k_mat2x2, .k_mat2x3, .k_mat2x4, .k_mat3x2, .k_mat3x3, .k_mat3x4, .k_mat4x2, .k_mat4x3, .k_mat4x4, => { var elem_type = NodeIndex.none; if (p.eatToken(.template_left)) |_| { elem_type = try p.expectTypeSpecifier(); _ = try p.expectToken(.template_right); } return try p.addNode(.{ .tag = .matrix_type, .main_token = main_token, .lhs = elem_type, }); }, .k_sampler, .k_sampler_comparison => { return try p.addNode(.{ .tag = .sampler_type, .main_token = main_token }); }, .k_atomic => { _ = try p.expectToken(.template_left); const elem_type = try p.expectTypeSpecifier(); _ = try p.expectToken(.template_right); return try p.addNode(.{ .tag = .atomic_type, .main_token = main_token, .lhs = elem_type, }); }, .k_array => { var elem_type = NodeIndex.none; var size = NodeIndex.none; if (p.eatToken(.template_left)) |_| { elem_type = try p.expectTypeSpecifier(); if (p.eatToken(.comma)) |_| { size = try p.elementCountExpr() orelse { try p.errors.add( p.peekToken(.loc, 0), "expected array size expression, found '{s}'", .{p.peekToken(.tag, 0).symbol()}, null, ); return error.Parsing; }; } _ = try p.expectToken(.template_right); } return try p.addNode(.{ .tag = .array_type, .main_token = main_token, .lhs = elem_type, .rhs = size, }); }, .k_ptr => { _ = try p.expectToken(.template_left); const addr_space = try p.expectAddressSpace(); _ = try p.expectToken(.comma); const elem_type = try p.expectTypeSpecifier(); _ = try p.expectToken(.comma); const access_mode = try p.expectAccessMode(); _ = try p.expectToken(.template_right); const extra = try p.addExtra(Node.PtrType{ .addr_space = addr_space, .access_mode = access_mode, }); return try p.addNode(.{ .tag = .ptr_type, .main_token = main_token, .lhs = elem_type, .rhs = extra, }); }, .k_texture_1d, .k_texture_2d, .k_texture_2d_array, .k_texture_3d, .k_texture_cube, .k_texture_cube_array, => { _ = try p.expectToken(.template_left); const elem_type = try p.expectTypeSpecifier(); _ = try p.expectToken(.template_right); return try p.addNode(.{ .tag = .sampled_texture_type, .main_token = main_token, .lhs = elem_type, }); }, .k_texture_multisampled_2d => { _ = try p.expectToken(.template_left); const elem_type = try p.expectTypeSpecifier(); _ = try p.expectToken(.template_right); return try p.addNode(.{ .tag = .multisampled_texture_type, .main_token = main_token, .lhs = elem_type, }); }, .k_texture_depth_multisampled_2d => { return try p.addNode(.{ .tag = 
.multisampled_texture_type, .main_token = main_token, }); }, .k_texture_external => { return try p.addNode(.{ .tag = .external_texture_type, .main_token = main_token, }); }, .k_texture_depth_2d, .k_texture_depth_2d_array, .k_texture_depth_cube, .k_texture_depth_cube_array, => { return try p.addNode(.{ .tag = .depth_texture_type, .main_token = main_token, }); }, .k_texture_storage_1d, .k_texture_storage_2d, .k_texture_storage_2d_array, .k_texture_storage_3d, => { _ = try p.expectToken(.template_left); const texel_format = try p.expectTexelFormat(); _ = try p.expectToken(.comma); const access_mode = try p.expectAccessMode(); _ = try p.expectToken(.template_right); return try p.addNode(.{ .tag = .storage_texture_type, .main_token = main_token, .lhs = texel_format.asNodeIndex(), .rhs = access_mode.asNodeIndex(), }); }, else => return null, } } fn expectAddressSpace(p: *Parser) !TokenIndex { const token = p.advanceToken(); if (p.getToken(.tag, token) == .ident) { const str = p.getToken(.loc, token).slice(p.source); if (std.meta.stringToEnum(Ast.AddressSpace, str)) |_| { return token; } } try p.errors.add( p.getToken(.loc, token), "unknown address space '{s}'", .{p.getToken(.loc, token).slice(p.source)}, null, ); return error.Parsing; } fn expectAccessMode(p: *Parser) !TokenIndex { const token = p.advanceToken(); if (p.getToken(.tag, token) == .ident) { const str = p.getToken(.loc, token).slice(p.source); if (std.meta.stringToEnum(Ast.AccessMode, str)) |_| { return token; } } try p.errors.add( p.getToken(.loc, token), "unknown access mode '{s}'", .{p.getToken(.loc, token).slice(p.source)}, null, ); return error.Parsing; } fn expectTexelFormat(p: *Parser) !TokenIndex { const token = p.advanceToken(); if (p.getToken(.tag, token) == .ident) { const str = p.getToken(.loc, token).slice(p.source); if (std.meta.stringToEnum(Ast.TexelFormat, str)) |_| { return token; } } try p.errors.add( p.getToken(.loc, token), "unknown texel format '{s}'", .{p.getToken(.loc, token).slice(p.source)}, null, ); return error.Parsing; } fn expectParenExpr(p: *Parser) !NodeIndex { _ = try p.expectToken(.paren_left); const expr = try p.expression() orelse { try p.errors.add( p.peekToken(.loc, 0), "unable to parse expression '{s}'", .{p.peekToken(.tag, 0).symbol()}, null, ); return error.Parsing; }; _ = try p.expectToken(.paren_right); return expr; } fn callExpr(p: *Parser) !?NodeIndex { const main_token = p.tok_i; var rhs = NodeIndex.none; switch (p.peekToken(.tag, 0)) { // fn call or struct construct .ident => { if (p.peekToken(.tag, 1) == .paren_left) { _ = p.advanceToken(); } else { return null; } }, // construct .k_bool, .k_u32, .k_i32, .k_f32, .k_f16, .k_vec2, .k_vec3, .k_vec4, .k_mat2x2, .k_mat2x3, .k_mat2x4, .k_mat3x2, .k_mat3x3, .k_mat3x4, .k_mat4x2, .k_mat4x3, .k_mat4x4, .k_array, => { rhs = try p.typeSpecifierWithoutIdent() orelse return null; }, else => return null, } _ = try p.expectToken(.paren_left); const scratch_top = p.scratch.items.len; defer p.scratch.shrinkRetainingCapacity(scratch_top); while (true) { const expr = try p.expression() orelse break; try p.scratch.append(p.allocator, expr); if (p.eatToken(.comma) == null) break; } _ = try p.expectToken(.paren_right); const args = p.scratch.items[scratch_top..]; return try p.addNode(.{ .tag = .call, .main_token = main_token, .lhs = if (args.len == 0) .none else try p.listToSpan(args), .rhs = rhs, }); } fn expression(p: *Parser) !?NodeIndex { const lhs_unary = try p.unaryExpr() orelse return null; if (try p.bitwiseExpr(lhs_unary)) |bitwise| return bitwise;
const lhs = try p.expectRelationalExpr(lhs_unary); return try p.expectShortCircuitExpr(lhs); } fn lhsExpression(p: *Parser) !?NodeIndex { if (p.eatToken(.ident)) |ident_token| { return try p.componentOrSwizzleSpecifier( try p.addNode(.{ .tag = .ident, .main_token = ident_token }), ); } if (p.eatToken(.paren_left)) |_| { const expr = try p.lhsExpression() orelse { try p.errors.add( p.peekToken(.loc, 0), "expected lhs expression, found '{s}'", .{p.peekToken(.tag, 0).symbol()}, null, ); return error.Parsing; }; _ = try p.expectToken(.paren_right); return try p.componentOrSwizzleSpecifier(expr); } if (p.eatToken(.asterisk)) |star_token| { return try p.addNode(.{ .tag = .deref, .main_token = star_token, .lhs = try p.lhsExpression() orelse { try p.errors.add( p.peekToken(.loc, 0), "expected lhs expression, found '{s}'", .{p.peekToken(.tag, 0).symbol()}, null, ); return error.Parsing; }, }); } if (p.eatToken(.ampersand)) |addr_of_token| { return try p.addNode(.{ .tag = .addr_of, .main_token = addr_of_token, .lhs = try p.lhsExpression() orelse { try p.errors.add( p.peekToken(.loc, 0), "expected lhs expression, found '{s}'", .{p.peekToken(.tag, 0).symbol()}, null, ); return error.Parsing; }, }); } return null; } fn singularExpr(p: *Parser) !?NodeIndex { const prefix = try p.primaryExpr() orelse return null; return try p.componentOrSwizzleSpecifier(prefix); } fn primaryExpr(p: *Parser) !?NodeIndex { const main_token = p.tok_i; if (try p.callExpr()) |call| return call; switch (p.getToken(.tag, main_token)) { .k_true => { _ = p.advanceToken(); return try p.addNode(.{ .tag = .true, .main_token = main_token }); }, .k_false => { _ = p.advanceToken(); return try p.addNode(.{ .tag = .false, .main_token = main_token }); }, .number => { _ = p.advanceToken(); return try p.addNode(.{ .tag = .number, .main_token = main_token }); }, .k_bitcast => { _ = p.advanceToken(); _ = try p.expectToken(.template_left); const dest_type = try p.expectTypeSpecifier(); _ = try p.expectToken(.template_right); const expr = try p.expectParenExpr(); return try p.addNode(.{ .tag = .bitcast, .main_token = main_token, .lhs = dest_type, .rhs = expr, }); }, .paren_left => return try p.expectParenExpr(), .ident => { _ = p.advanceToken(); return try p.addNode(.{ .tag = .ident, .main_token = main_token }); }, else => { return null; }, } } fn elementCountExpr(p: *Parser) !?NodeIndex { const left = try p.unaryExpr() orelse return null; if (try p.bitwiseExpr(left)) |right| return right; return try p.expectMathExpr(left); } fn unaryExpr(p: *Parser) error{ OutOfMemory, Parsing }!?NodeIndex { const op_token = p.tok_i; const op: Node.Tag = switch (p.getToken(.tag, op_token)) { .bang, .tilde => .not, .minus => .negate, .asterisk => .deref, .ampersand => .addr_of, else => return p.singularExpr(), }; _ = p.advanceToken(); const expr = try p.unaryExpr() orelse { try p.errors.add( p.peekToken(.loc, 0), "unable to parse right side of '{s}' expression", .{p.getToken(.tag, op_token).symbol()}, null, ); return error.Parsing; }; return try p.addNode(.{ .tag = op, .main_token = op_token, .lhs = expr, }); } fn expectRelationalExpr(p: *Parser, lhs_unary: NodeIndex) !NodeIndex { const lhs = try p.expectShiftExpr(lhs_unary); const op_token = p.tok_i; const op: Node.Tag = switch (p.getToken(.tag, op_token)) { .equal_equal => .equal, .bang_equal => .not_equal, .angle_bracket_right => .greater_than, .angle_bracket_right_equal => .greater_than_equal, .angle_bracket_left => .less_than, .angle_bracket_left_equal => .less_than_equal, else => return lhs, }; _ = 
p.advanceToken(); const rhs_unary = try p.unaryExpr() orelse { try p.errors.add( p.peekToken(.loc, 0), "unable to parse right side of '{s}' expression", .{p.getToken(.tag, op_token).symbol()}, null, ); return error.Parsing; }; const rhs = try p.expectShiftExpr(rhs_unary); return try p.addNode(.{ .tag = op, .main_token = op_token, .lhs = lhs, .rhs = rhs, }); } fn expectShortCircuitExpr(p: *Parser, lhs_relational: NodeIndex) !NodeIndex { var lhs = lhs_relational; const op_token = p.tok_i; const op: Node.Tag = switch (p.getToken(.tag, op_token)) { .ampersand_ampersand => .logical_and, .pipe_pipe => .logical_or, else => return lhs, }; while (p.peekToken(.tag, 0) == p.getToken(.tag, op_token)) { _ = p.advanceToken(); const rhs_unary = try p.unaryExpr() orelse { try p.errors.add( p.peekToken(.loc, 0), "unable to parse right side of '{s}' expression", .{p.getToken(.tag, op_token).symbol()}, null, ); return error.Parsing; }; const rhs = try p.expectRelationalExpr(rhs_unary); lhs = try p.addNode(.{ .tag = op, .main_token = op_token, .lhs = lhs, .rhs = rhs, }); } return lhs; } fn bitwiseExpr(p: *Parser, lhs: NodeIndex) !?NodeIndex { const op_token = p.tok_i; const op: Node.Tag = switch (p.getToken(.tag, op_token)) { .ampersand => .@"and", .pipe => .@"or", .xor => .xor, else => return null, }; _ = p.advanceToken(); var lhs_result = lhs; while (true) { const rhs = try p.unaryExpr() orelse { try p.errors.add( p.peekToken(.loc, 0), "unable to parse right side of '{s}' expression", .{p.getToken(.tag, op_token).symbol()}, null, ); return error.Parsing; }; lhs_result = try p.addNode(.{ .tag = op, .main_token = op_token, .lhs = lhs_result, .rhs = rhs, }); if (p.peekToken(.tag, 0) != p.getToken(.tag, op_token)) return lhs_result; } } fn expectShiftExpr(p: *Parser, lhs: NodeIndex) !NodeIndex { const op_token = p.tok_i; const op: Node.Tag = switch (p.getToken(.tag, op_token)) { .angle_bracket_angle_bracket_left => .shl, .angle_bracket_angle_bracket_right => .shr, else => return try p.expectMathExpr(lhs), }; _ = p.advanceToken(); const rhs = try p.unaryExpr() orelse { try p.errors.add( p.peekToken(.loc, 0), "unable to parse right side of '{s}' expression", .{p.getToken(.tag, op_token).symbol()}, null, ); return error.Parsing; }; return try p.addNode(.{ .tag = op, .main_token = op_token, .lhs = lhs, .rhs = rhs, }); } fn expectMathExpr(p: *Parser, left: NodeIndex) !NodeIndex { const right = try p.expectMultiplicativeExpr(left); return p.expectAdditiveExpr(right); } fn expectAdditiveExpr(p: *Parser, lhs_mul: NodeIndex) !NodeIndex { var lhs = lhs_mul; while (true) { const op_token = p.tok_i; const op: Node.Tag = switch (p.getToken(.tag, op_token)) { .plus => .add, .minus => .sub, else => return lhs, }; _ = p.advanceToken(); const unary = try p.unaryExpr() orelse { try p.errors.add( p.peekToken(.loc, 0), "unable to parse right side of '{s}' expression", .{p.getToken(.tag, op_token).symbol()}, null, ); return error.Parsing; }; const rhs = try p.expectMultiplicativeExpr(unary); lhs = try p.addNode(.{ .tag = op, .main_token = op_token, .lhs = lhs, .rhs = rhs, }); } } fn expectMultiplicativeExpr(p: *Parser, lhs_unary: NodeIndex) !NodeIndex { var lhs = lhs_unary; while (true) { const op_token = p.tok_i; const node_tag: Node.Tag = switch (p.peekToken(.tag, 0)) { .asterisk => .mul, .slash => .div, .percent => .mod, else => return lhs, }; _ = p.advanceToken(); const rhs = try p.unaryExpr() orelse { try p.errors.add( p.peekToken(.loc, 0), "unable to parse right side of '{s}' expression", .{p.peekToken(.tag, 0).symbol()},
null, ); return error.Parsing; }; lhs = try p.addNode(.{ .tag = node_tag, .main_token = op_token, .lhs = lhs, .rhs = rhs, }); } } fn componentOrSwizzleSpecifier(p: *Parser, prefix: NodeIndex) !NodeIndex { var prefix_result = prefix; while (true) { if (p.eatToken(.dot)) |dot_token| { const member_token = try p.expectToken(.ident); prefix_result = try p.addNode(.{ .tag = .field_access, .main_token = dot_token, .lhs = prefix_result, .rhs = member_token.asNodeIndex(), }); } else if (p.eatToken(.bracket_left)) |bracket_left_token| { const index_expr = try p.expression() orelse { try p.errors.add( p.peekToken(.loc, 0), "expected expression, but found '{s}'", .{p.peekToken(.tag, 0).symbol()}, null, ); return error.Parsing; }; _ = try p.expectToken(.bracket_right); prefix_result = try p.addNode(.{ .tag = .index_access, .main_token = bracket_left_token, .lhs = prefix_result, .rhs = index_expr, }); } else return prefix_result; } } fn findNextGlobalDirective(p: *Parser) void { while (true) { switch (p.peekToken(.tag, 0)) { .k_enable, .k_requires, .eof => return, .semicolon => { _ = p.advanceToken(); return; }, else => _ = p.advanceToken(), } } } fn findNextGlobalDecl(p: *Parser) void { var level: u32 = 0; while (true) { switch (p.peekToken(.tag, 0)) { .k_fn, .k_var, .k_const, .k_override, .k_struct, .attr, => { if (level == 0) return; }, .semicolon => { if (level == 0) { _ = p.advanceToken(); return; } }, .brace_left, .bracket_left, .paren_left, => { level += 1; }, .brace_right => { if (level == 0) { _ = p.advanceToken(); return; } level -= 1; }, .bracket_right, .paren_right => { if (level != 0) level -= 1; }, .eof => return, else => {}, } _ = p.advanceToken(); } } fn findNextStmt(p: *Parser) void { var level: u32 = 0; while (true) { switch (p.peekToken(.tag, 0)) { .semicolon => { if (level == 0) { _ = p.advanceToken(); return; } }, .brace_left => { level += 1; }, .brace_right => { if (level == 0) { _ = p.advanceToken(); return; } level -= 1; }, .eof => return, else => {}, } _ = p.advanceToken(); } } fn listToSpan(p: *Parser, list: []const NodeIndex) !NodeIndex { try p.extra.appendSlice(p.allocator, @ptrCast(list)); return p.addNode(.{ .tag = .span, .main_token = undefined, .lhs = @enumFromInt(p.extra.items.len - list.len), .rhs = @enumFromInt(p.extra.items.len), }); } fn addNode(p: *Parser, node: Node) error{OutOfMemory}!NodeIndex { const i: NodeIndex = @enumFromInt(p.nodes.len); try p.nodes.append(p.allocator, node); return i; } fn addExtra(p: *Parser, extra: anytype) error{OutOfMemory}!NodeIndex { const fields = std.meta.fields(@TypeOf(extra)); try p.extra.ensureUnusedCapacity(p.allocator, fields.len); const result: NodeIndex = @enumFromInt(p.extra.items.len); inline for (fields) |field| { comptime std.debug.assert(field.type == NodeIndex or field.type == TokenIndex); p.extra.appendAssumeCapacity(@intFromEnum(@field(extra, field.name))); } return result; } fn getToken( p: Parser, comptime field: Ast.TokenList.Field, index: TokenIndex, ) std.meta.fieldInfo(Token, field).type { return p.tokens.items(field)[@intFromEnum(index)]; } fn peekToken( p: Parser, comptime field: Ast.TokenList.Field, offset: isize, ) std.meta.fieldInfo(Token, field).type { return p.tokens.items(field)[@intCast(@as(isize, @intCast(@intFromEnum(p.tok_i))) + offset)]; } fn advanceToken(p: *Parser) TokenIndex { const prev = p.tok_i; p.tok_i = @enumFromInt(@min(@intFromEnum(prev) + 1, p.tokens.len)); return prev; } fn eatToken(p: *Parser, tag: Token.Tag) ?TokenIndex { return if (p.peekToken(.tag, 0) == tag) p.advanceToken()
else null; } fn expectToken(p: *Parser, tag: Token.Tag) !TokenIndex { const token = p.advanceToken(); if (p.getToken(.tag, token) == tag) return token; try p.errors.add( p.getToken(.loc, token), "expected '{s}', but found '{s}'", .{ tag.symbol(), p.getToken(.tag, token).symbol() }, null, ); return error.Parsing; }
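// ---------------------------------------------------------------------------
// Illustrative sketch only; not part of the parser proper. It demonstrates, on
// a toy token stream, the precedence-climbing shape used above by
// expectMathExpr/expectAdditiveExpr/expectMultiplicativeExpr: the
// multiplicative level is resolved first and re-invoked for every right-hand
// side of the additive level, which is what makes `*` bind tighter than `+`.
// All names below (ToyToken, toyMulLevel, toyAddLevel) are hypothetical, and
// the helpers assume a well-formed input stream.
const ToyToken = union(enum) { num: i64, plus, star };

fn toyMulLevel(toks: []const ToyToken, i: *usize) i64 {
    var lhs = toks[i.*].num; // assumes the stream starts with a number
    i.* += 1;
    while (i.* < toks.len and toks[i.*] == .star) {
        i.* += 1;
        lhs *= toks[i.*].num;
        i.* += 1;
    }
    return lhs;
}

fn toyAddLevel(toks: []const ToyToken, i: *usize) i64 {
    // mirror expectMathExpr: resolve the tighter-binding level first
    var lhs = toyMulLevel(toks, i);
    while (i.* < toks.len and toks[i.*] == .plus) {
        i.* += 1;
        lhs += toyMulLevel(toks, i);
    }
    return lhs;
}

test "toy precedence climbing: 1 + 2 * 3 == 7" {
    const toks = [_]ToyToken{ .{ .num = 1 }, .plus, .{ .num = 2 }, .star, .{ .num = 3 } };
    var i: usize = 0;
    try std.testing.expectEqual(@as(i64, 7), toyAddLevel(&toks, &i));
}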
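// A second standalone sketch (also hypothetical, not part of the parser): the
// depth-tracking error recovery used by findNextGlobalDecl/findNextStmt. On a
// toy character stream, skip forward to the first ';' at nesting depth zero,
// so parsing can resume at the next plausible statement boundary.
fn toySkipToNextStmt(src: []const u8) usize {
    var depth: u32 = 0;
    var i: usize = 0;
    while (i < src.len) : (i += 1) {
        switch (src[i]) {
            '{', '(', '[' => depth += 1,
            '}', ')', ']' => {
                if (depth > 0) depth -= 1;
            },
            ';' => {
                // only a top-level ';' ends the statement being skipped
                if (depth == 0) return i + 1;
            },
            else => {},
        }
    }
    return src.len;
}

test "recovery skips semicolons inside nested braces" {
    try std.testing.expectEqual(@as(usize, 12), toySkipToNextStmt("f(){ a; } x; y"));
}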
0
repos/mach-sysgpu/src
repos/mach-sysgpu/src/shader/AstGen.zig
const std = @import("std"); const Ast = @import("Ast.zig"); const Air = @import("Air.zig"); const ErrorList = @import("ErrorList.zig"); const TokenTag = @import("Token.zig").Tag; const Loc = @import("Token.zig").Loc; const Inst = Air.Inst; const InstIndex = Air.InstIndex; const RefIndex = Air.RefIndex; const StringIndex = Air.StringIndex; const ValueIndex = Air.ValueIndex; const Node = Ast.Node; const NodeIndex = Ast.NodeIndex; const TokenIndex = Ast.TokenIndex; const stringToEnum = std.meta.stringToEnum; const indexOf = std.mem.indexOfScalar; const AstGen = @This(); allocator: std.mem.Allocator, tree: *const Ast, instructions: std.AutoArrayHashMapUnmanaged(Inst, void) = .{}, refs: std.ArrayListUnmanaged(InstIndex) = .{}, strings: std.ArrayListUnmanaged(u8) = .{}, values: std.ArrayListUnmanaged(u8) = .{}, scratch: std.ArrayListUnmanaged(InstIndex) = .{}, global_var_refs: std.AutoArrayHashMapUnmanaged(InstIndex, void) = .{}, globals: std.ArrayListUnmanaged(InstIndex) = .{}, has_array_length: bool = false, compute_stage: InstIndex = .none, vertex_stage: InstIndex = .none, fragment_stage: InstIndex = .none, entry_point_name: ?[]const u8 = null, scope_pool: std.heap.MemoryPool(Scope), current_fn_scope: *Scope = undefined, inst_arena: std.heap.ArenaAllocator, errors: *ErrorList, pub const Scope = struct { tag: Tag, /// this is undefined if tag == .root parent: *Scope, decls: std.AutoHashMapUnmanaged(NodeIndex, error{AnalysisFail}!InstIndex) = .{}, const Tag = union(enum) { root, @"fn": struct { stage: Inst.Fn.Stage, return_type: InstIndex, returned: bool, flattened_params: std.AutoHashMapUnmanaged(InstIndex, InstIndex), }, block, loop, continuing, switch_case, @"if", @"for", }; }; pub fn genTranslationUnit(astgen: *AstGen) !RefIndex { var root_scope = try astgen.scope_pool.create(); root_scope.* = .{ .tag = .root, .parent = undefined }; const global_nodes = astgen.tree.spanToList(.globals); try astgen.scanDecls(root_scope, global_nodes); for (global_nodes) |node| { var global = root_scope.decls.get(node).? 
catch continue; global = switch (astgen.tree.nodeTag(node)) { .@"fn" => blk: { break :blk astgen.genFn(root_scope, node, false) catch |err| switch (err) { error.Skiped => continue, else => |e| e, }; }, else => continue, } catch |err| { if (err == error.AnalysisFail) { root_scope.decls.putAssumeCapacity(node, error.AnalysisFail); continue; } return err; }; root_scope.decls.putAssumeCapacity(node, global); try astgen.globals.append(astgen.allocator, global); } if (astgen.errors.list.items.len > 0) return error.AnalysisFail; if (astgen.entry_point_name != null and astgen.compute_stage == .none and astgen.vertex_stage == .none and astgen.fragment_stage == .none) { try astgen.errors.add(Loc{ .start = 0, .end = 1 }, "entry point not found", .{}, null); } return astgen.addRefList(astgen.globals.items); } /// adds `nodes` to scope and checks for re-declarations fn scanDecls(astgen: *AstGen, scope: *Scope, nodes: []const NodeIndex) !void { for (nodes) |decl_node| { const loc = astgen.tree.declNameLoc(decl_node) orelse continue; const name = loc.slice(astgen.tree.source); var iter = scope.decls.keyIterator(); while (iter.next()) |node| { const name_loc = astgen.tree.declNameLoc(node.*).?; if (std.mem.eql(u8, name, name_loc.slice(astgen.tree.source))) { try astgen.errors.add( loc, "redeclaration of '{s}'", .{name}, try astgen.errors.createNote( name_loc, "other declaration here", .{}, ), ); return error.AnalysisFail; } } try scope.decls.putNoClobber(astgen.scope_pool.arena.allocator(), decl_node, .none); } } fn genGlobalDecl(astgen: *AstGen, scope: *Scope, node: NodeIndex) error{ OutOfMemory, AnalysisFail }!InstIndex { const decl = switch (astgen.tree.nodeTag(node)) { .global_var => astgen.genGlobalVar(scope, node), .override => astgen.genOverride(scope, node), .@"const" => astgen.genConst(scope, node), .@"struct" => astgen.genStruct(scope, node), .@"fn" => astgen.genFn(scope, node, false), .type_alias => astgen.genTypeAlias(scope, node), else => unreachable, } catch |err| switch (err) { error.AnalysisFail => { scope.decls.putAssumeCapacity(node, error.AnalysisFail); return error.AnalysisFail; }, error.Skiped => unreachable, else => |e| return e, }; scope.decls.putAssumeCapacity(node, decl); try astgen.globals.append(astgen.allocator, decl); return decl; } fn genGlobalVar(astgen: *AstGen, scope: *Scope, node: NodeIndex) !InstIndex { const node_rhs = astgen.tree.nodeRHS(node); const extra = astgen.tree.extraData(Node.GlobalVar, astgen.tree.nodeLHS(node)); const name_loc = astgen.tree.declNameLoc(node).?; var is_resource = false; var var_type = InstIndex.none; if (extra.type != .none) { var_type = try astgen.genType(scope, extra.type); switch (astgen.getInst(var_type)) { .sampler_type, .comparison_sampler_type, .external_texture_type, .texture_type, => { is_resource = true; }, else => {}, } } var addr_space = Inst.PointerType.AddressSpace.uniform_constant; if (extra.addr_space != .none) { const addr_space_loc = astgen.tree.tokenLoc(extra.addr_space); const ast_addr_space = stringToEnum(Ast.AddressSpace, addr_space_loc.slice(astgen.tree.source)).?; addr_space = switch (ast_addr_space) { .function => .function, .private => .private, .workgroup => .workgroup, .uniform => .uniform, .storage => .storage, }; } if (addr_space == .uniform or addr_space == .storage) { is_resource = true; } var access_mode = Inst.PointerType.AccessMode.read_write; if (extra.access_mode != .none) { const access_mode_loc = astgen.tree.tokenLoc(extra.access_mode); const ast_access_mode = stringToEnum(Ast.AccessMode, 
access_mode_loc.slice(astgen.tree.source)).?; access_mode = switch (ast_access_mode) { .read => .read, .write => .write, .read_write => .read_write, }; } var binding = InstIndex.none; var group = InstIndex.none; if (extra.attrs != .none) { for (astgen.tree.spanToList(extra.attrs)) |attr| { if (!is_resource) { try astgen.errors.add( astgen.tree.nodeLoc(attr), "variable '{s}' is not a resource", .{name_loc.slice(astgen.tree.source)}, null, ); return error.AnalysisFail; } switch (astgen.tree.nodeTag(attr)) { .attr_binding => binding = try astgen.attrBinding(scope, attr), .attr_group => group = try astgen.attrGroup(scope, attr), else => { try astgen.errors.add( astgen.tree.nodeLoc(attr), "unexpected attribute '{s}'", .{astgen.tree.nodeLoc(attr).slice(astgen.tree.source)}, null, ); return error.AnalysisFail; }, } } } if (is_resource and (binding == .none or group == .none)) { try astgen.errors.add( astgen.tree.nodeLoc(node), "resource variable must specify binding and group", .{}, null, ); return error.AnalysisFail; } var init = InstIndex.none; if (node_rhs != .none) { init = try astgen.genExpr(scope, node_rhs); } const name = try astgen.addString(name_loc.slice(astgen.tree.source)); return astgen.addInst(.{ .@"var" = .{ .name = name, .type = var_type, .addr_space = addr_space, .access_mode = access_mode, .binding = binding, .group = group, .init = init, }, }); } fn genOverride(astgen: *AstGen, scope: *Scope, node: NodeIndex) !InstIndex { const node_rhs = astgen.tree.nodeRHS(node); const extra = astgen.tree.extraData(Node.Override, astgen.tree.nodeLHS(node)); const name_loc = astgen.tree.declNameLoc(node).?; var override_type = InstIndex.none; if (extra.type != .none) { override_type = try astgen.genType(scope, extra.type); } var id = InstIndex.none; if (extra.attrs != .none) { for (astgen.tree.spanToList(extra.attrs)) |attr| { switch (astgen.tree.nodeTag(attr)) { .attr_id => id = try astgen.attrId(scope, attr), else => { try astgen.errors.add( astgen.tree.nodeLoc(attr), "unexpected attribute '{s}'", .{astgen.tree.nodeLoc(attr).slice(astgen.tree.source)}, null, ); return error.AnalysisFail; }, } } } var init = InstIndex.none; if (node_rhs != .none) { init = try astgen.genExpr(scope, node_rhs); } const name = try astgen.addString(name_loc.slice(astgen.tree.source)); return astgen.addInst(.{ .@"var" = .{ .name = name, .type = override_type, .init = init, .addr_space = .private, .access_mode = .read, .id = id, }, }); } fn genStruct(astgen: *AstGen, scope: *Scope, node: NodeIndex) !InstIndex { const name_str = astgen.tree.declNameLoc(node).?.slice(astgen.tree.source); const name = try astgen.addString(name_str); const members = try astgen.genStructMembers(scope, astgen.tree.nodeLHS(node)); return astgen.addInst(.{ .@"struct" = .{ .name = name, .members = members, }, }); } fn genStructMembers(astgen: *AstGen, scope: *Scope, node: NodeIndex) !RefIndex { const scratch_top = astgen.scratch.items.len; defer astgen.scratch.shrinkRetainingCapacity(scratch_top); const member_nodes_list = astgen.tree.spanToList(node); for (member_nodes_list, 0..) 
|member_node, i| { const member_name_loc = astgen.tree.tokenLoc(astgen.tree.nodeToken(member_node)); const member_attrs_node = astgen.tree.nodeLHS(member_node); const member_type_node = astgen.tree.nodeRHS(member_node); const member_type_loc = astgen.tree.nodeLoc(member_type_node); const member_type = astgen.genType(scope, member_type_node) catch |err| switch (err) { error.AnalysisFail => continue, error.OutOfMemory => return error.OutOfMemory, }; const member_type_inst = astgen.getInst(member_type); switch (member_type_inst) { .array, .atomic_type, .@"struct", => {}, inline .bool, .int, .float, .vector, .matrix => |data| { std.debug.assert(data.value == null); }, else => { try astgen.errors.add( member_name_loc, "invalid struct member type '{s}'", .{member_type_loc.slice(astgen.tree.source)}, null, ); return error.AnalysisFail; }, } if (member_type_inst == .array) { const array_len = member_type_inst.array.len; if (array_len == .none and i + 1 != member_nodes_list.len) { try astgen.errors.add( member_name_loc, "struct member with runtime-sized array type, must be the last member of the structure", .{}, null, ); return error.AnalysisFail; } } var @"align": ?u29 = null; var size: ?u32 = null; var builtin: ?Inst.Builtin = null; var location: ?u16 = null; var interpolate: ?Inst.Interpolate = null; if (member_attrs_node != .none) { for (astgen.tree.spanToList(member_attrs_node)) |attr| { switch (astgen.tree.nodeTag(attr)) { .attr_align => @"align" = try astgen.attrAlign(scope, attr), .attr_size => size = try astgen.attrSize(scope, attr), .attr_location => location = try astgen.attrLocation(scope, attr), .attr_builtin => builtin = astgen.attrBuiltin(attr), .attr_interpolate => interpolate = astgen.attrInterpolate(attr), else => { try astgen.errors.add( astgen.tree.nodeLoc(attr), "unexpected attribute '{s}'", .{astgen.tree.nodeLoc(attr).slice(astgen.tree.source)}, null, ); return error.AnalysisFail; }, } } } const name = try astgen.addString(member_name_loc.slice(astgen.tree.source)); const member = try astgen.addInst(.{ .struct_member = .{ .name = name, .type = member_type, .index = @intCast(i), .@"align" = @"align", .size = size, .builtin = builtin, .location = location, .interpolate = interpolate, }, }); try astgen.scratch.append(astgen.allocator, member); } return astgen.addRefList(astgen.scratch.items[scratch_top..]); } fn genFn(astgen: *AstGen, root_scope: *Scope, node: NodeIndex, only_entry_point: bool) !InstIndex { const scratch_top = astgen.global_var_refs.count(); defer astgen.global_var_refs.shrinkRetainingCapacity(scratch_top); astgen.has_array_length = false; const fn_proto = astgen.tree.extraData(Node.FnProto, astgen.tree.nodeLHS(node)); const node_rhs = astgen.tree.nodeRHS(node); const node_loc = astgen.tree.nodeLoc(node); var return_type = InstIndex.none; var return_attrs = Inst.Fn.ReturnAttrs{ .builtin = null, .location = null, .interpolate = null, .invariant = false, }; if (fn_proto.return_type != .none) { return_type = try astgen.genType(root_scope, fn_proto.return_type); if (fn_proto.return_attrs != .none) { for (astgen.tree.spanToList(fn_proto.return_attrs)) |attr| { switch (astgen.tree.nodeTag(attr)) { .attr_invariant => return_attrs.invariant = true, .attr_location => return_attrs.location = try astgen.attrLocation(root_scope, attr), .attr_builtin => return_attrs.builtin = astgen.attrBuiltin(attr), .attr_interpolate => return_attrs.interpolate = astgen.attrInterpolate(attr), else => { try astgen.errors.add( astgen.tree.nodeLoc(attr), "unexpected attribute '{s}'", 
.{astgen.tree.nodeLoc(attr).slice(astgen.tree.source)}, null, ); return error.AnalysisFail; }, } } } } var stage: Inst.Fn.Stage = .none; var workgroup_size_attr = NodeIndex.none; var is_const = false; if (fn_proto.attrs != .none) { for (astgen.tree.spanToList(fn_proto.attrs)) |attr| { switch (astgen.tree.nodeTag(attr)) { .attr_vertex, .attr_fragment, .attr_compute, => |stage_attr| { if (stage != .none) { try astgen.errors.add(astgen.tree.nodeLoc(attr), "multiple shader stages", .{}, null); return error.AnalysisFail; } stage = switch (stage_attr) { .attr_vertex => .vertex, .attr_fragment => .fragment, .attr_compute => .{ .compute = undefined }, else => unreachable, }; }, .attr_workgroup_size => workgroup_size_attr = attr, .attr_const => is_const = true, else => { try astgen.errors.add( astgen.tree.nodeLoc(attr), "unexpected attribute '{s}'", .{astgen.tree.nodeLoc(attr).slice(astgen.tree.source)}, null, ); return error.AnalysisFail; }, } } } if (only_entry_point and stage == .none) return error.Skiped; if (stage == .compute) { if (return_type != .none) { try astgen.errors.add( astgen.tree.nodeLoc(fn_proto.return_type), "return type on compute function", .{}, null, ); return error.AnalysisFail; } if (workgroup_size_attr == .none) { try astgen.errors.add( node_loc, "@workgroup_size not specified on compute shader", .{}, null, ); return error.AnalysisFail; } const workgroup_size_data = astgen.tree.extraData(Ast.Node.WorkgroupSize, astgen.tree.nodeLHS(workgroup_size_attr)); stage.compute = Inst.Fn.Stage.WorkgroupSize{ .x = try astgen.genExpr(root_scope, workgroup_size_data.x), .y = blk: { if (workgroup_size_data.y == .none) break :blk .none; break :blk try astgen.genExpr(root_scope, workgroup_size_data.y); }, .z = blk: { if (workgroup_size_data.z == .none) break :blk .none; break :blk try astgen.genExpr(root_scope, workgroup_size_data.z); }, }; } else if (workgroup_size_attr != .none) { try astgen.errors.add( node_loc, "@workgroup_size must be specified with a compute shader", .{}, null, ); return error.AnalysisFail; } const scope = try astgen.scope_pool.create(); scope.* = .{ .tag = .{ .@"fn" = .{ .stage = stage, .return_type = return_type, .returned = false, .flattened_params = .{}, }, }, .parent = root_scope, }; astgen.current_fn_scope = scope; var params = RefIndex.none; if (fn_proto.params != .none) { params = try astgen.genFnParams(scope, fn_proto.params); } const name_slice = astgen.tree.declNameLoc(node).?.slice(astgen.tree.source); const name = try astgen.addString(name_slice); const block = try astgen.genBlock(scope, node_rhs); if (return_type != .none and !scope.tag.@"fn".returned) { try astgen.errors.add(node_loc, "function does not return", .{}, null); return error.AnalysisFail; } const global_var_refs = try astgen.addRefList(astgen.global_var_refs.keys()[scratch_top..]); const inst = try astgen.addInst(.{ .@"fn" = .{ .name = name, .stage = stage, .is_const = is_const, .params = params, .return_type = return_type, .return_attrs = return_attrs, .block = block, .global_var_refs = global_var_refs, .has_array_length = astgen.has_array_length, }, }); if (astgen.entry_point_name) |entry_point_name| { if (std.mem.eql(u8, name_slice, entry_point_name)) { astgen.compute_stage = .none; astgen.vertex_stage = .none; astgen.fragment_stage = .none; if (stage == .none) { try astgen.errors.add(node_loc, "function is not an entry point", .{}, null); return error.AnalysisFail; } } } // only one kind of entry point per file switch (stage) { .none => {}, .compute => { if (astgen.compute_stage != .none) 
{ try astgen.errors.add(node_loc, "multiple compute entry point found", .{}, null); return error.AnalysisFail; } astgen.compute_stage = inst; }, .vertex => { if (astgen.vertex_stage != .none) { try astgen.errors.add(node_loc, "multiple vertex entry point found", .{}, null); return error.AnalysisFail; } astgen.vertex_stage = inst; }, .fragment => { if (astgen.fragment_stage != .none) { try astgen.errors.add(node_loc, "multiple fragment entry point found", .{}, null); return error.AnalysisFail; } astgen.fragment_stage = inst; }, } return inst; } fn genTypeAlias(astgen: *AstGen, scope: *Scope, node: NodeIndex) !InstIndex { const node_lhs = astgen.tree.nodeLHS(node); return astgen.genType(scope, node_lhs); } fn genFnParams(astgen: *AstGen, scope: *Scope, node: NodeIndex) !RefIndex { const scratch_top = astgen.scratch.items.len; defer astgen.scratch.shrinkRetainingCapacity(scratch_top); const param_nodes = astgen.tree.spanToList(node); try astgen.scanDecls(scope, param_nodes); for (param_nodes) |param_node| { const param_type_node = astgen.tree.nodeRHS(param_node); const param_type = try astgen.genType(scope, param_type_node); const param_name_loc = astgen.tree.tokenLoc(astgen.tree.nodeToken(param_node)); const param_name = try astgen.addString(param_name_loc.slice(astgen.tree.source)); if (scope.tag.@"fn".stage != .none and astgen.getInst(param_type) == .@"struct") { const members = astgen.refToList(astgen.getInst(param_type).@"struct".members); for (members) |member_inst| { const member = astgen.getInst(member_inst).struct_member; const param = try astgen.addInst(.{ .fn_param = .{ .name = member.name, .type = member.type, .builtin = member.builtin, .interpolate = member.interpolate, .location = member.location, .invariant = false, }, }); try astgen.current_fn_scope.tag.@"fn".flattened_params.put( astgen.inst_arena.allocator(), member_inst, param, ); try astgen.scratch.append(astgen.allocator, param); } // TODO const param = try astgen.addInst(.{ .fn_param = .{ .name = param_name, .type = param_type, .builtin = null, .interpolate = null, .location = null, .invariant = false, }, }); scope.decls.putAssumeCapacity(param_node, param); } else { var builtin: ?Inst.Builtin = null; var inter: ?Inst.Interpolate = null; var location: ?u16 = null; var invariant: bool = false; const param_attrs_node = astgen.tree.nodeLHS(param_node); if (param_attrs_node != .none) { for (astgen.tree.spanToList(param_attrs_node)) |attr| { switch (astgen.tree.nodeTag(attr)) { .attr_invariant => invariant = true, .attr_location => location = try astgen.attrLocation(scope, attr), .attr_builtin => builtin = astgen.attrBuiltin(attr), .attr_interpolate => inter = astgen.attrInterpolate(attr), else => { try astgen.errors.add( astgen.tree.nodeLoc(attr), "unexpected attribute '{s}'", .{astgen.tree.nodeLoc(attr).slice(astgen.tree.source)}, null, ); return error.AnalysisFail; }, } } } const param = try astgen.addInst(.{ .fn_param = .{ .name = param_name, .type = param_type, .builtin = builtin, .interpolate = inter, .location = location, .invariant = invariant, }, }); try astgen.scratch.append(astgen.allocator, param); scope.decls.putAssumeCapacity(param_node, param); } } return astgen.addRefList(astgen.scratch.items[scratch_top..]); } fn attrBinding(astgen: *AstGen, scope: *Scope, node: NodeIndex) !InstIndex { const node_lhs = astgen.tree.nodeLHS(node); const node_lhs_loc = astgen.tree.nodeLoc(node_lhs); const binding = try astgen.genExpr(scope, node_lhs); const binding_res = try astgen.resolve(binding); if 
(astgen.getInst(binding_res) != .int) { try astgen.errors.add( node_lhs_loc, "binding value must be integer", .{}, null, ); return error.AnalysisFail; } if (astgen.getValue(Inst.Int.Value, astgen.getInst(binding_res).int.value.?).literal < 0) { try astgen.errors.add( node_lhs_loc, "binding value must be a positive", .{}, null, ); return error.AnalysisFail; } return binding; } fn attrId(astgen: *AstGen, scope: *Scope, node: NodeIndex) !InstIndex { const node_lhs = astgen.tree.nodeLHS(node); const node_lhs_loc = astgen.tree.nodeLoc(node_lhs); const id = try astgen.genExpr(scope, node_lhs); const id_res = try astgen.resolve(id); if (astgen.getInst(id_res) != .int) { try astgen.errors.add( node_lhs_loc, "id value must be integer", .{}, null, ); return error.AnalysisFail; } if (astgen.getValue(Inst.Int.Value, astgen.getInst(id_res).int.value.?).literal < 0) { try astgen.errors.add( node_lhs_loc, "id value must be a positive", .{}, null, ); return error.AnalysisFail; } return id; } fn attrGroup(astgen: *AstGen, scope: *Scope, node: NodeIndex) !InstIndex { const node_lhs = astgen.tree.nodeLHS(node); const node_lhs_loc = astgen.tree.nodeLoc(node_lhs); const group = try astgen.genExpr(scope, node_lhs); const inst = astgen.getInst(try astgen.resolve(group)); if (inst != .int or inst.int.value == null) { try astgen.errors.add( node_lhs_loc, "group value must be a constant integer", .{}, null, ); return error.AnalysisFail; } if (astgen.getValue(Inst.Int.Value, inst.int.value.?).literal < 0) { try astgen.errors.add( node_lhs_loc, "group value must be a positive", .{}, null, ); return error.AnalysisFail; } return group; } fn attrAlign(astgen: *AstGen, scope: *Scope, node: NodeIndex) !u29 { const expr = try astgen.genExpr(scope, astgen.tree.nodeLHS(node)); return @intCast(astgen.getValue(Air.Inst.Int.Value, astgen.getInst(expr).int.value.?).literal); } fn attrSize(astgen: *AstGen, scope: *Scope, node: NodeIndex) !u32 { const expr = try astgen.genExpr(scope, astgen.tree.nodeLHS(node)); return @intCast(astgen.getValue(Air.Inst.Int.Value, astgen.getInst(expr).int.value.?).literal); } fn attrLocation(astgen: *AstGen, scope: *Scope, node: NodeIndex) !u16 { const inst_idx = try astgen.genExpr(scope, astgen.tree.nodeLHS(node)); const value_idx = astgen.getInst(inst_idx).int.value.?; return @intCast(astgen.getValue(Inst.Int.Value, value_idx).literal); } fn attrBuiltin(astgen: *AstGen, node: NodeIndex) Inst.Builtin { const loc = astgen.tree.tokenLoc(astgen.tree.nodeLHS(node).asTokenIndex()); return stringToEnum(Ast.Builtin, loc.slice(astgen.tree.source)).?; } fn attrInterpolate(astgen: *AstGen, node: NodeIndex) Inst.Interpolate { const inter_type_token = astgen.tree.nodeLHS(node).asTokenIndex(); const inter_type_loc = astgen.tree.tokenLoc(inter_type_token); const inter_type_ast = stringToEnum(Ast.InterpolationType, inter_type_loc.slice(astgen.tree.source)).?; var inter = Inst.Interpolate{ .type = switch (inter_type_ast) { .perspective => .perspective, .linear => .linear, .flat => .flat, }, .sample = .none, }; if (astgen.tree.nodeRHS(node) != .none) { const inter_sample_token = astgen.tree.nodeRHS(node).asTokenIndex(); const inter_sample_loc = astgen.tree.tokenLoc(inter_sample_token); const inter_sample_ast = stringToEnum(Ast.InterpolationSample, inter_sample_loc.slice(astgen.tree.source)).?; inter.sample = switch (inter_sample_ast) { .center => .center, .centroid => .centroid, .sample => .sample, }; } return inter; } fn genBlock(astgen: *AstGen, scope: *Scope, node: NodeIndex) error{ OutOfMemory, AnalysisFail 
}!InstIndex { const node_lhs = astgen.tree.nodeLHS(node); if (node_lhs == .none) return .none; const stmnt_nodes = astgen.tree.spanToList(node_lhs); try astgen.scanDecls(scope, stmnt_nodes); const scratch_top = astgen.scratch.items.len; defer astgen.scratch.shrinkRetainingCapacity(scratch_top); var is_unreachable = false; for (stmnt_nodes) |stmnt_node| { const stmnt_node_loc = astgen.tree.nodeLoc(stmnt_node); if (is_unreachable) { try astgen.errors.add(stmnt_node_loc, "unreachable code", .{}, null); return error.AnalysisFail; } const stmnt = try astgen.genStatement(scope, stmnt_node); if (astgen.getInst(stmnt) == .@"return") { is_unreachable = true; } try astgen.scratch.append(astgen.allocator, stmnt); } const statements = try astgen.addRefList(astgen.scratch.items[scratch_top..]); return astgen.addInst(.{ .block = statements }); } fn genStatement(astgen: *AstGen, scope: *Scope, node: NodeIndex) !InstIndex { return switch (astgen.tree.nodeTag(node)) { .compound_assign => try astgen.genCompoundAssign(scope, node), .phony_assign => try astgen.genPhonyAssign(scope, node), .call => try astgen.genCall(scope, node), .@"return" => try astgen.genReturn(scope, node), .break_if => try astgen.genBreakIf(scope, node), .@"if" => try astgen.genIf(scope, node), .if_else => try astgen.genIfElse(scope, node), .if_else_if => try astgen.genIfElseIf(scope, node), .@"while" => try astgen.genWhile(scope, node), .@"for" => try astgen.genFor(scope, node), .@"switch" => try astgen.genSwitch(scope, node), .loop => try astgen.genLoop(scope, node), .block => blk: { const inner_scope = try astgen.scope_pool.create(); inner_scope.* = .{ .tag = .block, .parent = scope }; const inner_block = try astgen.genBlock(inner_scope, node); break :blk inner_block; }, .continuing => try astgen.genContinuing(scope, node), .discard => try astgen.addInst(.discard), .@"break" => try astgen.addInst(.@"break"), .@"continue" => try astgen.addInst(.@"continue"), .increase => try astgen.genIncreaseDecrease(scope, node, .add), .decrease => try astgen.genIncreaseDecrease(scope, node, .sub), .@"var" => blk: { const decl = try astgen.genVar(scope, node); scope.decls.putAssumeCapacity(node, decl); break :blk decl; }, .@"const" => blk: { const decl = try astgen.genConst(scope, node); scope.decls.putAssumeCapacity(node, decl); break :blk decl; }, .let => blk: { const decl = try astgen.genLet(scope, node); scope.decls.putAssumeCapacity(node, decl); break :blk decl; }, else => unreachable, }; } fn genLoop(astgen: *AstGen, parent_scope: *Scope, node: NodeIndex) !InstIndex { const scope = try astgen.scope_pool.create(); scope.* = .{ .tag = .loop, .parent = parent_scope }; const block = try astgen.genBlock(scope, astgen.tree.nodeLHS(node)); return astgen.addInst(.{ .loop = block }); } fn genContinuing(astgen: *AstGen, parent_scope: *Scope, node: NodeIndex) !InstIndex { const scope = try astgen.scope_pool.create(); scope.* = .{ .tag = .continuing, .parent = parent_scope }; const block = try astgen.genBlock(scope, astgen.tree.nodeLHS(node)); return astgen.addInst(.{ .continuing = block }); } fn genBreakIf(astgen: *AstGen, scope: *Scope, node: NodeIndex) !InstIndex { const expr = try astgen.genExpr(scope, astgen.tree.nodeLHS(node)); return astgen.addInst(.{ .break_if = expr }); } fn genIf(astgen: *AstGen, scope: *Scope, node: NodeIndex) !InstIndex { const node_lhs = astgen.tree.nodeLHS(node); const node_rhs = astgen.tree.nodeRHS(node); const node_lhs_loc = astgen.tree.nodeLoc(node_lhs); const cond = try astgen.genExpr(scope, node_lhs); const cond_res = 
try astgen.resolve(cond); if (astgen.getInst(cond_res) != .bool) { try astgen.errors.add(node_lhs_loc, "expected bool", .{}, null); return error.AnalysisFail; } const body_scope = try astgen.scope_pool.create(); body_scope.* = .{ .tag = .@"if", .parent = scope }; const block = try astgen.genBlock(body_scope, node_rhs); return astgen.addInst(.{ .@"if" = .{ .cond = cond, .body = block, .@"else" = .none, }, }); } fn genIfElse(astgen: *AstGen, scope: *Scope, node: NodeIndex) !InstIndex { const if_node = astgen.tree.nodeLHS(node); const cond = try astgen.genExpr(scope, astgen.tree.nodeLHS(if_node)); const if_body_scope = try astgen.scope_pool.create(); if_body_scope.* = .{ .tag = .@"if", .parent = scope }; const if_block = try astgen.genBlock(if_body_scope, astgen.tree.nodeRHS(if_node)); const else_body_scope = try astgen.scope_pool.create(); else_body_scope.* = .{ .tag = .@"if", .parent = scope }; const else_block = try astgen.genBlock(else_body_scope, astgen.tree.nodeRHS(node)); return astgen.addInst(.{ .@"if" = .{ .cond = cond, .body = if_block, .@"else" = else_block, }, }); } fn genIfElseIf(astgen: *AstGen, scope: *Scope, node: NodeIndex) !InstIndex { const if_node = astgen.tree.nodeLHS(node); const node_rhs = astgen.tree.nodeRHS(node); const cond = try astgen.genExpr(scope, astgen.tree.nodeLHS(if_node)); const block = try astgen.genBlock(scope, astgen.tree.nodeRHS(if_node)); const else_if = switch (astgen.tree.nodeTag(node_rhs)) { .@"if" => try astgen.genIf(scope, node_rhs), .if_else => try astgen.genIfElse(scope, node_rhs), .if_else_if => try astgen.genIfElseIf(scope, node_rhs), else => unreachable, }; return astgen.addInst(.{ .@"if" = .{ .cond = cond, .body = block, .@"else" = else_if, }, }); } fn genWhile(astgen: *AstGen, scope: *Scope, node: NodeIndex) !InstIndex { const node_lhs = astgen.tree.nodeLHS(node); const node_rhs = astgen.tree.nodeRHS(node); const node_lhs_loc = astgen.tree.nodeLoc(node_lhs); const cond = try astgen.genExpr(scope, node_lhs); const cond_res = try astgen.resolve(cond); if (astgen.getInst(cond_res) != .bool) { try astgen.errors.add(node_lhs_loc, "expected bool", .{}, null); return error.AnalysisFail; } const block = try astgen.genBlock(scope, node_rhs); return astgen.addInst(.{ .@"while" = .{ .cond = cond, .body = block } }); } fn genFor(astgen: *AstGen, scope: *Scope, node: NodeIndex) !InstIndex { const node_lhs = astgen.tree.nodeLHS(node); const node_rhs = astgen.tree.nodeRHS(node); const extra = astgen.tree.extraData(Ast.Node.ForHeader, node_lhs); var for_scope = try astgen.scope_pool.create(); for_scope.* = .{ .tag = .@"for", .parent = scope }; try astgen.scanDecls(for_scope, &.{extra.init}); const init = switch (astgen.tree.nodeTag(extra.init)) { .@"var" => try astgen.genVar(for_scope, extra.init), .@"const" => try astgen.genConst(for_scope, extra.init), .let => try astgen.genLet(for_scope, extra.init), else => unreachable, }; for_scope.decls.putAssumeCapacity(extra.init, init); const cond_node_loc = astgen.tree.nodeLoc(extra.cond); const cond = try astgen.genExpr(for_scope, extra.cond); const cond_res = try astgen.resolve(cond); if (astgen.getInst(cond_res) != .bool) { try astgen.errors.add(cond_node_loc, "expected bool", .{}, null); return error.AnalysisFail; } const update = switch (astgen.tree.nodeTag(extra.update)) { .phony_assign => try astgen.genPhonyAssign(for_scope, extra.update), .increase => try astgen.genIncreaseDecrease(for_scope, extra.update, .add), .decrease => try astgen.genIncreaseDecrease(for_scope, extra.update, .sub), .compound_assign 
=> try astgen.genCompoundAssign(for_scope, extra.update), .call => try astgen.genCall(for_scope, extra.update), else => unreachable, }; const block = try astgen.genBlock(for_scope, node_rhs); return astgen.addInst(.{ .@"for" = .{ .init = init, .cond = cond, .update = update, .body = block, }, }); } fn genSwitch(astgen: *AstGen, scope: *Scope, node: NodeIndex) !InstIndex { const switch_on = try astgen.genExpr(scope, astgen.tree.nodeLHS(node)); const switch_on_res = try astgen.resolve(switch_on); const scratch_top = astgen.scratch.items.len; defer astgen.scratch.shrinkRetainingCapacity(scratch_top); const cases_nodes = astgen.tree.spanToList(astgen.tree.nodeRHS(node)); for (cases_nodes) |cases_node| { const cases_node_tag = astgen.tree.nodeTag(cases_node); const cases_scope = try astgen.scope_pool.create(); cases_scope.* = .{ .tag = .switch_case, .parent = scope }; var cases = RefIndex.none; const body = try astgen.genBlock(cases_scope, astgen.tree.nodeRHS(cases_node)); const default = cases_node_tag == .switch_default or cases_node_tag == .switch_case_default; switch (cases_node_tag) { .switch_case, .switch_case_default => { const cases_scratch_top = astgen.scratch.items.len; defer astgen.scratch.shrinkRetainingCapacity(cases_scratch_top); const case_nodes = astgen.tree.spanToList(astgen.tree.nodeLHS(cases_node)); for (case_nodes) |case_node| { const case_node_loc = astgen.tree.nodeLoc(case_node); const case = try astgen.genExpr(scope, case_node); const case_res = try astgen.resolve(case); if (!try astgen.coerce(case_res, switch_on_res)) { try astgen.errors.add(case_node_loc, "switch and case type mismatch", .{}, null); return error.AnalysisFail; } try astgen.scratch.append(astgen.allocator, case); } cases = try astgen.addRefList(astgen.scratch.items[scratch_top..]); }, .switch_default => {}, else => unreachable, } const case_inst = try astgen.addInst(.{ .switch_case = .{ .cases = cases, .body = body, .default = default, }, }); try astgen.scratch.append(astgen.allocator, case_inst); } const cases_list = try astgen.addRefList(astgen.scratch.items[scratch_top..]); return astgen.addInst(.{ .@"switch" = .{ .switch_on = switch_on, .cases_list = cases_list, }, }); } fn genCompoundAssign(astgen: *AstGen, scope: *Scope, node: NodeIndex) !InstIndex { const node_lhs = astgen.tree.nodeLHS(node); const node_rhs = astgen.tree.nodeRHS(node); const lhs = try astgen.genExpr(scope, node_lhs); const rhs = try astgen.genExpr(scope, node_rhs); const lhs_res = try astgen.resolve(lhs); const rhs_res = try astgen.resolve(rhs); if (!try astgen.isMutable(lhs)) { try astgen.errors.add(astgen.tree.nodeLoc(node), "cannot assign to constant", .{}, null); return error.AnalysisFail; } if (!try astgen.coerce(rhs_res, lhs_res)) { try astgen.errors.add(astgen.tree.nodeLoc(node), "type mismatch", .{}, null); return error.AnalysisFail; } const mod: Inst.Assign.Modifier = switch (astgen.tree.tokenTag(astgen.tree.nodeToken(node))) { .equal => .none, .plus_equal => .add, .minus_equal => .sub, .asterisk_equal => .mul, .slash_equal => .div, .percent_equal => .mod, .ampersand_equal => .@"and", .pipe_equal => .@"or", .xor_equal => .xor, .angle_bracket_angle_bracket_left_equal => .shl, .angle_bracket_angle_bracket_right_equal => .shr, else => unreachable, }; return astgen.addInst(.{ .assign = .{ .mod = mod, .type = lhs_res, .lhs = lhs, .rhs = rhs, }, }); } pub fn isMutable(astgen: *AstGen, index: InstIndex) !bool { var idx = index; while (true) switch (astgen.getInst(idx)) { inline .field_access, .swizzle_access, .index_access => 
|access| idx = access.base, .unary => |un| switch (un.op) { .deref => return astgen.getInst(try astgen.resolve(un.expr)).ptr_type.access_mode != .read, else => unreachable, }, .var_ref => |var_ref| idx = var_ref, .@"var" => |@"var"| return @"var".access_mode != .read, else => return false, }; } fn genPhonyAssign(astgen: *AstGen, scope: *Scope, node: NodeIndex) !InstIndex { const node_lhs = astgen.tree.nodeLHS(node); return astgen.genExpr(scope, node_lhs); } fn genIncreaseDecrease(astgen: *AstGen, scope: *Scope, node: NodeIndex, mod: Inst.Assign.Modifier) !InstIndex { const node_lhs = astgen.tree.nodeLHS(node); const node_lhs_loc = astgen.tree.nodeLoc(node_lhs); const lhs = try astgen.genExpr(scope, node_lhs); if (astgen.getInst(lhs) != .var_ref) { try astgen.errors.add(node_lhs_loc, "expected a reference", .{}, null); return error.AnalysisFail; } const lhs_res = try astgen.resolve(lhs); if (astgen.getInst(lhs_res) != .int) { try astgen.errors.add(node_lhs_loc, "expected an integer", .{}, null); return error.AnalysisFail; } const rhs = try astgen.addInst(.{ .int = .{ .type = astgen.getInst(lhs_res).int.type, .value = try astgen.addValue(Inst.Int.Value, .{ .literal = 1 }), } }); return astgen.addInst(.{ .assign = .{ .mod = mod, .type = lhs_res, .lhs = lhs, .rhs = rhs, } }); } fn genVar(astgen: *AstGen, scope: *Scope, node: NodeIndex) !InstIndex { const node_rhs = astgen.tree.nodeRHS(node); const extra = astgen.tree.extraData(Node.Var, astgen.tree.nodeLHS(node)); const name_loc = astgen.tree.declNameLoc(node).?; var is_resource = false; // TODO: research and remove this? var addr_space = Inst.PointerType.AddressSpace.function; if (extra.addr_space != .none) { const addr_space_loc = astgen.tree.tokenLoc(extra.addr_space); const ast_addr_space = stringToEnum(Ast.AddressSpace, addr_space_loc.slice(astgen.tree.source)).?; addr_space = switch (ast_addr_space) { .function => .function, .private => .private, .workgroup => .workgroup, .uniform => .uniform, .storage => .storage, }; } if (addr_space == .uniform or addr_space == .storage) { is_resource = true; } var access_mode = Inst.PointerType.AccessMode.read_write; if (extra.access_mode != .none) { const access_mode_loc = astgen.tree.tokenLoc(extra.access_mode); const ast_access_mode = stringToEnum(Ast.AccessMode, access_mode_loc.slice(astgen.tree.source)).?; access_mode = switch (ast_access_mode) { .read => .read, .write => .write, .read_write => .read_write, }; } var init = InstIndex.none; if (node_rhs != .none) { init = try astgen.genExpr(scope, node_rhs); } var var_type = InstIndex.none; if (extra.type != .none) { var_type = try astgen.genType(scope, extra.type); switch (astgen.getInst(var_type)) { .sampler_type, .comparison_sampler_type, .texture_type, .external_texture_type, => { is_resource = true; }, else => {}, } if (init != .none) { const init_res = try astgen.resolve(init); if (!try astgen.coerce(init_res, var_type)) { try astgen.errors.add(astgen.tree.nodeLoc(node_rhs), "type mismatch", .{}, null); return error.AnalysisFail; } } } else { var_type = try astgen.resolve(init); } const name = try astgen.addString(name_loc.slice(astgen.tree.source)); return astgen.addInst(.{ .@"var" = .{ .name = name, .type = var_type, .addr_space = addr_space, .access_mode = access_mode, .init = init, }, }); } fn genConst(astgen: *AstGen, scope: *Scope, node: NodeIndex) !InstIndex { const node_lhs = astgen.tree.nodeLHS(node); const node_rhs = astgen.tree.nodeRHS(node); const name_loc = astgen.tree.declNameLoc(node).?; const init = try astgen.genExpr(scope, 
node_rhs); var var_type = InstIndex.none; if (node_lhs != .none) { var_type = try astgen.genType(scope, node_lhs); } else { var_type = try astgen.resolve(init); } const name = try astgen.addString(name_loc.slice(astgen.tree.source)); return astgen.addInst(.{ .@"const" = .{ .name = name, .type = var_type, .init = init, }, }); } fn genLet(astgen: *AstGen, scope: *Scope, node: NodeIndex) !InstIndex { const node_lhs = astgen.tree.nodeLHS(node); const node_rhs = astgen.tree.nodeRHS(node); const name_loc = astgen.tree.declNameLoc(node).?; const init = try astgen.genExpr(scope, node_rhs); const name = try astgen.addString(name_loc.slice(astgen.tree.source)); var var_type = InstIndex.none; if (node_lhs != .none) { var_type = try astgen.genType(scope, node_lhs); } else { var_type = try astgen.resolve(init); } return astgen.addInst(.{ .@"var" = .{ .name = name, .type = var_type, .init = init, .addr_space = .function, .access_mode = .read, }, }); } fn genExpr(astgen: *AstGen, scope: *Scope, node: NodeIndex) error{ OutOfMemory, AnalysisFail }!InstIndex { const node_tag = astgen.tree.nodeTag(node); return switch (node_tag) { .number => astgen.genNumber(node), .true => astgen.addInst(.{ .bool = .{ .value = .{ .literal = true } } }), .false => astgen.addInst(.{ .bool = .{ .value = .{ .literal = false } } }), .not => astgen.genNot(scope, node), .negate => astgen.genNegate(scope, node), .deref => astgen.genDeref(scope, node), .addr_of => astgen.genAddrOf(scope, node), .mul, .div, .mod, .add, .sub, .shl, .shr, .@"and", .@"or", .xor, .logical_and, .logical_or, .equal, .not_equal, .less_than, .less_than_equal, .greater_than, .greater_than_equal, => astgen.genBinary(scope, node), .index_access => astgen.genIndexAccess(scope, node), .field_access => astgen.genFieldAccess(scope, node), .call => astgen.genCall(scope, node), .bitcast => astgen.genBitcast(scope, node), .ident => astgen.genVarRef(scope, node), else => unreachable, }; } fn genNumber(astgen: *AstGen, node: NodeIndex) !InstIndex { const node_loc = astgen.tree.nodeLoc(node); const bytes = node_loc.slice(astgen.tree.source); var i: usize = 0; var suffix: u8 = 0; var base: u8 = 10; var exponent = false; var dot = false; if (bytes.len >= 2 and bytes[0] == '0') switch (bytes[1]) { '0'...'9' => { try astgen.errors.add(node_loc, "leading zero disallowed", .{}, null); return error.AnalysisFail; }, 'x', 'X' => { i = 2; base = 16; }, else => {}, }; while (i < bytes.len) : (i += 1) { const c = bytes[i]; switch (c) { 'f', 'h' => suffix = c, 'i', 'u' => { if (dot or suffix == 'f' or suffix == 'h' or exponent) { try astgen.errors.add(node_loc, "suffix '{c}' on float literal", .{c}, null); return error.AnalysisFail; } suffix = c; }, 'e', 'E', 'p', 'P' => { if (exponent) { try astgen.errors.add(node_loc, "duplicate exponent '{c}'", .{c}, null); return error.AnalysisFail; } exponent = true; }, '.' => dot = true, else => {}, } } var inst: Inst = undefined; if (dot or exponent or suffix == 'f' or suffix == 'h') { if (base == 16) { // TODO try astgen.errors.add(node_loc, "hexadecimal float literals not implemented", .{}, null); return error.AnalysisFail; } const value = std.fmt.parseFloat(f32, bytes[0 .. bytes.len - @intFromBool(suffix != 0)]) catch |err| { try astgen.errors.add( node_loc, "cannot parse float literal ({s})", .{@errorName(err)}, try astgen.errors.createNote( null, "this is a bug in sysgpu. 
please report it", .{}, ), ); return error.AnalysisFail; }; inst = .{ .float = .{ .type = switch (suffix) { 0, 'f' => .f32, 'h' => .f16, else => unreachable, }, .value = try astgen.addValue(Inst.Float.Value, .{ .literal = value }), }, }; } else { const value = std.fmt.parseInt(i33, bytes[0 .. bytes.len - @intFromBool(suffix != 0)], 0) catch |err| { try astgen.errors.add( node_loc, "cannot parse integer literal ({s})", .{@errorName(err)}, try astgen.errors.createNote( null, "this is a bug in sysgpu. please report it", .{}, ), ); return error.AnalysisFail; }; inst = .{ .int = .{ .type = switch (suffix) { 0, 'i' => .i32, 'u' => .u32, else => unreachable, }, .value = try astgen.addValue(Inst.Int.Value, .{ .literal = value }), }, }; } return astgen.addInst(inst); } fn coerce(astgen: *AstGen, src: InstIndex, dst: InstIndex) !bool { if (astgen.eql(src, dst)) return true; const src_inst = astgen.getInst(src); const dst_inst = astgen.getInst(dst); switch (src_inst) { .int => |src_int| if (src_int.value != null) { const int_value = astgen.getValue(Air.Inst.Int.Value, src_inst.int.value.?); if (int_value == .literal) switch (dst_inst) { .int => |dst_int| { if (src_int.type == .i32 and dst_int.type == .u32 and int_value.literal < 0) { try astgen.errors.add( Loc{ .start = 0, .end = 0 }, "TODO: undefined behavior: Op ({d}, rhs)", .{int_value.literal}, null, ); return error.AnalysisFail; } const value = try astgen.addValue( Air.Inst.Int.Value, Air.Inst.Int.Value{ .literal = @intCast(int_value.literal) }, ); astgen.instructions.keys()[@intFromEnum(src)] = .{ .int = .{ .type = dst_int.type, .value = value, }, }; return true; }, .float => |dst_float| { const value = try astgen.addValue( Air.Inst.Float.Value, Air.Inst.Float.Value{ .literal = @floatFromInt(int_value.literal) }, ); astgen.instructions.keys()[@intFromEnum(src)] = .{ .float = .{ .type = dst_float.type, .value = value, }, }; return true; }, else => {}, }; }, else => {}, } return false; } fn genNot(astgen: *AstGen, scope: *Scope, node: NodeIndex) !InstIndex { const node_lhs = astgen.tree.nodeLHS(node); const node_lhs_loc = astgen.tree.nodeLoc(node_lhs); const expr = try astgen.genExpr(scope, node_lhs); const expr_res = try astgen.resolve(expr); if (astgen.getInst(expr_res) == .bool) { return astgen.addInst(.{ .unary = .{ .op = .not, .result_type = expr_res, .expr = expr } }); } try astgen.errors.add( node_lhs_loc, "cannot operate not (!) 
on '{s}'", .{node_lhs_loc.slice(astgen.tree.source)}, null, ); return error.AnalysisFail; } fn genNegate(astgen: *AstGen, scope: *Scope, node: NodeIndex) !InstIndex { const node_lhs = astgen.tree.nodeLHS(node); const node_lhs_loc = astgen.tree.nodeLoc(node_lhs); const expr = try astgen.genExpr(scope, node_lhs); const expr_res = try astgen.resolve(expr); switch (astgen.getInst(expr_res)) { .int, .float => return astgen.addInst(.{ .unary = .{ .op = .negate, .result_type = expr_res, .expr = expr, }, }), else => {}, } try astgen.errors.add( node_lhs_loc, "cannot negate '{s}'", .{node_lhs_loc.slice(astgen.tree.source)}, null, ); return error.AnalysisFail; } fn genDeref(astgen: *AstGen, scope: *Scope, node: NodeIndex) !InstIndex { const node_lhs = astgen.tree.nodeLHS(node); const node_lhs_loc = astgen.tree.nodeLoc(node_lhs); const expr = try astgen.genExpr(scope, node_lhs); const expr_res = try astgen.resolve(expr); const expr_res_inst = astgen.getInst(expr_res); if (expr_res_inst == .ptr_type) { return astgen.addInst(.{ .unary = .{ .op = .deref, .result_type = expr_res_inst.ptr_type.elem_type, .expr = expr, }, }); } try astgen.errors.add( node_lhs_loc, "cannot dereference '{s}'", .{node_lhs_loc.slice(astgen.tree.source)}, null, ); return error.AnalysisFail; } fn genAddrOf(astgen: *AstGen, scope: *Scope, node: NodeIndex) !InstIndex { const expr = try astgen.genExpr(scope, astgen.tree.nodeLHS(node)); const expr_res = try astgen.resolve(expr); const result_type = try astgen.addInst(.{ .ptr_type = .{ .elem_type = expr_res, .addr_space = .function, // TODO .access_mode = .read_write, // TODO }, }); const inst = try astgen.addInst(.{ .unary = .{ .op = .addr_of, .result_type = result_type, .expr = expr, }, }); return inst; } fn genBinary(astgen: *AstGen, scope: *Scope, node: NodeIndex) !InstIndex { const node_tag = astgen.tree.nodeTag(node); const node_loc = astgen.tree.nodeLoc(node); const node_lhs = astgen.tree.nodeLHS(node); const node_rhs = astgen.tree.nodeRHS(node); const lhs = try astgen.genExpr(scope, node_lhs); const rhs = try astgen.genExpr(scope, node_rhs); const lhs_res = try astgen.resolve(lhs); const rhs_res = try astgen.resolve(rhs); const lhs_res_inst = astgen.getInst(lhs_res); const rhs_res_inst = astgen.getInst(rhs_res); var is_valid = false; var vector_size: ?Inst.Vector.Size = null; var arithmetic_res_type = InstIndex.none; switch (node_tag) { .shl, .shr, .@"and", .@"or", .xor => { is_valid = lhs_res_inst == .int and rhs_res_inst == .int; }, .logical_and, .logical_or => { is_valid = lhs_res_inst == .bool and rhs_res_inst == .bool; }, .mul, .div, .mod, .add, .sub, .equal, .not_equal, .less_than, .less_than_equal, .greater_than, .greater_than_equal, => switch (lhs_res_inst) { .int, .float => { if (try astgen.coerce(rhs_res, lhs_res) or try astgen.coerce(lhs_res, rhs_res)) { is_valid = true; arithmetic_res_type = lhs_res; } switch (rhs_res_inst) { .vector => |v| { if (try astgen.coerce(lhs_res, rhs_res_inst.vector.elem_type)) { is_valid = true; vector_size = v.size; arithmetic_res_type = rhs_res; } }, else => {}, } }, .vector => |v| { vector_size = v.size; if (astgen.eql(rhs_res, lhs_res)) { is_valid = true; arithmetic_res_type = lhs_res; } if (try astgen.coerce(rhs_res, lhs_res_inst.vector.elem_type)) { is_valid = true; arithmetic_res_type = lhs_res; } if (rhs_res_inst == .matrix) { if (astgen.getInst(lhs_res_inst.vector.elem_type) == .float) { if (lhs_res_inst.vector.size == rhs_res_inst.matrix.cols) { is_valid = true; arithmetic_res_type = try astgen.addInst(.{ .vector = .{ 
.elem_type = lhs_res_inst.vector.elem_type, .size = rhs_res_inst.matrix.rows, .value = null, } }); } if (lhs_res_inst.vector.size == rhs_res_inst.matrix.rows) { is_valid = true; arithmetic_res_type = try astgen.addInst(.{ .vector = .{ .elem_type = lhs_res_inst.vector.elem_type, .size = rhs_res_inst.matrix.cols, .value = null, } }); } } } }, .matrix => { if (rhs_res_inst == .matrix) { if (astgen.eql(lhs_res_inst.matrix.elem_type, rhs_res_inst.matrix.elem_type)) { // matCxR<T> matCxR<T> if (lhs_res_inst.matrix.rows == rhs_res_inst.matrix.rows and lhs_res_inst.matrix.cols == rhs_res_inst.matrix.cols) { is_valid = true; arithmetic_res_type = lhs_res; } // matKxR<T> matCxK<T> if (lhs_res_inst.matrix.cols == rhs_res_inst.matrix.rows) { is_valid = true; arithmetic_res_type = try astgen.addInst(.{ .matrix = .{ .elem_type = lhs_res_inst.matrix.elem_type, .cols = rhs_res_inst.matrix.cols, .rows = lhs_res_inst.matrix.rows, .value = null, } }); } // matCxK<T> matKxR<T> if (rhs_res_inst.matrix.cols == lhs_res_inst.matrix.rows) { is_valid = true; arithmetic_res_type = try astgen.addInst(.{ .matrix = .{ .elem_type = lhs_res_inst.matrix.elem_type, .cols = lhs_res_inst.matrix.cols, .rows = rhs_res_inst.matrix.rows, .value = null, } }); } } } if (rhs_res_inst == .float) { is_valid = true; arithmetic_res_type = lhs_res; } if (rhs_res_inst == .vector) { if (rhs_res_inst.vector.size == lhs_res_inst.matrix.cols) { is_valid = true; arithmetic_res_type = try astgen.addInst(.{ .vector = .{ .elem_type = rhs_res_inst.vector.elem_type, .size = lhs_res_inst.matrix.rows, .value = null, } }); } if (rhs_res_inst.vector.size == lhs_res_inst.matrix.rows) { is_valid = true; arithmetic_res_type = try astgen.addInst(.{ .vector = .{ .elem_type = rhs_res_inst.vector.elem_type, .size = lhs_res_inst.matrix.cols, .value = null, } }); } } }, else => {}, }, else => unreachable, } if (!is_valid) { try astgen.errors.add( node_loc, "invalid operation between {s} and {s}", .{ @tagName(lhs_res_inst), @tagName(rhs_res_inst) }, null, ); return error.AnalysisFail; } const op: Inst.Binary.Op = switch (node_tag) { .mul => .mul, .div => .div, .mod => .mod, .add => .add, .sub => .sub, .shl => .shl, .shr => .shr, .@"and" => .@"and", .@"or" => .@"or", .xor => .xor, .logical_and => .logical_and, .logical_or => .logical_or, .equal => .equal, .not_equal => .not_equal, .less_than => .less_than, .less_than_equal => .less_than_equal, .greater_than => .greater_than, .greater_than_equal => .greater_than_equal, else => unreachable, }; const res_type = switch (op) { .mul, .div, .mod, .add, .sub, => arithmetic_res_type, .logical_and, .logical_or, .equal, .not_equal, .less_than, .less_than_equal, .greater_than, .greater_than_equal, => if (vector_size) |size| try astgen.addInst(.{ .vector = .{ .elem_type = try astgen.addInst(.{ .bool = .{ .value = null } }), .size = size, .value = null, } }) else try astgen.addInst(.{ .bool = .{ .value = null } }), else => lhs_res, }; return astgen.addInst(.{ .binary = .{ .op = op, .result_type = res_type, .lhs_type = lhs_res, .rhs_type = rhs_res, .lhs = lhs, .rhs = rhs, } }); } fn genCall(astgen: *AstGen, scope: *Scope, node: NodeIndex) !InstIndex { const token = astgen.tree.nodeToken(node); const token_tag = astgen.tree.tokenTag(token); const token_loc = astgen.tree.tokenLoc(token); const node_lhs = astgen.tree.nodeLHS(node); const node_rhs = astgen.tree.nodeRHS(node); const node_loc = astgen.tree.nodeLoc(node); if (node_rhs == .none) { std.debug.assert(token_tag == .ident); const builtin_fn = 
std.meta.stringToEnum(BuiltinFn, token_loc.slice(astgen.tree.source)) orelse { const decl = try astgen.findSymbol(scope, token); switch (astgen.getInst(decl)) { .@"fn" => return astgen.genFnCall(scope, node), .@"struct" => return astgen.genStructConstruct(scope, decl, node), else => { try astgen.errors.add( node_loc, "'{s}' cannot be called", .{token_loc.slice(astgen.tree.source)}, null, ); return error.AnalysisFail; }, } }; switch (builtin_fn) { .all => return astgen.genBuiltinAllAny(scope, node, true), .any => return astgen.genBuiltinAllAny(scope, node, false), .select => return astgen.genBuiltinSelect(scope, node), .abs => return astgen.genGenericUnaryBuiltin(scope, node, .abs, &.{ .u32, .i32 }, &.{ .f32, .f16 }, false, false), .acos => return astgen.genGenericUnaryBuiltin(scope, node, .acos, &.{}, &.{ .f32, .f16 }, false, false), .acosh => return astgen.genGenericUnaryBuiltin(scope, node, .acosh, &.{}, &.{ .f32, .f16 }, false, false), .asin => return astgen.genGenericUnaryBuiltin(scope, node, .asin, &.{}, &.{ .f32, .f16 }, false, false), .asinh => return astgen.genGenericUnaryBuiltin(scope, node, .asinh, &.{}, &.{ .f32, .f16 }, false, false), .atan => return astgen.genGenericUnaryBuiltin(scope, node, .atan, &.{}, &.{ .f32, .f16 }, false, false), .atanh => return astgen.genGenericUnaryBuiltin(scope, node, .atanh, &.{}, &.{ .f32, .f16 }, false, false), .ceil => return astgen.genGenericUnaryBuiltin(scope, node, .ceil, &.{}, &.{ .f32, .f16 }, false, false), .cos => return astgen.genGenericUnaryBuiltin(scope, node, .cos, &.{}, &.{ .f32, .f16 }, false, false), .cosh => return astgen.genGenericUnaryBuiltin(scope, node, .cosh, &.{}, &.{ .f32, .f16 }, false, false), .countLeadingZeros => return astgen.genGenericUnaryBuiltin(scope, node, .count_leading_zeros, &.{ .u32, .i32 }, &.{}, false, false), .countOneBits => return astgen.genGenericUnaryBuiltin(scope, node, .count_one_bits, &.{ .u32, .i32 }, &.{}, false, false), .countTrailingZeros => return astgen.genGenericUnaryBuiltin(scope, node, .count_trailing_zeros, &.{ .u32, .i32 }, &.{}, false, false), .degrees => return astgen.genGenericUnaryBuiltin(scope, node, .degrees, &.{}, &.{ .f32, .f16 }, false, false), .exp => return astgen.genGenericUnaryBuiltin(scope, node, .exp, &.{}, &.{ .f32, .f16 }, false, false), .exp2 => return astgen.genGenericUnaryBuiltin(scope, node, .exp2, &.{}, &.{ .f32, .f16 }, false, false), .firstLeadingBit => return astgen.genGenericUnaryBuiltin(scope, node, .first_leading_bit, &.{ .u32, .i32 }, &.{}, false, false), .firstTrailingBit => return astgen.genGenericUnaryBuiltin(scope, node, .first_trailing_bit, &.{ .u32, .i32 }, &.{}, false, false), .floor => return astgen.genGenericUnaryBuiltin(scope, node, .floor, &.{}, &.{ .f32, .f16 }, false, false), .fract => return astgen.genGenericUnaryBuiltin(scope, node, .fract, &.{}, &.{ .f32, .f16 }, false, false), .inverseSqrt => return astgen.genGenericUnaryBuiltin(scope, node, .inverse_sqrt, &.{}, &.{ .f32, .f16 }, false, false), .length => return astgen.genGenericUnaryBuiltin(scope, node, .length, &.{}, &.{ .f32, .f16 }, false, true), .log => return astgen.genGenericUnaryBuiltin(scope, node, .log, &.{}, &.{ .f32, .f16 }, false, false), .log2 => return astgen.genGenericUnaryBuiltin(scope, node, .log2, &.{}, &.{ .f32, .f16 }, false, false), .quantizeToF16 => return astgen.genGenericUnaryBuiltin(scope, node, .quantize_to_F16, &.{}, &.{.f32}, false, false), .radians => return astgen.genGenericUnaryBuiltin(scope, node, .radians, &.{}, &.{ .f32, .f16 }, false, false), .reverseBits => 
return astgen.genGenericUnaryBuiltin(scope, node, .reverseBits, &.{ .u32, .i32 }, &.{}, false, false), .round => return astgen.genGenericUnaryBuiltin(scope, node, .round, &.{}, &.{ .f32, .f16 }, false, false), .saturate => return astgen.genGenericUnaryBuiltin(scope, node, .saturate, &.{}, &.{ .f32, .f16 }, false, false), .sign => return astgen.genGenericUnaryBuiltin(scope, node, .sign, &.{ .u32, .i32 }, &.{.f16}, false, false), .sin => return astgen.genGenericUnaryBuiltin(scope, node, .sin, &.{}, &.{ .f32, .f16 }, false, false), .sinh => return astgen.genGenericUnaryBuiltin(scope, node, .sinh, &.{}, &.{ .f32, .f16 }, false, false), .sqrt => return astgen.genGenericUnaryBuiltin(scope, node, .sqrt, &.{}, &.{ .f32, .f16 }, false, false), .tan => return astgen.genGenericUnaryBuiltin(scope, node, .tan, &.{}, &.{ .f32, .f16 }, false, false), .tanh => return astgen.genGenericUnaryBuiltin(scope, node, .tanh, &.{}, &.{ .f32, .f16 }, false, false), .trunc => return astgen.genGenericUnaryBuiltin(scope, node, .trunc, &.{}, &.{ .f32, .f16 }, false, false), .normalize => return astgen.genGenericUnaryBuiltin(scope, node, .normalize, &.{}, &.{ .f32, .f16 }, true, false), .min => return astgen.genGenericBinaryBuiltin(scope, node, .min, .any_in_any_out, true), .max => return astgen.genGenericBinaryBuiltin(scope, node, .max, .any_in_any_out, true), .atan2 => return astgen.genGenericBinaryBuiltin(scope, node, .atan2, .any_in_any_out, true), .distance => return astgen.genGenericBinaryBuiltin(scope, node, .distance, .any_in_scalar_out, false), .dot => return astgen.genGenericBinaryBuiltin(scope, node, .dot, .vector_in_scalar_out, false), .pow => return astgen.genGenericBinaryBuiltin(scope, node, .pow, .any_in_any_out, false), .step => return astgen.genGenericBinaryBuiltin(scope, node, .step, .any_in_any_out, false), .smoothstep => return astgen.genGenericFloatTripleBuiltin(scope, node, .smoothstep, false), .clamp => return astgen.genGenericFloatTripleBuiltin(scope, node, .clamp, false), .mix => return astgen.genGenericFloatTripleBuiltin(scope, node, .mix, true), .dpdx => return astgen.genDerivativeBuiltin(scope, node, .dpdx), .dpdxCoarse => return astgen.genDerivativeBuiltin(scope, node, .dpdx_coarse), .dpdxFine => return astgen.genDerivativeBuiltin(scope, node, .dpdx_fine), .dpdy => return astgen.genDerivativeBuiltin(scope, node, .dpdy), .dpdyCoarse => return astgen.genDerivativeBuiltin(scope, node, .dpdy_coarse), .dpdyFine => return astgen.genDerivativeBuiltin(scope, node, .dpdy_fine), .fwidth => return astgen.genDerivativeBuiltin(scope, node, .fwidth), .fwidthCoarse => return astgen.genDerivativeBuiltin(scope, node, .fwidth_coarse), .fwidthFine => return astgen.genDerivativeBuiltin(scope, node, .fwidth_fine), .arrayLength => return astgen.genArrayLengthBuiltin(scope, node), .textureSample => return astgen.genTextureSampleBuiltin(scope, node), .textureSampleLevel => return astgen.genTextureSampleLevelBuiltin(scope, node), .textureSampleGrad => return astgen.genTextureSampleGradBuiltin(scope, node), .textureDimensions => return astgen.genTextureDimensionsBuiltin(scope, node), .textureLoad => return astgen.genTextureLoadBuiltin(scope, node), .textureStore => return astgen.genTextureStoreBuiltin(scope, node), .workgroupBarrier => return astgen.genSimpleBuiltin(.workgroup_barrier), .storageBarrier => return astgen.genSimpleBuiltin(.storage_barrier), else => { try astgen.errors.add( node_loc, "TODO: unimplemented builtin '{s}'", .{token_loc.slice(astgen.tree.source)}, null, ); return error.AnalysisFail; }, } } 
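// A type-constructor call: the callee token is one of the constructible
// types (bool, i32, u32, f32, f16, vecN, matCxR, array). With no arguments
// each prong below produces the type's zero value; otherwise the arguments
// are cast or assembled, e.g. (WGSL) `vec3<f32>(1.0)` splats the scalar
// across all three components via the single-argument path further down.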
switch (token_tag) { .k_bool => { if (node_lhs == .none) { return astgen.addInst(.{ .bool = .{ .value = .{ .literal = false } } }); } const arg_node = astgen.tree.spanToList(node_lhs)[0]; const expr = try astgen.genExpr(scope, arg_node); const expr_res = try astgen.resolve(expr); switch (astgen.getInst(expr_res)) { .bool => return expr, .int, .float => return astgen.addInst(.{ .bool = .{ .value = .{ .cast = .{ .value = expr, .type = expr_res, }, }, } }), else => {}, } try astgen.errors.add(node_loc, "cannot construct bool", .{}, null); return error.AnalysisFail; }, .k_u32, .k_i32 => { const ty: Inst.Int.Type = switch (token_tag) { .k_u32 => .u32, .k_i32 => .i32, else => unreachable, }; if (node_lhs == .none) { const zero_value = try astgen.addValue(Inst.Int.Value, .{ .literal = 0 }); return astgen.addInst(.{ .int = .{ .value = zero_value, .type = ty } }); } const arg_nodes = astgen.tree.spanToList(node_lhs); if (arg_nodes.len > 1) { try astgen.errors.add(node_loc, "too many arguments", .{}, null); return error.AnalysisFail; } const arg_node = arg_nodes[0]; const expr = try astgen.genExpr(scope, arg_node); const expr_res = try astgen.resolve(expr); switch (astgen.getInst(expr_res)) { .bool, .float => {}, .int => |int| if (int.type == ty) return expr, else => { try astgen.errors.add(node_loc, "type mismatch", .{}, null); return error.AnalysisFail; }, } const value = try astgen.addValue(Inst.Int.Value, .{ .cast = .{ .value = expr, .type = expr_res, }, }); return astgen.addInst(.{ .int = .{ .value = value, .type = ty } }); }, .k_f32, .k_f16 => { const ty: Inst.Float.Type = switch (token_tag) { .k_f32 => .f32, .k_f16 => .f16, else => unreachable, }; if (node_lhs == .none) { const zero_value = try astgen.addValue(Inst.Float.Value, .{ .literal = 0 }); return astgen.addInst(.{ .float = .{ .value = zero_value, .type = ty } }); } const arg_nodes = astgen.tree.spanToList(node_lhs); if (arg_nodes.len > 1) { try astgen.errors.add(node_loc, "too many arguments", .{}, null); return error.AnalysisFail; } const arg_node = arg_nodes[0]; const expr = try astgen.genExpr(scope, arg_node); const expr_res = try astgen.resolve(expr); switch (astgen.getInst(expr_res)) { .bool, .int => {}, .float => |float| if (float.type == ty) return expr, else => { try astgen.errors.add(node_loc, "type mismatch", .{}, null); return error.AnalysisFail; }, } const value = try astgen.addValue(Inst.Float.Value, .{ .cast = .{ .value = expr, .type = expr_res, }, }); return astgen.addInst(.{ .float = .{ .value = value, .type = ty } }); }, .k_vec2, .k_vec3, .k_vec4 => { const elem_type_node = astgen.tree.nodeLHS(node_rhs); const size: Inst.Vector.Size = switch (token_tag) { .k_vec2 => .two, .k_vec3 => .three, .k_vec4 => .four, else => unreachable, }; var elem_type = InstIndex.none; if (elem_type_node != .none) { elem_type = try astgen.genType(scope, elem_type_node); switch (astgen.getInst(elem_type)) { .bool, .int, .float => {}, else => { try astgen.errors.add( astgen.tree.nodeLoc(elem_type_node), "invalid vector component type", .{}, try astgen.errors.createNote( null, "must be 'i32', 'u32', 'f32', 'f16' or 'bool'", .{}, ), ); return error.AnalysisFail; }, } } if (node_lhs == .none) { if (elem_type_node == .none) { try astgen.errors.add(node_loc, "cannot infer vector type", .{}, null); return error.AnalysisFail; } return astgen.addInst(.{ .vector = .{ .elem_type = elem_type, .size = size, .value = .none, }, }); } const arg_nodes = astgen.tree.spanToList(node_lhs); var args: [4]InstIndex = undefined; var cast = InstIndex.none; var 
capacity = @intFromEnum(size); for (arg_nodes) |arg_node| { const i = @intFromEnum(size) - capacity; const arg = try astgen.genExpr(scope, arg_node); const arg_loc = astgen.tree.nodeLoc(arg_node); const arg_res = try astgen.resolve(arg); if (capacity == 0) { try astgen.errors.add(arg_loc, "doesn't fit in this vector", .{}, null); return error.AnalysisFail; } switch (astgen.getInst(arg_res)) { .vector => |arg_vec| { if (elem_type == .none) { elem_type = arg_vec.elem_type; } else if (!astgen.eql(arg_vec.elem_type, elem_type)) { cast = arg_vec.elem_type; } if (capacity >= @intFromEnum(arg_vec.size)) { for (0..@intFromEnum(arg_vec.size)) |component_i| { args[i + component_i] = astgen.resolveVectorValue(arg, @intCast(component_i)) orelse try astgen.addInst(.{ .swizzle_access = .{ .base = arg, .type = astgen.getInst(arg_res).vector.elem_type, .size = .one, .pattern = [_]Inst.SwizzleAccess.Component{ @enumFromInt(component_i), undefined, undefined, undefined, }, }, }); } capacity -= @intFromEnum(arg_vec.size); } else { try astgen.errors.add(arg_loc, "doesn't fit in this vector", .{}, null); return error.AnalysisFail; } }, .bool, .int, .float => { var cast_arg = false; if (elem_type == .none) { elem_type = arg_res; } else if (!astgen.eql(arg_res, elem_type)) { cast_arg = true; } if (cast_arg) { switch (astgen.getInst(elem_type)) { .int => |int| { const arg_val = try astgen.addValue( Inst.Int.Value, .{ .cast = .{ .type = arg_res, .value = arg } }, ); args[i] = try astgen.addInst(.{ .int = .{ .type = int.type, .value = arg_val, } }); }, .float => |float| { const arg_val = try astgen.addValue( Inst.Float.Value, .{ .cast = .{ .type = arg_res, .value = arg } }, ); args[i] = try astgen.addInst(.{ .float = .{ .type = float.type, .value = arg_val, } }); }, .bool => { args[i] = try astgen.addInst(.{ .bool = .{ .value = .{ .cast = .{ .type = arg_res, .value = arg } }, } }); }, else => unreachable, } } else { args[i] = arg; } if (arg_nodes.len == 1) { @memset(args[i + 1 .. 
@intFromEnum(size)], args[i]); capacity = 1; } capacity -= 1; }, else => { try astgen.errors.add(arg_loc, "type mismatch", .{}, null); return error.AnalysisFail; }, } } if (capacity != 0) { try astgen.errors.add(node_loc, "arguments don't satisfy vector capacity", .{}, null); return error.AnalysisFail; } const value = try astgen.addValue( Inst.Vector.Value, if (cast == .none) .{ .literal = args } else .{ .cast = .{ .type = cast, .value = args } }, ); return astgen.addInst(.{ .vector = .{ .elem_type = elem_type, .size = size, .value = value, }, }); }, .k_mat2x2, .k_mat2x3, .k_mat2x4, .k_mat3x2, .k_mat3x3, .k_mat3x4, .k_mat4x2, .k_mat4x3, .k_mat4x4, => { const elem_type_node = astgen.tree.nodeLHS(node_rhs); const cols = matrixCols(token_tag); const rows = matrixRows(token_tag); var elem_type = InstIndex.none; if (elem_type_node != .none) { elem_type = try astgen.genType(scope, elem_type_node); if (astgen.getInst(elem_type) != .float) { try astgen.errors.add( astgen.tree.nodeLoc(elem_type_node), "invalid matrix component type", .{}, try astgen.errors.createNote(null, "must be 'f32' or 'f16'", .{}), ); return error.AnalysisFail; } } if (node_lhs == .none) { if (elem_type_node == .none) { try astgen.errors.add(node_loc, "cannot infer matrix type", .{}, null); return error.AnalysisFail; } return astgen.addInst(.{ .matrix = .{ .elem_type = elem_type, .rows = rows, .cols = cols, .value = .none, }, }); } const arg_nodes = astgen.tree.spanToList(node_lhs); var args: [4]InstIndex = undefined; var capacity = @intFromEnum(cols); for (arg_nodes) |arg_node| { const i = @intFromEnum(cols) - capacity; const arg = try astgen.genExpr(scope, arg_node); const arg_loc = astgen.tree.nodeLoc(arg_node); const arg_res = try astgen.resolve(arg); if (capacity == 0) { try astgen.errors.add(arg_loc, "doesn't fit in this matrix", .{}, null); return error.AnalysisFail; } switch (astgen.getInst(arg_res)) { .matrix => |arg_mat| { if (elem_type == .none) { elem_type = arg_mat.elem_type; } else { if (!try astgen.coerce(arg_mat.elem_type, elem_type)) { try astgen.errors.add(arg_loc, "type mismatch", .{}, null); return error.AnalysisFail; } } if (arg_nodes.len == 1 and arg_mat.cols == cols and arg_mat.rows == rows) { return arg; } try astgen.errors.add(arg_loc, "invalid argument", .{}, null); return error.AnalysisFail; }, .vector => |arg_vec| { if (elem_type == .none) { elem_type = arg_vec.elem_type; } else { if (!try astgen.coerce(arg_vec.elem_type, elem_type)) { try astgen.errors.add(arg_loc, "type mismatch", .{}, null); return error.AnalysisFail; } } if (@intFromEnum(arg_vec.size) != @intFromEnum(rows)) { try astgen.errors.add(arg_loc, "invalid argument", .{}, null); return error.AnalysisFail; } args[i] = arg; capacity -= 1; }, .float => { if (elem_type == .none) { elem_type = arg_res; } if (arg_nodes.len != 1) { try astgen.errors.add(arg_loc, "invalid argument", .{}, null); return error.AnalysisFail; } for (args[0..@intFromEnum(cols)]) |*arg_col| { var arg_vec_value: [4]InstIndex = undefined; @memset(arg_vec_value[0..@intFromEnum(rows)], arg); const arg_vec = try astgen.addInst(.{ .vector = .{ .elem_type = elem_type, .size = rows, .value = try astgen.addValue(Inst.Vector.Value, .{ .literal = arg_vec_value }), }, }); arg_col.* = arg_vec; capacity -= 1; } }, else => { try astgen.errors.add(arg_loc, "type mismatch", .{}, null); return error.AnalysisFail; }, } } if (capacity != 0) { try astgen.errors.add(node_loc, "arguments don't satisfy matrix capacity", .{}, null); return error.AnalysisFail; } return astgen.addInst(.{ 
.matrix = .{ .elem_type = elem_type, .cols = cols, .rows = rows, .value = try astgen.addValue(Inst.Matrix.Value, args), }, }); }, .k_array => { if (node_lhs == .none) { return astgen.genArray(scope, node_rhs, .none); } const scratch_top = astgen.scratch.items.len; defer astgen.scratch.shrinkRetainingCapacity(scratch_top); var arg1_res = InstIndex.none; const arg_nodes = astgen.tree.spanToList(node_lhs); for (arg_nodes, 0..) |arg_node, i| { const arg = try astgen.genExpr(scope, arg_node); const arg_res = try astgen.resolve(arg); if (i == 0) { arg1_res = arg_res; } else if (!try astgen.coerce(arg_res, arg1_res)) { try astgen.errors.add(node_loc, "cannot construct array", .{}, null); return error.AnalysisFail; } try astgen.scratch.append(astgen.allocator, arg); } const args = try astgen.addRefList(astgen.scratch.items[scratch_top..]); return astgen.genArray(scope, node_rhs, args); }, else => unreachable, } } fn resolveVectorValue(astgen: *AstGen, vector_idx: InstIndex, value_idx: u3) ?InstIndex { return switch (astgen.getInst(vector_idx)) { .vector => |vector| switch (astgen.getValue(Inst.Vector.Value, vector.value.?)) { .literal => |literal| literal[value_idx], .cast => |cast| cast.value[value_idx], }, inline .swizzle_access, .index_access => |access| astgen.resolveVectorValue(access.base, value_idx), else => null, }; } fn genReturn(astgen: *AstGen, scope: *Scope, node: NodeIndex) !InstIndex { const node_lhs = astgen.tree.nodeLHS(node); const node_loc = astgen.tree.nodeLoc(node); var fn_scope = findFnScope(scope); var value = InstIndex.none; if (node_lhs != .none) { if (fn_scope.tag.@"fn".return_type == .none) { try astgen.errors.add(node_loc, "cannot return value", .{}, null); return error.AnalysisFail; } value = try astgen.genExpr(scope, node_lhs); const value_res = try astgen.resolve(value); if (!try astgen.coerce(value_res, fn_scope.tag.@"fn".return_type)) { try astgen.errors.add(node_loc, "return type mismatch", .{}, null); return error.AnalysisFail; } } else { if (fn_scope.tag.@"fn".return_type != .none) { try astgen.errors.add(node_loc, "return value not specified", .{}, null); return error.AnalysisFail; } } fn_scope.tag.@"fn".returned = true; return astgen.addInst(.{ .@"return" = value }); } fn findFnScope(scope: *Scope) *Scope { var s = scope; while (true) { switch (s.tag) { .root => unreachable, .@"fn" => return s, .block, .loop, .continuing, .switch_case, .@"if", .@"for", => s = s.parent, } } } fn genFnCall(astgen: *AstGen, scope: *Scope, node: NodeIndex) !InstIndex { const node_lhs = astgen.tree.nodeLHS(node); const node_loc = astgen.tree.nodeLoc(node); const token = astgen.tree.nodeToken(node); const decl = try astgen.findSymbol(scope, token); if (astgen.tree.nodeRHS(node) != .none) { try astgen.errors.add(node_loc, "expected a function", .{}, null); return error.AnalysisFail; } const scratch_top = astgen.scratch.items.len; defer astgen.scratch.shrinkRetainingCapacity(scratch_top); var args = RefIndex.none; if (node_lhs != .none) { const params = astgen.refToList(astgen.getInst(decl).@"fn".params); const arg_nodes = astgen.tree.spanToList(node_lhs); if (params.len != arg_nodes.len) { try astgen.errors.add(node_loc, "function params count mismatch", .{}, null); return error.AnalysisFail; } for (arg_nodes, 0..) 
|arg_node, i| { const arg = try astgen.genExpr(scope, arg_node); const arg_res = try astgen.resolve(arg); if (try astgen.coerce(astgen.getInst(params[i]).fn_param.type, arg_res)) { try astgen.scratch.append(astgen.allocator, arg); } else { try astgen.errors.add( astgen.tree.nodeLoc(arg_node), "value and parameter type mismatch", .{}, null, ); return error.AnalysisFail; } } args = try astgen.addRefList(astgen.scratch.items[scratch_top..]); } else { if (astgen.getInst(decl).@"fn".params != .none) { try astgen.errors.add(node_loc, "function params count mismatch", .{}, null); return error.AnalysisFail; } } for (astgen.refToList(astgen.getInst(decl).@"fn".global_var_refs)) |var_inst_idx| { try astgen.global_var_refs.put(astgen.allocator, var_inst_idx, {}); } return astgen.addInst(.{ .call = .{ .@"fn" = decl, .args = args } }); } fn genStructConstruct(astgen: *AstGen, scope: *Scope, decl: InstIndex, node: NodeIndex) !InstIndex { const node_lhs = astgen.tree.nodeLHS(node); const node_loc = astgen.tree.nodeLoc(node); const scratch_top = astgen.scratch.items.len; defer astgen.scratch.shrinkRetainingCapacity(scratch_top); const struct_members = astgen.refToList(astgen.getInst(decl).@"struct".members); if (node_lhs != .none) { const arg_nodes = astgen.tree.spanToList(node_lhs); if (struct_members.len != arg_nodes.len) { try astgen.errors.add(node_loc, "struct members count mismatch", .{}, null); return error.AnalysisFail; } for (arg_nodes, 0..) |arg_node, i| { const arg = try astgen.genExpr(scope, arg_node); const arg_res = try astgen.resolve(arg); if (try astgen.coerce(arg_res, astgen.getInst(struct_members[i]).struct_member.type)) { try astgen.scratch.append(astgen.allocator, arg); } else { try astgen.errors.add( astgen.tree.nodeLoc(arg_node), "value and member type mismatch", .{}, null, ); return error.AnalysisFail; } } } else { if (struct_members.len != 0) { try astgen.errors.add(node_loc, "struct members count mismatch", .{}, null); return error.AnalysisFail; } } const members = try astgen.addRefList(astgen.scratch.items[scratch_top..]); return astgen.addInst(.{ .struct_construct = .{ .@"struct" = decl, .members = members, }, }); } fn genBitcast(astgen: *AstGen, scope: *Scope, node: NodeIndex) !InstIndex { const node_lhs = astgen.tree.nodeLHS(node); const node_rhs = astgen.tree.nodeRHS(node); const node_lhs_loc = astgen.tree.nodeLoc(node_lhs); const node_rhs_loc = astgen.tree.nodeLoc(node_rhs); const lhs = try astgen.genType(scope, node_lhs); const lhs_inst = astgen.getInst(lhs); const rhs = try astgen.genExpr(scope, node_rhs); const rhs_res = try astgen.resolve(rhs); const rhs_res_inst = astgen.getInst(rhs_res); var result_type = InstIndex.none; const lhs_is_32bit = switch (lhs_inst) { .int => |int| int.type == .u32 or int.type == .i32, .float => |float| float.type == .f32, else => false, }; const rhs_is_32bit = switch (rhs_res_inst) { .int => |int| int.type == .u32 or int.type == .i32, .float => |float| float.type == .f32, else => false, }; if (lhs_is_32bit) { if (rhs_is_32bit) { // bitcast<T>(T) -> T // bitcast<T>(S) -> T result_type = lhs; } else if (rhs_res_inst == .vector) { const rhs_vec_type = astgen.getInst(rhs_res_inst.vector.elem_type); if (rhs_res_inst.vector.size == .two and rhs_vec_type == .float and rhs_vec_type.float.type == .f16) { // bitcast<T>(vec2<f16>) -> T result_type = lhs; } } } else if (lhs_inst == .vector) { if (rhs_is_32bit) { const lhs_vec_type = astgen.getInst(lhs_inst.vector.elem_type); if (lhs_inst.vector.size == .two and lhs_vec_type == .float and 
lhs_vec_type.float.type == .f16) { // bitcast<vec2<f16>>(T) -> vec2<f16> result_type = lhs; } } else if (rhs_res_inst == .vector) { const lhs_vec_type = astgen.getInst(lhs_inst.vector.elem_type); const rhs_vec_type = astgen.getInst(rhs_res_inst.vector.elem_type); const lhs_vec_is_32bit = switch (lhs_vec_type) { .int => |int| int.type == .u32 or int.type == .i32, .float => |float| float.type == .f32, else => false, }; const rhs_vec_is_32bit = switch (rhs_vec_type) { .int => |int| int.type == .u32 or int.type == .i32, .float => |float| float.type == .f32, else => false, }; if (lhs_vec_is_32bit) { if (rhs_vec_is_32bit) { if (lhs_inst.vector.size == rhs_res_inst.vector.size) { if (lhs_inst.vector.elem_type == rhs_res_inst.vector.elem_type) { // bitcast<vecN<T>>(vecN<T>) -> vecN<T> result_type = lhs; } else { // bitcast<vecN<T>>(vecN<S>) -> T result_type = lhs_inst.vector.elem_type; } } } else if (rhs_vec_type == .float and rhs_vec_type.float.type == .f16) { if (lhs_inst.vector.size == .two and rhs_res_inst.vector.size == .four) { // bitcast<vec2<T>>(vec4<f16>) -> vec2<T> result_type = lhs; } } } else if (lhs_vec_type == .float and lhs_vec_type.float.type == .f16) { if (rhs_res_inst.vector.size == .two and lhs_inst.vector.size == .four) { // bitcast<vec4<f16>>(vec2<T>) -> vec4<f16> result_type = lhs; } } } } if (result_type != .none) { const inst = try astgen.addInst(.{ .bitcast = .{ .type = lhs, .expr = rhs, .result_type = result_type, }, }); return inst; } try astgen.errors.add( node_rhs_loc, "cannot cast '{s}' into '{s}'", .{ node_rhs_loc.slice(astgen.tree.source), node_lhs_loc.slice(astgen.tree.source) }, null, ); return error.AnalysisFail; } fn genBuiltinAllAny(astgen: *AstGen, scope: *Scope, node: NodeIndex, all: bool) !InstIndex { const node_loc = astgen.tree.nodeLoc(node); const node_lhs = astgen.tree.nodeLHS(node); if (node_lhs == .none) { return astgen.failArgCountMismatch(node_loc, 1, 0); } const arg_nodes = astgen.tree.spanToList(node_lhs); if (arg_nodes.len != 1) { return astgen.failArgCountMismatch(node_loc, 1, arg_nodes.len); } const arg = try astgen.genExpr(scope, arg_nodes[0]); const arg_res = try astgen.resolve(arg); switch (astgen.getInst(arg_res)) { .bool => return arg, .vector => |vec| { if (astgen.getInst(vec.elem_type) != .bool) { try astgen.errors.add(node_loc, "invalid vector element type", .{}, null); return error.AnalysisFail; } const result_type = try astgen.addInst(.{ .bool = .{ .value = null } }); return astgen.addInst(.{ .unary_intrinsic = .{ .op = if (all) .all else .any, .expr = arg, .result_type = result_type, } }); }, else => { try astgen.errors.add(node_loc, "type mismatch", .{}, null); return error.AnalysisFail; }, } } fn genBuiltinSelect(astgen: *AstGen, scope: *Scope, node: NodeIndex) !InstIndex { const node_loc = astgen.tree.nodeLoc(node); const node_lhs = astgen.tree.nodeLHS(node); if (node_lhs == .none) { return astgen.failArgCountMismatch(node_loc, 3, 0); } const arg_nodes = astgen.tree.spanToList(node_lhs); if (arg_nodes.len != 3) { return astgen.failArgCountMismatch(node_loc, 3, arg_nodes.len); } const arg1 = try astgen.genExpr(scope, arg_nodes[0]); const arg2 = try astgen.genExpr(scope, arg_nodes[1]); const arg3 = try astgen.genExpr(scope, arg_nodes[2]); const arg1_res = try astgen.resolve(arg1); const arg2_res = try astgen.resolve(arg2); const arg3_res = try astgen.resolve(arg3); if (!try astgen.coerce(arg2_res, arg1_res)) { try astgen.errors.add(node_loc, "type mismatch", .{}, null); return error.AnalysisFail; } switch (astgen.getInst(arg3_res)) { 
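// select(): the third argument is the condition. A scalar bool picks one of
// the two value operands wholesale; a vector of bool selects component-wise,
// in which case the first two operands must be vectors as well.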
.bool => { return astgen.addInst(.{ .select = .{ .type = arg1_res, .true = arg1, .false = arg2, .cond = arg3, }, }); }, .vector => |vec| { if (astgen.getInst(vec.elem_type) != .bool) { try astgen.errors.add(node_loc, "invalid vector element type", .{}, null); return error.AnalysisFail; } if (astgen.getInst(arg1_res) != .vector) { try astgen.errors.add(node_loc, "'true' and 'false' must be vectors", .{}, null); return error.AnalysisFail; } return astgen.addInst(.{ .select = .{ .type = arg1_res, .true = arg1, .false = arg2, .cond = arg3, }, }); }, else => { try astgen.errors.add(node_loc, "type mismatch", .{}, null); return error.AnalysisFail; }, } } fn genDerivativeBuiltin( astgen: *AstGen, scope: *Scope, node: NodeIndex, comptime op: Inst.UnaryIntrinsic.Op, ) !InstIndex { const node_loc = astgen.tree.nodeLoc(node); if (astgen.current_fn_scope.tag.@"fn".stage != .fragment) { try astgen.errors.add( node_loc, "invalid builtin in {s} stage", .{@tagName(astgen.current_fn_scope.tag.@"fn".stage)}, null, ); return error.AnalysisFail; } const node_lhs = astgen.tree.nodeLHS(node); if (node_lhs == .none) { return astgen.failArgCountMismatch(node_loc, 1, 0); } const arg_nodes = astgen.tree.spanToList(node_lhs); if (arg_nodes.len != 1) { return astgen.failArgCountMismatch(node_loc, 1, arg_nodes.len); } const arg = try astgen.genExpr(scope, arg_nodes[0]); const arg_res = try astgen.resolve(arg); const inst = Inst{ .unary_intrinsic = .{ .op = op, .expr = arg, .result_type = arg_res, } }; switch (astgen.getInst(arg_res)) { .float => |float| { if (float.type == .f32) { return astgen.addInst(inst); } }, .vector => |vec| { switch (astgen.getInst(vec.elem_type)) { .float => |float| { if (float.type == .f32) { return astgen.addInst(inst); } }, else => {}, } }, else => {}, } try astgen.errors.add(node_loc, "type mismatch", .{}, null); return error.AnalysisFail; } fn genGenericUnaryBuiltin( astgen: *AstGen, scope: *Scope, node: NodeIndex, comptime op: Inst.UnaryIntrinsic.Op, comptime int_limit: []const Inst.Int.Type, comptime float_limit: []const Inst.Float.Type, comptime vector_only: bool, comptime scalar_result: bool, ) !InstIndex { const node_loc = astgen.tree.nodeLoc(node); const node_lhs = astgen.tree.nodeLHS(node); if (node_lhs == .none) { return astgen.failArgCountMismatch(node_loc, 1, 0); } const arg_nodes = astgen.tree.spanToList(node_lhs); if (arg_nodes.len != 1) { return astgen.failArgCountMismatch(node_loc, 1, arg_nodes.len); } const arg = try astgen.genExpr(scope, arg_nodes[0]); const arg_res = try astgen.resolve(arg); var result_type = arg_res; switch (astgen.getInst(arg_res)) { .int => |int| if (vector_only or indexOf(Inst.Int.Type, int_limit, int.type) == null) { try astgen.errors.add(node_loc, "type mismatch", .{}, null); return error.AnalysisFail; }, .float => |float| if (vector_only or indexOf(Inst.Float.Type, float_limit, float.type) == null) { try astgen.errors.add(node_loc, "type mismatch", .{}, null); return error.AnalysisFail; }, .vector => |vec| { switch (astgen.getInst(vec.elem_type)) { .bool => { try astgen.errors.add(node_loc, "invalid vector element type", .{}, null); return error.AnalysisFail; }, .int => |int| if (indexOf(Inst.Int.Type, int_limit, int.type) == null) { try astgen.errors.add(node_loc, "invalid vector element type", .{}, null); return error.AnalysisFail; }, .float => |float| if (indexOf(Inst.Float.Type, float_limit, float.type) == null) { try astgen.errors.add(node_loc, "invalid vector element type", .{}, null); return error.AnalysisFail; }, else => {}, } if 
(scalar_result) { result_type = vec.elem_type; } }, else => { try astgen.errors.add(node_loc, "type mismatch", .{}, null); return error.AnalysisFail; }, } return astgen.addInst(.{ .unary_intrinsic = .{ .op = op, .expr = arg, .result_type = result_type, }, }); } fn genGenericBinaryBuiltin( astgen: *AstGen, scope: *Scope, node: NodeIndex, comptime op: Inst.BinaryIntrinsic.Op, comptime form: enum { any_in_any_out, any_in_scalar_out, vector_in_scalar_out }, comptime allow_int: bool, ) !InstIndex { const node_loc = astgen.tree.nodeLoc(node); const node_lhs = astgen.tree.nodeLHS(node); if (node_lhs == .none) { return astgen.failArgCountMismatch(node_loc, 2, 0); } const arg_nodes = astgen.tree.spanToList(node_lhs); if (arg_nodes.len != 2) { return astgen.failArgCountMismatch(node_loc, 2, arg_nodes.len); } const arg1 = try astgen.genExpr(scope, arg_nodes[0]); const arg2 = try astgen.genExpr(scope, arg_nodes[1]); const arg1_res = try astgen.resolve(arg1); const arg2_res = try astgen.resolve(arg2); var result_type = arg1_res; switch (astgen.getInst(arg1_res)) { .float => if (form == .vector_in_scalar_out) { try astgen.errors.add(node_loc, "type mismatch", .{}, null); return error.AnalysisFail; }, .int => if (!allow_int or form == .vector_in_scalar_out) { try astgen.errors.add(node_loc, "type mismatch", .{}, null); return error.AnalysisFail; }, .vector => |vec| { switch (astgen.getInst(vec.elem_type)) { .bool => { try astgen.errors.add(node_loc, "invalid vector element type", .{}, null); return error.AnalysisFail; }, .int => if (!allow_int) { try astgen.errors.add(node_loc, "invalid vector element type", .{}, null); return error.AnalysisFail; }, else => {}, } switch (form) { .any_in_scalar_out, .vector_in_scalar_out => result_type = vec.elem_type, .any_in_any_out => {}, } }, else => { try astgen.errors.add(node_loc, "type mismatch", .{}, null); return error.AnalysisFail; }, } if (!try astgen.coerce(arg2_res, arg1_res)) { try astgen.errors.add(node_loc, "type mismatch", .{}, null); return error.AnalysisFail; } return astgen.addInst(.{ .binary_intrinsic = .{ .op = op, .lhs = arg1, .rhs = arg2, .lhs_type = arg1_res, .rhs_type = arg2_res, .result_type = result_type, } }); } fn genGenericFloatTripleBuiltin( astgen: *AstGen, scope: *Scope, node: NodeIndex, comptime op: Inst.TripleIntrinsic.Op, comptime scalar_float_arg3: bool, ) !InstIndex { const node_loc = astgen.tree.nodeLoc(node); const node_lhs = astgen.tree.nodeLHS(node); if (node_lhs == .none) { return astgen.failArgCountMismatch(node_loc, 3, 0); } const arg_nodes = astgen.tree.spanToList(node_lhs); if (arg_nodes.len != 3) { return astgen.failArgCountMismatch(node_loc, 3, arg_nodes.len); } const a1 = try astgen.genExpr(scope, arg_nodes[0]); const a2 = try astgen.genExpr(scope, arg_nodes[1]); const a3 = try astgen.genExpr(scope, arg_nodes[2]); const a1_res = try astgen.resolve(a1); const a2_res = try astgen.resolve(a2); const a3_res = try astgen.resolve(a3); if (!try astgen.coerce(a2_res, a1_res)) { try astgen.errors.add(node_loc, "type mismatch", .{}, null); return error.AnalysisFail; } if (scalar_float_arg3) { switch (astgen.getInst(a3_res)) { .float => {}, else => { try astgen.errors.add(node_loc, "type mismatch", .{}, null); return error.AnalysisFail; }, } } else { if (!try astgen.coerce(a3_res, a1_res)) { try astgen.errors.add(node_loc, "type mismatch", .{}, null); return error.AnalysisFail; } } if (astgen.getInst(a1_res) == .float or (astgen.getInst(a1_res) == .vector and astgen.getInst(astgen.getInst(a1_res).vector.elem_type) == .float)) { 
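// All three operands resolved to f32/f16 scalars or vectors thereof; the
// triple intrinsic (clamp, mix, smoothstep) takes its result type from the
// first argument.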
return astgen.addInst(.{ .triple_intrinsic = .{ .op = op, .result_type = a1_res, .a1_type = a1_res, .a2_type = a2_res, .a3_type = a3_res, .a1 = a1, .a2 = a2, .a3 = a3, }, }); } try astgen.errors.add(node_loc, "type mismatch", .{}, null); return error.AnalysisFail; } fn genArrayLengthBuiltin(astgen: *AstGen, scope: *Scope, node: NodeIndex) !InstIndex { astgen.has_array_length = true; const node_loc = astgen.tree.nodeLoc(node); const node_lhs = astgen.tree.nodeLHS(node); if (node_lhs == .none) { return astgen.failArgCountMismatch(node_loc, 1, 0); } const arg_nodes = astgen.tree.spanToList(node_lhs); if (arg_nodes.len != 1) { return astgen.failArgCountMismatch(node_loc, 1, arg_nodes.len); } const arg_node = arg_nodes[0]; const arg_node_loc = astgen.tree.nodeLoc(arg_node); const arg = try astgen.genExpr(scope, arg_node); const arg_res = try astgen.resolve(arg); const arg_res_inst = astgen.getInst(arg_res); if (arg_res_inst == .ptr_type) { const ptr_elem_inst = astgen.getInst(arg_res_inst.ptr_type.elem_type); if (ptr_elem_inst == .array) { const result_type = try astgen.addInst(.{ .int = .{ .type = .u32, .value = null } }); return astgen.addInst(.{ .unary_intrinsic = .{ .op = .array_length, .expr = arg, .result_type = result_type, } }); } } try astgen.errors.add(arg_node_loc, "type mismatch", .{}, null); return error.AnalysisFail; } fn genTextureDimensionsBuiltin(astgen: *AstGen, scope: *Scope, node: NodeIndex) !InstIndex { const node_loc = astgen.tree.nodeLoc(node); const node_lhs = astgen.tree.nodeLHS(node); if (node_lhs == .none) { return astgen.failArgCountMismatch(node_loc, 1, 0); } const arg_nodes = astgen.tree.spanToList(node_lhs); if (arg_nodes.len < 1) { return astgen.failArgCountMismatch(node_loc, 1, arg_nodes.len); } const a1_node = arg_nodes[0]; const a1_node_loc = astgen.tree.nodeLoc(a1_node); const a1 = try astgen.genExpr(scope, a1_node); const a1_res = try astgen.resolve(a1); const a1_inst = astgen.getInst(a1_res); if (a1_inst != .texture_type) { try astgen.errors.add(a1_node_loc, "expected a texture type", .{}, null); return error.AnalysisFail; } var level = InstIndex.none; switch (a1_inst.texture_type.kind) { .sampled_2d, .sampled_2d_array, .sampled_cube, .sampled_cube_array, .multisampled_2d, .multisampled_depth_2d, .storage_2d, .storage_2d_array, .depth_2d, .depth_2d_array, .depth_cube, .depth_cube_array, => { if (arg_nodes.len > 1) { const a2_node = arg_nodes[1]; const a2_node_loc = astgen.tree.nodeLoc(a2_node); const a2 = try astgen.genExpr(scope, a2_node); const a2_res = try astgen.resolve(a2); const a2_inst = astgen.getInst(a2_res); if (a2_inst != .int) { try astgen.errors.add(a2_node_loc, "expected i32 or u32", .{}, null); return error.AnalysisFail; } level = a2; } }, else => { try astgen.errors.add(a1_node_loc, "invalid texture", .{}, null); return error.AnalysisFail; }, } const result_type = try astgen.addInst(.{ .vector = .{ .elem_type = try astgen.addInst(.{ .int = .{ .type = .u32, .value = null } }), .size = .two, .value = null, } }); return astgen.addInst(.{ .texture_dimension = .{ .kind = a1_inst.texture_type.kind, .texture = a1, .level = level, .result_type = result_type, } }); } fn genTextureLoadBuiltin(astgen: *AstGen, scope: *Scope, node: NodeIndex) !InstIndex { const node_loc = astgen.tree.nodeLoc(node); const node_lhs = astgen.tree.nodeLHS(node); if (node_lhs == .none) { return astgen.failArgCountMismatch(node_loc, 3, 0); } const arg_nodes = astgen.tree.spanToList(node_lhs); if (arg_nodes.len < 3) { return astgen.failArgCountMismatch(node_loc, 3, 
arg_nodes.len); } const a1_node = arg_nodes[0]; const a1_node_loc = astgen.tree.nodeLoc(a1_node); const a1 = try astgen.genExpr(scope, a1_node); const a1_res = try astgen.resolve(a1); const a1_inst = astgen.getInst(a1_res); const a2_node = arg_nodes[1]; const a2_node_loc = astgen.tree.nodeLoc(a2_node); const a2 = try astgen.genExpr(scope, a2_node); const a2_res = try astgen.resolve(a2); const a2_inst = astgen.getInst(a2_res); const a3_node = arg_nodes[2]; const a3_node_loc = astgen.tree.nodeLoc(a3_node); const a3 = try astgen.genExpr(scope, a3_node); const a3_res = try astgen.resolve(a3); const a3_inst = astgen.getInst(a3_res); if (a1_inst != .texture_type) { try astgen.errors.add(a1_node_loc, "expected a texture type", .{}, null); return error.AnalysisFail; } if (a2_inst != .vector or a2_inst.vector.size != .two or astgen.getInst(a2_inst.vector.elem_type) != .int) { try astgen.errors.add(a2_node_loc, "expected vec2<i32> or vec2<u32>", .{}, null); return error.AnalysisFail; } if (a3_inst != .int) { try astgen.errors.add(a3_node_loc, "expected i32 or u32", .{}, null); return error.AnalysisFail; } const result_type = switch (a1_inst.texture_type.kind) { .sampled_2d => try astgen.addInst(.{ .vector = .{ .elem_type = a1_inst.texture_type.elem_type, .size = .four, .value = null, } }), .depth_2d => try astgen.addInst(.{ .float = .{ .type = .f32, .value = null } }), else => { try astgen.errors.add(a1_node_loc, "invalid texture", .{}, null); return error.AnalysisFail; }, }; return astgen.addInst(.{ .texture_load = .{ .kind = a1_inst.texture_type.kind, .texture = a1, .coords = a2, .level = a3, .result_type = result_type, } }); } fn genTextureStoreBuiltin(astgen: *AstGen, scope: *Scope, node: NodeIndex) !InstIndex { const node_loc = astgen.tree.nodeLoc(node); const node_lhs = astgen.tree.nodeLHS(node); if (node_lhs == .none) { return astgen.failArgCountMismatch(node_loc, 3, 0); } const arg_nodes = astgen.tree.spanToList(node_lhs); if (arg_nodes.len < 3) { return astgen.failArgCountMismatch(node_loc, 3, arg_nodes.len); } const a1_node = arg_nodes[0]; const a1_node_loc = astgen.tree.nodeLoc(a1_node); const a1 = try astgen.genExpr(scope, a1_node); const a1_res = try astgen.resolve(a1); const a1_inst = astgen.getInst(a1_res); const a2_node = arg_nodes[1]; const a2_node_loc = astgen.tree.nodeLoc(a2_node); const a2 = try astgen.genExpr(scope, a2_node); const a2_res = try astgen.resolve(a2); const a2_inst = astgen.getInst(a2_res); const a3_node = arg_nodes[2]; const a3_node_loc = astgen.tree.nodeLoc(a3_node); const a3 = try astgen.genExpr(scope, a3_node); const a3_res = try astgen.resolve(a3); const a3_inst = astgen.getInst(a3_res); if (a1_inst != .texture_type) { try astgen.errors.add(a1_node_loc, "expected a texture type", .{}, null); return error.AnalysisFail; } if (a2_inst != .vector or a2_inst.vector.size != .two or astgen.getInst(a2_inst.vector.elem_type) != .int) { try astgen.errors.add(a2_node_loc, "expected vec2<i32> or vec2<u32>", .{}, null); return error.AnalysisFail; } if (a3_inst != .vector or a3_inst.vector.size != .four) { try astgen.errors.add(a3_node_loc, "expected vec4", .{}, null); return error.AnalysisFail; } switch (a1_inst.texture_type.kind) { .storage_2d => {}, else => { try astgen.errors.add(a1_node_loc, "invalid texture", .{}, null); return error.AnalysisFail; }, } return astgen.addInst(.{ .texture_store = .{ .kind = a1_inst.texture_type.kind, .texture = a1, .coords = a2, .value = a3, } }); } fn genSimpleBuiltin(astgen: *AstGen, comptime op: Air.Inst.NilIntrinsic) !InstIndex { 
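// Barrier builtins (workgroupBarrier, storageBarrier) take no arguments and
// produce no value, so a bare nil_intrinsic instruction is sufficient.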
return astgen.addInst(.{ .nil_intrinsic = op }); } fn genTextureSampleBuiltin(astgen: *AstGen, scope: *Scope, node: NodeIndex) !InstIndex { const node_loc = astgen.tree.nodeLoc(node); const node_lhs = astgen.tree.nodeLHS(node); if (node_lhs == .none) { return astgen.failArgCountMismatch(node_loc, 3, 0); } const arg_nodes = astgen.tree.spanToList(node_lhs); if (arg_nodes.len < 3) { return astgen.failArgCountMismatch(node_loc, 3, arg_nodes.len); } const a1_node = arg_nodes[0]; const a1_node_loc = astgen.tree.nodeLoc(a1_node); const a1 = try astgen.genExpr(scope, a1_node); const a1_res = try astgen.resolve(a1); const a1_inst = astgen.getInst(a1_res); const a2_node = arg_nodes[1]; const a2_node_loc = astgen.tree.nodeLoc(a2_node); const a2 = try astgen.genExpr(scope, a2_node); const a2_res = try astgen.resolve(a2); const a2_inst = astgen.getInst(a2_res); const a3_node = arg_nodes[2]; const a3_node_loc = astgen.tree.nodeLoc(a3_node); const a3 = try astgen.genExpr(scope, a3_node); const a3_res = try astgen.resolve(a3); const a3_inst = astgen.getInst(a3_res); if (a1_inst != .texture_type) { // TODO: depth textures try astgen.errors.add(a1_node_loc, "expected a texture type", .{}, null); return error.AnalysisFail; } if (a2_inst != .sampler_type) { try astgen.errors.add(a2_node_loc, "expected a sampler", .{}, null); return error.AnalysisFail; } switch (a1_inst.texture_type.kind) { .sampled_1d => { if (a3_inst != .float) { try astgen.errors.add(a3_node_loc, "expected f32", .{}, null); return error.AnalysisFail; } }, .sampled_2d, .sampled_2d_array, .depth_2d, .depth_2d_array => { if (a3_inst != .vector or a3_inst.vector.size != .two) { try astgen.errors.add(a3_node_loc, "expected a vec2<f32>", .{}, null); return error.AnalysisFail; } }, .sampled_3d, .sampled_cube, .sampled_cube_array, .depth_cube, .depth_cube_array => { if (a3_inst != .vector or a3_inst.vector.size != .three) { try astgen.errors.add(a3_node_loc, "expected a vec3<f32>", .{}, null); return error.AnalysisFail; } }, else => { try astgen.errors.add(a3_node_loc, "invalid texture", .{}, null); return error.AnalysisFail; }, } var offset = InstIndex.none; var array_index = InstIndex.none; switch (a1_inst.texture_type.kind) { .sampled_2d, .sampled_3d, .sampled_cube, .depth_2d, .depth_cube => { if (arg_nodes.len == 4) { const a4_node = arg_nodes[3]; const a4_node_loc = astgen.tree.nodeLoc(a4_node); const a4 = try astgen.genExpr(scope, a4_node); const a4_res = try astgen.resolve(a4); const a4_inst = astgen.getInst(a4_res); offset = a4; switch (a1_inst.texture_type.kind) { .sampled_3d, .sampled_cube, .depth_cube => { if (a4_inst != .vector or a4_inst.vector.size != .three) { try astgen.errors.add(a4_node_loc, "expected a vec3<i32>", .{}, null); return error.AnalysisFail; } }, .sampled_2d, .depth_2d => if (a4_inst != .vector or a4_inst.vector.size != .two) { try astgen.errors.add(a4_node_loc, "expected a vec2<i32>", .{}, null); return error.AnalysisFail; }, else => unreachable, } } }, .sampled_2d_array, .sampled_cube_array, .depth_2d_array, .depth_cube_array => { if (arg_nodes.len < 4) { return astgen.failArgCountMismatch(node_loc, 4, arg_nodes.len); } const a4_node = arg_nodes[3]; const a4_node_loc = astgen.tree.nodeLoc(a4_node); const a4 = try astgen.genExpr(scope, a4_node); const a4_res = try astgen.resolve(a4); const a4_inst = astgen.getInst(a4_res); array_index = a4; if (a4_inst != .int) { try astgen.errors.add(a4_node_loc, "expected i32 or u32", .{}, null); return error.AnalysisFail; } if (arg_nodes.len == 5) { const a5_node = arg_nodes[4]; 
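// For array textures, the optional trailing argument after the array index
// is a constant texel offset, e.g. (WGSL)
// textureSample(t, s, uv, layer, vec2<i32>(1, -1)).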
const a5_node_loc = astgen.tree.nodeLoc(a5_node); const a5 = try astgen.genExpr(scope, a5_node); const a5_res = try astgen.resolve(a5); const a5_inst = astgen.getInst(a5_res); offset = a5; switch (a1_inst.texture_type.kind) { .sampled_cube_array, .depth_cube_array => { if (a5_inst != .vector or a5_inst.vector.size != .three) { try astgen.errors.add(a5_node_loc, "expected a vec3<i32>", .{}, null); return error.AnalysisFail; } }, .sampled_2d_array, .depth_2d_array => { if (a5_inst != .vector or a5_inst.vector.size != .two) { try astgen.errors.add(a5_node_loc, "expected a vec2<i32>", .{}, null); return error.AnalysisFail; } }, else => unreachable, } } }, else => unreachable, } const result_type = switch (a1_inst.texture_type.kind) { .depth_2d, .depth_2d_array, .depth_cube, .depth_cube_array, => try astgen.addInst(.{ .float = .{ .type = .f32, .value = null } }), else => try astgen.addInst(.{ .vector = .{ .elem_type = try astgen.addInst(.{ .float = .{ .type = .f32, .value = null } }), .size = .four, .value = null, } }), }; return astgen.addInst(.{ .texture_sample = .{ .kind = a1_inst.texture_type.kind, .texture_type = a1_res, .texture = a1, .sampler = a2, .coords = a3, .offset = offset, .array_index = array_index, .result_type = result_type, } }); } // TODO: Partial implementation fn genTextureSampleLevelBuiltin(astgen: *AstGen, scope: *Scope, node: NodeIndex) !InstIndex { const node_loc = astgen.tree.nodeLoc(node); const node_lhs = astgen.tree.nodeLHS(node); if (node_lhs == .none) { return astgen.failArgCountMismatch(node_loc, 4, 0); } const arg_nodes = astgen.tree.spanToList(node_lhs); if (arg_nodes.len < 4) { return astgen.failArgCountMismatch(node_loc, 4, arg_nodes.len); } const a1_node = arg_nodes[0]; const a1_node_loc = astgen.tree.nodeLoc(a1_node); const a1 = try astgen.genExpr(scope, a1_node); const a1_res = try astgen.resolve(a1); const a1_inst = astgen.getInst(a1_res); const a2_node = arg_nodes[1]; const a2_node_loc = astgen.tree.nodeLoc(a2_node); const a2 = try astgen.genExpr(scope, a2_node); const a2_res = try astgen.resolve(a2); const a2_inst = astgen.getInst(a2_res); const a3_node = arg_nodes[2]; const a3_node_loc = astgen.tree.nodeLoc(a3_node); const a3 = try astgen.genExpr(scope, a3_node); const a3_res = try astgen.resolve(a3); const a3_inst = astgen.getInst(a3_res); const a4_node = arg_nodes[3]; const a4_node_loc = astgen.tree.nodeLoc(a4_node); const a4 = try astgen.genExpr(scope, a4_node); const a4_res = try astgen.resolve(a4); const a4_inst = astgen.getInst(a4_res); if (a1_inst != .texture_type) { try astgen.errors.add(a1_node_loc, "expected a texture type", .{}, null); return error.AnalysisFail; } switch (a1_inst.texture_type.kind) { .sampled_2d => {}, else => { try astgen.errors.add(a1_node_loc, "invalid texture", .{}, null); return error.AnalysisFail; }, } if (a2_inst != .sampler_type) { try astgen.errors.add(a2_node_loc, "expected a sampler", .{}, null); return error.AnalysisFail; } if (a3_inst != .vector or a3_inst.vector.size != .two) { try astgen.errors.add(a3_node_loc, "expected a vec2<f32>", .{}, null); return error.AnalysisFail; } if (a4_inst != .float) { try astgen.errors.add(a4_node_loc, "expected f32", .{}, null); return error.AnalysisFail; } const result_type = try astgen.addInst(.{ .vector = .{ .elem_type = try astgen.addInst(.{ .float = .{ .type = .f32, .value = null } }), .size = .four, .value = null, } }); return astgen.addInst(.{ .texture_sample = .{ .kind = a1_inst.texture_type.kind, .texture_type = a1_res, .texture = a1, .sampler = a2, .coords = a3, 
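// Explicit-LOD sampling: the mip level comes from a4 via the .level operand
// instead of being derived from screen-space derivatives.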
.result_type = result_type, .operands = .{ .level = a4 }, } }); } fn genTextureSampleGradBuiltin(astgen: *AstGen, scope: *Scope, node: NodeIndex) !InstIndex { const node_loc = astgen.tree.nodeLoc(node); const node_lhs = astgen.tree.nodeLHS(node); if (node_lhs == .none) { return astgen.failArgCountMismatch(node_loc, 5, 0); } const arg_nodes = astgen.tree.spanToList(node_lhs); if (arg_nodes.len < 5) { return astgen.failArgCountMismatch(node_loc, 5, arg_nodes.len); } const a1_node = arg_nodes[0]; const a1_node_loc = astgen.tree.nodeLoc(a1_node); const a1 = try astgen.genExpr(scope, a1_node); const a1_res = try astgen.resolve(a1); const a1_inst = astgen.getInst(a1_res); const a2_node = arg_nodes[1]; const a2_node_loc = astgen.tree.nodeLoc(a2_node); const a2 = try astgen.genExpr(scope, a2_node); const a2_res = try astgen.resolve(a2); const a2_inst = astgen.getInst(a2_res); const a3_node = arg_nodes[2]; const a3_node_loc = astgen.tree.nodeLoc(a3_node); const a3 = try astgen.genExpr(scope, a3_node); const a3_res = try astgen.resolve(a3); const a3_inst = astgen.getInst(a3_res); const a4_node = arg_nodes[3]; const a4_node_loc = astgen.tree.nodeLoc(a4_node); const a4 = try astgen.genExpr(scope, a4_node); const a4_res = try astgen.resolve(a4); const a4_inst = astgen.getInst(a4_res); const a5_node = arg_nodes[4]; const a5_node_loc = astgen.tree.nodeLoc(a5_node); const a5 = try astgen.genExpr(scope, a5_node); const a5_res = try astgen.resolve(a5); const a5_inst = astgen.getInst(a5_res); if (a1_inst != .texture_type) { try astgen.errors.add(a1_node_loc, "expected a texture type", .{}, null); return error.AnalysisFail; } switch (a1_inst.texture_type.kind) { .sampled_2d => {}, else => { try astgen.errors.add(a1_node_loc, "invalid texture", .{}, null); return error.AnalysisFail; }, } if (a2_inst != .sampler_type) { try astgen.errors.add(a2_node_loc, "expected a sampler", .{}, null); return error.AnalysisFail; } if (a3_inst != .vector or astgen.getInst(a3_inst.vector.elem_type) != .float or a3_inst.vector.size != .two) { try astgen.errors.add(a3_node_loc, "expected a vec2<f32>", .{}, null); return error.AnalysisFail; } if (a4_inst != .vector or astgen.getInst(a4_inst.vector.elem_type) != .float or a4_inst.vector.size != .two) { try astgen.errors.add(a4_node_loc, "expected vec2<f32>", .{}, null); return error.AnalysisFail; } if (a5_inst != .vector or astgen.getInst(a5_inst.vector.elem_type) != .float or a5_inst.vector.size != .two) { try astgen.errors.add(a5_node_loc, "expected vec2<f32>", .{}, null); return error.AnalysisFail; } const result_type = try astgen.addInst(.{ .vector = .{ .elem_type = try astgen.addInst(.{ .float = .{ .type = .f32, .value = null } }), .size = .four, .value = null, } }); return astgen.addInst(.{ .texture_sample = .{ .kind = a1_inst.texture_type.kind, .texture_type = a1_res, .texture = a1, .sampler = a2, .coords = a3, .result_type = result_type, .operands = .{ .grad = .{ .dpdx = a4, .dpdy = a5 } }, } }); } fn genVarRef(astgen: *AstGen, scope: *Scope, node: NodeIndex) !InstIndex { const inst_idx = try astgen.findSymbol(scope, astgen.tree.nodeToken(node)); switch (astgen.getInst(inst_idx)) { .@"var" => |inst| { if (inst.addr_space != .function) { try astgen.global_var_refs.put(astgen.allocator, inst_idx, {}); } }, else => {}, } return astgen.addInst(.{ .var_ref = inst_idx }); } fn genIndexAccess(astgen: *AstGen, scope: *Scope, node: NodeIndex) !InstIndex { const base = try astgen.genExpr(scope, astgen.tree.nodeLHS(node)); const base_type = try astgen.resolve(base); switch 
(astgen.getInst(base_type)) { .vector, .matrix, .array => {}, else => { try astgen.errors.add( astgen.tree.nodeLoc(astgen.tree.nodeRHS(node)), "cannot access index of a non-array", .{}, null, ); return error.AnalysisFail; }, } const rhs = try astgen.genExpr(scope, astgen.tree.nodeRHS(node)); const rhs_res = try astgen.resolve(rhs); const elem_type = switch (astgen.getInst(base_type)) { inline .vector, .array => |ty| ty.elem_type, .matrix => |ty| try astgen.addInst(.{ .vector = .{ .elem_type = ty.elem_type, .size = ty.rows, .value = null, } }), else => unreachable, }; if (astgen.getInst(rhs_res) == .int) { const inst = try astgen.addInst(.{ .index_access = .{ .base = base, .type = elem_type, .index = rhs, }, }); return inst; } try astgen.errors.add( astgen.tree.nodeLoc(astgen.tree.nodeRHS(node)), "index must be an integer", .{}, null, ); return error.AnalysisFail; } fn genFieldAccess(astgen: *AstGen, scope: *Scope, node: NodeIndex) !InstIndex { const base = try astgen.genExpr(scope, astgen.tree.nodeLHS(node)); const base_type = try astgen.resolve(base); const field_node = astgen.tree.nodeRHS(node).asTokenIndex(); const field_name = astgen.tree.tokenLoc(field_node).slice(astgen.tree.source); switch (astgen.getInst(base_type)) { .vector => |base_vec| { if (field_name.len > 4) { try astgen.errors.add( astgen.tree.tokenLoc(field_node), "invalid swizzle name", .{}, null, ); return error.AnalysisFail; } var pattern: [4]Inst.SwizzleAccess.Component = undefined; for (field_name, 0..) |c, i| { pattern[i] = switch (c) { 'x', 'r' => .x, 'y', 'g' => .y, 'z', 'b' => .z, 'w', 'a' => .w, else => { try astgen.errors.add( astgen.tree.tokenLoc(field_node), "invalid swizzle name", .{}, null, ); return error.AnalysisFail; }, }; } const inst = try astgen.addInst(.{ .swizzle_access = .{ .base = base, .type = base_vec.elem_type, .size = @enumFromInt(field_name.len), .pattern = pattern, }, }); return inst; }, .@"struct" => |@"struct"| { const struct_members = @"struct".members; for (astgen.refToList(struct_members)) |member| { const member_data = astgen.getInst(member).struct_member; if (std.mem.eql(u8, field_name, astgen.getStr(member_data.name))) { if (astgen.current_fn_scope.tag.@"fn".flattened_params.get(member)) |fv| { return try astgen.addInst(.{ .var_ref = fv }); } const inst = try astgen.addInst(.{ .field_access = .{ .base = base, .field = member, .name = member_data.name, }, }); return inst; } } try astgen.errors.add( astgen.tree.nodeLoc(node), "struct '{s}' has no member named '{s}'", .{ astgen.getStr(@"struct".name), field_name, }, null, ); return error.AnalysisFail; }, else => { try astgen.errors.add( astgen.tree.nodeLoc(node), "expected struct type", .{}, null, ); return error.AnalysisFail; }, } } fn genType(astgen: *AstGen, scope: *Scope, node: NodeIndex) error{ AnalysisFail, OutOfMemory }!InstIndex { const inst = switch (astgen.tree.nodeTag(node)) { .bool_type => try astgen.addInst(.{ .bool = .{ .value = null } }), .number_type => try astgen.genNumberType(node), .vector_type => try astgen.genVectorType(scope, node), .matrix_type => try astgen.genMatrixType(scope, node), .atomic_type => try astgen.genAtomicType(scope, node), .array_type => try astgen.genArray(scope, node, null), .ptr_type => try astgen.genPtrType(scope, node), .sampler_type => try astgen.genSamplerType(node), .sampled_texture_type => try astgen.genSampledTextureType(scope, node), .multisampled_texture_type => try astgen.genMultisampledTextureType(scope, node), .storage_texture_type => try astgen.genStorageTextureType(node), 
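// Depth and external textures have dedicated type nodes; a bare identifier
// must instead resolve to a declaration that is itself a type.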
.depth_texture_type => try astgen.genDepthTextureType(node), .external_texture_type => try astgen.addInst(.external_texture_type), .ident => { const node_loc = astgen.tree.nodeLoc(node); const decl = try astgen.findSymbol(scope, astgen.tree.nodeToken(node)); switch (astgen.getInst(decl)) { .bool, .int, .float, .vector, .matrix, .atomic_type, .array, .ptr_type, .sampler_type, .comparison_sampler_type, .external_texture_type, .texture_type, .@"struct", => return decl, else => { try astgen.errors.add( node_loc, "'{s}' is not a type", .{node_loc.slice(astgen.tree.source)}, null, ); return error.AnalysisFail; }, } }, else => unreachable, }; return inst; } fn genNumberType(astgen: *AstGen, node: NodeIndex) !InstIndex { const token = astgen.tree.nodeToken(node); const token_tag = astgen.tree.tokenTag(token); return astgen.addInst(switch (token_tag) { .k_u32 => .{ .int = .{ .type = .u32, .value = null } }, .k_i32 => .{ .int = .{ .type = .i32, .value = null } }, .k_f32 => .{ .float = .{ .type = .f32, .value = null } }, .k_f16 => .{ .float = .{ .type = .f16, .value = null } }, else => unreachable, }); } fn genVectorType(astgen: *AstGen, scope: *Scope, node: NodeIndex) !InstIndex { const elem_type_node = astgen.tree.nodeLHS(node); const elem_type = try astgen.genType(scope, elem_type_node); switch (astgen.getInst(elem_type)) { .bool, .int, .float => { const token_tag = astgen.tree.tokenTag(astgen.tree.nodeToken(node)); return astgen.addInst(.{ .vector = .{ .size = switch (token_tag) { .k_vec2 => .two, .k_vec3 => .three, .k_vec4 => .four, else => unreachable, }, .elem_type = elem_type, .value = null, }, }); }, else => { try astgen.errors.add( astgen.tree.nodeLoc(elem_type_node), "invalid vector component type", .{}, try astgen.errors.createNote( null, "must be 'i32', 'u32', 'f32', 'f16' or 'bool'", .{}, ), ); return error.AnalysisFail; }, } } fn genMatrixType(astgen: *AstGen, scope: *Scope, node: NodeIndex) !InstIndex { const elem_type_node = astgen.tree.nodeLHS(node); const elem_type = try astgen.genType(scope, elem_type_node); switch (astgen.getInst(elem_type)) { .float => { const token_tag = astgen.tree.tokenTag(astgen.tree.nodeToken(node)); return astgen.addInst(.{ .matrix = .{ .cols = matrixCols(token_tag), .rows = matrixRows(token_tag), .elem_type = elem_type, .value = null, }, }); }, else => { try astgen.errors.add( astgen.tree.nodeLoc(elem_type_node), "invalid matrix component type", .{}, try astgen.errors.createNote( null, "must be 'f32' or 'f16'", .{}, ), ); return error.AnalysisFail; }, } } fn matrixCols(tag: TokenTag) Air.Inst.Vector.Size { return switch (tag) { .k_mat2x2, .k_mat2x3, .k_mat2x4 => .two, .k_mat3x2, .k_mat3x3, .k_mat3x4 => .three, .k_mat4x2, .k_mat4x3, .k_mat4x4 => .four, else => unreachable, }; } fn matrixRows(tag: TokenTag) Air.Inst.Vector.Size { return switch (tag) { .k_mat2x2, .k_mat3x2, .k_mat4x2 => .two, .k_mat2x3, .k_mat3x3, .k_mat4x3 => .three, .k_mat2x4, .k_mat3x4, .k_mat4x4 => .four, else => unreachable, }; } fn genAtomicType(astgen: *AstGen, scope: *Scope, node: NodeIndex) !InstIndex { const node_lhs = astgen.tree.nodeLHS(node); const elem_type = try astgen.genType(scope, node_lhs); if (astgen.getInst(elem_type) == .int) { return astgen.addInst(.{ .atomic_type = .{ .elem_type = elem_type } }); } try astgen.errors.add( astgen.tree.nodeLoc(node_lhs), "invalid atomic component type", .{}, try astgen.errors.createNote( null, "must be 'i32' or 'u32'", .{}, ), ); return error.AnalysisFail; } fn genPtrType(astgen: *AstGen, scope: *Scope, node: NodeIndex) 
!InstIndex { const node_lhs = astgen.tree.nodeLHS(node); const elem_type = try astgen.genType(scope, node_lhs); switch (astgen.getInst(elem_type)) { .bool, .int, .float, .sampler_type, .comparison_sampler_type, .external_texture_type, => { const extra = astgen.tree.extraData(Node.PtrType, astgen.tree.nodeRHS(node)); const addr_space_loc = astgen.tree.tokenLoc(extra.addr_space); const ast_addr_space = stringToEnum(Ast.AddressSpace, addr_space_loc.slice(astgen.tree.source)).?; const addr_space: Inst.PointerType.AddressSpace = switch (ast_addr_space) { .function => .function, .private => .private, .workgroup => .workgroup, .uniform => .uniform, .storage => .storage, }; const access_mode_loc = astgen.tree.tokenLoc(extra.access_mode); const ast_access_mode = stringToEnum(Ast.AccessMode, access_mode_loc.slice(astgen.tree.source)).?; const access_mode: Inst.PointerType.AccessMode = switch (ast_access_mode) { .read => .read, .write => .write, .read_write => .read_write, }; return astgen.addInst(.{ .ptr_type = .{ .elem_type = elem_type, .addr_space = addr_space, .access_mode = access_mode, }, }); }, else => {}, } try astgen.errors.add( astgen.tree.nodeLoc(node_lhs), "invalid pointer component type", .{}, null, ); return error.AnalysisFail; } fn genArray(astgen: *AstGen, scope: *Scope, node: NodeIndex, args: ?RefIndex) !InstIndex { const node_lhs = astgen.tree.nodeLHS(node); var elem_type = InstIndex.none; if (node_lhs != .none) { elem_type = try astgen.genType(scope, node_lhs); switch (astgen.getInst(elem_type)) { .array, .atomic_type, .@"struct", .bool, .int, .float, .vector, .matrix, => { if (astgen.getInst(elem_type) == .array) { if (astgen.getInst(elem_type).array.len == .none) { try astgen.errors.add( astgen.tree.nodeLoc(node_lhs), "array component type can not be a runtime-known array", .{}, null, ); return error.AnalysisFail; } } }, else => { try astgen.errors.add( astgen.tree.nodeLoc(node_lhs), "invalid array component type", .{}, null, ); return error.AnalysisFail; }, } } if (args != null) { if (args.? 
== .none) { try astgen.errors.add( astgen.tree.nodeLoc(node), "element type not specified", .{}, null, ); return error.AnalysisFail; } if (elem_type == .none) { elem_type = astgen.refToList(args.?)[0]; } } const len_node = astgen.tree.nodeRHS(node); var len = InstIndex.none; if (len_node != .none) { len = try astgen.genExpr(scope, len_node); } else if (args != null) { len = try astgen.addInst(.{ .int = .{ .type = .u32, .value = try astgen.addValue(Inst.Int.Value, .{ .literal = @intCast(astgen.refToList(args.?).len) }), } }); } return astgen.addInst(.{ .array = .{ .elem_type = elem_type, .len = len, .value = args, }, }); } fn genSamplerType(astgen: *AstGen, node: NodeIndex) !InstIndex { const token = astgen.tree.nodeToken(node); const token_tag = astgen.tree.tokenTag(token); return astgen.addInst(switch (token_tag) { .k_sampler => .sampler_type, .k_sampler_comparison => .comparison_sampler_type, else => unreachable, }); } fn genSampledTextureType(astgen: *AstGen, scope: *Scope, node: NodeIndex) !InstIndex { const node_lhs = astgen.tree.nodeLHS(node); const elem_type = try astgen.genType(scope, node_lhs); const elem_type_inst = astgen.getInst(elem_type); if (elem_type_inst == .int or (elem_type_inst == .float and elem_type_inst.float.type == .f32)) { const token_tag = astgen.tree.tokenTag(astgen.tree.nodeToken(node)); return astgen.addInst(.{ .texture_type = .{ .kind = switch (token_tag) { .k_texture_1d => .sampled_1d, .k_texture_2d => .sampled_2d, .k_texture_2d_array => .sampled_2d_array, .k_texture_3d => .sampled_3d, .k_texture_cube => .sampled_cube, .k_texture_cube_array => .sampled_cube_array, else => unreachable, }, .elem_type = elem_type, }, }); } try astgen.errors.add( astgen.tree.nodeLoc(node_lhs), "invalid texture component type", .{}, try astgen.errors.createNote( null, "must be 'i32', 'u32' or 'f32'", .{}, ), ); return error.AnalysisFail; } fn genMultisampledTextureType(astgen: *AstGen, scope: *Scope, node: NodeIndex) !InstIndex { const node_lhs = astgen.tree.nodeLHS(node); var elem_type = InstIndex.none; if (node_lhs != .none) { elem_type = try astgen.genType(scope, node_lhs); const elem_type_inst = astgen.getInst(elem_type); if (elem_type_inst != .int and !(elem_type_inst == .float and elem_type_inst.float.type == .f32)) { try astgen.errors.add( astgen.tree.nodeLoc(node_lhs), "invalid multisampled texture component type", .{}, try astgen.errors.createNote( null, "must be 'i32', 'u32' or 'f32'", .{}, ), ); return error.AnalysisFail; } } const token_tag = astgen.tree.tokenTag(astgen.tree.nodeToken(node)); return astgen.addInst(.{ .texture_type = .{ .kind = switch (token_tag) { .k_texture_multisampled_2d => .multisampled_2d, .k_texture_depth_multisampled_2d => .multisampled_depth_2d, else => unreachable, }, .elem_type = elem_type, }, }); } fn genStorageTextureType(astgen: *AstGen, node: NodeIndex) !InstIndex { const node_lhs = astgen.tree.nodeLHS(node); const texel_format_loc = astgen.tree.tokenLoc(node_lhs.asTokenIndex()); const ast_texel_format = stringToEnum(Ast.TexelFormat, texel_format_loc.slice(astgen.tree.source)).?; const texel_format: Inst.TextureType.TexelFormat = switch (ast_texel_format) { .rgba8unorm => .rgba8unorm, .rgba8snorm => .rgba8snorm, .rgba8uint => .rgba8uint, .rgba8sint => .rgba8sint, .rgba16uint => .rgba16uint, .rgba16sint => .rgba16sint, .rgba16float => .rgba16float, .r32uint => .r32uint, .r32sint => .r32sint, .r32float => .r32float, .rg32uint => .rg32uint, .rg32sint => .rg32sint, .rg32float => .rg32float, .rgba32uint => .rgba32uint, .rgba32sint => 
.rgba32sint, .rgba32float => .rgba32float, .bgra8unorm => .bgra8unorm, }; const node_rhs = astgen.tree.nodeRHS(node); const access_mode_loc = astgen.tree.tokenLoc(node_rhs.asTokenIndex()); const access_mode_full = stringToEnum(Ast.AccessMode, access_mode_loc.slice(astgen.tree.source)).?; const access_mode = switch (access_mode_full) { .write => Inst.TextureType.AccessMode.write, else => { try astgen.errors.add( access_mode_loc, "invalid access mode", .{}, try astgen.errors.createNote( null, "only 'write' is allowed", .{}, ), ); return error.AnalysisFail; }, }; const token_tag = astgen.tree.tokenTag(astgen.tree.nodeToken(node)); const inst = try astgen.addInst(.{ .texture_type = .{ .kind = switch (token_tag) { .k_texture_storage_1d => .storage_1d, .k_texture_storage_2d => .storage_2d, .k_texture_storage_2d_array => .storage_2d_array, .k_texture_storage_3d => .storage_3d, else => unreachable, }, .texel_format = texel_format, .access_mode = access_mode, }, }); return inst; } fn genDepthTextureType(astgen: *AstGen, node: NodeIndex) !InstIndex { const token_tag = astgen.tree.tokenTag(astgen.tree.nodeToken(node)); const inst = try astgen.addInst(.{ .texture_type = .{ .kind = switch (token_tag) { .k_texture_depth_2d => .depth_2d, .k_texture_depth_2d_array => .depth_2d_array, .k_texture_depth_cube => .depth_cube, .k_texture_depth_cube_array => .depth_cube_array, else => unreachable, } }, }); return inst; } /// Takes a token and returns the first matching declaration in the current or parent scopes. fn findSymbol(astgen: *AstGen, scope: *Scope, token: TokenIndex) error{ OutOfMemory, AnalysisFail }!InstIndex { std.debug.assert(astgen.tree.tokenTag(token) == .ident); const loc = astgen.tree.tokenLoc(token); const name = loc.slice(astgen.tree.source); var s = scope; while (true) { var iter = s.decls.iterator(); while (iter.next()) |decl| { const decl_node = decl.key_ptr.*; const decl_inst = try decl.value_ptr.*; if (std.mem.eql(u8, name, astgen.tree.declNameLoc(decl_node).?.slice(astgen.tree.source))) { if (decl_inst == .none) { // declaration has not been analyzed yet switch (s.tag) { .root => return astgen.genGlobalDecl(s, decl_node), .@"fn", .block, .loop, .continuing, .switch_case, .@"if", .@"for", => {}, } } else { return decl_inst; } } } if (s.tag == .root) { try astgen.errors.add( loc, "use of undeclared identifier '{s}'", .{name}, null, ); return error.AnalysisFail; } s = s.parent; } } fn resolve(astgen: *AstGen, index: InstIndex) !InstIndex { var idx = index; while (true) { const inst = astgen.getInst(idx); switch (inst) { inline .bool, .int, .float, .vector, .matrix, .array => |data| { std.debug.assert(data.value != null); return idx; }, .struct_construct => |struct_construct| return struct_construct.@"struct", .select => |select| return select.type, inline .texture_sample, .bitcast, .unary, .unary_intrinsic, .binary, .binary_intrinsic, .triple_intrinsic, .texture_dimension, .texture_load, => |instruction| return instruction.result_type, .call => |call| return astgen.getInst(call.@"fn").@"fn".return_type, .var_ref => |var_ref| idx = var_ref, .field_access => |field_access| return astgen.getInst(field_access.field).struct_member.type, .swizzle_access => |swizzle_access| { if (swizzle_access.size == .one) { return swizzle_access.type; } return astgen.addInst(.{ .vector = .{ .elem_type = swizzle_access.type, .size = @enumFromInt(@intFromEnum(swizzle_access.size)), .value = null, }, }); }, .index_access => |index_access| return index_access.type, inline .@"var", .@"const" => |decl| { std.debug.assert(index != idx); 
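// Prefer the declared type when present; otherwise keep resolving through the initializer expression.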
const decl_type = decl.type; const decl_expr = decl.init; if (decl_type != .none) return decl_type; idx = decl_expr; }, .fn_param => |param| return param.type, .nil_intrinsic, .texture_store, .atomic_type, .ptr_type, .sampler_type, .comparison_sampler_type, .external_texture_type, .texture_type, .@"fn", .@"struct", .struct_member, .block, .loop, .continuing, .@"return", .break_if, .@"if", .@"while", .@"for", .discard, .@"break", .@"continue", .@"switch", .switch_case, .assign, => unreachable, } } } fn eql(astgen: *AstGen, a_idx: InstIndex, b_idx: InstIndex) bool { const a = astgen.getInst(a_idx); const b = astgen.getInst(b_idx); return switch (a) { .int => |int_a| switch (b) { .int => |int_b| int_a.type == int_b.type, else => false, }, .vector => |vec_a| switch (b) { .vector => |vec_b| astgen.eqlVector(vec_a, vec_b), else => false, }, .matrix => |mat_a| switch (b) { .matrix => |mat_b| astgen.eqlMatrix(mat_a, mat_b), else => false, }, else => std.meta.activeTag(a) == std.meta.activeTag(b), }; } fn eqlVector(astgen: *AstGen, a: Air.Inst.Vector, b: Air.Inst.Vector) bool { return a.size == b.size and astgen.eql(a.elem_type, b.elem_type); } fn eqlMatrix(astgen: *AstGen, a: Air.Inst.Matrix, b: Air.Inst.Matrix) bool { return a.cols == b.cols and a.rows == b.rows and astgen.eql(a.elem_type, b.elem_type); } fn addInst(astgen: *AstGen, inst: Inst) error{OutOfMemory}!InstIndex { try astgen.instructions.put(astgen.allocator, inst, {}); return @enumFromInt(astgen.instructions.getIndex(inst).?); } fn addRefList(astgen: *AstGen, list: []const InstIndex) error{OutOfMemory}!RefIndex { const len = list.len + 1; try astgen.refs.ensureUnusedCapacity(astgen.allocator, len); astgen.refs.appendSliceAssumeCapacity(list); astgen.refs.appendAssumeCapacity(.none); return @as(RefIndex, @enumFromInt(astgen.refs.items.len - len)); } fn addString(astgen: *AstGen, str: []const u8) error{OutOfMemory}!StringIndex { const len = str.len + 1; try astgen.strings.ensureUnusedCapacity(astgen.allocator, len); astgen.strings.appendSliceAssumeCapacity(str); astgen.strings.appendAssumeCapacity(0); return @enumFromInt(astgen.strings.items.len - len); } fn addValue(astgen: *AstGen, comptime T: type, value: T) error{OutOfMemory}!ValueIndex { const value_bytes = std.mem.asBytes(&value); try astgen.values.appendSlice(astgen.allocator, value_bytes); std.testing.expectEqual(value, std.mem.bytesToValue(T, value_bytes)) catch unreachable; return @enumFromInt(astgen.values.items.len - value_bytes.len); } fn getInst(astgen: *AstGen, inst: InstIndex) Inst { return astgen.instructions.entries.slice().items(.key)[@intFromEnum(inst)]; } fn getValue(astgen: *AstGen, comptime T: type, value: ValueIndex) T { return std.mem.bytesAsValue(T, astgen.values.items[@intFromEnum(value)..][0..@sizeOf(T)]).*; } fn getStr(astgen: *AstGen, index: StringIndex) []const u8 { return std.mem.sliceTo(astgen.strings.items[@intFromEnum(index)..], 0); } fn refToList(astgen: *AstGen, ref: RefIndex) []const InstIndex { return std.mem.sliceTo(astgen.refs.items[@intFromEnum(ref)..], .none); } fn failArgCountMismatch( astgen: *AstGen, node_loc: Loc, expected: usize, actual: usize, ) error{ OutOfMemory, AnalysisFail } { try astgen.errors.add( node_loc, "expected {} argument(s), found {}", .{ expected, actual }, null, ); return error.AnalysisFail; } const BuiltinFn = enum { all, any, select, arrayLength, abs, acos, acosh, asin, asinh, atan, atanh, atan2, ceil, clamp, cos, cosh, countLeadingZeros, countOneBits, countTrailingZeros, cross, // unimplemented 
degrees, determinant, // unimplemented distance, dot, // unimplemented exp, exp2, extractBits, // unimplemented faceForward, // unimplemented firstLeadingBit, firstTrailingBit, floor, fma, // unimplemented fract, frexp, // unimplemented insertBits, // unimplemented inverseSqrt, ldexp, // unimplemented length, log, log2, max, min, mix, modf, // unimplemented normalize, pow, // unimplemented quantizeToF16, radians, reflect, // unimplemented refract, // unimplemented reverseBits, round, saturate, sign, sin, sinh, smoothstep, sqrt, step, tan, tanh, transpose, // unimplemented trunc, dpdx, dpdxCoarse, dpdxFine, dpdy, dpdyCoarse, dpdyFine, fwidth, fwidthCoarse, fwidthFine, textureDimensions, textureGather, // unimplemented textureLoad, textureNumLayers, // unimplemented textureNumLevels, // unimplemented textureNumSamples, // unimplemented textureSample, textureSampleBias, // unimplemented textureSampleCompare, // unimplemented textureSampleCompareLevel, // unimplemented textureSampleGrad, // unimplemented textureSampleLevel, // unimplemented textureSampleBaseClampToEdge, // unimplemented textureStore, // unimplemented atomicLoad, // unimplemented atomicStore, // unimplemented atomicAdd, // unimplemented atomicSub, // unimplemented atomicMax, // unimplemented atomicMin, // unimplemented atomicAnd, // unimplemented atomicOr, // unimplemented atomicXor, // unimplemented atomicExchange, // unimplemented atomicCompareExchangeWeak, // unimplemented pack4x8unorm, // unimplemented pack2x16snorm, // unimplemented pack2x16unorm, // unimplemented pack2x16float, // unimplemented unpack4x8snorm, // unimplemented unpack4x8unorm, // unimplemented unpack2x16snorm, // unimplemented unpack2x16unorm, // unimplemented unpack2x16float, // unimplemented storageBarrier, workgroupBarrier, workgroupUniformLoad, // unimplemented };
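// Reference for genTextureSampleGradBuiltin above (per the WGSL spec, sampled 2D overload):
//   textureSampleGrad(t: texture_2d<f32>, s: sampler, coords: vec2<f32>,
//                     ddx: vec2<f32>, ddy: vec2<f32>) -> vec4<f32>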
0
repos/mach-sysgpu/src
repos/mach-sysgpu/src/shader/print_air.zig
const std = @import("std"); const Air = @import("Air.zig"); const indention_size = 2; pub fn printAir(ir: Air, writer: anytype) !void { var p = Printer(@TypeOf(writer)){ .ir = ir, .writer = writer, .tty = std.io.tty.Config{ .escape_codes = {} }, }; const globals = ir.refToList(ir.globals_index); for (globals) |ref| { try p.printInst(0, ref); } } fn Printer(comptime Writer: type) type { return struct { ir: Air, writer: Writer, tty: std.io.tty.Config, fn printInst(self: @This(), indent: u16, index: Air.InstIndex) Writer.Error!void { const inst = self.ir.getInst(index); switch (inst) { .@"const" => { try self.printConst(indent, index); if (indent == 0) { try self.printFieldEnd(); } }, .@"struct" => { try self.printStruct(0, index); try self.printFieldEnd(); }, .@"fn" => { std.debug.assert(indent == 0); try self.printFn(indent, index); try self.printFieldEnd(); }, .@"var" => try self.printVar(indent, index), .bool => try self.printBool(indent, index), .int, .float => try self.printNumber(indent, index), .vector => try self.printVector(indent, index), .matrix => try self.printMatrix(indent, index), .sampler_type, .comparison_sampler_type, .external_texture_type, => { try self.tty.setColor(self.writer, .bright_magenta); try self.writer.print(".{s}", .{@tagName(inst)}); try self.tty.setColor(self.writer, .reset); }, .binary => |bin| { try self.instBlockStart(index); try self.printFieldInst(indent + 1, "lhs", bin.lhs); try self.printFieldInst(indent + 1, "rhs", bin.rhs); try self.instBlockEnd(indent); }, .unary_intrinsic => |un| { try self.instBlockStart(index); try self.printFieldInst(indent + 1, "expr", un.expr); try self.printFieldInst(indent + 1, "res_ty", un.result_type); try self.printFieldEnum(indent + 1, "op", un.op); try self.instBlockEnd(indent); }, .increase, .decrease, .loop, .continuing, .@"return", .break_if, => |un| { try self.instStart(index); if (un != .none) { try self.printInst(indent, un); } try self.instEnd(); }, .block => try self.printBlock(indent, index), .@"if" => try self.printIf(indent, index), .@"while" => try self.printWhile(indent, index), .field_access => try self.printFieldAccess(indent, index), .index_access => try self.printIndexAccess(indent, index), .var_ref => |ref| { try self.instStart(index); try self.tty.setColor(self.writer, .yellow); try self.writer.print("{d}", .{@intFromEnum(ref)}); try self.tty.setColor(self.writer, .reset); try self.instEnd(); }, else => { try self.instStart(index); try self.writer.writeAll("TODO"); try self.instEnd(); }, } } fn printGlobalVar(self: @This(), indent: u16, index: Air.InstIndex) Writer.Error!void { const inst = self.ir.getInst(index).global_var; try self.instBlockStart(index); try self.printFieldString(indent + 1, "name", inst.name); if (inst.addr_space) |addr_space| { try self.printFieldEnum(indent + 1, "addr_space", addr_space); } if (inst.access_mode) |access_mode| { try self.printFieldEnum(indent + 1, "access_mode", access_mode); } if (inst.type != .none) { try self.printFieldInst(indent + 1, "type", inst.type); } if (inst.expr != .none) { try self.printFieldInst(indent + 1, "value", inst.expr); } try self.instBlockEnd(indent); } fn printVar(self: @This(), indent: u16, index: Air.InstIndex) Writer.Error!void { const inst = self.ir.getInst(index).@"var"; try self.instBlockStart(index); try self.printFieldString(indent + 1, "name", inst.name); try self.printFieldEnum(indent + 1, "addr_space", inst.addr_space); try self.printFieldEnum(indent + 1, "access_mode", inst.access_mode); if (inst.type != .none) { try 
self.printFieldInst(indent + 1, "type", inst.type); } if (inst.expr != .none) { try self.printFieldInst(indent + 1, "value", inst.expr); } try self.instBlockEnd(indent); } fn printConst(self: @This(), indent: u16, index: Air.InstIndex) Writer.Error!void { const inst = self.ir.getInst(index).@"const"; try self.instBlockStart(index); try self.printFieldString(indent + 1, "name", inst.name); if (inst.type != .none) { try self.printFieldInst(indent + 1, "type", inst.type); } try self.printFieldInst(indent + 1, "value", inst.expr); try self.instBlockEnd(indent); } fn printLet(self: @This(), indent: u16, index: Air.InstIndex) Writer.Error!void { const inst = self.ir.getInst(index).let; try self.instBlockStart(index); try self.printFieldString(indent + 1, "name", inst.name); if (inst.type != .none) { try self.printFieldInst(indent + 1, "type", inst.type); } try self.printFieldInst(indent + 1, "value", inst.expr); try self.instBlockEnd(indent); } fn printStruct(self: @This(), indent: u16, index: Air.InstIndex) Writer.Error!void { const inst = self.ir.getInst(index); try self.instBlockStart(index); try self.printFieldString(indent + 1, "name", inst.@"struct".name); try self.printFieldName(indent + 1, "members"); try self.listStart(); const members = self.ir.refToList(inst.@"struct".members); for (members) |member| { const member_index = member; const member_inst = self.ir.getInst(member_index); try self.printIndent(indent + 2); try self.instBlockStart(member_index); try self.printFieldString(indent + 3, "name", member_inst.struct_member.name); try self.printFieldInst(indent + 3, "type", member_inst.struct_member.type); if (member_inst.struct_member.@"align") |@"align"| { try self.printFieldAny(indent + 3, "align", @"align"); } if (member_inst.struct_member.size) |size| { try self.printFieldAny(indent + 3, "size", size); } if (member_inst.struct_member.builtin) |builtin| { try self.printFieldAny(indent + 3, "builtin", builtin); } if (member_inst.struct_member.location) |location| { try self.printFieldAny(indent + 3, "location", location); } try self.instBlockEnd(indent + 2); try self.printFieldEnd(); } try self.listEnd(indent + 1); try self.printFieldEnd(); try self.instBlockEnd(indent); } fn printFn(self: @This(), indent: u16, index: Air.InstIndex) Writer.Error!void { const inst = self.ir.getInst(index); try self.instBlockStart(index); try self.printFieldString(indent + 1, "name", inst.@"fn".name); if (inst.@"fn".params != .none) { try self.printFieldName(indent + 1, "params"); try self.listStart(); const params = self.ir.refToList(inst.@"fn".params); for (params) |arg| { const arg_index = arg; const arg_inst = self.ir.getInst(arg_index); try self.printIndent(indent + 2); try self.instBlockStart(arg_index); try self.printFieldString(indent + 3, "name", arg_inst.fn_param.name); try self.printFieldInst(indent + 3, "type", arg_inst.fn_param.type); if (arg_inst.fn_param.builtin) |builtin| { try self.printFieldEnum(indent + 3, "builtin", builtin); } if (arg_inst.fn_param.interpolate) |interpolate| { try self.printFieldName(indent + 3, "interpolate"); try self.instBlockStart(index); try self.printFieldEnum(indent + 4, "type", interpolate.type); if (interpolate.sample != .none) { try self.printFieldEnum(indent + 4, "sample", interpolate.sample); } try self.instBlockEnd(indent + 4); try self.printFieldEnd(); } if (arg_inst.fn_param.location) |location| { try self.printFieldAny(indent + 3, "location", location); } if (arg_inst.fn_param.invariant) { try self.printFieldAny(indent + 3, "invariant", 
arg_inst.fn_param.invariant); } try self.instBlockEnd(indent + 2); try self.printFieldEnd(); } try self.listEnd(indent + 1); try self.printFieldEnd(); } if (inst.@"fn".block != .none) { try self.printFieldName(indent + 1, "block"); try self.printBlock(indent + 1, inst.@"fn".block); try self.printFieldEnd(); } try self.instBlockEnd(indent); } fn printBlock(self: @This(), indent: u16, index: Air.InstIndex) Writer.Error!void { const inst = self.ir.getInst(index).block; const statements = self.ir.refToList(inst); try self.listStart(); for (statements) |statement| { try self.printIndent(indent + 1); try self.printInst(indent + 1, statement); try self.printFieldEnd(); } try self.listEnd(indent); } fn printIf(self: @This(), indent: u16, index: Air.InstIndex) Writer.Error!void { const inst = self.ir.getInst(index).@"if"; try self.instBlockStart(index); try self.printFieldInst(indent + 1, "cond", inst.cond); if (inst.body != .none) { try self.printFieldInst(indent + 1, "body", inst.body); } if (inst.@"else" != .none) { try self.printFieldInst(indent + 1, "else", inst.@"else"); } try self.instBlockEnd(indent); } fn printWhile(self: @This(), indent: u16, index: Air.InstIndex) Writer.Error!void { const inst = self.ir.getInst(index).@"while"; try self.instBlockStart(index); try self.printFieldInst(indent + 1, "cond", inst.cond); if (inst.body != .none) { try self.printFieldInst(indent + 1, "body", inst.body); } try self.instBlockEnd(indent); } fn printBool(self: @This(), indent: u16, index: Air.InstIndex) Writer.Error!void { const inst = self.ir.getInst(index); if (inst.bool.value) |value| { switch (value) { .literal => |lit| { try self.instStart(index); try self.tty.setColor(self.writer, .cyan); try self.writer.print("{}", .{lit}); try self.tty.setColor(self.writer, .reset); try self.instEnd(); }, .cast => |cast| { try self.instBlockStart(index); try self.printFieldInst(indent + 1, "type", cast.type); try self.printFieldInst(indent + 1, "value", cast.value); try self.instBlockEnd(indent); }, } } else { try self.instStart(index); try self.instEnd(); } } fn printNumber(self: @This(), indent: u16, index: Air.InstIndex) Writer.Error!void { const inst = self.ir.getInst(index); try self.instBlockStart(index); switch (inst) { .int => |int| { try self.printFieldEnum(indent + 1, "type", int.type); if (int.value) |value| { switch (self.ir.getValue(Air.Inst.Int.Value, value)) { .literal => |lit| try self.printFieldAny(indent + 1, "value", lit), .cast => |cast| { try self.printFieldName(indent + 1, "cast"); try self.instBlockStart(index); try self.printFieldInst(indent + 2, "type", cast.type); try self.printFieldInst(indent + 2, "value", cast.value); try self.instBlockEnd(indent); try self.printFieldEnd(); }, } } }, .float => |float| { try self.printFieldEnum(indent + 1, "type", float.type); if (float.value) |value| { switch (self.ir.getValue(Air.Inst.Float.Value, value)) { .literal => |lit| try self.printFieldAny(indent + 1, "value", lit), .cast => |cast| { try self.printFieldName(indent + 1, "cast"); try self.instBlockStart(index); try self.printFieldInst(indent + 2, "type", cast.type); try self.printFieldInst(indent + 2, "value", cast.value); try self.instBlockEnd(indent); try self.printFieldEnd(); }, } } }, else => unreachable, } try self.instBlockEnd(indent); } fn printVector(self: @This(), indent: u16, index: Air.InstIndex) Writer.Error!void { const vec = self.ir.getInst(index).vector; try self.instBlockStart(index); try self.printFieldInst(indent + 1, "type", vec.elem_type); if (vec.value) |value_idx| { if 
(value_idx == .none) { try self.printFieldAny(indent + 1, "value", "null"); } else { const value = self.ir.getValue(Air.Inst.Vector.Value, value_idx); switch (value) { .literal => |lit| { try self.printFieldName(indent + 1, "literal"); try self.listStart(); for (0..@intFromEnum(vec.size)) |i| { try self.printIndent(indent + 2); try self.printInst(indent + 2, lit[i]); try self.printFieldEnd(); } try self.listEnd(indent + 1); try self.printFieldEnd(); }, .cast => |cast| { try self.printFieldName(indent + 1, "cast"); try self.listStart(); for (0..@intFromEnum(vec.size)) |i| { try self.printIndent(indent + 2); try self.printInst(indent + 2, cast.value[i]); try self.printFieldEnd(); } try self.listEnd(indent + 1); try self.printFieldEnd(); }, } } } try self.instBlockEnd(indent); } fn printMatrix(self: @This(), indent: u16, index: Air.InstIndex) Writer.Error!void { const mat = self.ir.getInst(index).matrix; try self.instBlockStart(index); try self.printFieldInst(indent + 1, "type", mat.elem_type); if (mat.value) |value_idx| { const value = self.ir.getValue(Air.Inst.Matrix.Value, value_idx); try self.printFieldName(indent + 1, "value"); try self.listStart(); for (0..@intFromEnum(mat.cols) * @intFromEnum(mat.rows)) |i| { try self.printIndent(indent + 2); try self.printInst(indent + 2, value[i]); try self.printFieldEnd(); } try self.listEnd(indent + 1); try self.printFieldEnd(); } try self.instBlockEnd(indent); } fn printFieldAccess(self: @This(), indent: u16, index: Air.InstIndex) Writer.Error!void { const inst = self.ir.getInst(index); try self.instBlockStart(index); try self.printFieldInst(indent + 1, "base", inst.field_access.base); try self.printFieldString(indent + 1, "name", inst.field_access.name); try self.instBlockEnd(indent); } fn printIndexAccess(self: @This(), indent: u16, index: Air.InstIndex) Writer.Error!void { const inst = self.ir.getInst(index); try self.instBlockStart(index); try self.printFieldInst(indent + 1, "base", inst.index_access.base); try self.printFieldInst(indent + 1, "type", inst.index_access.type); try self.printFieldInst(indent + 1, "index", inst.index_access.index); try self.instBlockEnd(indent); } fn instStart(self: @This(), index: Air.InstIndex) !void { const inst = self.ir.getInst(index); try self.tty.setColor(self.writer, .bold); try self.writer.print("{s}", .{@tagName(inst)}); try self.tty.setColor(self.writer, .reset); try self.tty.setColor(self.writer, .dim); try self.writer.writeAll("<"); try self.tty.setColor(self.writer, .reset); try self.tty.setColor(self.writer, .blue); try self.writer.print("{d}", .{@intFromEnum(index)}); try self.tty.setColor(self.writer, .reset); try self.tty.setColor(self.writer, .dim); try self.writer.writeAll(">"); try self.writer.writeAll("("); try self.tty.setColor(self.writer, .reset); } fn instEnd(self: @This()) !void { try self.tty.setColor(self.writer, .dim); try self.writer.writeAll(")"); try self.tty.setColor(self.writer, .reset); } fn instBlockStart(self: @This(), index: Air.InstIndex) !void { const inst = self.ir.getInst(index); try self.tty.setColor(self.writer, .bold); try self.writer.print("{s}", .{@tagName(inst)}); try self.tty.setColor(self.writer, .reset); try self.tty.setColor(self.writer, .dim); try self.writer.writeAll("<"); try self.tty.setColor(self.writer, .reset); try self.tty.setColor(self.writer, .blue); try self.writer.print("{d}", .{@intFromEnum(index)}); try self.tty.setColor(self.writer, .reset); try self.tty.setColor(self.writer, .dim); try self.writer.writeAll(">"); try self.writer.writeAll("{\n"); 
try self.tty.setColor(self.writer, .reset); } fn instBlockEnd(self: @This(), indent: u16) !void { try self.printIndent(indent); try self.tty.setColor(self.writer, .dim); try self.writer.writeAll("}"); try self.tty.setColor(self.writer, .reset); } fn listStart(self: @This()) !void { try self.tty.setColor(self.writer, .dim); try self.writer.writeAll("[\n"); try self.tty.setColor(self.writer, .reset); } fn listEnd(self: @This(), indent: u16) !void { try self.printIndent(indent); try self.tty.setColor(self.writer, .dim); try self.writer.writeAll("]"); try self.tty.setColor(self.writer, .reset); } fn printFieldName(self: @This(), indent: u16, name: []const u8) !void { try self.printIndent(indent); try self.tty.setColor(self.writer, .reset); try self.writer.print("{s}", .{name}); try self.tty.setColor(self.writer, .dim); try self.writer.print(": ", .{}); try self.tty.setColor(self.writer, .reset); } fn printFieldString(self: @This(), indent: u16, name: []const u8, value: Air.StringIndex) !void { try self.printFieldName(indent, name); try self.tty.setColor(self.writer, .green); try self.writer.print("'{s}'", .{self.ir.getStr(value)}); try self.tty.setColor(self.writer, .reset); try self.printFieldEnd(); } fn printFieldInst(self: @This(), indent: u16, name: []const u8, value: Air.InstIndex) !void { try self.printFieldName(indent, name); try self.printInst(indent, value); try self.printFieldEnd(); } fn printFieldEnum(self: @This(), indent: u16, name: []const u8, value: anytype) !void { try self.printFieldName(indent, name); try self.tty.setColor(self.writer, .magenta); try self.writer.print(".{s}", .{@tagName(value)}); try self.tty.setColor(self.writer, .reset); try self.printFieldEnd(); } fn printFieldAny(self: @This(), indent: u16, name: []const u8, value: anytype) !void { try self.printFieldName(indent, name); try self.tty.setColor(self.writer, .cyan); if (@typeInfo(@TypeOf(value)) == .Pointer) { // assume string try self.writer.print("{s}", .{value}); } else { try self.writer.print("{}", .{value}); } try self.tty.setColor(self.writer, .reset); try self.printFieldEnd(); } fn printFieldEnd(self: @This()) !void { try self.writer.writeAll(",\n"); } fn printIndent(self: @This(), indent: u16) !void { try self.writer.writeByteNTimes(' ', indent * indention_size); } }; }
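// Usage sketch (hypothetical caller; assumes `air` was produced by the shader
// front end and that any std.io writer is an acceptable sink):
//   var buf = std.ArrayList(u8).init(allocator);
//   defer buf.deinit();
//   try printAir(air, buf.writer());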
0
repos/mach-sysgpu/src/shader
repos/mach-sysgpu/src/shader/codegen/hlsl.zig
const std = @import("std"); const Air = @import("../Air.zig"); const DebugInfo = @import("../CodeGen.zig").DebugInfo; const Inst = Air.Inst; const InstIndex = Air.InstIndex; const Builtin = Air.Inst.Builtin; const Hlsl = @This(); const Section = std.ArrayListUnmanaged(u8); air: *const Air, allocator: std.mem.Allocator, arena: std.heap.ArenaAllocator, emitted_decls: std.AutoArrayHashMapUnmanaged(InstIndex, []const u8) = .{}, scratch: std.ArrayListUnmanaged(u8) = .{}, output: std.ArrayListUnmanaged(u8) = .{}, indent: u32 = 0, pub fn gen(allocator: std.mem.Allocator, air: *const Air, debug_info: DebugInfo) ![]const u8 { _ = debug_info; var hlsl = Hlsl{ .air = air, .allocator = allocator, .arena = std.heap.ArenaAllocator.init(allocator), }; defer { hlsl.scratch.deinit(allocator); hlsl.emitted_decls.deinit(allocator); hlsl.output.deinit(allocator); hlsl.arena.deinit(); } try hlsl.output.appendSlice(allocator, "#pragma pack_matrix( row_major )\n"); for (air.refToList(air.globals_index)) |inst_idx| { switch (air.getInst(inst_idx)) { .@"var" => try hlsl.emitGlobalVar(inst_idx), .@"const" => try hlsl.emitGlobalConst(inst_idx), .@"fn" => try hlsl.emitFn(inst_idx), .@"struct" => {}, else => unreachable, } } return hlsl.output.toOwnedSlice(allocator); } fn emitType(hlsl: *Hlsl, inst_idx: InstIndex) error{OutOfMemory}!void { if (inst_idx == .none) { try hlsl.writeAll("void"); } else { switch (hlsl.air.getInst(inst_idx)) { .bool => |inst| try hlsl.emitBoolType(inst), .int => |inst| try hlsl.emitIntType(inst), .float => |inst| try hlsl.emitFloatType(inst), .vector => |inst| try hlsl.emitVectorType(inst), .matrix => |inst| try hlsl.emitMatrixType(inst), .array => |inst| try hlsl.emitType(inst.elem_type), .@"struct" => { try hlsl.emitStruct(inst_idx, .normal); try hlsl.writeAll(hlsl.emitted_decls.get(inst_idx).?); }, else => |inst| try hlsl.print("Type: {}", .{inst}), // TODO } } } fn emitTypeSuffix(hlsl: *Hlsl, inst_idx: InstIndex) error{OutOfMemory}!void { if (inst_idx != .none) { switch (hlsl.air.getInst(inst_idx)) { .array => |inst| try hlsl.emitArrayTypeSuffix(inst), else => {}, } } } fn emitArrayTypeSuffix(hlsl: *Hlsl, inst: Inst.Array) !void { if (inst.len != .none) { if (hlsl.air.resolveInt(inst.len)) |len| { try hlsl.print("[{}]", .{len}); } } else { try hlsl.writeAll("[1]"); } try hlsl.emitTypeSuffix(inst.elem_type); } fn emitBoolType(hlsl: *Hlsl, inst: Inst.Bool) !void { _ = inst; try hlsl.writeAll("bool"); } fn emitIntType(hlsl: *Hlsl, inst: Inst.Int) !void { try hlsl.writeAll(switch (inst.type) { .u32 => "uint", .i32 => "int", }); } fn emitFloatType(hlsl: *Hlsl, inst: Inst.Float) !void { try hlsl.writeAll(switch (inst.type) { .f32 => "float", .f16 => "half", }); } fn emitVectorSize(hlsl: *Hlsl, size: Inst.Vector.Size) !void { try hlsl.writeAll(switch (size) { .two => "2", .three => "3", .four => "4", }); } fn emitVectorType(hlsl: *Hlsl, inst: Inst.Vector) !void { try hlsl.emitType(inst.elem_type); try hlsl.emitVectorSize(inst.size); } fn emitMatrixType(hlsl: *Hlsl, inst: Inst.Matrix) !void { // TODO - verify dimension order try hlsl.emitType(inst.elem_type); try hlsl.emitVectorSize(inst.cols); try hlsl.writeAll("x"); try hlsl.emitVectorSize(inst.rows); } fn structMemberLessThan(hlsl: *Hlsl, lhs: InstIndex, rhs: InstIndex) bool { const lhs_member = hlsl.air.getInst(lhs).struct_member; const rhs_member = hlsl.air.getInst(rhs).struct_member; // Location if (lhs_member.location != null and rhs_member.location == null) return true; if (lhs_member.location == null and rhs_member.location != null) 
return false; const lhs_location = lhs_member.location orelse 0; const rhs_location = rhs_member.location orelse 0; if (lhs_location < rhs_location) return true; if (lhs_location > rhs_location) return false; // Builtin if (lhs_member.builtin == null and rhs_member.builtin != null) return true; if (lhs_member.builtin != null and rhs_member.builtin == null) return false; const lhs_builtin = lhs_member.builtin orelse .vertex_index; const rhs_builtin = rhs_member.builtin orelse .vertex_index; return @intFromEnum(lhs_builtin) < @intFromEnum(rhs_builtin); } fn fnParamLessThan(hlsl: *Hlsl, lhs: InstIndex, rhs: InstIndex) bool { const lhs_param = hlsl.air.getInst(lhs).fn_param; const rhs_param = hlsl.air.getInst(rhs).fn_param; // Location if (lhs_param.location != null and rhs_param.location == null) return true; if (lhs_param.location == null and rhs_param.location != null) return false; const lhs_location = lhs_param.location orelse 0; const rhs_location = rhs_param.location orelse 0; if (lhs_location < rhs_location) return true; if (lhs_location > rhs_location) return false; // Builtin if (lhs_param.builtin == null and rhs_param.builtin != null) return true; if (lhs_param.builtin != null and rhs_param.builtin == null) return false; const lhs_builtin = lhs_param.builtin orelse .vertex_index; const rhs_builtin = rhs_param.builtin orelse .vertex_index; return @intFromEnum(lhs_builtin) < @intFromEnum(rhs_builtin); } const StructKind = enum { normal, frag_out, vert_out, }; fn emitStruct(hlsl: *Hlsl, inst_idx: InstIndex, kind: StructKind) !void { if (hlsl.emitted_decls.get(inst_idx)) |_| return; const scratch_top = hlsl.scratch.items.len; defer hlsl.scratch.shrinkRetainingCapacity(scratch_top); const inst = hlsl.air.getInst(inst_idx).@"struct"; const name = try std.fmt.allocPrint( hlsl.arena.allocator(), "{s}{d}", .{ if (kind == .frag_out) "FragOut" else hlsl.air.getStr(inst.name), @intFromEnum(inst_idx), }, ); try hlsl.print("struct {s} {{\n", .{name}); hlsl.enterScope(); defer hlsl.exitScope(); var sorted_members = std.ArrayListUnmanaged(InstIndex){}; defer sorted_members.deinit(hlsl.allocator); const struct_members = hlsl.air.refToList(inst.members); for (struct_members) |member_index| { try sorted_members.append(hlsl.allocator, member_index); } std.sort.insertion(InstIndex, sorted_members.items, hlsl, structMemberLessThan); var padding_index: u32 = 0; for (sorted_members.items) |member_index| { const member = hlsl.air.getInst(member_index).struct_member; try hlsl.writeIndent(); try hlsl.emitType(member.type); try hlsl.writeAll(" "); try hlsl.writeName(member.name); try hlsl.emitTypeSuffix(member.type); if (member.builtin) |builtin| { try hlsl.emitBuiltin(builtin); } else if (member.location) |location| { if (kind == .frag_out) { try hlsl.print(" : SV_Target{}", .{location}); } else { try hlsl.print(" : ATTR{}", .{location}); } } try hlsl.writeAll(";\n"); if (kind == .vert_out) { const size = hlsl.air.typeSize(member.type) orelse continue; const padding_size = if (size < 4) 4 - size else size % 4; if (padding_size != 0) { try hlsl.writeIndent(); try hlsl.print("float{} pad{} : PAD{};\n", .{ padding_size, padding_index, padding_index }); padding_index += 1; } } } try hlsl.writeAll("};\n"); try hlsl.output.appendSlice(hlsl.allocator, hlsl.scratch.items[scratch_top..]); try hlsl.emitted_decls.put(hlsl.allocator, inst_idx, name); } fn emitBufferElemType(hlsl: *Hlsl, inst_idx: InstIndex) !void { switch (hlsl.air.getInst(inst_idx)) { .@"struct" => |inst| { const struct_members = 
hlsl.air.refToList(inst.members); if (struct_members.len > 0) { const last_member_idx = struct_members[struct_members.len - 1]; const last_member = hlsl.air.getInst(last_member_idx).struct_member; try hlsl.emitBufferElemType(last_member.type); } else { std.debug.panic("expected buffer struct to end with an array member", .{}); } }, else => try hlsl.emitType(inst_idx), } } fn emitBuiltin(hlsl: *Hlsl, builtin: Builtin) !void { try hlsl.writeAll(" : "); try hlsl.writeAll(switch (builtin) { .vertex_index => "SV_VertexID", .instance_index => "SV_InstanceID", .position => "SV_Position", .front_facing => "SV_IsFrontFace", .frag_depth => "SV_Depth", .local_invocation_id => "SV_GroupThreadID", .local_invocation_index => "SV_GroupIndex", .global_invocation_id => "SV_DispatchThreadID", .workgroup_id => "SV_GroupID", .num_workgroups => "TODO", // TODO - is this available? .sample_index => "SV_SampleIndex", .sample_mask => "SV_Coverage", }); } fn emitWrapperStruct(hlsl: *Hlsl, inst: Inst.Var) !void { const type_inst = hlsl.air.getInst(inst.type); if (type_inst == .@"struct") return; try hlsl.print("struct Wrapper{} {{ ", .{@intFromEnum(inst.type)}); try hlsl.emitType(inst.type); try hlsl.writeAll(" data"); try hlsl.emitTypeSuffix(inst.type); try hlsl.writeAll("; };\n"); } fn emitGlobalVar(hlsl: *Hlsl, inst_idx: InstIndex) !void { if (hlsl.emitted_decls.get(inst_idx)) |_| return; const scratch_top = hlsl.scratch.items.len; defer hlsl.scratch.shrinkRetainingCapacity(scratch_top); const inst = hlsl.air.getInst(inst_idx).@"var"; if (inst.addr_space == .workgroup) { try hlsl.writeAll("groupshared "); try hlsl.emitType(inst.type); try hlsl.writeAll(" "); try hlsl.writeName(inst.name); try hlsl.emitTypeSuffix(inst.type); try hlsl.writeAll(";\n"); return; } const type_inst = hlsl.air.getInst(inst.type); const binding = hlsl.air.resolveInt(inst.binding) orelse return error.ConstExpr; const group = hlsl.air.resolveInt(inst.group) orelse return error.ConstExpr; var binding_space: []const u8 = undefined; switch (type_inst) { .texture_type => |texture| { try hlsl.writeAll(switch (texture.kind) { .sampled_1d => "Texture1D", .sampled_2d => "Texture2D", .sampled_2d_array => "Texture2DArray", .sampled_3d => "Texture3D", .sampled_cube => "TextureCube", .sampled_cube_array => "TextureCubeArray", .multisampled_2d => "Texture2DMS", .multisampled_depth_2d => "Texture2DMS", .storage_1d => "RWTexture1D", .storage_2d => "RWTexture2D", .storage_2d_array => "RWTexture2DArray", .storage_3d => "RWTexture3D", .depth_2d => "Texture2D", .depth_2d_array => "Texture2DArray", .depth_cube => "TextureCube", .depth_cube_array => "TextureCubeArray", }); switch (texture.kind) { .storage_1d, .storage_2d, .storage_2d_array, .storage_3d, => { try hlsl.writeAll("<"); try hlsl.writeAll(switch (texture.texel_format) { .none => unreachable, .rgba8unorm, .bgra8unorm, .rgba8snorm, .rgba16float, .r32float, .rg32float, .rgba32float, => "float4", .rgba8uint, .rgba16uint, .r32uint, .rg32uint, .rgba32uint, => "uint4", .rgba8sint, .rgba16sint, .r32sint, .rg32sint, .rgba32sint, => "int4", }); try hlsl.writeAll(">"); }, else => {}, } // TODO - I think access_mode may be wrong binding_space = switch (texture.kind) { .storage_1d, .storage_2d, .storage_2d_array, .storage_3d => "u", else => "t", }; }, .sampler_type => { try hlsl.writeAll("SamplerState"); binding_space = "s"; }, .comparison_sampler_type => { try hlsl.writeAll("SamplerComparisonState"); binding_space = "s"; }, else => { switch (inst.addr_space) { .uniform => { try 
hlsl.emitWrapperStruct(inst); try hlsl.writeAll("ConstantBuffer<"); if (type_inst != .@"struct") { try hlsl.print("Wrapper{}", .{@intFromEnum(inst.type)}); } else { try hlsl.emitType(inst.type); } try hlsl.writeAll(">"); binding_space = "b"; }, .storage => { if (inst.access_mode == .write or inst.access_mode == .read_write) { try hlsl.writeAll("RWStructuredBuffer<"); binding_space = "u"; } else { try hlsl.writeAll("StructuredBuffer<"); binding_space = "t"; } try hlsl.emitBufferElemType(inst.type); try hlsl.writeAll(">"); }, else => { std.debug.panic("TODO: implement workgroup variable {s}\n", .{hlsl.air.getStr(inst.name)}); }, } }, } try hlsl.writeAll(" "); try hlsl.writeName(inst.name); try hlsl.print(" : register({s}{}, space{});\n", .{ binding_space, binding, group }); try hlsl.output.appendSlice(hlsl.allocator, hlsl.scratch.items[scratch_top..]); try hlsl.emitted_decls.put(hlsl.allocator, inst_idx, hlsl.air.getStr(inst.name)); } fn emitGlobalConst(hlsl: *Hlsl, inst_idx: InstIndex) !void { if (hlsl.emitted_decls.get(inst_idx)) |_| return; const scratch_top = hlsl.scratch.items.len; defer hlsl.scratch.shrinkRetainingCapacity(scratch_top); const inst = hlsl.air.getInst(inst_idx).@"const"; const t = if (inst.type != .none) inst.type else inst.init; try hlsl.writeAll("static const "); try hlsl.emitType(t); try hlsl.writeAll(" "); try hlsl.writeName(inst.name); try hlsl.emitTypeSuffix(inst.type); try hlsl.writeAll(" = "); try hlsl.emitExpr(inst.init); try hlsl.writeAll(";\n"); try hlsl.output.appendSlice(hlsl.allocator, hlsl.scratch.items[scratch_top..]); try hlsl.emitted_decls.put(hlsl.allocator, inst_idx, hlsl.air.getStr(inst.name)); } fn emitFn(hlsl: *Hlsl, inst_idx: InstIndex) !void { if (hlsl.emitted_decls.get(inst_idx)) |_| return; const scratch_top = hlsl.scratch.items.len; defer hlsl.scratch.shrinkRetainingCapacity(scratch_top); const inst = hlsl.air.getInst(inst_idx).@"fn"; switch (inst.stage) { .compute => |workgroup_size| { try hlsl.print("[numthreads({}, {}, {})]\n", .{ hlsl.air.resolveInt(workgroup_size.x) orelse 1, hlsl.air.resolveInt(workgroup_size.y) orelse 1, hlsl.air.resolveInt(workgroup_size.z) orelse 1, }); }, else => {}, } if (inst.return_type != .none) { const return_type = hlsl.air.getInst(inst.return_type); if (inst.stage == .fragment and return_type == .@"struct") { try hlsl.emitStruct(inst.return_type, .frag_out); try hlsl.writeAll(hlsl.emitted_decls.get(inst.return_type).?); } else if (inst.stage == .vertex and return_type == .@"struct") { try hlsl.emitStruct(inst.return_type, .vert_out); try hlsl.writeAll(hlsl.emitted_decls.get(inst.return_type).?); } else { try hlsl.emitType(inst.return_type); } } else { try hlsl.writeAll("void"); } try hlsl.writeAll(" "); if (inst.stage != .none) { try hlsl.writeEntrypoint(inst.name); } else { try hlsl.writeName(inst.name); } try hlsl.writeAll("("); { hlsl.enterScope(); defer hlsl.exitScope(); if (inst.params != .none) { var fn_params = std.ArrayListUnmanaged(InstIndex){}; defer fn_params.deinit(hlsl.allocator); for (hlsl.air.refToList(inst.params)) |param_index| { try fn_params.append(hlsl.allocator, param_index); } if (inst.stage != .none) { std.sort.insertion(InstIndex, fn_params.items, hlsl, fnParamLessThan); } var add_comma = false; for (fn_params.items) |param_inst_idx| { try hlsl.writeAll(if (add_comma) ",\n" else "\n"); add_comma = true; try hlsl.writeIndent(); try hlsl.emitFnParam(param_inst_idx); } } } try hlsl.writeAll(")"); if (inst.return_attrs.builtin) |builtin| { try hlsl.emitBuiltin(builtin); } else if 
(inst.return_attrs.location) |location| { try hlsl.print(" : SV_Target{}", .{location}); } try hlsl.writeAll("\n"); const block = hlsl.air.getInst(inst.block).block; try hlsl.writeAll("{\n"); { hlsl.enterScope(); defer hlsl.exitScope(); if (inst.has_array_length) { try hlsl.writeIndent(); try hlsl.writeAll("uint _array_length, _array_stride;\n"); } try hlsl.writeIndent(); try hlsl.writeAll("uint _width, _height, _number_of_levels;\n"); for (hlsl.air.refToList(block)) |statement| { try hlsl.emitStatement(statement); } } try hlsl.writeIndent(); try hlsl.writeAll("}\n"); try hlsl.output.appendSlice(hlsl.allocator, hlsl.scratch.items[scratch_top..]); try hlsl.emitted_decls.put(hlsl.allocator, inst_idx, hlsl.air.getStr(inst.name)); } fn emitFnParam(hlsl: *Hlsl, inst_idx: InstIndex) !void { const inst = hlsl.air.getInst(inst_idx).fn_param; try hlsl.emitType(inst.type); try hlsl.writeAll(" "); try hlsl.writeName(inst.name); if (inst.builtin) |builtin| { try hlsl.emitBuiltin(builtin); } else if (inst.location) |location| { try hlsl.print(" : ATTR{}", .{location}); } } fn emitStatement(hlsl: *Hlsl, inst_idx: InstIndex) error{OutOfMemory}!void { try hlsl.writeIndent(); switch (hlsl.air.getInst(inst_idx)) { .@"var" => |inst| try hlsl.emitVar(inst), .@"const" => |inst| try hlsl.emitConst(inst), .block => |block| try hlsl.emitBlock(block), // .loop => |inst| try hlsl.emitLoop(inst), // .continuing .@"return" => |return_inst_idx| try hlsl.emitReturn(return_inst_idx), // .break_if .@"if" => |inst| try hlsl.emitIf(inst), // .@"while" => |inst| try hlsl.emitWhile(inst), .@"for" => |inst| try hlsl.emitFor(inst), // .switch .discard => try hlsl.emitDiscard(), // .@"break" => try hlsl.emitBreak(), .@"continue" => try hlsl.writeAll("continue;\n"), // .call => |inst| try hlsl.emitCall(inst), .assign, .nil_intrinsic, .texture_store, => { try hlsl.emitExpr(inst_idx); try hlsl.writeAll(";\n"); }, //else => |inst| std.debug.panic("TODO: implement Air tag {s}", .{@tagName(inst)}), else => |inst| try hlsl.print("Statement: {}\n", .{inst}), // TODO } } fn emitVar(hlsl: *Hlsl, inst: Inst.Var) !void { const t = if (inst.type != .none) inst.type else inst.init; try hlsl.emitType(t); try hlsl.writeAll(" "); try hlsl.writeName(inst.name); try hlsl.emitTypeSuffix(t); if (inst.init != .none) { try hlsl.writeAll(" = "); try hlsl.emitExpr(inst.init); } try hlsl.writeAll(";\n"); } fn emitConst(hlsl: *Hlsl, inst: Inst.Const) !void { const t = if (inst.type != .none) inst.type else inst.init; try hlsl.writeAll("const "); try hlsl.emitType(t); try hlsl.writeAll(" "); try hlsl.writeName(inst.name); try hlsl.emitTypeSuffix(inst.type); try hlsl.writeAll(" = "); try hlsl.emitExpr(inst.init); try hlsl.writeAll(";\n"); } fn emitBlock(hlsl: *Hlsl, block: Air.RefIndex) !void { try hlsl.writeAll("{\n"); { hlsl.enterScope(); defer hlsl.exitScope(); for (hlsl.air.refToList(block)) |statement| { try hlsl.emitStatement(statement); } } try hlsl.writeIndent(); try hlsl.writeAll("}\n"); } fn emitReturn(hlsl: *Hlsl, inst_idx: InstIndex) !void { try hlsl.writeAll("return"); if (inst_idx != .none) { try hlsl.writeAll(" "); try hlsl.emitExpr(inst_idx); } try hlsl.writeAll(";\n"); } fn emitIf(hlsl: *Hlsl, inst: Inst.If) !void { try hlsl.writeAll("if ("); try hlsl.emitExpr(inst.cond); try hlsl.writeAll(")\n"); { const body_inst = hlsl.air.getInst(inst.body); if (body_inst != .block) hlsl.enterScope(); try hlsl.emitStatement(inst.body); if (body_inst != .block) hlsl.exitScope(); } if (inst.@"else" != .none) { try hlsl.writeIndent(); try 
hlsl.writeAll("else\n"); try hlsl.emitStatement(inst.@"else"); } try hlsl.writeAll("\n"); } fn emitFor(hlsl: *Hlsl, inst: Inst.For) !void { try hlsl.writeAll("for (\n"); { hlsl.enterScope(); defer hlsl.exitScope(); try hlsl.emitStatement(inst.init); try hlsl.writeIndent(); try hlsl.emitExpr(inst.cond); try hlsl.writeAll(";\n"); try hlsl.writeIndent(); try hlsl.emitExpr(inst.update); try hlsl.writeAll(")\n"); } try hlsl.emitStatement(inst.body); } fn emitDiscard(hlsl: *Hlsl) !void { try hlsl.writeAll("discard;\n"); } fn emitExpr(hlsl: *Hlsl, inst_idx: InstIndex) error{OutOfMemory}!void { switch (hlsl.air.getInst(inst_idx)) { .var_ref => |inst| try hlsl.emitVarRef(inst), .bool => |inst| try hlsl.emitBool(inst), .int => |inst| try hlsl.emitInt(inst), .float => |inst| try hlsl.emitFloat(inst), .vector => |inst| try hlsl.emitVector(inst), .matrix => |inst| try hlsl.emitMatrix(inst), .array => |inst| try hlsl.emitArray(inst), .nil_intrinsic => |inst| try hlsl.emitNilIntrinsic(inst), .unary => |inst| try hlsl.emitUnary(inst), .unary_intrinsic => |inst| try hlsl.emitUnaryIntrinsic(inst), .binary => |inst| try hlsl.emitBinary(inst), .binary_intrinsic => |inst| try hlsl.emitBinaryIntrinsic(inst), .triple_intrinsic => |inst| try hlsl.emitTripleIntrinsic(inst), .assign => |inst| try hlsl.emitAssign(inst), .field_access => |inst| try hlsl.emitFieldAccess(inst), .swizzle_access => |inst| try hlsl.emitSwizzleAccess(inst), .index_access => |inst| try hlsl.emitIndexAccess(inst), .call => |inst| try hlsl.emitCall(inst), //.struct_construct: StructConstruct, //.bitcast: Bitcast, .texture_sample => |inst| try hlsl.emitTextureSample(inst), .texture_dimension => |inst| try hlsl.emitTextureDimension(inst), .texture_load => |inst| try hlsl.emitTextureLoad(inst), .texture_store => |inst| try hlsl.emitTextureStore(inst), //else => |inst| std.debug.panic("TODO: implement Air tag {s}", .{@tagName(inst)}), else => |inst| std.debug.panic("Expr: {}", .{inst}), // TODO } } fn emitVarRef(hlsl: *Hlsl, inst_idx: InstIndex) !void { switch (hlsl.air.getInst(inst_idx)) { .@"var" => |v| { try hlsl.writeName(v.name); const v_type_inst = hlsl.air.getInst(v.type); if (v.addr_space == .uniform and v_type_inst != .@"struct") try hlsl.writeAll(".data"); }, .@"const" => |c| try hlsl.writeName(c.name), .fn_param => |p| { try hlsl.writeName(p.name); }, else => |x| std.debug.panic("VarRef: {}", .{x}), // TODO } } fn emitBool(hlsl: *Hlsl, inst: Inst.Bool) !void { switch (inst.value.?) 
{ .literal => |lit| try hlsl.print("{}", .{lit}), .cast => @panic("TODO: bool cast"), } } fn emitInt(hlsl: *Hlsl, inst: Inst.Int) !void { switch (hlsl.air.getValue(Inst.Int.Value, inst.value.?)) { .literal => |lit| try hlsl.print("{}", .{lit}), .cast => |cast| try hlsl.emitIntCast(inst, cast), } } fn emitIntCast(hlsl: *Hlsl, dest_type: Inst.Int, cast: Inst.ScalarCast) !void { try hlsl.emitIntType(dest_type); try hlsl.writeAll("("); try hlsl.emitExpr(cast.value); try hlsl.writeAll(")"); } fn emitFloat(hlsl: *Hlsl, inst: Inst.Float) !void { switch (hlsl.air.getValue(Inst.Float.Value, inst.value.?)) { .literal => |lit| try hlsl.print("{}", .{lit}), .cast => |cast| try hlsl.emitFloatCast(inst, cast), } } fn emitFloatCast(hlsl: *Hlsl, dest_type: Inst.Float, cast: Inst.ScalarCast) !void { try hlsl.emitFloatType(dest_type); try hlsl.writeAll("("); try hlsl.emitExpr(cast.value); try hlsl.writeAll(")"); } fn emitVector(hlsl: *Hlsl, inst: Inst.Vector) !void { try hlsl.emitVectorType(inst); try hlsl.writeAll("("); const value = hlsl.air.getValue(Inst.Vector.Value, inst.value.?); switch (value) { .literal => |literal| try hlsl.emitVectorElems(inst.size, literal), .cast => |cast| try hlsl.emitVectorElems(inst.size, cast.value), } try hlsl.writeAll(")"); } fn emitVectorElems(hlsl: *Hlsl, size: Inst.Vector.Size, value: [4]InstIndex) !void { for (value[0..@intFromEnum(size)], 0..) |elem_inst, i| { try hlsl.writeAll(if (i == 0) "" else ", "); try hlsl.emitExpr(elem_inst); } } fn emitMatrix(hlsl: *Hlsl, inst: Inst.Matrix) !void { try hlsl.emitMatrixType(inst); try hlsl.writeAll("("); const value = hlsl.air.getValue(Inst.Matrix.Value, inst.value.?); for (value[0..@intFromEnum(inst.cols)], 0..) |elem_inst, i| { try hlsl.writeAll(if (i == 0) "" else ", "); try hlsl.emitExpr(elem_inst); } try hlsl.writeAll(")"); } fn emitArray(hlsl: *Hlsl, inst: Inst.Array) !void { try hlsl.writeAll("{"); { hlsl.enterScope(); defer hlsl.exitScope(); const value = hlsl.air.refToList(inst.value.?); for (value, 0..) 
|elem_inst, i| { try hlsl.writeAll(if (i == 0) "\n" else ",\n"); try hlsl.writeIndent(); try hlsl.emitExpr(elem_inst); } } try hlsl.writeAll("}"); } fn emitNilIntrinsic(hlsl: *Hlsl, op: Inst.NilIntrinsic) !void { try hlsl.writeAll(switch (op) { .storage_barrier => "DeviceMemoryBarrierWithGroupSync()", .workgroup_barrier => "GroupMemoryBarrierWithGroupSync()", }); } fn emitUnary(hlsl: *Hlsl, inst: Inst.Unary) !void { try hlsl.writeAll(switch (inst.op) { .not => "!", .negate => "-", .deref => "*", .addr_of => @panic("unsupported"), }); try hlsl.emitExpr(inst.expr); } fn emitUnaryIntrinsic(hlsl: *Hlsl, inst: Inst.UnaryIntrinsic) !void { switch (inst.op) { .array_length => try hlsl.emitArrayLength(inst), else => { try hlsl.writeAll(switch (inst.op) { .all => "all", .any => "any", .abs => "abs", .acos => "acos", //.acosh => "acosh", .asin => "asin", //.asinh => "asinh", .atan => "atan", //.atanh => "atanh", .ceil => "ceil", .cos => "cos", .cosh => "cosh", //.count_leading_zeros => "count_leading_zeros", .count_one_bits => "countbits", //.count_trailing_zeros => "count_trailing_zeros", .degrees => "degrees", .exp => "exp", .exp2 => "exp2", //.first_leading_bit => "first_leading_bit", //.first_trailing_bit => "first_trailing_bit", .floor => "floor", .fract => "frac", .inverse_sqrt => "rsqrt", .length => "length", .log => "log", .log2 => "log2", //.quantize_to_F16 => "quantize_to_F16", .radians => "radians", .reverseBits => "reversebits", .round => "rint", .saturate => "saturate", .sign => "sign", .sin => "sin", .sinh => "sinh", .sqrt => "sqrt", .tan => "tan", .tanh => "tanh", .trunc => "trunc", .dpdx => "ddx", .dpdx_coarse => "ddx_coarse", .dpdx_fine => "ddx_fine", .dpdy => "ddy", .dpdy_coarse => "ddy_coarse", .dpdy_fine => "ddy_fine", .fwidth => "fwidth", .fwidth_coarse => "fwidth", .fwidth_fine => "fwidth", .normalize => "normalize", else => std.debug.panic("TODO: implement Air tag {s}", .{@tagName(inst.op)}), }); try hlsl.writeAll("("); try hlsl.emitExpr(inst.expr); try hlsl.writeAll(")"); }, } } fn emitArrayLength(hlsl: *Hlsl, inst: Inst.UnaryIntrinsic) !void { switch (hlsl.air.getInst(inst.expr)) { .unary => |un| switch (un.op) { .addr_of => try hlsl.emitArrayLengthTarget(un.expr, 0), else => try hlsl.print("ArrayLength (unary_op): {}", .{un.op}), }, else => |array_length_expr| try hlsl.print("ArrayLength (array_length_expr): {}", .{array_length_expr}), } } fn emitArrayLengthTarget(hlsl: *Hlsl, inst_idx: InstIndex, offset: usize) error{OutOfMemory}!void { switch (hlsl.air.getInst(inst_idx)) { .var_ref => |var_ref_inst_idx| try hlsl.emitArrayLengthVarRef(var_ref_inst_idx, offset), .field_access => |inst| try hlsl.emitArrayLengthFieldAccess(inst, offset), else => |inst| try hlsl.print("ArrayLengthTarget: {}", .{inst}), } } fn emitArrayLengthVarRef(hlsl: *Hlsl, inst_idx: InstIndex, offset: usize) !void { switch (hlsl.air.getInst(inst_idx)) { .@"var" => |var_inst| { try hlsl.writeAll("("); try hlsl.writeName(var_inst.name); try hlsl.print( ".GetDimensions(_array_length, _array_stride), _array_length - {})", .{offset}, ); }, else => |var_ref_expr| try hlsl.print("arrayLength (var_ref_expr): {}", .{var_ref_expr}), } } fn emitArrayLengthFieldAccess(hlsl: *Hlsl, inst: Inst.FieldAccess, base_offset: usize) !void { const member_offset = 0; // TODO try hlsl.emitArrayLengthTarget(inst.base, base_offset + member_offset); } fn emitBinary(hlsl: *Hlsl, inst: Inst.Binary) !void { switch (inst.op) { .mul => { const lhs_type = hlsl.air.getInst(inst.lhs_type); const rhs_type = hlsl.air.getInst(inst.rhs_type); 
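// (Added note, hedged): this backend appears to keep matrices transposed on
// the HLSL side, so the operand order of matrix products is swapped to
// compensate: a WGSL product `lhs * rhs` involving a matrix comes out as
// HLSL `mul(rhs, lhs)` rather than `mul(lhs, rhs)`.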
if (lhs_type == .matrix or rhs_type == .matrix) { // matrices are transposed try hlsl.writeAll("mul"); try hlsl.writeAll("("); try hlsl.emitExpr(inst.rhs); try hlsl.writeAll(", "); try hlsl.emitExpr(inst.lhs); try hlsl.writeAll(")"); } else { try hlsl.emitBinaryOp(inst); } }, else => try hlsl.emitBinaryOp(inst), } } fn emitBinaryOp(hlsl: *Hlsl, inst: Inst.Binary) !void { try hlsl.writeAll("("); try hlsl.emitExpr(inst.lhs); try hlsl.print(" {s} ", .{switch (inst.op) { .mul => "*", .div => "/", .mod => "%", .add => "+", .sub => "-", .shl => "<<", .shr => ">>", .@"and" => "&", .@"or" => "|", .xor => "^", .logical_and => "&&", .logical_or => "||", .equal => "==", .not_equal => "!=", .less_than => "<", .less_than_equal => "<=", .greater_than => ">", .greater_than_equal => ">=", }}); try hlsl.emitExpr(inst.rhs); try hlsl.writeAll(")"); } fn emitBinaryIntrinsic(hlsl: *Hlsl, inst: Inst.BinaryIntrinsic) !void { try hlsl.writeAll(switch (inst.op) { .min => "min", .max => "max", .atan2 => "atan2", .distance => "distance", .dot => "dot", .pow => "pow", .step => "step", }); try hlsl.writeAll("("); try hlsl.emitExpr(inst.lhs); try hlsl.writeAll(", "); try hlsl.emitExpr(inst.rhs); try hlsl.writeAll(")"); } fn emitTripleIntrinsic(hlsl: *Hlsl, inst: Inst.TripleIntrinsic) !void { try hlsl.writeAll(switch (inst.op) { .smoothstep => "smoothstep", .clamp => "clamp", .mix => "lerp", }); try hlsl.writeAll("("); try hlsl.emitExpr(inst.a1); try hlsl.writeAll(", "); try hlsl.emitExpr(inst.a2); try hlsl.writeAll(", "); try hlsl.emitExpr(inst.a3); try hlsl.writeAll(")"); } fn emitAssign(hlsl: *Hlsl, inst: Inst.Assign) !void { try hlsl.emitExpr(inst.lhs); try hlsl.print(" {s}= ", .{switch (inst.mod) { .none => "", .add => "+", .sub => "-", .mul => "*", .div => "/", .mod => "%", .@"and" => "&", .@"or" => "|", .xor => "^", .shl => "<<", .shr => ">>", }}); try hlsl.emitExpr(inst.rhs); } fn emitFieldAccess(hlsl: *Hlsl, inst: Inst.FieldAccess) !void { const base_inst = hlsl.air.getInst(inst.base); switch (base_inst) { .var_ref => |var_ref_inst_idx| { switch (hlsl.air.getInst(var_ref_inst_idx)) { .@"var" => |v| { const v_type_inst = hlsl.air.getInst(v.type); if (v.addr_space == .storage and v_type_inst == .@"struct") { // assume 1 field that is an array try hlsl.emitExpr(inst.base); } else { try hlsl.emitFieldAccessRegular(inst); } }, else => try hlsl.emitFieldAccessRegular(inst), } }, else => try hlsl.emitFieldAccessRegular(inst), } } fn emitFieldAccessRegular(hlsl: *Hlsl, inst: Inst.FieldAccess) !void { try hlsl.emitExpr(inst.base); try hlsl.writeAll("."); try hlsl.writeName(inst.name); } fn emitSwizzleAccess(hlsl: *Hlsl, inst: Inst.SwizzleAccess) !void { try hlsl.emitExpr(inst.base); try hlsl.writeAll("."); for (0..@intFromEnum(inst.size)) |i| { switch (inst.pattern[i]) { .x => try hlsl.writeAll("x"), .y => try hlsl.writeAll("y"), .z => try hlsl.writeAll("z"), .w => try hlsl.writeAll("w"), } } } fn emitIndexAccess(hlsl: *Hlsl, inst: Inst.IndexAccess) !void { try hlsl.emitExpr(inst.base); try hlsl.writeAll("["); try hlsl.emitExpr(inst.index); try hlsl.writeAll("]"); } fn emitCall(hlsl: *Hlsl, inst: Inst.FnCall) !void { const fn_inst = hlsl.air.getInst(inst.@"fn").@"fn"; try hlsl.writeName(fn_inst.name); try hlsl.writeAll("("); if (inst.args != .none) { for (hlsl.air.refToList(inst.args), 0..) 
|arg_inst_idx, i| { try hlsl.writeAll(if (i == 0) "" else ", "); try hlsl.emitExpr(arg_inst_idx); } } try hlsl.writeAll(")"); } fn emitTextureSample(hlsl: *Hlsl, inst: Inst.TextureSample) !void { try hlsl.emitExpr(inst.texture); switch (inst.operands) { .none => try hlsl.writeAll(".Sample("), // TODO .level => try hlsl.writeAll(".SampleLevel("), .grad => try hlsl.writeAll(".SampleGrad("), } try hlsl.emitExpr(inst.sampler); try hlsl.writeAll(", "); try hlsl.emitExpr(inst.coords); switch (inst.operands) { .none => {}, .level => |level| { try hlsl.writeAll(", "); try hlsl.emitExpr(level); }, .grad => |grad| { try hlsl.writeAll(", "); try hlsl.emitExpr(grad.dpdx); try hlsl.writeAll(", "); try hlsl.emitExpr(grad.dpdy); }, } try hlsl.writeAll(")"); switch (hlsl.air.getInst(inst.result_type)) { .float => try hlsl.writeAll(".x"), else => {}, } } fn emitTextureDimension(hlsl: *Hlsl, inst: Inst.TextureDimension) !void { try hlsl.writeAll("("); try hlsl.emitExpr(inst.texture); try hlsl.writeAll(".GetDimensions(0, _width, _height, _number_of_levels), uint2(_width, _height)"); // TODO try hlsl.writeAll(")"); } fn emitTextureLoad(hlsl: *Hlsl, inst: Inst.TextureLoad) !void { try hlsl.emitExpr(inst.texture); try hlsl.writeAll(".Load("); try hlsl.writeAll("int3("); // TODO try hlsl.emitExpr(inst.coords); try hlsl.writeAll(", "); try hlsl.emitExpr(inst.level); try hlsl.writeAll(")"); try hlsl.writeAll(")"); switch (hlsl.air.getInst(inst.result_type)) { .float => try hlsl.writeAll(".x"), else => {}, } } fn emitTextureStore(hlsl: *Hlsl, inst: Inst.TextureStore) !void { try hlsl.emitExpr(inst.texture); try hlsl.writeAll("["); try hlsl.emitExpr(inst.coords); try hlsl.writeAll("]"); try hlsl.writeAll(" = "); try hlsl.emitExpr(inst.value); } fn enterScope(hlsl: *Hlsl) void { hlsl.indent += 4; } fn exitScope(hlsl: *Hlsl) void { hlsl.indent -= 4; } fn writeIndent(hlsl: *Hlsl) !void { try hlsl.scratch.writer(hlsl.allocator).writeByteNTimes(' ', hlsl.indent); } fn writeEntrypoint(hlsl: *Hlsl, name: Air.StringIndex) !void { const str = hlsl.air.getStr(name); try hlsl.writeAll(str); } fn writeName(hlsl: *Hlsl, name: Air.StringIndex) !void { // Suffix with index as WGSL has different scoping rules and to avoid conflicts with keywords const str = hlsl.air.getStr(name); try hlsl.print("{s}_{}", .{ str, @intFromEnum(name) }); } fn writeAll(hlsl: *Hlsl, bytes: []const u8) !void { try hlsl.scratch.writer(hlsl.allocator).writeAll(bytes); } fn print(hlsl: *Hlsl, comptime format: []const u8, args: anytype) !void { try hlsl.scratch.writer(hlsl.allocator).print(format, args); }
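// ---------------------------------------------------------------------------
// Hedged sketch (added illustration, not part of the original file): the
// writeName mangling above suffixes the interned-string index so that WGSL
// declarations sharing a name in different scopes stay distinct in HLSL and
// cannot collide with HLSL keywords. `name_index` is a hypothetical stand-in
// for an Air.StringIndex value.
test "writeName-style mangling sketch" {
    var buf: [32]u8 = undefined;
    const name_index: u32 = 7; // assumed interned index for the string "color"
    const mangled = try std.fmt.bufPrint(&buf, "{s}_{}", .{ "color", name_index });
    try std.testing.expectEqualStrings("color_7", mangled);
}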
0
repos/mach-sysgpu/src/shader
repos/mach-sysgpu/src/shader/codegen/msl.zig
const std = @import("std"); const Air = @import("../Air.zig"); const DebugInfo = @import("../CodeGen.zig").DebugInfo; const Entrypoint = @import("../CodeGen.zig").Entrypoint; const BindingPoint = @import("../CodeGen.zig").BindingPoint; const BindingTable = @import("../CodeGen.zig").BindingTable; const Inst = Air.Inst; const InstIndex = Air.InstIndex; const Builtin = Air.Inst.Builtin; const Msl = @This(); // TODO - where to share? const slot_buffer_lengths = 28; air: *const Air, allocator: std.mem.Allocator, storage: std.ArrayListUnmanaged(u8), writer: std.ArrayListUnmanaged(u8).Writer, fn_emit_list: std.AutoArrayHashMapUnmanaged(InstIndex, bool) = .{}, bindings: *const BindingTable, indent: u32 = 0, stage: Inst.Fn.Stage = .none, has_stage_in: bool = false, frag_result_inst_idx: InstIndex = .none, label: [*:0]const u8, pub fn gen( allocator: std.mem.Allocator, air: *const Air, debug_info: DebugInfo, entrypoint: ?Entrypoint, bindings: ?*const BindingTable, label: [*:0]const u8, ) ![]const u8 { _ = debug_info; var storage = std.ArrayListUnmanaged(u8){}; var msl = Msl{ .air = air, .allocator = allocator, .storage = storage, .writer = storage.writer(allocator), .bindings = bindings orelse &.{}, .label = label, }; defer { storage.deinit(allocator); msl.fn_emit_list.deinit(allocator); } try msl.writeAll("#include <metal_stdlib>\n"); try msl.writeAll("using namespace metal;\n\n"); // TODO - track fragment return usage on Inst.Struct so HLSL can use it as well for (air.refToList(air.globals_index)) |inst_idx| { switch (air.getInst(inst_idx)) { .@"fn" => |fn_inst| { if (fn_inst.return_type != .none and fn_inst.stage == .fragment) { if (air.getInst(fn_inst.return_type) == .@"struct") { msl.frag_result_inst_idx = fn_inst.return_type; } } }, else => {}, } } const is_test = entrypoint == null; for (air.refToList(air.globals_index)) |inst_idx| { switch (air.getInst(inst_idx)) { // Entrypoint functions should only be emitted if that is the entrypoint we are // intending to emit. 
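// (Added note) e.g. with a null entrypoint (the is_test path) every function
// is emitted; with a fragment Entrypoint, vertex and compute entry functions
// are skipped, while stage-less (.none) helper functions are always emitted.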
.@"fn" => |inst| switch (inst.stage) { .vertex => if (is_test or entrypoint.?.stage == .vertex) try msl.emitFn(inst), .fragment => if (is_test or entrypoint.?.stage == .fragment) try msl.emitFn(inst), .compute => if (is_test or entrypoint.?.stage == .compute) try msl.emitFn(inst), .none => try msl.emitFn(inst), }, .@"struct" => |inst| try msl.emitStruct(inst_idx, inst), .@"const" => |inst| try msl.emitGlobalConst(inst), .@"var" => {}, else => unreachable, } } return storage.toOwnedSlice(allocator); } fn stringFromStage(stage: Inst.Fn.Stage) []const u8 { return switch (stage) { .none => "", .compute => "kernel", .vertex => "vertex", .fragment => "fragment", }; } fn stringFromStageCapitalized(stage: Inst.Fn.Stage) []const u8 { return switch (stage) { .none => "", .compute => "Kernel", .vertex => "Vertex", .fragment => "Fragment", }; } fn emitType(msl: *Msl, inst_idx: InstIndex) error{OutOfMemory}!void { if (inst_idx == .none) { try msl.writeAll("void"); } else { switch (msl.air.getInst(inst_idx)) { .bool => |inst| try msl.emitBoolType(inst), .int => |inst| try msl.emitIntType(inst), .float => |inst| try msl.emitFloatType(inst), .vector => |inst| try msl.emitVectorType(inst), .matrix => |inst| try msl.emitMatrixType(inst), .array => |inst| try msl.emitType(inst.elem_type), .@"struct" => |inst| try msl.writeName(inst.name), .texture_type => |inst| try msl.emitTextureType(inst), .sampler_type => try msl.emitSamplerType(), else => |inst| try msl.print("TODO: emitType: {}", .{inst}), // TODO } } } fn emitTypeSuffix(msl: *Msl, inst_idx: InstIndex) error{OutOfMemory}!void { if (inst_idx != .none) { switch (msl.air.getInst(inst_idx)) { .array => |inst| try msl.emitArrayTypeSuffix(inst), else => {}, } } } fn emitArrayTypeSuffix(msl: *Msl, inst: Inst.Array) !void { if (inst.len != .none) { if (msl.air.resolveInt(inst.len)) |len| { try msl.print("[{}]", .{len}); } } else { // Flexible array members are a C99 feature, but Metal validation checks actual resource size not what is in the shader. 
try msl.writeAll("[1]"); } try msl.emitTypeSuffix(inst.elem_type); } fn emitTypeAsPointer(msl: *Msl, inst_idx: InstIndex) !void { if (inst_idx != .none) { switch (msl.air.getInst(inst_idx)) { .array => try msl.writeAll("*"), else => try msl.writeAll("&"), } } } fn emitBoolType(msl: *Msl, inst: Inst.Bool) !void { _ = inst; try msl.writeAll("bool"); } fn emitIntType(msl: *Msl, inst: Inst.Int) !void { try msl.writeAll(switch (inst.type) { .u32 => "uint", .i32 => "int", }); } fn emitFloatType(msl: *Msl, inst: Inst.Float) !void { try msl.writeAll(switch (inst.type) { .f32 => "float", .f16 => "half", }); } fn emitVectorSize(msl: *Msl, size: Inst.Vector.Size) !void { try msl.writeAll(switch (size) { .two => "2", .three => "3", .four => "4", }); } fn emitVectorType(msl: *Msl, inst: Inst.Vector) !void { try msl.emitType(inst.elem_type); try msl.emitVectorSize(inst.size); } fn emitMatrixType(msl: *Msl, inst: Inst.Matrix) !void { // TODO - verify dimension order try msl.emitType(inst.elem_type); try msl.emitVectorSize(inst.cols); try msl.writeAll("x"); try msl.emitVectorSize(inst.rows); } fn emitStruct(msl: *Msl, inst_idx: InstIndex, inst: Inst.Struct) !void { try msl.writeAll("struct "); try msl.writeName(inst.name); try msl.writeAll(" {\n"); msl.enterScope(); defer msl.exitScope(); const struct_members = msl.air.refToList(inst.members); for (struct_members) |member_index| { const member = msl.air.getInst(member_index).struct_member; try msl.writeIndent(); try msl.emitType(member.type); try msl.writeAll(" "); try msl.writeName(member.name); try msl.emitTypeSuffix(member.type); if (member.builtin) |builtin| { try msl.emitBuiltin(builtin); } else if (member.location) |location| { if (inst_idx == msl.frag_result_inst_idx) { try msl.print(" [[color({})]]", .{location}); } else { try msl.print(" [[user(_{})]]", .{location}); } } try msl.writeAll(";\n"); } try msl.writeAll("};\n"); } fn emitBuiltin(msl: *Msl, builtin: Builtin) !void { try msl.writeAll(" [["); try msl.writeAll(switch (builtin) { .vertex_index => "vertex_id", .instance_index => "instance_id", .position => "position", .front_facing => "front_facing", .frag_depth => "depth(any)", .local_invocation_id => "thread_position_in_threadgroup", .local_invocation_index => "thread_index_in_threadgroup", .global_invocation_id => "thread_position_in_grid", .workgroup_id => "threadgroup_position_in_grid", .num_workgroups => "threadgroups_per_grid", .sample_index => "sample_id", .sample_mask => "sample_mask", }); try msl.writeAll("]]"); } fn emitGlobalConst(msl: *Msl, inst: Inst.Const) !void { const t = if (inst.type != .none) inst.type else inst.init; try msl.writeAll("constant "); try msl.emitType(t); try msl.writeAll(" "); try msl.writeName(inst.name); try msl.emitTypeSuffix(inst.type); try msl.writeAll(" = "); try msl.emitExpr(inst.init); try msl.writeAll(";\n"); } fn isStageInParameter(msl: *Msl, inst_idx: InstIndex) bool { const inst = msl.air.getInst(inst_idx).fn_param; return inst.builtin == null; } fn hasStageInType(msl: *Msl, inst: Inst.Fn) bool { if (inst.stage == .none) return false; if (inst.params != .none) { const param_list = msl.air.refToList(inst.params); for (param_list) |param_inst_idx| { if (msl.isStageInParameter(param_inst_idx)) return true; } } return false; } fn emitFn(msl: *Msl, inst: Inst.Fn) !void { msl.stage = inst.stage; msl.has_stage_in = msl.hasStageInType(inst); try msl.emitStageInType(inst); if (inst.stage != .none) { try msl.print("{s} ", .{stringFromStage(inst.stage)}); } if (inst.return_type != .none) { try 
msl.emitType(inst.return_type); } else { try msl.writeAll("void"); } try msl.writeAll(" "); if (inst.stage != .none) { try msl.writeEntrypoint(inst.name); } else { try msl.writeName(inst.name); } try msl.writeAll("("); { msl.enterScope(); defer msl.exitScope(); var add_comma = false; for (msl.air.refToList(inst.global_var_refs)) |var_inst_idx| { const var_inst = msl.air.getInst(var_inst_idx).@"var"; if (var_inst.addr_space == .workgroup) continue; try msl.writeAll(if (add_comma) ",\n" else "\n"); add_comma = true; try msl.writeIndent(); try msl.emitFnGlobalVar(var_inst_idx); } if (inst.has_array_length) { try msl.writeAll(if (add_comma) ",\n" else "\n"); add_comma = true; try msl.writeIndent(); try msl.print("constant uint* buffer_lengths [[buffer({})]]", .{slot_buffer_lengths}); } if (inst.params != .none) { const param_list = msl.air.refToList(inst.params); for (param_list) |param_inst_idx| { if (msl.has_stage_in and msl.isStageInParameter(param_inst_idx)) continue; try msl.writeAll(if (add_comma) ",\n" else "\n"); add_comma = true; try msl.writeIndent(); try msl.emitFnParam(param_inst_idx); } } if (msl.has_stage_in) { try msl.writeAll(if (add_comma) ",\n" else "\n"); add_comma = true; try msl.writeIndent(); try msl.print("_{s}In in [[stage_in]]", .{stringFromStageCapitalized(inst.stage)}); } } try msl.writeAll(")\n"); const block = msl.air.getInst(inst.block).block; try msl.writeAll("{\n"); { msl.enterScope(); defer msl.exitScope(); for (msl.air.refToList(inst.global_var_refs)) |var_inst_idx| { const var_inst = msl.air.getInst(var_inst_idx).@"var"; if (var_inst.addr_space == .workgroup) { try msl.writeIndent(); try msl.writeAll("threadgroup "); try msl.emitType(var_inst.type); try msl.writeAll(" "); try msl.writeName(var_inst.name); try msl.emitTypeSuffix(var_inst.type); try msl.writeAll(";\n"); } } for (msl.air.refToList(block)) |statement| { try msl.emitStatement(statement); } } try msl.writeIndent(); try msl.writeAll("}\n"); } fn emitStageInType(msl: *Msl, inst: Inst.Fn) !void { if (!msl.has_stage_in) return; try msl.print("struct _{s}In {{\n", .{stringFromStageCapitalized(inst.stage)}); { msl.enterScope(); defer msl.exitScope(); const param_list = msl.air.refToList(inst.params); for (param_list) |param_inst_idx| { if (!msl.isStageInParameter(param_inst_idx)) continue; try msl.writeIndent(); try msl.emitFnParam(param_inst_idx); try msl.writeAll(";\n"); } } try msl.writeAll("};\n"); } fn emitFnGlobalVar(msl: *Msl, inst_idx: InstIndex) !void { const inst = msl.air.getInst(inst_idx).@"var"; const group = msl.air.resolveInt(inst.group) orelse return error.ConstExpr; const binding = msl.air.resolveInt(inst.binding) orelse return error.ConstExpr; const key = BindingPoint{ .group = @intCast(group), .binding = @intCast(binding) }; // Note: a debug marker indicating we could not find the binding slot. This can // genuinely happen because, e.g., vertex and fragment programs have different sets of // bindings but we emitFn for all function declarations, rather than just those reachable // from the vert/frag entrypoint. MSL dead code elimination means such bindings should // never be accessed.
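// (Added note) e.g. while emitting a vertex function, a binding that exists
// only in the fragment bind group resolves to the 1337 sentinel below and is
// emitted as something like `[[buffer(1337)]]`; since nothing reachable uses
// it, Metal's dead code elimination should discard it.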
const slot = msl.bindings.get(key) orelse 1337; const type_inst = msl.air.getInst(inst.type); switch (type_inst) { .texture_type => |texture| try msl.emitFnTexture(inst, texture, slot), .sampler_type, .comparison_sampler_type => try msl.emitFnSampler(inst, slot), else => try msl.emitFnBuffer(inst, slot), } } fn emitFnTexture(msl: *Msl, inst: Inst.Var, texture: Inst.TextureType, slot: u32) !void { try msl.writeAll(switch (texture.kind) { .sampled_1d => "texture1d", .sampled_2d => "texture2d", .sampled_2d_array => "texture2d_array", .sampled_3d => "texture3d", .sampled_cube => "texturecube", .sampled_cube_array => "texturecube_array", .multisampled_2d => "texture2d_ms", .multisampled_depth_2d => "depth2d_ms", .storage_1d => "texture1d", .storage_2d => "texture2d", .storage_2d_array => "texture2d_array", .storage_3d => "texture3d", .depth_2d => "depth2d", .depth_2d_array => "depth2d_array", .depth_cube => "depthcube", .depth_cube_array => "depthcube_array", }); try msl.writeAll("<"); try msl.writeAll(switch (texture.texel_format) { .none => "float", // TODO - is this right? .rgba8unorm, .rgba8snorm, .bgra8unorm, .rgba16float, .r32float, .rg32float, .rgba32float, => "float", .rgba8uint, .rgba16uint, .r32uint, .rg32uint, .rgba32uint, => "uint", .rgba8sint, .rgba16sint, .r32sint, .rg32sint, .rgba32sint, => "int", }); try msl.writeAll(", access::"); try msl.writeAll(switch (texture.kind) { .sampled_1d, .sampled_2d, .sampled_2d_array, .sampled_3d, .sampled_cube, .sampled_cube_array, .multisampled_2d, .multisampled_depth_2d, .depth_2d, .depth_2d_array, .depth_cube, .depth_cube_array, => "sample", .storage_1d, .storage_2d, .storage_2d_array, .storage_3d, => "read_write", // TODO - read, write only }); try msl.writeAll("> "); try msl.writeName(inst.name); if (msl.stage != .none) try msl.print(" [[texture({})]]", .{slot}); } fn emitFnSampler(msl: *Msl, inst: Inst.Var, slot: u32) !void { try msl.writeAll("sampler"); try msl.writeAll(" "); try msl.writeName(inst.name); if (msl.stage != .none) try msl.print(" [[sampler({})]]", .{slot}); } fn emitFnBuffer(msl: *Msl, inst: Inst.Var, slot: u32) !void { try msl.writeAll(switch (inst.addr_space) { .uniform => "constant", else => "device", }); try msl.writeAll(" "); try msl.emitType(inst.type); try msl.emitTypeAsPointer(inst.type); try msl.writeAll(" "); try msl.writeName(inst.name); //try msl.emitTypeSuffix(inst.type); handled by emitTypeAsPointer if (msl.stage != .none) try msl.print(" [[buffer({})]]", .{slot}); } fn emitFnParam(msl: *Msl, inst_idx: InstIndex) !void { const inst = msl.air.getInst(inst_idx).fn_param; try msl.emitType(inst.type); try msl.writeAll(" "); try msl.writeName(inst.name); if (inst.builtin) |builtin| { try msl.emitBuiltin(builtin); } else if (inst.location) |location| { if (msl.stage == .vertex) { try msl.print(" [[attribute({})]]", .{location}); } else { try msl.print(" [[user(_{})]]", .{location}); } } } fn emitStatement(msl: *Msl, inst_idx: InstIndex) error{ OutOfMemory, ConstExpr }!void { try msl.writeIndent(); switch (msl.air.getInst(inst_idx)) { .@"var" => |inst| try msl.emitVar(inst), .@"const" => |inst| try msl.emitConst(inst), .block => |block| try msl.emitBlock(block), // .loop => |inst| try msl.emitLoop(inst), // .continuing .@"return" => |return_inst_idx| try msl.emitReturn(return_inst_idx), // .break_if .@"if" => |inst| try msl.emitIf(inst), // .@"while" => |inst| try msl.emitWhile(inst), .@"for" => |inst| try msl.emitFor(inst), // .switch .discard => try msl.emitDiscard(), // .@"break" => try msl.emitBreak(), 
.@"continue" => try msl.writeAll("continue;\n"), .call, .assign, .nil_intrinsic, .texture_store, => { try msl.emitExpr(inst_idx); try msl.writeAll(";\n"); }, //else => |inst| std.debug.panic("TODO: implement Air tag {s}", .{@tagName(inst)}), else => |inst| try msl.print("Statement: {}\n", .{inst}), // TODO } } fn emitVar(msl: *Msl, inst: Inst.Var) !void { const t = if (inst.type != .none) inst.type else inst.init; try msl.emitType(t); try msl.writeAll(" "); try msl.writeName(inst.name); try msl.emitTypeSuffix(t); if (inst.init != .none) { try msl.writeAll(" = "); try msl.emitExpr(inst.init); } try msl.writeAll(";\n"); } fn emitConst(msl: *Msl, inst: Inst.Const) !void { const t = if (inst.type != .none) inst.type else inst.init; try msl.writeAll("const "); try msl.emitType(t); try msl.writeAll(" "); try msl.writeName(inst.name); try msl.emitTypeSuffix(inst.type); try msl.writeAll(" = "); try msl.emitExpr(inst.init); try msl.writeAll(";\n"); } fn emitBlock(msl: *Msl, block: Air.RefIndex) !void { try msl.writeAll("{\n"); { msl.enterScope(); defer msl.exitScope(); for (msl.air.refToList(block)) |statement| { try msl.emitStatement(statement); } } try msl.writeIndent(); try msl.writeAll("}\n"); } fn emitReturn(msl: *Msl, inst_idx: InstIndex) !void { try msl.writeAll("return"); if (inst_idx != .none) { try msl.writeAll(" "); try msl.emitExpr(inst_idx); } try msl.writeAll(";\n"); } fn emitIf(msl: *Msl, inst: Inst.If) !void { try msl.writeAll("if ("); try msl.emitExpr(inst.cond); try msl.writeAll(")\n"); { const body_inst = msl.air.getInst(inst.body); if (body_inst != .block) msl.enterScope(); try msl.emitStatement(inst.body); if (body_inst != .block) msl.exitScope(); } if (inst.@"else" != .none) { try msl.writeIndent(); try msl.writeAll("else\n"); try msl.emitStatement(inst.@"else"); } try msl.writeAll("\n"); } fn emitFor(msl: *Msl, inst: Inst.For) !void { try msl.writeAll("for (\n"); { msl.enterScope(); defer msl.exitScope(); try msl.emitStatement(inst.init); try msl.writeIndent(); try msl.emitExpr(inst.cond); try msl.writeAll(";\n"); try msl.writeIndent(); try msl.emitExpr(inst.update); try msl.writeAll(")\n"); } try msl.emitStatement(inst.body); } fn emitDiscard(msl: *Msl) !void { try msl.writeAll("discard_fragment();\n"); } fn emitExpr(msl: *Msl, inst_idx: InstIndex) error{ OutOfMemory, ConstExpr }!void { switch (msl.air.getInst(inst_idx)) { .var_ref => |inst| try msl.emitVarRef(inst), //.bool => |inst| msl.emitBool(inst), .int => |inst| try msl.emitInt(inst), .float => |inst| try msl.emitFloat(inst), .vector => |inst| try msl.emitVector(inst), .matrix => |inst| try msl.emitMatrix(inst), .array => |inst| try msl.emitArray(inst), .nil_intrinsic => |inst| try msl.emitNilIntrinsic(inst), .unary => |inst| try msl.emitUnary(inst), .unary_intrinsic => |inst| try msl.emitUnaryIntrinsic(inst), .binary => |inst| try msl.emitBinary(inst), .binary_intrinsic => |inst| try msl.emitBinaryIntrinsic(inst), .triple_intrinsic => |inst| try msl.emitTripleIntrinsic(inst), .assign => |inst| try msl.emitAssign(inst), .field_access => |inst| try msl.emitFieldAccess(inst), .swizzle_access => |inst| try msl.emitSwizzleAccess(inst), .index_access => |inst| try msl.emitIndexAccess(inst), .call => |inst| try msl.emitCall(inst), //.struct_construct: StructConstruct, //.bitcast: Bitcast, .texture_sample => |inst| try msl.emitTextureSample(inst), .texture_dimension => |inst| try msl.emitTextureDimension(inst), .texture_load => |inst| try msl.emitTextureLoad(inst), .texture_store => |inst| try msl.emitTextureStore(inst), 
//else => |inst| std.debug.panic("TODO: implement Air tag {s}", .{@tagName(inst)}), else => |inst| try msl.print("Expr: {}", .{inst}), // TODO } } fn emitVarRef(msl: *Msl, inst_idx: InstIndex) !void { switch (msl.air.getInst(inst_idx)) { .@"var" => |v| try msl.writeName(v.name), .@"const" => |c| try msl.writeName(c.name), .fn_param => |p| { if (msl.has_stage_in and msl.isStageInParameter(inst_idx)) { try msl.writeAll("in."); } try msl.writeName(p.name); }, else => |x| try msl.print("VarRef: {}", .{x}), // TODO } } fn emitInt(msl: *Msl, inst: Inst.Int) !void { return switch (msl.air.getValue(Inst.Int.Value, inst.value.?)) { .literal => |lit| try msl.print("{}", .{lit}), .cast => |cast| msl.emitIntCast(inst, cast), }; } fn emitIntCast(msl: *Msl, dest_type: Inst.Int, cast: Inst.ScalarCast) !void { try msl.emitIntType(dest_type); try msl.writeAll("("); try msl.emitExpr(cast.value); try msl.writeAll(")"); } fn emitFloat(msl: *Msl, inst: Inst.Float) !void { return switch (msl.air.getValue(Inst.Float.Value, inst.value.?)) { .literal => |lit| try msl.print("{}", .{lit}), .cast => |cast| msl.emitFloatCast(inst, cast), }; } fn emitFloatCast(msl: *Msl, dest_type: Inst.Float, cast: Inst.ScalarCast) !void { try msl.emitFloatType(dest_type); try msl.writeAll("("); try msl.emitExpr(cast.value); try msl.writeAll(")"); } fn emitVector(msl: *Msl, inst: Inst.Vector) !void { try msl.emitVectorType(inst); try msl.writeAll("("); const value = msl.air.getValue(Inst.Vector.Value, inst.value.?); switch (value) { .literal => |literal| try msl.emitVectorElems(inst.size, literal), .cast => |cast| try msl.emitVectorElems(inst.size, cast.value), } try msl.writeAll(")"); } fn emitVectorElems(msl: *Msl, size: Inst.Vector.Size, value: [4]InstIndex) !void { for (value[0..@intFromEnum(size)], 0..) |elem_inst, i| { try msl.writeAll(if (i == 0) "" else ", "); try msl.emitExpr(elem_inst); } } fn emitMatrix(msl: *Msl, inst: Inst.Matrix) !void { try msl.emitMatrixType(inst); try msl.writeAll("("); const value = msl.air.getValue(Inst.Matrix.Value, inst.value.?); for (value[0..@intFromEnum(inst.cols)], 0..) |elem_inst, i| { try msl.writeAll(if (i == 0) "" else ", "); try msl.emitExpr(elem_inst); } try msl.writeAll(")"); } fn emitArray(msl: *Msl, inst: Inst.Array) !void { try msl.writeAll("{"); { msl.enterScope(); defer msl.exitScope(); const value = msl.air.refToList(inst.value.?); for (value, 0..) 
|elem_inst, i| { try msl.writeAll(if (i == 0) "\n" else ",\n"); try msl.writeIndent(); try msl.emitExpr(elem_inst); } } try msl.writeAll("}"); } fn emitNilIntrinsic(msl: *Msl, op: Inst.NilIntrinsic) !void { try msl.writeAll(switch (op) { .storage_barrier => "threadgroup_barrier(mem_flags::mem_device)", .workgroup_barrier => "threadgroup_barrier(mem_flags::mem_threadgroup)", }); } fn emitUnary(msl: *Msl, inst: Inst.Unary) !void { try msl.writeAll(switch (inst.op) { .not => "!", .negate => "-", .deref => "*", .addr_of => "&", }); try msl.emitExpr(inst.expr); } fn emitUnaryIntrinsic(msl: *Msl, inst: Inst.UnaryIntrinsic) !void { const result_type = msl.air.getInst(inst.result_type); switch (inst.op) { .array_length => try msl.emitArrayLength(inst), .degrees => { try msl.writeAll("("); try msl.emitExpr(inst.expr); try msl.print(" * {}", .{180.0 / std.math.pi}); try msl.writeAll(")"); }, .radians => { try msl.writeAll("("); try msl.emitExpr(inst.expr); try msl.print(" * {}", .{std.math.pi / 180.0}); try msl.writeAll(")"); }, else => { try msl.writeAll(switch (inst.op) { .array_length => unreachable, .degrees => unreachable, .radians => unreachable, .all => "all", .any => "any", .abs => if (result_type == .float) "fabs" else "abs", .acos => "acos", .acosh => "acosh", .asin => "asin", .asinh => "asinh", .atan => "atan", .atanh => "atanh", .ceil => "ceil", .cos => "cos", .cosh => "cosh", .count_leading_zeros => "clz", .count_one_bits => "popcount", .count_trailing_zeros => "ctz", .exp => "exp", .exp2 => "exp2", //.first_leading_bit => "first_leading_bit", //.first_trailing_bit => "first_trailing_bit", .floor => "floor", .fract => "fract", .inverse_sqrt => "rsqrt", .length => "length", .log => "log", .log2 => "log2", //.quantize_to_F16 => "quantize_to_F16", .reverseBits => "reverse_bits", .round => "rint", //.saturate => "saturate", .sign => "sign", .sin => "sin", .sinh => "sinh", .sqrt => "sqrt", .tan => "tan", .tanh => "tanh", .trunc => "trunc", .dpdx => "dfdx", .dpdx_coarse => "dfdx", .dpdx_fine => "dfdx", .dpdy => "dfdy", .dpdy_coarse => "dfdy", .dpdy_fine => "dfdy", .fwidth => "fwidth", .fwidth_coarse => "fwidth", .fwidth_fine => "fwidth", .normalize => "normalize", else => std.debug.panic("TODO: implement Air tag {s}", .{@tagName(inst.op)}), }); try msl.writeAll("("); try msl.emitExpr(inst.expr); try msl.writeAll(")"); }, } } fn emitArrayLength(msl: *Msl, inst: Inst.UnaryIntrinsic) !void { switch (msl.air.getInst(inst.expr)) { .unary => |un| switch (un.op) { .addr_of => try msl.emitArrayLengthTarget(un.expr, 0), else => try msl.print("ArrayLength (unary_op): {}", .{un.op}), }, else => |array_length_expr| try msl.print("ArrayLength (array_length_expr): {}", .{array_length_expr}), } } fn emitArrayLengthTarget(msl: *Msl, inst_idx: InstIndex, offset: usize) error{OutOfMemory}!void { switch (msl.air.getInst(inst_idx)) { .var_ref => |var_ref_inst_idx| try msl.emitArrayLengthVarRef(var_ref_inst_idx, offset), .field_access => |inst| try msl.emitArrayLengthFieldAccess(inst, offset), else => |inst| try msl.print("ArrayLengthTarget: {}", .{inst}), } } fn emitArrayLengthVarRef(msl: *Msl, inst_idx: InstIndex, offset: usize) !void { switch (msl.air.getInst(inst_idx)) { .@"var" => |var_inst| { if (msl.air.resolveInt(var_inst.binding)) |binding| { try msl.print("(buffer_lengths[{}] / sizeof(", .{binding}); try msl.emitType(var_inst.type); try msl.print(") - {})", .{offset}); } }, else => |var_ref_expr| try msl.print("arrayLength (var_ref_expr): {}", .{var_ref_expr}), } } fn emitArrayLengthFieldAccess(msl: 
*Msl, inst: Inst.FieldAccess, base_offset: usize) !void { const member_offset = 0; // TODO try msl.emitArrayLengthTarget(inst.base, base_offset + member_offset); } fn emitBinary(msl: *Msl, inst: Inst.Binary) !void { try msl.writeAll("("); try msl.emitExpr(inst.lhs); try msl.print(" {s} ", .{switch (inst.op) { .mul => "*", .div => "/", .mod => "%", .add => "+", .sub => "-", .shl => "<<", .shr => ">>", .@"and" => "&", .@"or" => "|", .xor => "^", .logical_and => "&&", .logical_or => "||", .equal => "==", .not_equal => "!=", .less_than => "<", .less_than_equal => "<=", .greater_than => ">", .greater_than_equal => ">=", }}); try msl.emitExpr(inst.rhs); try msl.writeAll(")"); } fn emitBinaryIntrinsic(msl: *Msl, inst: Inst.BinaryIntrinsic) !void { const result_type = msl.air.getInst(inst.result_type); try msl.writeAll(switch (inst.op) { .min => if (result_type == .float) "fmin" else "min", .max => if (result_type == .float) "fmax" else "max", .atan2 => "atan2", .distance => "distance", .dot => "dot", .pow => "pow", .step => "step", }); try msl.writeAll("("); try msl.emitExpr(inst.lhs); try msl.writeAll(", "); try msl.emitExpr(inst.rhs); try msl.writeAll(")"); } fn emitTripleIntrinsic(msl: *Msl, inst: Inst.TripleIntrinsic) !void { try msl.writeAll(switch (inst.op) { .smoothstep => "smoothstep", .clamp => "clamp", .mix => "mix", }); try msl.writeAll("("); try msl.emitExpr(inst.a1); try msl.writeAll(", "); try msl.emitExpr(inst.a2); try msl.writeAll(", "); try msl.emitExpr(inst.a3); try msl.writeAll(")"); } fn emitAssign(msl: *Msl, inst: Inst.Assign) !void { try msl.emitExpr(inst.lhs); try msl.print(" {s}= ", .{switch (inst.mod) { .none => "", .add => "+", .sub => "-", .mul => "*", .div => "/", .mod => "%", .@"and" => "&", .@"or" => "|", .xor => "^", .shl => "<<", .shr => ">>", }}); try msl.emitExpr(inst.rhs); } fn emitFieldAccess(msl: *Msl, inst: Inst.FieldAccess) !void { try msl.emitExpr(inst.base); try msl.writeAll("."); try msl.writeName(inst.name); } fn emitSwizzleAccess(msl: *Msl, inst: Inst.SwizzleAccess) !void { try msl.emitExpr(inst.base); try msl.writeAll("."); for (0..@intFromEnum(inst.size)) |i| { switch (inst.pattern[i]) { .x => try msl.writeAll("x"), .y => try msl.writeAll("y"), .z => try msl.writeAll("z"), .w => try msl.writeAll("w"), } } } fn emitIndexAccess(msl: *Msl, inst: Inst.IndexAccess) !void { try msl.emitExpr(inst.base); try msl.writeAll("["); try msl.emitExpr(inst.index); try msl.writeAll("]"); } fn emitCall(msl: *Msl, inst: Inst.FnCall) !void { const fn_inst = msl.air.getInst(inst.@"fn").@"fn"; if (msl.fn_emit_list.get(inst.@"fn") == null) { try msl.fn_emit_list.put(msl.allocator, inst.@"fn", false); } try msl.writeName(fn_inst.name); try msl.writeAll("("); var add_comma = false; for (msl.air.refToList(fn_inst.global_var_refs)) |var_inst_idx| { try msl.writeAll(if (add_comma) ", " else ""); add_comma = true; const var_inst = msl.air.getInst(var_inst_idx).@"var"; try msl.writeName(var_inst.name); } if (inst.args != .none) { for (msl.air.refToList(inst.args)) |arg_inst_idx| { try msl.writeAll(if (add_comma) ", " else ""); add_comma = true; try msl.emitExpr(arg_inst_idx); } } try msl.writeAll(")"); } fn emitTextureSample(msl: *Msl, inst: Inst.TextureSample) !void { try msl.emitExpr(inst.texture); try msl.writeAll(".sample("); // TODO try msl.emitExpr(inst.sampler); try msl.writeAll(", "); try msl.emitExpr(inst.coords); switch (inst.operands) { .none => {}, .level => |level| { try msl.writeAll(", level("); try msl.emitExpr(level); try msl.writeAll(")"); }, .grad => |grad| { 
switch (inst.kind.dimension()) { .@"1d" => unreachable, .@"2d" => try msl.writeAll("gradient2d("), .@"3d" => try msl.writeAll("gradient3d("), .cube => try msl.writeAll("gradientcube("), } try msl.emitExpr(grad.dpdx); try msl.writeAll(", "); try msl.emitExpr(grad.dpdy); try msl.writeAll(")"); }, } try msl.writeAll(")"); } fn emitTextureDimension(msl: *Msl, inst: Inst.TextureDimension) !void { try msl.writeAll("uint2("); // TODO try msl.emitExpr(inst.texture); try msl.writeAll(".get_width()"); try msl.writeAll(", "); try msl.emitExpr(inst.texture); try msl.writeAll(".get_height()"); try msl.writeAll(")"); } fn emitTextureLoad(msl: *Msl, inst: Inst.TextureLoad) !void { try msl.emitExpr(inst.texture); try msl.writeAll(".read("); try msl.writeAll("uint2("); // TODO try msl.emitExpr(inst.coords); try msl.writeAll(")"); try msl.writeAll(", "); try msl.emitExpr(inst.level); try msl.writeAll(")"); } fn emitTextureStore(msl: *Msl, inst: Inst.TextureStore) !void { try msl.emitExpr(inst.texture); try msl.writeAll(".write("); try msl.emitExpr(inst.value); try msl.writeAll(", "); try msl.writeAll("uint2("); // TODO try msl.emitExpr(inst.coords); try msl.writeAll(")"); try msl.writeAll(")"); } fn enterScope(msl: *Msl) void { msl.indent += 4; } fn exitScope(msl: *Msl) void { msl.indent -= 4; } fn writeIndent(msl: *Msl) !void { try msl.writer.writeByteNTimes(' ', msl.indent); } fn writeEntrypoint(msl: *Msl, name: Air.StringIndex) !void { const str = msl.air.getStr(name); if (std.mem.eql(u8, str, "main")) { try msl.writeAll("main_"); } else { try msl.writeAll(str); } } fn emitTextureType(msl: *Msl, inst: Inst.TextureType) !void { // TODO: I think some of the access::sample cases are wrong, e.g. it is possible to use // texture2d with access::read and access::write, but unsure how to translate those // exactly. Does WGSL represent those or is it not allowed? 
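// (Added note) A rough mapping sketch of what the switch below produces,
// assuming a float texel type: WGSL `texture_2d<f32>` becomes MSL
// `texture2d<float, access::sample>`, and WGSL
// `texture_storage_2d<rgba8unorm, write>` becomes
// `texture2d<float, access::write>`.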
switch (inst.kind) { .sampled_1d => { try msl.writeAll("texture1d<"); try msl.emitType(inst.elem_type); try msl.writeAll(", access::sample>"); }, .sampled_2d => { try msl.writeAll("texture2d<"); try msl.emitType(inst.elem_type); try msl.writeAll(", access::sample>"); }, .multisampled_2d => { try msl.writeAll("texture2d_ms<"); try msl.emitType(inst.elem_type); try msl.writeAll(", access::sample>"); }, .sampled_3d => { try msl.writeAll("texture3d<"); try msl.emitType(inst.elem_type); try msl.writeAll(", access::sample>"); }, .sampled_2d_array => { try msl.writeAll("texture2d_array<"); try msl.emitType(inst.elem_type); try msl.writeAll(", access::sample>"); }, .sampled_cube => { try msl.writeAll("texturecube<"); try msl.emitType(inst.elem_type); try msl.writeAll(", access::sample>"); }, .sampled_cube_array => { try msl.writeAll("texturecube_array<"); try msl.emitType(inst.elem_type); try msl.writeAll(", access::sample>"); }, .storage_1d => { try msl.writeAll("texture1d<"); try msl.emitType(inst.elem_type); try msl.writeAll(if (inst.access_mode == .write) "access::write>" else "access::read>"); }, .storage_2d => { try msl.writeAll("texture2d<"); try msl.emitType(inst.elem_type); try msl.writeAll(if (inst.access_mode == .write) "access::write>" else "access::read>"); }, .storage_3d => { try msl.writeAll("texture3d<"); try msl.emitType(inst.elem_type); try msl.writeAll(if (inst.access_mode == .write) "access::write>" else "access::read>"); }, .storage_2d_array => { try msl.writeAll("texture2d_array<"); try msl.emitType(inst.elem_type); try msl.writeAll(if (inst.access_mode == .write) "access::write>" else "access::read>"); }, .depth_2d => { try msl.writeAll("depth2d<"); try msl.emitType(inst.elem_type); try msl.writeAll(", access::sample>"); }, .multisampled_depth_2d => { try msl.writeAll("depth2d_ms<"); try msl.emitType(inst.elem_type); try msl.writeAll(", access::sample>"); }, .depth_2d_array => { try msl.writeAll("depth2d_array<"); try msl.emitType(inst.elem_type); try msl.writeAll(", access::sample>"); }, .depth_cube => { try msl.writeAll("depthcube<"); try msl.emitType(inst.elem_type); try msl.writeAll(", access::sample>"); }, .depth_cube_array => { try msl.writeAll("depthcube_array<"); try msl.emitType(inst.elem_type); try msl.writeAll(", access::sample>"); }, } } fn emitSamplerType(msl: *Msl) !void { try msl.writeAll("sampler"); } fn writeName(msl: *Msl, name: Air.StringIndex) !void { // Suffix with index as WGSL has different scoping rules and to avoid conflicts with keywords const str = msl.air.getStr(name); try msl.print("{s}_{}", .{ str, @intFromEnum(name) }); } fn writeAll(msl: *Msl, bytes: []const u8) !void { try msl.writer.writeAll(bytes); } fn print(msl: *Msl, comptime format: []const u8, args: anytype) !void { return std.fmt.format(msl.writer, format, args); }
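// ---------------------------------------------------------------------------
// Hedged sketch (added illustration, not part of the original file):
// emitUnaryIntrinsic above lowers WGSL degrees()/radians() to plain
// multiplications, presumably because MSL offers no direct equivalents. The
// checks below just mirror the constant factors printed there.
test "degrees/radians lowering factors sketch" {
    const deg: f64 = std.math.pi * (180.0 / std.math.pi); // radians -> degrees
    const rad: f64 = 180.0 * (std.math.pi / 180.0); // degrees -> radians
    try std.testing.expectApproxEqAbs(@as(f64, 180.0), deg, 1e-9);
    try std.testing.expectApproxEqAbs(@as(f64, std.math.pi), rad, 1e-9);
}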
0
repos/mach-sysgpu/src/shader
repos/mach-sysgpu/src/shader/codegen/spirv.zig
const std = @import("std"); const Air = @import("../Air.zig"); const DebugInfo = @import("../CodeGen.zig").DebugInfo; const Section = @import("spirv/Section.zig"); const spec = @import("spirv/spec.zig"); const Inst = Air.Inst; const InstIndex = Air.InstIndex; const RefIndex = Air.RefIndex; const Word = spec.Word; const Opcode = spec.Opcode; const Operand = spec.Operand; const IdResult = spec.IdResult; const IdRef = spec.IdRef; const SpirV = @This(); air: *const Air, allocator: std.mem.Allocator, /// Debug Information debug_section: Section, /// Annotations annotations_section: Section, /// Types, variables and constants global_section: Section, /// Functions main_section: Section, /// Cache type and constants type_value_map: std.ArrayHashMapUnmanaged(Key, IdRef, Key.Adapter, true) = .{}, /// Map Air Instruction Index to IdRefs to prevent duplicated declarations decl_map: std.AutoHashMapUnmanaged(InstIndex, Decl) = .{}, /// Map Air Struct Instruction Index to IdRefs to prevent duplicated declarations struct_map: std.AutoHashMapUnmanaged(InstIndex, IdRef) = .{}, /// Map storage variables and fields with runtime array type into their parent struct pointer runtiem_arr_vars: std.AutoHashMapUnmanaged(IdRef, IdRef) = .{}, decorated: std.AutoHashMapUnmanaged(IdRef, void) = .{}, capabilities: std.ArrayListUnmanaged(spec.Capability) = .{}, fn_stack: std.ArrayListUnmanaged(FnInfo) = .{}, extended_instructions: ?IdRef = null, emit_debug_names: bool, next_result_id: Word = 1, compute_stage: ?ComputeStage = null, vertex_stage: ?VertexStage = null, fragment_stage: ?FragmentStage = null, loop_merge_label: ?IdRef = null, loop_continue_label: ?IdRef = null, branched: std.AutoHashMapUnmanaged(RefIndex, void) = .{}, current_block: RefIndex, const Decl = struct { id: IdRef, type_id: IdRef, is_ptr: bool, storage_class: spec.StorageClass, faked_struct: bool, }; const FnInfo = struct { store_return: StoreReturn, discard_branch: ?IdRef = null, }; const StoreReturn = union(enum) { none, single: IdRef, many: []const Item, const Item = struct { ptr: IdRef, type: IdRef }; }; const ComputeStage = struct { id: IdResult, name: []const u8, interface: []const IdRef, workgroup_size: struct { x: spec.LiteralInteger, y: spec.LiteralInteger, z: spec.LiteralInteger, }, }; const VertexStage = struct { id: IdResult, name: []const u8, interface: []const IdRef, }; const FragmentStage = struct { id: IdResult, name: []const u8, interface: []const IdRef, }; pub fn gen(allocator: std.mem.Allocator, air: *const Air, debug_info: DebugInfo) ![]const u8 { var spv = SpirV{ .air = air, .allocator = allocator, .debug_section = .{ .allocator = allocator }, .annotations_section = .{ .allocator = allocator }, .global_section = .{ .allocator = allocator }, .main_section = .{ .allocator = allocator }, .emit_debug_names = debug_info.emit_names, .current_block = undefined, }; defer { spv.debug_section.deinit(); spv.annotations_section.deinit(); spv.global_section.deinit(); spv.main_section.deinit(); spv.type_value_map.deinit(allocator); spv.decl_map.deinit(allocator); spv.struct_map.deinit(allocator); spv.decorated.deinit(allocator); spv.branched.deinit(allocator); spv.capabilities.deinit(allocator); spv.runtiem_arr_vars.deinit(allocator); spv.fn_stack.deinit(allocator); if (spv.compute_stage) |stage| allocator.free(stage.interface); if (spv.vertex_stage) |stage| allocator.free(stage.interface); if (spv.fragment_stage) |stage| allocator.free(stage.interface); } var module_section = Section{ .allocator = allocator }; defer module_section.deinit(); if 
(debug_info.emit_source_file) |source_file_path| { try spv.emitSourceInfo(source_file_path); } for (air.refToList(air.globals_index)) |inst_idx| { switch (spv.air.getInst(inst_idx)) { .@"fn" => _ = try spv.emitFn(inst_idx), .@"const" => _ = try spv.emitConst(&spv.global_section, inst_idx), .@"var" => _ = try spv.emitVarProto(&spv.global_section, inst_idx), .@"struct" => _ = try spv.emitStruct(inst_idx), else => unreachable, } } try spv.emitModule(&module_section); try module_section.append(spv.debug_section); try module_section.append(spv.annotations_section); try module_section.append(spv.global_section); try module_section.append(spv.main_section); return allocator.dupe(u8, std.mem.sliceAsBytes(module_section.words.items)); } fn emitSourceInfo(spv: *SpirV, file_path: []const u8) !void { const file_path_id = spv.allocId(); try spv.debug_section.emit(.OpString, .{ .id_result = file_path_id, .string = file_path, }); try spv.debug_section.emit(.OpSource, .{ .source_language = .Unknown, .version = 0, .file = file_path_id, .source = spv.air.tree.source, }); } fn emitModule(spv: *SpirV, section: *Section) !void { const header = &[_]Word{ // Magic number spec.magic_number, // Spir-V 1.3 spec.Version.toWord(.{ .major = 1, .minor = 3 }), // Generator magic number // TODO: register sysgpu compiler 0, // Id's bound spv.next_result_id, // Reserved for instruction schema, if needed 0, }; try section.ensureUnusedCapacity(header.len); section.writeWords(header); try section.emit(.OpCapability, .{ .capability = .Shader }); for (spv.capabilities.items) |cap| { try section.emit(.OpCapability, .{ .capability = cap }); } if (spv.air.extensions.f16) try section.emit(.OpCapability, .{ .capability = .Float16 }); if (spv.extended_instructions) |id| try section.emit( .OpExtInstImport, .{ .id_result = id, .name = "GLSL.std.450" }, ); try section.emit(.OpMemoryModel, .{ .addressing_model = .Logical, .memory_model = .GLSL450 }); if (spv.compute_stage) |compute_stage| { try section.emit(.OpEntryPoint, .{ .execution_model = .GLCompute, .entry_point = compute_stage.id, .name = compute_stage.name, .interface = compute_stage.interface, }); try section.emit(.OpExecutionMode, .{ .entry_point = compute_stage.id, .mode = .{ .LocalSize = .{ .x_size = compute_stage.workgroup_size.x, .y_size = compute_stage.workgroup_size.y, .z_size = compute_stage.workgroup_size.z, } }, }); } if (spv.vertex_stage) |vertex_stage| { try section.emit(.OpEntryPoint, .{ .execution_model = .Vertex, .entry_point = vertex_stage.id, .name = vertex_stage.name, .interface = vertex_stage.interface, }); } if (spv.fragment_stage) |fragment_stage| { try section.emit(.OpEntryPoint, .{ .execution_model = .Fragment, .entry_point = fragment_stage.id, .name = fragment_stage.name, .interface = fragment_stage.interface, }); try section.emit(.OpExecutionMode, .{ .entry_point = fragment_stage.id, .mode = .OriginUpperLeft, }); } } fn emitFn(spv: *SpirV, inst_idx: InstIndex) error{OutOfMemory}!IdRef { const inst = spv.air.getInst(inst_idx).@"fn"; var section = Section{ .allocator = spv.allocator }; var params_section = Section{ .allocator = spv.allocator }; defer { section.deinit(); params_section.deinit(); } const fn_id = spv.allocId(); const raw_return_type_id = blk: { if (inst.return_type == .none) { break :blk try spv.resolve(.void_type); } else { break :blk try spv.emitType(inst.return_type); } }; const return_type_id = blk: { if (inst.stage != .none) { break :blk try spv.resolve(.void_type); } else { break :blk raw_return_type_id; } }; const name_slice = 
spv.air.getStr(inst.name); try spv.debugName(fn_id, name_slice); var interface = std.ArrayList(IdRef).init(spv.allocator); errdefer interface.deinit(); var store_return: StoreReturn = undefined; if (inst.stage != .none and inst.return_type != .none) { // TODO: eliminate duplicate code if (spv.air.getInst(inst.return_type) == .@"struct") { const struct_members = spv.air.refToList(spv.air.getInst(inst.return_type).@"struct".members); var store_returns = try std.ArrayList(StoreReturn.Item).initCapacity(spv.allocator, struct_members.len); defer store_returns.deinit(); for (struct_members) |member_index| { const return_var_id = spv.allocId(); const member = spv.air.getInst(member_index).struct_member; const member_type_id = try spv.emitType(member.type); const return_var_name_slice = try std.mem.concat( spv.allocator, u8, &.{ name_slice, spv.air.getStr(member.name), "_return_output" }, ); defer spv.allocator.free(return_var_name_slice); try spv.debugName(return_var_id, return_var_name_slice); const return_var_type_id = try spv.resolve(.{ .ptr_type = .{ .storage_class = .Output, .elem_type = member_type_id, } }); try spv.global_section.emit(.OpVariable, .{ .id_result_type = return_var_type_id, .id_result = return_var_id, .storage_class = .Output, }); if (member.builtin) |builtin| { try spv.annotations_section.emit(.OpDecorate, .{ .target = return_var_id, .decoration = .{ .BuiltIn = .{ .built_in = spirvBuiltin(builtin, inst.stage) } }, }); } if (member.location) |location| { try spv.annotations_section.emit(.OpDecorate, .{ .target = return_var_id, .decoration = .{ .Location = .{ .location = location } }, }); } try interface.append(return_var_id); try store_returns.append(.{ .ptr = return_var_id, .type = member_type_id }); } store_return = .{ .many = try store_returns.toOwnedSlice(), }; } else { const return_var_id = spv.allocId(); const return_var_name_slice = try std.mem.concat( spv.allocator, u8, &.{ name_slice, "_return_output" }, ); defer spv.allocator.free(return_var_name_slice); try spv.debugName(return_var_id, return_var_name_slice); const return_var_type_id = try spv.resolve(.{ .ptr_type = .{ .storage_class = .Output, .elem_type = raw_return_type_id, } }); try spv.global_section.emit(.OpVariable, .{ .id_result_type = return_var_type_id, .id_result = return_var_id, .storage_class = .Output, }); if (inst.return_attrs.builtin) |builtin| { try spv.annotations_section.emit(.OpDecorate, .{ .target = return_var_id, .decoration = .{ .BuiltIn = .{ .built_in = spirvBuiltin(builtin, inst.stage) } }, }); } if (inst.return_attrs.location) |location| { try spv.annotations_section.emit(.OpDecorate, .{ .target = return_var_id, .decoration = .{ .Location = .{ .location = location } }, }); } store_return = .{ .single = return_var_id }; try interface.append(return_var_id); } } else { store_return = .none; } try spv.fn_stack.append(spv.allocator, .{ .store_return = store_return }); var params_type = std.ArrayList(IdRef).init(spv.allocator); defer params_type.deinit(); if (inst.params != .none) { const param_list = spv.air.refToList(inst.params); for (param_list) |param_inst_idx| { const param_inst = spv.air.getInst(param_inst_idx).fn_param; const param_id = spv.allocId(); try spv.debugName(param_id, spv.air.getStr(param_inst.name)); if (inst.stage != .none) { const elem_type_id = try spv.emitType(param_inst.type); const param_type_id = try spv.resolve(.{ .ptr_type = .{ .storage_class = .Input, .elem_type = elem_type_id, } }); const param_var_name_slice = try std.mem.concat( spv.allocator, u8, &.{ name_slice, 
"_", spv.air.getStr(param_inst.name), "_input" }, ); defer spv.allocator.free(param_var_name_slice); try spv.global_section.emit(.OpVariable, .{ .id_result_type = param_type_id, .id_result = param_id, .storage_class = .Input, }); if (param_inst.builtin) |builtin| { try spv.annotations_section.emit(.OpDecorate, .{ .target = param_id, .decoration = .{ .BuiltIn = .{ .built_in = spirvBuiltin(builtin, inst.stage), } }, }); } if (param_inst.location) |location| { try spv.annotations_section.emit(.OpDecorate, .{ .target = param_id, .decoration = .{ .Location = .{ .location = location } }, }); } try interface.append(param_id); try spv.decl_map.put(spv.allocator, param_inst_idx, .{ .id = param_id, .type_id = elem_type_id, .is_ptr = true, .storage_class = .Input, .faked_struct = false, }); } else { const param_type_id = try spv.emitType(param_inst.type); try params_section.emit(.OpFunctionParameter, .{ .id_result_type = param_type_id, .id_result = param_id, }); try params_type.append(param_type_id); try spv.decl_map.put(spv.allocator, param_inst_idx, .{ .id = param_id, .type_id = param_type_id, .is_ptr = false, .storage_class = .Function, .faked_struct = false, }); } } } const fn_type_id = try spv.resolve(.{ .fn_type = .{ .return_type = return_type_id, .params_type = params_type.items, }, }); try section.emit(.OpFunction, .{ .id_result_type = return_type_id, .id_result = fn_id, .function_control = .{ .Const = inst.is_const }, .function_type = fn_type_id, }); try section.append(params_section); const body_id = spv.allocId(); const body = spv.air.getInst(inst.block).block; try section.emit(.OpLabel, .{ .id_result = body_id }); if (body != .none) { try spv.emitFnVars(&section, body); } if (body != .none) { try spv.emitBlock(&section, body); if (spv.fn_stack.getLast().discard_branch) |discard_branch| { try section.emit(.OpLabel, .{ .id_result = discard_branch }); try section.emit(.OpKill, {}); } const statements = spv.air.refToList(body); if (spv.air.getInst(statements[statements.len - 1]) != .@"return") { try spv.emitReturn(&section, .none); } } else { try spv.emitReturn(&section, .none); } _ = spv.fn_stack.pop(); try section.emit(.OpFunctionEnd, {}); switch (inst.stage) { .none => {}, .compute => |compute| spv.compute_stage = .{ .id = fn_id, .name = name_slice, .interface = try interface.toOwnedSlice(), .workgroup_size = .{ .x = blk: { const int = spv.air.getInst(compute.x).int; const value = spv.air.getValue(Inst.Int.Value, int.value.?); break :blk @intCast(value.literal); }, .y = blk: { if (compute.y == .none) break :blk 1; const int = spv.air.getInst(compute.y).int; const value = spv.air.getValue(Inst.Int.Value, int.value.?); break :blk @intCast(value.literal); }, .z = blk: { if (compute.y == .none) break :blk 1; const int = spv.air.getInst(compute.z).int; const value = spv.air.getValue(Inst.Int.Value, int.value.?); break :blk @intCast(value.literal); }, }, }, .vertex => spv.vertex_stage = .{ .id = fn_id, .name = name_slice, .interface = try interface.toOwnedSlice(), }, .fragment => spv.fragment_stage = .{ .id = fn_id, .name = name_slice, .interface = try interface.toOwnedSlice(), }, } try spv.main_section.append(section); try spv.decl_map.put(spv.allocator, inst_idx, .{ .id = fn_id, .type_id = fn_type_id, .is_ptr = false, .storage_class = .Function, // TODO .faked_struct = false, }); return fn_id; } fn emitFnVars(spv: *SpirV, section: *Section, statements: RefIndex) !void { if (statements == .none) return; const list = spv.air.refToList(statements); for (list) |statement_idx| { switch 
(spv.air.getInst(statement_idx)) { .@"var" => _ = try spv.emitVarProto(section, statement_idx), .@"const" => try spv.emitFnConstProto(section, statement_idx), .block => |block| try spv.emitFnVars(section, block), .@"if" => { var if_idx = statement_idx; while (true) { const @"if" = spv.air.getInst(if_idx).@"if"; const if_body = spv.air.getInst(@"if".body).block; try spv.emitFnVars(section, if_body); if (@"if".@"else" != .none) { switch (spv.air.getInst(@"if".@"else")) { .@"if" => { if_idx = @"if".@"else"; continue; }, .block => |block| return spv.emitFnVars(section, block), else => unreachable, } } break; } }, .@"while" => |@"while"| if (@"while".body != .none) { try spv.emitFnVars(section, spv.air.getInst(@"while".body).block); }, .continuing => |continuing| if (continuing != .none) { try spv.emitFnVars(section, spv.air.getInst(continuing).block); }, .@"switch" => |@"switch"| { const switch_cases_list = spv.air.refToList(@"switch".cases_list); for (switch_cases_list) |switch_case_idx| { const switch_case = spv.air.getInst(switch_case_idx).switch_case; try spv.emitFnVars(section, spv.air.getInst(switch_case.body).block); } }, .@"for" => |@"for"| { _ = try spv.emitVarProto(section, @"for".init); if (@"for".body != .none) { try spv.emitFnVars(section, spv.air.getInst(@"for".body).block); } }, else => {}, } } } fn emitVarProto(spv: *SpirV, section: *Section, inst_idx: InstIndex) !IdRef { if (spv.decl_map.get(inst_idx)) |decl| return decl.id; const inst = spv.air.getInst(inst_idx).@"var"; const id = spv.allocId(); try spv.debugName(id, spv.air.getStr(inst.name)); const type_inst = spv.air.getInst(inst.type); const type_id = try spv.emitType(inst.type); const storage_class = storageClassFromAddrSpace(inst.addr_space); if (inst.binding != .none) { const binding = spv.air.resolveConstExpr(inst.binding).?.int; try spv.annotations_section.emit(.OpDecorate, .{ .target = id, .decoration = spec.Decoration.Extended{ .Binding = .{ .binding_point = @intCast(binding) }, }, }); } if (inst.group != .none) { const group = spv.air.resolveConstExpr(inst.group).?.int; try spv.annotations_section.emit(.OpDecorate, .{ .target = id, .decoration = spec.Decoration.Extended{ .DescriptorSet = .{ .descriptor_set = @intCast(group) }, }, }); } var faked_struct = false; const ptr_type_id = if (inst.addr_space == .uniform or inst.addr_space == .storage) blk: { // zig fmt: off // TODO faked_struct = spv.air.getInst(inst.type) != .@"struct"; // zig fmt: on const struct_type_id = if (faked_struct) sti: { const struct_type_id = spv.allocId(); try spv.global_section.emit(.OpTypeStruct, .{ .id_result = struct_type_id, .id_ref = &.{type_id}, }); break :sti struct_type_id; } else type_id; if (spv.decorated.get(type_id) == null) { try spv.annotations_section.emit(.OpDecorate, .{ .target = struct_type_id, .decoration = .Block, }); if (!faked_struct) { try spv.decorateStruct(inst.type); for (spv.air.refToList(spv.air.getInst(inst.type).@"struct".members)) |member| { const member_type = spv.air.getInst(member).struct_member.type; if (spv.air.getInst(member_type) == .array) { try spv.annotations_section.emit(.OpDecorate, .{ .target = try spv.emitType(member_type), .decoration = .{ .ArrayStride = .{ .array_stride = spv.getStride(member_type, true), } }, }); } } } else { try spv.annotations_section.emit(.OpMemberDecorate, .{ .structure_type = struct_type_id, .member = 0, .decoration = .{ .Offset = .{ .byte_offset = 0 } }, }); switch (spv.air.getInst(inst.type)) { .array => { const arr_ty = spv.air.getInst(inst.type).array; if 
(spv.air.getInst(arr_ty.elem_type) == .@"struct") { try spv.decorateStruct(arr_ty.elem_type); } try spv.annotations_section.emit(.OpDecorate, .{ .target = type_id, .decoration = .{ .ArrayStride = .{ .array_stride = spv.getStride(inst.type, true), } }, }); }, .matrix => { try spv.annotations_section.emit(.OpMemberDecorate, .{ .structure_type = struct_type_id, .member = 0, .decoration = .ColMajor, }); try spv.annotations_section.emit(.OpMemberDecorate, .{ .structure_type = struct_type_id, .member = 0, .decoration = .{ .MatrixStride = .{ .matrix_stride = spv.getStride(inst.type, true), } }, }); }, else => {}, } } switch (inst.addr_space) { .uniform => if (!faked_struct) { try spv.emitStructStride(inst.type, type_id); try spv.annotations_section.emit(.OpDecorate, .{ .target = id, .decoration = .NonWritable, }); }, .storage => { try spv.emitStructStride(inst.type, type_id); if (inst.access_mode == .read) { try spv.annotations_section.emit(.OpDecorate, .{ .target = id, .decoration = .NonWritable, }); } }, else => {}, } try spv.decorated.put(spv.allocator, type_id, {}); } break :blk try spv.resolve(.{ .ptr_type = .{ .elem_type = struct_type_id, .storage_class = storage_class, } }); } else blk: { break :blk try spv.resolve(.{ .ptr_type = .{ .elem_type = type_id, .storage_class = storage_class, } }); }; const initializer = blk: { if (type_inst == .array and type_inst.array.len == .none) break :blk null; if (type_inst == .sampler_type or type_inst == .texture_type) break :blk null; switch (inst.addr_space) { .uniform, .storage, .workgroup => break :blk null, else => {}, } break :blk try spv.resolve(.{ .null = type_id }); }; try section.emit(.OpVariable, .{ .id_result_type = ptr_type_id, .id_result = id, .storage_class = storage_class, .initializer = initializer, }); try spv.decl_map.put(spv.allocator, inst_idx, .{ .id = id, .type_id = type_id, .is_ptr = true, .storage_class = storage_class, .faked_struct = faked_struct, }); return id; } fn emitFnConstProto(spv: *SpirV, section: *Section, inst_idx: InstIndex) !void { const inst = spv.air.getInst(inst_idx).@"const"; const id = spv.allocId(); try spv.debugName(id, spv.air.getStr(inst.name)); const type_id = try spv.emitType(inst.type); const ptr_type_id = try spv.resolve(.{ .ptr_type = .{ .elem_type = type_id, .storage_class = .Function, } }); try section.emit(.OpVariable, .{ .id_result_type = ptr_type_id, .id_result = id, .storage_class = .Function, .initializer = null, }); try spv.decl_map.put(spv.allocator, inst_idx, .{ .id = id, .type_id = type_id, .is_ptr = true, .storage_class = .Function, .faked_struct = false, }); } fn decorateStruct(spv: *SpirV, inst: InstIndex) !void { const id = try spv.emitType(inst); var offset: u32 = 0; const members = spv.air.refToList(spv.air.getInst(inst).@"struct".members); for (members, 0..) 
|member, i| { const member_inst = spv.air.getInst(member).struct_member; switch (spv.air.getInst(member_inst.type)) { .@"struct" => try spv.decorateStruct(member_inst.type), .array => |arr| if (spv.air.getInst(arr.elem_type) == .@"struct") { try spv.decorateStruct(arr.elem_type); }, else => {}, } try spv.annotations_section.emit(.OpMemberDecorate, .{ .structure_type = id, .member = @intCast(i), .decoration = .{ .Offset = .{ .byte_offset = offset } }, }); try spv.annotations_section.emit(.OpMemberDecorate, .{ .structure_type = id, .member = @intCast(i), .decoration = .ColMajor, }); if (members.len > 1) offset += spv.getSize(member_inst.type); } } fn emitStructStride(spv: *SpirV, inst: InstIndex, id: IdRef) !void { switch (spv.air.getInst(inst)) { .@"struct" => |@"struct"| { for (spv.air.refToList(@"struct".members), 0..) |member, i| { const member_inst = spv.air.getInst(member).struct_member; switch (spv.air.getInst(member_inst.type)) { .matrix => try spv.annotations_section.emit(.OpMemberDecorate, .{ .structure_type = id, .member = @intCast(i), .decoration = .{ .MatrixStride = .{ .matrix_stride = spv.getStride(member_inst.type, true), } }, }), else => {}, } } }, else => {}, } } fn getStride(spv: *SpirV, inst: InstIndex, direct: bool) u8 { return switch (spv.air.getInst(inst)) { inline .int, .float => |num| num.type.sizeBits() / 8, .array => |arr| spv.getStride(arr.elem_type, false), .vector => |vec| return spv.getStride(vec.elem_type, false) * if (direct) 1 else @as(u8, @intCast(@intFromEnum(vec.size))), .matrix => |mat| return @as(u8, @intCast(@intFromEnum(mat.cols))) * spv.getStride(mat.elem_type, false) * if (direct) 1 else @as(u8, @intCast(@intFromEnum(mat.rows))), .@"struct" => |strct| { var total: u8 = 0; const members = spv.air.refToList(strct.members); for (members) |member| { const member_ty = spv.air.getInst(member).struct_member.type; total += spv.getStride(member_ty, false); } return total; }, else => unreachable, // TODO
}; } fn getSize(spv: *SpirV, inst: InstIndex) u8 { return switch (spv.air.getInst(inst)) { inline .int, .float => |num| num.type.sizeBits() / 8, .array => |arr| return @intCast(spv.air.resolveInt(arr.len).?
* spv.getSize(arr.elem_type)), .vector => |vec| return spv.getSize(vec.elem_type) * @intFromEnum(vec.size), .matrix => |mat| return @as(u8, @intCast(@intFromEnum(mat.cols))) * @intFromEnum(mat.rows) * spv.getSize(mat.elem_type), else => unreachable, // TODO
}; } fn emitConst(spv: *SpirV, section: *Section, inst_idx: InstIndex) !IdRef { if (spv.decl_map.get(inst_idx)) |decl| return decl.id; const inst = spv.air.getInst(inst_idx).@"const"; const id = try spv.emitExpr(section, inst.init); const type_id = try spv.emitType(inst.type); try spv.debugName(id, spv.air.getStr(inst.name)); try spv.decl_map.put(spv.allocator, inst_idx, .{ .id = id, .type_id = type_id, .is_ptr = false, .storage_class = .Function, .faked_struct = false, }); return id; } fn emitType(spv: *SpirV, inst: InstIndex) error{OutOfMemory}!IdRef { return switch (spv.air.getInst(inst)) { .bool => try spv.resolve(.bool_type), .int => |int| try spv.resolve(.{ .int_type = int.type }), .float => |float| try spv.resolve(.{ .float_type = float.type }), .vector => |vector| try spv.resolve(.{ .vector_type = .{ .size = vector.size, .elem_type = try spv.emitType(vector.elem_type), }, }), .matrix => |matrix| try spv.resolve(.{ .matrix_type = .{ .cols = matrix.cols, .elem_type = try spv.resolve(.{ .vector_type = .{ .size = matrix.rows, .elem_type = try spv.emitType(matrix.elem_type), }, }), }, }), .array => |array| try spv.resolve(.{ .array_type = .{ .len = if (array.len != .none) try spv.emitExpr(&spv.global_section, array.len) else null, .elem_type = try spv.emitType(array.elem_type), }, }), .ptr_type => |ptr| try spv.resolve(.{ .ptr_type = .{ .storage_class = storageClassFromAddrSpace(ptr.addr_space), .elem_type = try spv.emitType(ptr.elem_type), }, }), .sampler_type => try spv.resolve(.sampler_type), .texture_type => |texture| { const sampled_type = if (texture.elem_type != .none) try spv.emitType(texture.elem_type) else try spv.resolve(.{ .float_type = .f32 }); return spv.resolve(.{ .texture_type = .{ .sampled_type = sampled_type, .dim = spirvDim(texture.kind), .depth = spirvDepth(texture.kind), .arrayed = spirvArrayed(texture.kind), .multisampled = spirvMultisampled(texture.kind), .sampled = spirvSampled(texture.kind), .image_format = spirvImageFormat(texture.texel_format), } }); }, .atomic_type => |atomic| try spv.emitType(atomic.elem_type), .@"struct" => spv.struct_map.get(inst) orelse try spv.emitStruct(inst), else => std.debug.panic("TODO: implement Air tag {s}", .{@tagName(spv.air.getInst(inst))}), }; } fn emitStruct(spv: *SpirV, inst_idx: InstIndex) !IdRef { const inst = spv.air.getInst(inst_idx).@"struct"; const member_list = spv.air.refToList(inst.members); var members = std.ArrayList(IdRef).init(spv.allocator); defer members.deinit(); try members.ensureTotalCapacityPrecise(member_list.len); const id = spv.allocId(); try spv.debugName(id, spv.air.getStr(inst.name)); for (member_list, 0..)
|member_inst_idx, i| { const member_inst = spv.air.getInst(member_inst_idx).struct_member; const member = try spv.emitType(member_inst.type); try spv.debugMemberName(id, i, spv.air.getStr(member_inst.name)); members.appendAssumeCapacity(member); } try spv.global_section.emit(.OpTypeStruct, .{ .id_result = id, .id_ref = members.items, }); try spv.struct_map.put(spv.allocator, inst_idx, id); return id; } fn emitStatement(spv: *SpirV, section: *Section, inst_idx: InstIndex) error{OutOfMemory}!void { switch (spv.air.getInst(inst_idx)) { inline .@"var", .@"const" => |@"var"| { const var_id = spv.decl_map.get(inst_idx).?.id; if (@"var".init != .none) { try section.emit(.OpStore, .{ .pointer = var_id, .object = try spv.emitExpr(section, @"var".init), }); } }, .@"return" => |inst| { switch (spv.fn_stack.getLast().store_return) { .none => try spv.emitReturn(section, inst), .single => |store_return| { try section.emit(.OpStore, .{ .pointer = store_return, .object = try spv.emitExpr(section, inst), }); try spv.emitReturn(section, .none); }, .many => |store_return_items| { // assume the function returns a struct
const base = try spv.emitExpr(section, inst); for (store_return_items, 0..) |store_item, i| { const id = spv.allocId(); try section.emit(.OpCompositeExtract, .{ .id_result_type = store_item.type, .id_result = id, .composite = base, .indexes = &[_]u32{@intCast(i)}, }); try section.emit(.OpStore, .{ .pointer = store_item.ptr, .object = id, }); } try spv.emitReturn(section, .none); spv.allocator.free(store_return_items); }, } }, .call => |inst| _ = try spv.emitCall(section, inst), .@"if" => |@"if"| try spv.emitIf(section, @"if"), .@"for" => |@"for"| try spv.emitFor(section, @"for"), .@"while" => |@"while"| try spv.emitWhile(section, @"while"), .loop => |loop| try spv.emitLoop(section, loop), .@"break" => try spv.emitBreak(section), .@"continue" => try spv.emitContinue(section), .assign => |assign| try spv.emitAssign(section, assign), .block => |block| if (block != .none) try spv.emitBlock(section, block), .nil_intrinsic => |ni| try spv.emitNilIntrinsic(section, ni), .texture_store => |ts| try spv.emitTextureStore(section, ts), .discard => try spv.emitDiscard(section), else => std.debug.panic("TODO: implement Air tag {s}", .{@tagName(spv.air.getInst(inst_idx))}), } } fn emitBlock(spv: *SpirV, section: *Section, block: RefIndex) !void { const parent_block = spv.current_block; spv.current_block = block; for (spv.air.refToList(block)) |statement| { try spv.emitStatement(section, statement); } spv.current_block = parent_block; } fn emitIf(spv: *SpirV, section: *Section, inst: Inst.If) !void { const if_label = spv.allocId(); const true_label = spv.allocId(); const false_label = spv.allocId(); const merge_label = spv.allocId(); try section.emit(.OpBranch, .{ .target_label = if_label }); try section.emit(.OpLabel, .{ .id_result = if_label }); const cond = try spv.emitExpr(section, inst.cond); try section.emit(.OpSelectionMerge, .{ .merge_block = merge_label, .selection_control = .{} }); try section.emit(.OpBranchConditional, .{ .condition = cond, .true_label = true_label, .false_label = false_label, }); try section.emit(.OpLabel, .{ .id_result = true_label }); if (inst.body != .none) { const body = spv.air.getInst(inst.body).block; try spv.emitBlock(section, body); if (spv.branched.get(body) == null) { try section.emit(.OpBranch, .{ .target_label = merge_label }); } } else { try section.emit(.OpBranch, .{ .target_label = merge_label }); } try section.emit(.OpLabel, .{ .id_result = false_label }); if
(inst.@"else" != .none) { switch (spv.air.getInst(inst.@"else")) { .@"if" => |else_if| { try spv.emitIf(section, else_if); try section.emit(.OpBranch, .{ .target_label = merge_label }); }, .block => |else_body| if (else_body != .none) { try spv.emitBlock(section, else_body); if (spv.branched.get(else_body) == null) { try section.emit(.OpBranch, .{ .target_label = merge_label }); } }, else => unreachable, } } else { try section.emit(.OpBranch, .{ .target_label = merge_label }); } try section.emit(.OpLabel, .{ .id_result = merge_label }); } fn emitFor(spv: *SpirV, section: *Section, inst: Inst.For) !void { const for_label = spv.allocId(); const header_label = spv.allocId(); const true_label = spv.allocId(); const false_label = spv.allocId(); const continue_label = spv.allocId(); const merge_label = spv.allocId(); const parent_loop_merge_label = spv.loop_merge_label; const parent_loop_continue_label = spv.loop_continue_label; spv.loop_merge_label = merge_label; spv.loop_continue_label = continue_label; defer { spv.loop_merge_label = parent_loop_merge_label; spv.loop_continue_label = parent_loop_continue_label; } try spv.emitStatement(section, inst.init); try section.emit(.OpBranch, .{ .target_label = for_label }); try section.emit(.OpLabel, .{ .id_result = for_label }); try section.emit(.OpLoopMerge, .{ .merge_block = merge_label, .continue_target = continue_label, // TODO: this operand must not be 0. otherwise spirv tools will complain .loop_control = .{ .Unroll = true }, }); try section.emit(.OpBranch, .{ .target_label = header_label }); try section.emit(.OpLabel, .{ .id_result = header_label }); const cond = try spv.emitExpr(section, inst.cond); const cond_not = spv.allocId(); try section.emit(.OpLogicalNot, .{ .id_result = cond_not, .id_result_type = try spv.resolve(.bool_type), .operand = cond, }); try section.emit(.OpSelectionMerge, .{ .merge_block = true_label, .selection_control = .{} }); try section.emit(.OpBranchConditional, .{ .condition = cond_not, .true_label = false_label, .false_label = true_label, }); try section.emit(.OpLabel, .{ .id_result = true_label }); if (inst.body != .none) { const body = spv.air.getInst(inst.body).block; try spv.emitBlock(section, body); try section.emit(.OpBranch, .{ .target_label = continue_label }); // if (spv.branched.get(body) == null) { // try section.emit(.OpBranch, .{ .target_label = merge_label }); // } } else { try section.emit(.OpBranch, .{ .target_label = merge_label }); } try section.emit(.OpLabel, .{ .id_result = false_label }); try section.emit(.OpBranch, .{ .target_label = merge_label }); try section.emit(.OpLabel, .{ .id_result = continue_label }); try spv.emitStatement(section, inst.update); try section.emit(.OpBranch, .{ .target_label = for_label }); try section.emit(.OpLabel, .{ .id_result = merge_label }); } fn emitWhile(spv: *SpirV, section: *Section, inst: Inst.While) !void { const while_label = spv.allocId(); const header_label = spv.allocId(); const true_label = spv.allocId(); const false_label = spv.allocId(); const continue_label = spv.allocId(); const merge_label = spv.allocId(); const parent_loop_merge_label = spv.loop_merge_label; const parent_loop_continue_label = spv.loop_continue_label; spv.loop_merge_label = merge_label; spv.loop_continue_label = continue_label; defer { spv.loop_merge_label = parent_loop_merge_label; spv.loop_continue_label = parent_loop_continue_label; } try section.emit(.OpBranch, .{ .target_label = while_label }); try section.emit(.OpLabel, .{ .id_result = while_label }); try 
section.emit(.OpLoopMerge, .{ .merge_block = merge_label, .continue_target = continue_label, // TODO: this operand must not be 0. otherwise spirv tools will complain
.loop_control = .{ .Unroll = true }, }); try section.emit(.OpBranch, .{ .target_label = header_label }); try section.emit(.OpLabel, .{ .id_result = header_label }); const cond = try spv.emitExpr(section, inst.cond); try section.emit(.OpSelectionMerge, .{ .merge_block = true_label, .selection_control = .{} }); try section.emit(.OpBranchConditional, .{ .condition = cond, .true_label = false_label, .false_label = true_label, }); try section.emit(.OpLabel, .{ .id_result = true_label }); if (inst.body != .none) { const body = spv.air.getInst(inst.body).block; try spv.emitBlock(section, body); if (spv.branched.get(body) == null) { try section.emit(.OpBranch, .{ .target_label = merge_label }); } } else { try section.emit(.OpBranch, .{ .target_label = merge_label }); } try section.emit(.OpLabel, .{ .id_result = false_label }); try section.emit(.OpBranch, .{ .target_label = merge_label }); try section.emit(.OpLabel, .{ .id_result = continue_label }); try section.emit(.OpBranch, .{ .target_label = while_label }); try section.emit(.OpLabel, .{ .id_result = merge_label }); } fn emitLoop(spv: *SpirV, section: *Section, body_inst: InstIndex) !void { if (body_inst == .none) return; const loop_label = spv.allocId(); const body_label = spv.allocId(); const continue_label = spv.allocId(); const merge_label = spv.allocId(); const parent_loop_merge_label = spv.loop_merge_label; const parent_loop_continue_label = spv.loop_continue_label; spv.loop_merge_label = merge_label; spv.loop_continue_label = continue_label; defer { spv.loop_merge_label = parent_loop_merge_label; spv.loop_continue_label = parent_loop_continue_label; } try section.emit(.OpBranch, .{ .target_label = loop_label }); try section.emit(.OpLabel, .{ .id_result = loop_label }); try section.emit(.OpLoopMerge, .{ .merge_block = merge_label, .continue_target = continue_label, // TODO: this operand must not be 0. otherwise spirv tools will complain
.loop_control = .{ .Unroll = true }, }); try section.emit(.OpBranch, .{ .target_label = body_label }); try section.emit(.OpLabel, .{ .id_result = body_label }); const body = spv.air.getInst(body_inst).block; try spv.emitBlock(section, body); if (spv.branched.get(body) == null) { try section.emit(.OpBranch, .{ .target_label = continue_label }); } try section.emit(.OpLabel, .{ .id_result = continue_label }); try section.emit(.OpBranch, .{ .target_label = loop_label }); try section.emit(.OpLabel, .{ .id_result = merge_label }); } fn emitBreak(spv: *SpirV, section: *Section) !void { try section.emit(.OpBranch, .{ .target_label = spv.loop_merge_label.? }); try spv.branched.put(spv.allocator, spv.current_block, {}); } fn emitContinue(spv: *SpirV, section: *Section) !void { try section.emit(.OpBranch, .{ .target_label = spv.loop_continue_label.?
}); try spv.branched.put(spv.allocator, spv.current_block, {}); } fn emitAssign(spv: *SpirV, section: *Section, inst: Inst.Assign) !void { const decl = try spv.accessPtr(section, inst.lhs); const expr = blk: { const op: Inst.Binary.Op = switch (inst.mod) { .none => break :blk try spv.emitExpr(section, inst.rhs), .add => .add, .sub => .sub, .mul => .mul, .div => .div, .mod => .mod, .@"and" => .@"and", .@"or" => .@"or", .xor => .xor, .shl => .shl, .shr => .shr, }; break :blk try spv.emitBinaryAir(section, .{ .op = op, .result_type = inst.type, .lhs_type = inst.type, .rhs_type = inst.type, .lhs = inst.lhs, .rhs = inst.rhs, }); }; try section.emit(.OpStore, .{ .pointer = decl.id, .object = expr, }); } fn emitReturn(spv: *SpirV, section: *Section, inst: InstIndex) !void { try spv.branched.put(spv.allocator, spv.current_block, {}); if (inst == .none) return section.emit(.OpReturn, {}); try section.emit(.OpReturnValue, .{ .value = try spv.emitExpr(section, inst) }); } fn emitExpr(spv: *SpirV, section: *Section, inst: InstIndex) error{OutOfMemory}!IdRef { return switch (spv.air.getInst(inst)) { .bool => |boolean| spv.emitBool(section, boolean), .int => |int| spv.emitInt(section, int), .float => |float| spv.emitFloat(section, float), .vector => |vector| spv.emitVector(section, vector), .matrix => |matrix| spv.emitMatrix(section, matrix), .array => |array| spv.emitArray(section, array), .call => |call| spv.emitCall(section, call), .swizzle_access => |swizzle_access| spv.emitSwizzleAccess(section, swizzle_access), .var_ref => |var_ref| { const va = try spv.emitVarAccess(section, var_ref); if (va.is_ptr) { const load_id = spv.allocId(); try section.emit(.OpLoad, .{ .id_result_type = va.type.elem_type, .id_result = load_id, .pointer = va.id, }); return load_id; } else { return va.id; } }, .index_access => |index_access| { const ia = try spv.emitIndexAccess(section, index_access); const load_id = spv.allocId(); try section.emit(.OpLoad, .{ .id_result_type = ia.type.elem_type, .id_result = load_id, .pointer = ia.id, }); return load_id; }, .field_access => |field_access| { const fa = try spv.emitFieldAccess(section, field_access); const load_id = spv.allocId(); try section.emit(.OpLoad, .{ .id_result_type = fa.type.elem_type, .id_result = load_id, .pointer = fa.id, }); return load_id; }, .binary => |bin| spv.emitBinaryAir(section, bin), .unary => |un| spv.emitUnary(section, un), .unary_intrinsic => |un| spv.emitUnaryIntrinsic(section, un), .binary_intrinsic => |bin| spv.emitBinaryIntrinsic(section, bin), .triple_intrinsic => |bin| spv.emitTripleIntrinsic(section, bin), .texture_sample => |ts| spv.emitTextureSample(section, ts), .texture_dimension => |td| spv.emitTextureDimension(section, td), .texture_load => |tl| spv.emitTextureLoad(section, tl), else => std.debug.panic("TODO: implement Air tag {s}", .{@tagName(spv.air.getInst(inst))}), }; } const PtrAccess = struct { id: IdRef, type: Key.PointerType, is_ptr: bool, }; fn emitVarAccess(spv: *SpirV, section: *Section, inst: InstIndex) !PtrAccess { const decl = spv.decl_map.get(inst) orelse blk: { switch (spv.air.getInst(inst)) { .@"const" => _ = try spv.emitConst(&spv.global_section, inst), .@"var" => _ = try spv.emitVarProto(&spv.global_section, inst), else => unreachable, } break :blk spv.decl_map.get(inst).?; }; if (decl.faked_struct) { const id = spv.allocId(); const index_id = try spv.resolve(.{ .int = .{ .type = .u32, .value = 0 } }); const type_id = try spv.resolve(.{ .ptr_type = .{ .storage_class = decl.storage_class, .elem_type = decl.type_id, } 
}); try section.emit(.OpAccessChain, .{ .id_result_type = type_id, .id_result = id, .base = decl.id, .indexes = &.{index_id}, }); try spv.runtiem_arr_vars.put(spv.allocator, id, decl.id); return .{ .id = id, .type = .{ .elem_type = decl.type_id, .storage_class = decl.storage_class, }, .is_ptr = true, }; } return .{ .id = decl.id, .type = .{ .elem_type = decl.type_id, .storage_class = decl.storage_class, }, .is_ptr = decl.is_ptr, }; } fn emitSwizzleAccess(spv: *SpirV, section: *Section, inst: Inst.SwizzleAccess) !IdRef { if (spv.air.resolveConstExpr(inst.base)) |_| { const swizzles = try spv.extractSwizzle(section, inst); defer spv.allocator.free(swizzles); if (inst.size == .one) { const single_swizzle = swizzles[0]; return single_swizzle; } return spv.resolve(.{ .vector = .{ .type = try spv.resolve(.{ .vector_type = .{ .elem_type = try spv.emitType(inst.type), .size = @enumFromInt(@intFromEnum(inst.size)), }, }), .value = swizzles, }, }); } const swizzles = try spv.extractSwizzle(section, inst); defer spv.allocator.free(swizzles); if (inst.size == .one) { const single_swizzle = swizzles[0]; return single_swizzle; } const vec_ty = try spv.resolve(.{ .vector_type = .{ .elem_type = try spv.emitType(inst.type), .size = @enumFromInt(@intFromEnum(inst.size)), } }); const id = spv.allocId(); try section.emit(.OpCompositeConstruct, .{ .id_result_type = vec_ty, .id_result = id, .constituents = swizzles, }); return id; } fn extractSwizzle(spv: *SpirV, section: *Section, inst: Inst.SwizzleAccess) ![]const IdRef { const base = try spv.emitExpr(section, inst.base); const swizzles = try spv.allocator.alloc(IdRef, @intFromEnum(inst.size)); for (swizzles, 0..) |*id, i| { id.* = spv.allocId(); try section.emit(.OpCompositeExtract, .{ .id_result_type = try spv.emitType(inst.type), .id_result = id.*, .composite = base, .indexes = &.{@intFromEnum(inst.pattern[i])}, }); } return swizzles; } fn emitIndexAccess(spv: *SpirV, section: *Section, inst: Inst.IndexAccess) !PtrAccess { const type_id = try spv.emitType(inst.type); const base_ptr = try spv.accessPtr(section, inst.base); const indexes: []const IdResult = blk: { const index = try spv.emitExpr(section, inst.index); if (spv.air.getInst(inst.base) == .var_ref) { switch (spv.air.getInst(spv.air.getInst(inst.base).var_ref)) { .@"var" => |vc| { if (spv.air.getInst(vc.type) == .@"struct" and base_ptr.type.storage_class == .StorageBuffer) { const uint0 = try spv.resolve(.{ .int = .{ .type = .u32, .value = 0 } }); break :blk &.{ uint0, index }; } }, else => {}, } } break :blk &.{index}; }; const id = spv.allocId(); try section.emit(.OpAccessChain, .{ .id_result_type = try spv.resolve(.{ .ptr_type = .{ .elem_type = type_id, .storage_class = base_ptr.type.storage_class, } }), .id_result = id, .base = base_ptr.id, .indexes = indexes, }); return .{ .id = id, .type = .{ .elem_type = type_id, .storage_class = base_ptr.type.storage_class, }, .is_ptr = true, }; } fn emitFieldAccess(spv: *SpirV, section: *Section, inst: Inst.FieldAccess) !PtrAccess { const struct_member = spv.air.getInst(inst.field).struct_member; const type_id = try spv.emitType(struct_member.type); const base_decl = try spv.accessPtr(section, inst.base); const id = spv.allocId(); const index_id = try spv.resolve(.{ .int = .{ .type = .u32, .value = struct_member.index, } }); try section.emit(.OpAccessChain, .{ .id_result_type = try spv.resolve(.{ .ptr_type = .{ .elem_type = type_id, .storage_class = base_decl.type.storage_class, }, }), .id_result = id, .base = base_decl.id, .indexes = &.{index_id}, }); if 
(spv.air.getInst(struct_member.type) == .array and spv.air.getInst(struct_member.type).array.len == .none) { try spv.runtiem_arr_vars.put(spv.allocator, id, base_decl.id); } return .{ .id = id, .type = .{ .elem_type = type_id, .storage_class = base_decl.type.storage_class, }, .is_ptr = true, }; } fn emitBinaryAir(spv: *SpirV, section: *Section, binary: Inst.Binary) !IdRef { const result_ty = try spv.emitType(binary.result_type); var lhs = try spv.emitExpr(section, binary.lhs); var rhs = try spv.emitExpr(section, binary.rhs); const lhs_ty = spv.air.getInst(binary.lhs_type); const rhs_ty = spv.air.getInst(binary.rhs_type); return switch (lhs_ty) { .bool => switch (binary.op) { .equal => spv.emitBinary(section, .OpLogicalEqual, result_ty, &lhs, &rhs, lhs_ty, rhs_ty), .not_equal => spv.emitBinary(section, .OpLogicalNotEqual, result_ty, &lhs, &rhs, lhs_ty, rhs_ty), .logical_and => spv.emitBinary(section, .OpLogicalAnd, result_ty, &lhs, &rhs, lhs_ty, rhs_ty), .logical_or => spv.emitBinary(section, .OpLogicalOr, result_ty, &lhs, &rhs, lhs_ty, rhs_ty), else => unreachable, }, .int => |int| switch (binary.op) { .mul => spv.emitBinary(section, .OpIMul, result_ty, &lhs, &rhs, lhs_ty, rhs_ty), .add => spv.emitBinary(section, .OpIAdd, result_ty, &lhs, &rhs, lhs_ty, rhs_ty), .sub => spv.emitBinary(section, .OpISub, result_ty, &lhs, &rhs, lhs_ty, rhs_ty), .shl => spv.emitBinary(section, .OpShiftLeftLogical, result_ty, &lhs, &rhs, lhs_ty, rhs_ty), .shr => spv.emitBinary(section, .OpShiftRightLogical, result_ty, &lhs, &rhs, lhs_ty, rhs_ty), .@"and" => spv.emitBinary(section, .OpBitwiseAnd, result_ty, &lhs, &rhs, lhs_ty, rhs_ty), .@"or" => spv.emitBinary(section, .OpBitwiseOr, result_ty, &lhs, &rhs, lhs_ty, rhs_ty), .xor => spv.emitBinary(section, .OpBitwiseXor, result_ty, &lhs, &rhs, lhs_ty, rhs_ty), .equal => spv.emitBinary(section, .OpIEqual, result_ty, &lhs, &rhs, lhs_ty, rhs_ty), .not_equal => spv.emitBinary(section, .OpINotEqual, result_ty, &lhs, &rhs, lhs_ty, rhs_ty), else => switch (int.type) { .i32 => switch (binary.op) { .div => spv.emitBinary(section, .OpSDiv, result_ty, &lhs, &rhs, lhs_ty, rhs_ty), .mod => spv.emitBinary(section, .OpSMod, result_ty, &lhs, &rhs, lhs_ty, rhs_ty), .less_than => spv.emitBinary(section, .OpSLessThan, result_ty, &lhs, &rhs, lhs_ty, rhs_ty), .less_than_equal => spv.emitBinary(section, .OpSLessThanEqual, result_ty, &lhs, &rhs, lhs_ty, rhs_ty), .greater_than => spv.emitBinary(section, .OpSGreaterThan, result_ty, &lhs, &rhs, lhs_ty, rhs_ty), .greater_than_equal => spv.emitBinary(section, .OpSGreaterThanEqual, result_ty, &lhs, &rhs, lhs_ty, rhs_ty), else => unreachable, }, .u32 => switch (binary.op) { .div => spv.emitBinary(section, .OpUDiv, result_ty, &lhs, &rhs, lhs_ty, rhs_ty), .mod => spv.emitBinary(section, .OpUMod, result_ty, &lhs, &rhs, lhs_ty, rhs_ty), .less_than => spv.emitBinary(section, .OpULessThan, result_ty, &lhs, &rhs, lhs_ty, rhs_ty), .less_than_equal => spv.emitBinary(section, .OpULessThanEqual, result_ty, &lhs, &rhs, lhs_ty, rhs_ty), .greater_than => spv.emitBinary(section, .OpUGreaterThan, result_ty, &lhs, &rhs, lhs_ty, rhs_ty), .greater_than_equal => spv.emitBinary(section, .OpUGreaterThanEqual, result_ty, &lhs, &rhs, lhs_ty, rhs_ty), else => unreachable, }, }, }, .float => switch (binary.op) { .mul => spv.emitBinary(section, .OpFMul, result_ty, &lhs, &rhs, lhs_ty, rhs_ty), .div => spv.emitBinary(section, .OpFDiv, result_ty, &lhs, &rhs, lhs_ty, rhs_ty), .mod => spv.emitBinary(section, .OpFMod, result_ty, &lhs, &rhs, lhs_ty, rhs_ty), .add => 
spv.emitBinary(section, .OpFAdd, result_ty, &lhs, &rhs, lhs_ty, rhs_ty), .sub => spv.emitBinary(section, .OpFSub, result_ty, &lhs, &rhs, lhs_ty, rhs_ty), .equal => spv.emitBinary(section, .OpFOrdEqual, result_ty, &lhs, &rhs, lhs_ty, rhs_ty), .not_equal => spv.emitBinary(section, .OpFOrdNotEqual, result_ty, &lhs, &rhs, lhs_ty, rhs_ty), .less_than => spv.emitBinary(section, .OpFOrdLessThan, result_ty, &lhs, &rhs, lhs_ty, rhs_ty), .greater_than => spv.emitBinary(section, .OpFOrdGreaterThan, result_ty, &lhs, &rhs, lhs_ty, rhs_ty), .greater_than_equal => spv.emitBinary(section, .OpFOrdGreaterThanEqual, result_ty, &lhs, &rhs, lhs_ty, rhs_ty), else => unreachable, }, .vector => |vec| switch (spv.air.getInst(vec.elem_type)) { .int => |int| switch (binary.op) { .mul => switch (rhs_ty) { .matrix => { const id = spv.allocId(); try section.emit(.OpVectorTimesMatrix, .{ .id_result = id, .id_result_type = result_ty, .vector = lhs, .matrix = rhs, }); return id; }, else => spv.emitBinary(section, .OpIMul, result_ty, &lhs, &rhs, lhs_ty, rhs_ty), }, .add => spv.emitBinary(section, .OpIAdd, result_ty, &lhs, &rhs, lhs_ty, rhs_ty), .sub => spv.emitBinary(section, .OpISub, result_ty, &lhs, &rhs, lhs_ty, rhs_ty), .shl => spv.emitBinary(section, .OpShiftLeftLogical, result_ty, &lhs, &rhs, lhs_ty, rhs_ty), .shr => spv.emitBinary(section, .OpShiftRightLogical, result_ty, &lhs, &rhs, lhs_ty, rhs_ty), .@"and" => spv.emitBinary(section, .OpBitwiseAnd, result_ty, &lhs, &rhs, lhs_ty, rhs_ty), .@"or" => spv.emitBinary(section, .OpBitwiseOr, result_ty, &lhs, &rhs, lhs_ty, rhs_ty), .xor => spv.emitBinary(section, .OpBitwiseXor, result_ty, &lhs, &rhs, lhs_ty, rhs_ty), .equal => spv.emitBinary(section, .OpIEqual, result_ty, &lhs, &rhs, lhs_ty, rhs_ty), .not_equal => spv.emitBinary(section, .OpINotEqual, result_ty, &lhs, &rhs, lhs_ty, rhs_ty), else => switch (int.type) { .i32 => switch (binary.op) { .div => spv.emitBinary(section, .OpSDiv, result_ty, &lhs, &rhs, lhs_ty, rhs_ty), .mod => spv.emitBinary(section, .OpSMod, result_ty, &lhs, &rhs, lhs_ty, rhs_ty), .less_than => spv.emitBinary(section, .OpSLessThan, result_ty, &lhs, &rhs, lhs_ty, rhs_ty), .greater_than => spv.emitBinary(section, .OpSGreaterThan, result_ty, &lhs, &rhs, lhs_ty, rhs_ty), .greater_than_equal => spv.emitBinary(section, .OpSGreaterThanEqual, result_ty, &lhs, &rhs, lhs_ty, rhs_ty), else => unreachable, }, .u32 => switch (binary.op) { .div => spv.emitBinary(section, .OpUDiv, result_ty, &lhs, &rhs, lhs_ty, rhs_ty), .mod => spv.emitBinary(section, .OpUMod, result_ty, &lhs, &rhs, lhs_ty, rhs_ty), .less_than => spv.emitBinary(section, .OpULessThan, result_ty, &lhs, &rhs, lhs_ty, rhs_ty), .greater_than => spv.emitBinary(section, .OpUGreaterThan, result_ty, &lhs, &rhs, lhs_ty, rhs_ty), .greater_than_equal => spv.emitBinary(section, .OpUGreaterThanEqual, result_ty, &lhs, &rhs, lhs_ty, rhs_ty), else => unreachable, }, }, }, .float => switch (binary.op) { .mul => switch (rhs_ty) { .matrix => { const id = spv.allocId(); try section.emit(.OpVectorTimesMatrix, .{ .id_result = id, .id_result_type = result_ty, .vector = lhs, .matrix = rhs, }); return id; }, else => spv.emitBinary(section, .OpFMul, result_ty, &lhs, &rhs, lhs_ty, rhs_ty), }, .div => spv.emitBinary(section, .OpFDiv, result_ty, &lhs, &rhs, lhs_ty, rhs_ty), .mod => spv.emitBinary(section, .OpFMod, result_ty, &lhs, &rhs, lhs_ty, rhs_ty), .add => spv.emitBinary(section, .OpFAdd, result_ty, &lhs, &rhs, lhs_ty, rhs_ty), .sub => spv.emitBinary(section, .OpFSub, result_ty, &lhs, &rhs, lhs_ty, rhs_ty), .equal => 
spv.emitBinary(section, .OpFOrdEqual, result_ty, &lhs, &rhs, lhs_ty, rhs_ty), .not_equal => spv.emitBinary(section, .OpFOrdNotEqual, result_ty, &lhs, &rhs, lhs_ty, rhs_ty), .less_than => spv.emitBinary(section, .OpFOrdLessThan, result_ty, &lhs, &rhs, lhs_ty, rhs_ty), .greater_than => spv.emitBinary(section, .OpFOrdGreaterThan, result_ty, &lhs, &rhs, lhs_ty, rhs_ty), .greater_than_equal => spv.emitBinary(section, .OpFOrdGreaterThanEqual, result_ty, &lhs, &rhs, lhs_ty, rhs_ty), else => unreachable, }, else => unreachable, }, .matrix => switch (rhs_ty) { .matrix => switch (binary.op) { .mul => { const id = spv.allocId(); try section.emit(.OpMatrixTimesMatrix, .{ .id_result = id, .id_result_type = result_ty, .leftmatrix = lhs, .rightmatrix = rhs, }); return id; }, else => unreachable, }, .vector => switch (binary.op) { .mul => { const id = spv.allocId(); try section.emit(.OpMatrixTimesVector, .{ .id_result = id, .id_result_type = result_ty, .matrix = lhs, .vector = rhs, }); return id; }, else => unreachable, }, else => unreachable, }, else => unreachable, }; } fn emitBinary( spv: *SpirV, section: *Section, comptime op: Opcode, result_ty: IdRef, lhs: *IdRef, rhs: *IdRef, lhs_ty: Inst, rhs_ty: Inst, ) !IdRef { const id = spv.allocId(); if (lhs_ty == .vector and rhs_ty != .vector) { var constituents: [4]IdRef = undefined; @memset(constituents[0..@intFromEnum(lhs_ty.vector.size)], rhs.*); rhs.* = spv.allocId(); try section.emit(.OpCompositeConstruct, .{ .id_result_type = result_ty, .id_result = rhs.*, .constituents = constituents[0..@intFromEnum(lhs_ty.vector.size)], }); } if (rhs_ty == .vector and lhs_ty != .vector) { var constituents: [4]IdRef = undefined; @memset(constituents[0..@intFromEnum(rhs_ty.vector.size)], lhs.*); lhs.* = spv.allocId(); try section.emit(.OpCompositeConstruct, .{ .id_result_type = result_ty, .id_result = lhs.*, .constituents = constituents[0..@intFromEnum(rhs_ty.vector.size)], }); } switch (op) { .OpShiftLeftLogical, .OpShiftRightLogical => { try section.emit(op, .{ .id_result = id, .id_result_type = result_ty, .base = lhs.*, .shift = rhs.*, }); }, else => { try section.emit(op, .{ .id_result = id, .id_result_type = result_ty, .operand_1 = lhs.*, .operand_2 = rhs.*, }); }, } return id; } fn emitUnary(spv: *SpirV, section: *Section, unary: Inst.Unary) !IdRef { switch (unary.op) { .not => { const id = spv.allocId(); const expr = try spv.emitExpr(section, unary.expr); const result_type = try spv.emitType(unary.result_type); try section.emit(.OpNot, .{ .id_result_type = result_type, .id_result = id, .operand = expr, }); return id; }, .negate => { const id = spv.allocId(); const expr = try spv.emitExpr(section, unary.expr); const result_type = try spv.emitType(unary.result_type); switch (spv.air.getInst(unary.result_type)) { .int => try section.emit(.OpSNegate, .{ .id_result_type = result_type, .id_result = id, .operand = expr, }), .float => try section.emit(.OpFNegate, .{ .id_result_type = result_type, .id_result = id, .operand = expr, }), else => unreachable, } return id; }, .addr_of => return (try spv.accessPtr(section, unary.expr)).id, else => unreachable, } } fn emitNilIntrinsic(spv: *SpirV, section: *Section, intr: Inst.NilIntrinsic) !void { switch (intr) { .workgroup_barrier => { const uint2 = try spv.resolve(.{ .int = .{ .type = .u32, .value = 2 } }); const uint264 = try spv.resolve(.{ .int = .{ .type = .u32, .value = 264 } }); try section.emit(.OpControlBarrier, .{ .execution = uint2, .memory = uint2, .semantics = uint264, }); }, else => std.debug.panic("TODO: implement Nil Intrinsic {s}", .{@tagName(intr)}),
} } fn emitUnaryIntrinsic(spv: *SpirV, section: *Section, unary: Inst.UnaryIntrinsic) !IdRef { const id = spv.allocId(); const expr = try spv.emitExpr(section, unary.expr); const result_type = try spv.emitType(unary.result_type); const result_type_inst = switch (spv.air.getInst(unary.result_type)) { .vector => |vec| spv.air.getInst(vec.elem_type), else => |ty| ty, }; const instruction: Word = switch (unary.op) { .array_length => { const expr_parent_ptr = spv.runtiem_arr_vars.get(expr) orelse expr; try section.emit(.OpArrayLength, .{ .id_result_type = result_type, .id_result = id, .structure = expr_parent_ptr, .array_member = 0, // TODO
}); return id; }, .radians => 11, .sin => 13, .cos => 14, .tan => 15, .normalize => 69, .length => 66, .floor => 8, .abs => switch (result_type_inst) { .float => 4, .int => 5, else => unreachable, }, .all => { try section.emit(.OpAll, .{ .id_result = id, .id_result_type = result_type, .vector = expr, }); return id; }, .dpdx => { try section.emit(.OpDPdx, .{ .id_result = id, .id_result_type = result_type, .p = expr, }); return id; }, .dpdy => { try section.emit(.OpDPdy, .{ .id_result = id, .id_result_type = result_type, .p = expr, }); return id; }, .fwidth => { try section.emit(.OpFwidth, .{ .id_result = id, .id_result_type = result_type, .p = expr, }); return id; }, else => std.debug.panic("TODO: implement Unary Intrinsic {s}", .{@tagName(unary.op)}), }; try section.emit(.OpExtInst, .{ .id_result_type = result_type, .id_result = id, .set = spv.importExtInst(), .instruction = .{ .inst = instruction }, .id_ref_4 = &.{expr}, }); return id; } fn emitBinaryIntrinsic(spv: *SpirV, section: *Section, bin: Inst.BinaryIntrinsic) !IdRef { const id = spv.allocId(); const lhs = try spv.emitExpr(section, bin.lhs); const rhs = try spv.emitExpr(section, bin.rhs); const result_type = try spv.emitType(bin.result_type); const result_type_inst = switch (spv.air.getInst(bin.result_type)) { .vector => |vec| spv.air.getInst(vec.elem_type), else => |ty| ty, }; const instruction: Word = switch (bin.op) { .min => switch (result_type_inst) { .float => 37, .int => |int| switch (int.type) { .u32 => 38, .i32 => 39, }, else => unreachable, }, .max => switch (result_type_inst) { .float => 40, .int => |int| switch (int.type) { .u32 => 41, .i32 => 42, }, else => unreachable, }, .atan2 => 25, .distance => 67, .dot => { try section.emit(.OpDot, .{ .id_result = id, .id_result_type = result_type, .vector_1 = lhs, .vector_2 = rhs, }); return id; }, .pow => 26, .step => 48, }; try section.emit(.OpExtInst, .{ .id_result_type = result_type, .id_result = id, .set = spv.importExtInst(), .instruction = .{ .inst = instruction }, .id_ref_4 = &.{ lhs, rhs }, }); return id; } fn emitTripleIntrinsic(spv: *SpirV, section: *Section, triple: Inst.TripleIntrinsic) !IdRef { const id = spv.allocId(); const a1 = try spv.emitExpr(section, triple.a1); const a2 = try spv.emitExpr(section, triple.a2); var a3 = try spv.emitExpr(section, triple.a3); const result_type = try spv.emitType(triple.result_type); const result_type_inst = switch (spv.air.getInst(triple.result_type)) { .vector => |vec| spv.air.getInst(vec.elem_type), else => |ty| ty, }; const instruction: Word = switch (triple.op) { .mix => 46, .clamp => switch (result_type_inst) { .float => 43, .int => |int| switch (int.type) { .u32 => 44, .i32 => 45, }, else => unreachable, }, .smoothstep => 49, }; if (triple.op == .mix and spv.air.getInst(triple.result_type) == .vector) { const vec_type_inst =
spv.air.getInst(triple.result_type).vector; var constituents = std.BoundedArray(IdRef, 4){}; constituents.appendNTimesAssumeCapacity(a3, @intFromEnum(vec_type_inst.size)); a3 = spv.allocId(); try section.emit(.OpCompositeConstruct, .{ .id_result_type = result_type, .id_result = a3, .constituents = constituents.slice(), }); } try section.emit(.OpExtInst, .{ .id_result_type = result_type, .id_result = id, .set = spv.importExtInst(), .instruction = .{ .inst = instruction }, .id_ref_4 = &.{ a1, a2, a3 }, }); return id; } fn emitTextureSample(spv: *SpirV, section: *Section, ts: Inst.TextureSample) !IdRef { const image_id = spv.allocId(); const loaded_image_id = spv.allocId(); const texture = try spv.emitExpr(section, ts.texture); const sampler = try spv.emitExpr(section, ts.sampler); const coords = try spv.emitExpr(section, ts.coords); const result_type = try spv.emitType(ts.result_type); const texture_type = try spv.emitType(ts.texture_type); const sampled_image_ty = try spv.resolve(.{ .sampled_image_type = texture_type }); const extract_result = spv.air.getInst(ts.result_type) != .vector; var final_result_type = result_type; try section.emit(.OpSampledImage, .{ .id_result_type = sampled_image_ty, .id_result = image_id, .image = texture, .sampler = sampler, }); if (extract_result) { final_result_type = try spv.resolve(.{ .vector_type = .{ .elem_type = result_type, .size = .four, }, }); } if (ts.operands != .none) { const image_operands: spec.ImageOperands.Extended = switch (ts.operands) { .none => unreachable, .level => |level| .{ .Lod = .{ .id_ref = try spv.emitExpr(section, level), } }, .grad => |grad| .{ .Grad = .{ .id_ref_0 = try spv.emitExpr(section, grad.dpdx), .id_ref_1 = try spv.emitExpr(section, grad.dpdy), } }, }; try section.emit(.OpImageSampleExplicitLod, .{ .id_result_type = final_result_type, .id_result = loaded_image_id, .sampled_image = image_id, .coordinate = coords, .image_operands = image_operands, }); } else { try section.emit(.OpImageSampleImplicitLod, .{ .id_result_type = final_result_type, .id_result = loaded_image_id, .sampled_image = image_id, .coordinate = coords, }); } if (extract_result) { const extract_id = spv.allocId(); try section.emit(.OpCompositeExtract, .{ .id_result_type = result_type, .id_result = extract_id, .composite = loaded_image_id, .indexes = &.{0}, }); return extract_id; } return loaded_image_id; } fn emitTextureDimension(spv: *SpirV, section: *Section, ts: Inst.TextureDimension) !IdRef { try spv.capabilities.append(spv.allocator, .ImageQuery); const id = spv.allocId(); const texture = try spv.emitExpr(section, ts.texture); const level = if (ts.level != .none) try spv.emitExpr(section, ts.level) else try spv.resolve(.{ .int = .{ .type = .u32, .value = 0 } }); const result_type = try spv.emitType(ts.result_type); try section.emit(.OpImageQuerySizeLod, .{ .id_result_type = result_type, .id_result = id, .image = texture, .level_of_detail = level, }); return id; } fn emitTextureLoad(spv: *SpirV, section: *Section, ts: Inst.TextureLoad) !IdRef { const id = spv.allocId(); const texture = try spv.emitExpr(section, ts.texture); const level = try spv.emitExpr(section, ts.level); const coords = try spv.emitExpr(section, ts.coords); const result_type = try spv.emitType(ts.result_type); const extract_result = spv.air.getInst(ts.result_type) != .vector; var final_result_type = result_type; if (extract_result) { final_result_type = try spv.resolve(.{ .vector_type = .{ .elem_type = result_type, .size = .four, }, }); } try section.emit(.OpImageFetch, .{ 
.id_result_type = final_result_type, .id_result = id, .image = texture, .coordinate = coords, .image_operands = .{ .Lod = .{ .id_ref = level } }, }); if (extract_result) { const extract_id = spv.allocId(); try section.emit(.OpCompositeExtract, .{ .id_result_type = result_type, .id_result = extract_id, .composite = id, .indexes = &.{0}, }); return extract_id; } return id; } fn emitTextureStore(spv: *SpirV, section: *Section, ts: Inst.TextureStore) !void { const texture = try spv.emitExpr(section, ts.texture); const coords = try spv.emitExpr(section, ts.coords); const value = try spv.emitExpr(section, ts.value); try section.emit(.OpImageWrite, .{ .image = texture, .coordinate = coords, .texel = value, }); } fn emitDiscard(spv: *SpirV, section: *Section) !void { const id = spv.allocId(); spv.fn_stack.items[spv.fn_stack.items.len - 1].discard_branch = id; try section.emit(.OpBranch, .{ .target_label = id }); try section.emit(.OpLabel, .{ .id_result = spv.allocId() }); } fn importExtInst(spv: *SpirV) IdRef { if (spv.extended_instructions) |id| return id; spv.extended_instructions = spv.allocId(); return spv.extended_instructions.?; } fn accessPtr(spv: *SpirV, section: *Section, decl: InstIndex) error{OutOfMemory}!PtrAccess { switch (spv.air.getInst(decl)) { .var_ref => |var_ref| return spv.emitVarAccess(section, var_ref), .index_access => |index_access| return spv.emitIndexAccess(section, index_access), .field_access => |field_access| return spv.emitFieldAccess(section, field_access), .swizzle_access => |swizzle_access| { std.debug.assert(swizzle_access.size == .one); const id = spv.allocId(); const index_id = try spv.resolve(.{ .int = .{ .type = .u32, .value = @intFromEnum(swizzle_access.pattern[0]), } }); const type_id = try spv.emitType(swizzle_access.type); const base = try spv.accessPtr(section, swizzle_access.base); const ptr_type_id = try spv.resolve(.{ .ptr_type = .{ .storage_class = base.type.storage_class, .elem_type = type_id, } }); try section.emit(.OpAccessChain, .{ .id_result_type = ptr_type_id, .id_result = id, .base = base.id, .indexes = &.{index_id}, }); return .{ .id = id, .type = .{ .storage_class = base.type.storage_class, .elem_type = type_id }, .is_ptr = true, }; }, else => unreachable, } } fn emitCall(spv: *SpirV, section: *Section, inst: Inst.FnCall) !IdRef { var args = std.ArrayList(IdRef).init(spv.allocator); defer args.deinit(); if (inst.args != .none) { for (spv.air.refToList(inst.args)) |arg_inst_idx| { try args.append(try spv.emitExpr(section, arg_inst_idx)); } } const id = spv.allocId(); const function = if (spv.decl_map.get(inst.@"fn")) |decl| decl.id else try spv.emitFn(inst.@"fn"); try section.emit(.OpFunctionCall, .{ .id_result_type = try spv.emitType(spv.air.getInst(inst.@"fn").@"fn".return_type), .id_result = id, .function = function, .id_ref_3 = args.items, }); return id; } fn emitBool(spv: *SpirV, section: *Section, boolean: Inst.Bool) !IdRef { return switch (boolean.value.?) 
{ .literal => |lit| spv.resolve(.{ .bool = lit }), .cast => |cast| spv.emitBoolCast(section, cast), }; } fn emitInt(spv: *SpirV, section: *Section, int: Inst.Int) !IdRef { return switch (spv.air.getValue(Inst.Int.Value, int.value.?)) { .literal => |lit| spv.resolve(.{ .int = .{ .type = int.type, .value = @bitCast(lit) } }), .cast => |cast| spv.emitIntCast(section, int.type, cast), }; } fn emitFloat(spv: *SpirV, section: *Section, float: Inst.Float) !IdRef { return switch (spv.air.getValue(Inst.Float.Value, float.value.?)) { .literal => |lit| spv.resolve(.{ .float = .{ .type = float.type, .value = @bitCast(lit) } }), .cast => |cast| spv.emitFloatCast(section, float.type, cast), }; } fn emitBoolCast(spv: *SpirV, section: *Section, cast: Inst.ScalarCast) !IdRef { const id = spv.allocId(); const dest_type_id = try spv.resolve(.bool_type); const source_type = spv.air.getInst(cast.type); const value_id = try spv.emitExpr(section, cast.value); switch (source_type) { .int => |int| try section.emit(.OpINotEqual, .{ .id_result_type = dest_type_id, .id_result = id, .operand_1 = try spv.resolve(.{ .null = try spv.resolve(.{ .int_type = int.type }) }), .operand_2 = value_id, }), .float => |float| try section.emit(.OpFUnordNotEqual, .{ .id_result_type = dest_type_id, .id_result = id, .operand_1 = try spv.resolve(.{ .null = try spv.resolve(.{ .float_type = float.type }) }), .operand_2 = value_id, }), else => unreachable, } return id; } fn emitIntCast(spv: *SpirV, section: *Section, dest_type: Inst.Int.Type, cast: Inst.ScalarCast) !IdRef { const id = spv.allocId(); const source_type = spv.air.getInst(cast.type); const dest_type_id = try spv.resolve(.{ .int_type = dest_type }); const value_id = try spv.emitExpr(section, cast.value); switch (dest_type) { .i32 => switch (source_type) { .int => try section.emit(.OpBitcast, .{ .id_result_type = dest_type_id, .id_result = id, .operand = value_id, }), .float => try section.emit(.OpConvertFToS, .{ .id_result_type = dest_type_id, .id_result = id, .float_value = value_id, }), else => unreachable, }, .u32 => switch (source_type) { .int => try section.emit(.OpBitcast, .{ .id_result_type = dest_type_id, .id_result = id, .operand = value_id, }), .float => try section.emit(.OpConvertFToU, .{ .id_result_type = dest_type_id, .id_result = id, .float_value = value_id, }), else => unreachable, }, } return id; } fn emitFloatCast(spv: *SpirV, section: *Section, dest_type: Inst.Float.Type, cast: Inst.ScalarCast) !IdRef { const id = spv.allocId(); const source_type = spv.air.getInst(cast.type); const dest_type_id = try spv.resolve(.{ .float_type = dest_type }); const value_id = try spv.emitExpr(section, cast.value); switch (source_type) { .float => try section.emit(.OpFConvert, .{ .id_result_type = dest_type_id, .id_result = id, .float_value = value_id, }), .int => |int| switch (int.type) { .u32 => try section.emit(.OpConvertUToF, .{ .id_result_type = dest_type_id, .id_result = id, .unsigned_value = value_id, }), .i32 => try section.emit(.OpConvertSToF, .{ .id_result_type = dest_type_id, .id_result = id, .signed_value = value_id, }), }, .bool => { try section.emit(.OpSelect, .{ .id_result_type = dest_type_id, .id_result = id, .condition = value_id, .object_1 = try spv.resolve(.{ .float = .{ .type = dest_type, .value = @bitCast(@as(f32, 1.0)) } }), .object_2 = try spv.resolve(.{ .float = .{ .type = dest_type, .value = 0 } }), }); }, else => unreachable, } return id; } fn emitVector(spv: *SpirV, section: *Section, inst: Inst.Vector) !IdRef { const elem_type_key: Key = switch 
(spv.air.getInst(inst.elem_type)) { .bool => .bool_type, .float => |float| .{ .float_type = float.type }, .int => |int| .{ .int_type = int.type }, else => unreachable, }; const type_id = try spv.resolve(.{ .vector_type = .{ .elem_type = try spv.resolve(elem_type_key), .size = inst.size, }, }); if (inst.value.? == .none) { return spv.resolve(.{ .null = type_id }); } var constituents = std.ArrayList(IdRef).init(spv.allocator); defer constituents.deinit(); try constituents.ensureTotalCapacityPrecise(@intFromEnum(inst.size)); const value = spv.air.getValue(Inst.Vector.Value, inst.value.?); switch (value) { .literal => for (value.literal[0..@intFromEnum(inst.size)]) |elem_inst| { const elem_id = try spv.emitExpr(section, elem_inst); constituents.appendAssumeCapacity(elem_id); }, .cast => |cast| for (cast.value[0..@intFromEnum(inst.size)]) |elem_inst| { const elem_id = switch (elem_type_key) { .float_type => |float| try spv.emitFloatCast(section, float, .{ .type = cast.type, .value = elem_inst }), .int_type => |int| try spv.emitIntCast(section, int, .{ .type = cast.type, .value = elem_inst }), else => unreachable, }; constituents.appendAssumeCapacity(elem_id); }, } const id = spv.allocId(); try section.emit(.OpCompositeConstruct, .{ .id_result_type = type_id, .id_result = id, .constituents = constituents.items, }); return id; } fn emitMatrix(spv: *SpirV, section: *Section, inst: Inst.Matrix) !IdRef { const vec_elem_type_id = try spv.emitType(inst.elem_type); const elem_type_id = try spv.resolve(.{ .vector_type = .{ .elem_type = vec_elem_type_id, .size = inst.rows, }, }); const type_id = try spv.resolve(.{ .matrix_type = .{ .elem_type = elem_type_id, .cols = inst.cols, }, }); if (inst.value.? == .none) { return spv.resolve(.{ .null = type_id }); } var constituents = std.ArrayList(IdRef).init(spv.allocator); defer constituents.deinit(); try constituents.ensureTotalCapacityPrecise(@intFromEnum(inst.cols)); const value = spv.air.getValue(Inst.Matrix.Value, inst.value.?); for (value[0..@intFromEnum(inst.cols)]) |elem_inst| { const elem_id = try spv.emitExpr(section, elem_inst); constituents.appendAssumeCapacity(elem_id); } const id = spv.allocId(); try section.emit(.OpCompositeConstruct, .{ .id_result_type = type_id, .id_result = id, .constituents = constituents.items, }); return id; } fn emitArray(spv: *SpirV, section: *Section, inst: Inst.Array) !IdRef { const len = if (inst.len != .none) try spv.emitExpr(&spv.global_section, inst.len) else null; const type_id = try spv.resolve(.{ .array_type = .{ .elem_type = try spv.emitType(inst.elem_type), .len = len, }, }); if (inst.value.? 
== .none) { return spv.resolve(.{ .null = type_id }); } const value = spv.air.refToList(inst.value.?); var constituents = std.ArrayList(IdRef).init(spv.allocator); defer constituents.deinit(); try constituents.ensureTotalCapacityPrecise(value.len); for (value) |elem_inst| { const elem_id = try spv.emitExpr(section, elem_inst); constituents.appendAssumeCapacity(elem_id); } const id = spv.allocId(); try section.emit(.OpCompositeConstruct, .{ .id_result_type = type_id, .id_result = id, .constituents = constituents.items, }); return id; } const Key = union(enum) { void_type, bool_type, sampler_type, int_type: Inst.Int.Type, float_type: Inst.Float.Type, vector_type: VectorType, matrix_type: MatrixType, array_type: ArrayType, ptr_type: PointerType, fn_type: FunctionType, texture_type: TextureType, sampled_image_type: IdRef, null: IdRef, bool: bool, int: Int, float: Float, vector: Vector, const VectorType = struct { size: Inst.Vector.Size, elem_type: IdRef, }; const MatrixType = struct { cols: Inst.Vector.Size, elem_type: IdRef, }; const ArrayType = struct { len: ?IdRef, elem_type: IdRef, }; const PointerType = struct { storage_class: spec.StorageClass, elem_type: IdRef, }; const FunctionType = struct { return_type: IdRef, params_type: []const IdRef, }; const TextureType = struct { sampled_type: IdRef, dim: spec.Dim, depth: u2, arrayed: u1, multisampled: u1, sampled: u2, image_format: spec.ImageFormat, }; const Int = struct { type: Inst.Int.Type, value: i33, }; const Float = struct { type: Inst.Float.Type, value: u32, }; const Vector = struct { type: IdRef, value: []const IdRef, }; const Adapter = struct { pub fn hash(ctx: Adapter, key: Key) u32 { _ = ctx; var hasher = std.hash.XxHash32.init(0); switch (key) { .fn_type => |func| { std.hash.autoHash(&hasher, func.return_type); for (func.params_type) |param_type| { std.hash.autoHash(&hasher, param_type); } }, inline else => |k| std.hash.autoHashStrat(&hasher, k, .Deep), } return @as(u32, @truncate(hasher.final())); } pub fn eql(ctx: Adapter, a: Key, b: Key, b_index: usize) bool { _ = ctx; _ = b_index; switch (a) { .fn_type => |a_func| if (b == .fn_type) { const b_func = b.fn_type; if (a_func.return_type.id != b_func.return_type.id) return false; if (a_func.params_type.len != b_func.params_type.len) return false; if (a_func.params_type.ptr == b_func.params_type.ptr) return true; for (a_func.params_type, b_func.params_type) |a_param, b_param| { if (a_param.id != b_param.id) return false; } return true; }, else => {}, } return std.meta.eql(a, b); } }; }; pub fn resolve(spv: *SpirV, key: Key) !IdRef { if (spv.type_value_map.get(key)) |value| return value; const id = spv.allocId(); switch (key) { .void_type => try spv.global_section.emit(.OpTypeVoid, .{ .id_result = id }), .bool_type => try spv.global_section.emit(.OpTypeBool, .{ .id_result = id }), .int_type => |int| try spv.global_section.emit(.OpTypeInt, .{ .id_result = id, .width = int.sizeBits(), .signedness = @intFromBool(int.signedness()), }), .float_type => |float| try spv.global_section.emit(.OpTypeFloat, .{ .id_result = id, .width = float.sizeBits(), }), .vector_type => |vector| try spv.global_section.emit(.OpTypeVector, .{ .id_result = id, .component_type = vector.elem_type, .component_count = @intFromEnum(vector.size), }), .matrix_type => |matrix| try spv.global_section.emit(.OpTypeMatrix, .{ .id_result = id, .column_type = matrix.elem_type, .column_count = @intFromEnum(matrix.cols), }), .array_type => |array| { if (array.len) |len| { try spv.global_section.emit(.OpTypeArray, .{ .id_result =
id, .element_type = array.elem_type, .length = len, }); } else { try spv.global_section.emit(.OpTypeRuntimeArray, .{ .id_result = id, .element_type = array.elem_type, }); } }, .ptr_type => |ptr_type| { try spv.global_section.emit(.OpTypePointer, .{ .id_result = id, .storage_class = ptr_type.storage_class, .type = ptr_type.elem_type, }); }, .fn_type => |fn_type| { try spv.global_section.emit(.OpTypeFunction, .{ .id_result = id, .return_type = fn_type.return_type, .id_ref_2 = fn_type.params_type, }); }, .null => |nil| { try spv.global_section.emit(.OpConstantNull, .{ .id_result_type = nil, .id_result = id }); }, .bool => |val| { const type_id = try spv.resolve(.bool_type); if (val) { try spv.global_section.emit(.OpConstantTrue, .{ .id_result_type = type_id, .id_result = id }); } else { try spv.global_section.emit(.OpConstantFalse, .{ .id_result_type = type_id, .id_result = id }); } }, .int => |int| { const value: spec.LiteralContextDependentNumber = switch (int.type) { .u32 => .{ .uint32 = @intCast(int.value) }, .i32 => .{ .int32 = @intCast(int.value) }, }; try spv.global_section.emit(.OpConstant, .{ .id_result_type = try spv.resolve(.{ .int_type = int.type }), .id_result = id, .value = value, }); }, .float => |float| { const value: spec.LiteralContextDependentNumber = switch (float.type) { .f16 => .{ .uint32 = @as(u16, @bitCast(@as(f16, @floatCast(@as(f32, @bitCast(float.value)))))) }, .f32 => .{ .float32 = @bitCast(float.value) }, }; try spv.global_section.emit(.OpConstant, .{ .id_result_type = try spv.resolve(.{ .float_type = float.type }), .id_result = id, .value = value, }); }, .vector => |vector| { try spv.global_section.emit(.OpConstantComposite, .{ .id_result_type = vector.type, .id_result = id, .constituents = vector.value, }); }, .sampler_type => try spv.global_section.emit(.OpTypeSampler, .{ .id_result = id }), .texture_type => |texture_type| try spv.global_section.emit(.OpTypeImage, .{ .id_result = id, .sampled_type = texture_type.sampled_type, .dim = texture_type.dim, .depth = texture_type.depth, .arrayed = texture_type.arrayed, .ms = texture_type.multisampled, .sampled = texture_type.sampled, .image_format = texture_type.image_format, }), .sampled_image_type => |si| try spv.global_section.emit(.OpTypeSampledImage, .{ .id_result = id, .image_type = si, }), } try spv.type_value_map.put(spv.allocator, key, id); return id; } fn debugName(spv: *SpirV, id: IdResult, name: []const u8) !void { if (spv.emit_debug_names) { try spv.debug_section.emit(.OpName, .{ .target = id, .name = name }); } } fn debugMemberName(spv: *SpirV, struct_id: IdResult, index: usize, name: []const u8) !void { if (spv.emit_debug_names) { try spv.debug_section.emit(.OpMemberName, .{ .type = struct_id, .member = @as(spec.LiteralInteger, @intCast(index)), .name = name, }); } } fn allocId(spv: *SpirV) IdResult { defer spv.next_result_id += 1; return .{ .id = spv.next_result_id }; } fn spirvBuiltin(builtin: Air.Inst.Builtin, stage: Air.Inst.Fn.Stage) spec.BuiltIn { return switch (builtin) { .vertex_index => .VertexIndex, .instance_index => .InstanceIndex, .position => switch (stage) { .fragment => .FragCoord, else => .Position, }, .front_facing => .FrontFacing, .frag_depth => .FragDepth, .local_invocation_id => .LocalInvocationId, .local_invocation_index => .LocalInvocationIndex, .global_invocation_id => .GlobalInvocationId, .workgroup_id => .WorkgroupId, .num_workgroups => .NumWorkgroups, .sample_index => .SampleId, .sample_mask => .SampleMask, }; } fn storageClassFromAddrSpace(addr_space:
Air.Inst.PointerType.AddressSpace) spec.StorageClass { return switch (addr_space) { .uniform_constant => .UniformConstant, .function => .Function, .private => .Private, .workgroup => .Workgroup, .uniform => .Uniform, .storage => .StorageBuffer, }; } fn spirvDim(kind: Inst.TextureType.Kind) spec.Dim { return switch (kind) { .sampled_1d, .storage_1d => .@"1D", .sampled_3d, .storage_3d => .@"3D", .sampled_2d, .sampled_2d_array, .multisampled_2d, .multisampled_depth_2d, .storage_2d, .storage_2d_array, .depth_2d, .depth_2d_array, => .@"2D", .sampled_cube, .sampled_cube_array, .depth_cube, .depth_cube_array, => .Cube, }; } fn spirvDepth(kind: Inst.TextureType.Kind) u1 { _ = kind; // The Vulkan spec says: The "Depth" operand of OpTypeImage is ignored. // In SPIRV, 0 means not depth, 1 means depth, and 2 means unknown. // Using anything other than 0 is problematic on various Vulkan drivers. return 0; } fn spirvArrayed(kind: Inst.TextureType.Kind) u1 { return switch (kind) { .sampled_2d_array, .sampled_cube_array, .storage_2d_array, .depth_2d_array, .depth_cube_array, => 1, else => 0, }; } fn spirvMultisampled(kind: Inst.TextureType.Kind) u1 { return switch (kind) { .multisampled_2d, .multisampled_depth_2d => 1, else => 0, }; } fn spirvSampled(kind: Inst.TextureType.Kind) u2 { return switch (kind) { .sampled_1d, .sampled_2d, .sampled_2d_array, .sampled_3d, .sampled_cube, .sampled_cube_array, .multisampled_2d, .multisampled_depth_2d, .depth_2d, .depth_2d_array, .depth_cube, .depth_cube_array, => 1, else => 2, }; } fn spirvImageFormat(texel_format: Inst.TextureType.TexelFormat) spec.ImageFormat { return switch (texel_format) { .none => .Unknown, .rgba8unorm => .Rgba8, .rgba8snorm => .Rgba8Snorm, .rgba8uint => .Rgba8ui, .rgba8sint => .Rgba8i, .rgba16uint => .Rgba16ui, .rgba16sint => .Rgba16i, .rgba16float => .Rgba16f, .r32uint => .R32ui, .r32sint => .R32i, .r32float => .R32f, .rg32uint => .Rg32ui, .rg32sint => .Rg32i, .rg32float => .Rg32f, .rgba32uint => .Rgba32ui, .rgba32sint => .Rgba32i, .rgba32float => .Rgba32f, .bgra8unorm => .Unknown, }; }
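// Hypothetical usage sketch (names and values are illustrative, not from the
// original file): `resolve` interns types and constants through `type_value_map`
// keyed by `Key`, so requesting the same key twice returns one id and emits a
// single instruction into the global section:
//
//     const a = try spv.resolve(.{ .int = .{ .type = .u32, .value = 1 } });
//     const b = try spv.resolve(.{ .int = .{ .type = .u32, .value = 1 } });
//     std.debug.assert(a.id == b.id); // deduplicated by Adapter.hash/eql above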
0
repos/mach-sysgpu/src/shader
repos/mach-sysgpu/src/shader/codegen/glsl.zig
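//! GLSL code generator: walks the analyzed AIR and prints GLSL source targeting
//! `#version 450`. Structs are emitted first, then global resources and a single
//! entrypoint; because GLSL has exactly one `main` per shader, `gen` below
//! generates only the selected (or sole) entrypoint per invocation.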
const std = @import("std"); const Air = @import("../Air.zig"); const DebugInfo = @import("../CodeGen.zig").DebugInfo; const Entrypoint = @import("../CodeGen.zig").Entrypoint; const BindingPoint = @import("../CodeGen.zig").BindingPoint; const BindingTable = @import("../CodeGen.zig").BindingTable; const Inst = Air.Inst; const InstIndex = Air.InstIndex; const Builtin = Air.Inst.Builtin; const Glsl = @This(); air: *const Air, allocator: std.mem.Allocator, storage: std.ArrayListUnmanaged(u8), writer: std.ArrayListUnmanaged(u8).Writer, bindings: *const BindingTable, entrypoint_inst: ?Inst.Fn = null, indent: u32 = 0, pub fn gen( allocator: std.mem.Allocator, air: *const Air, debug_info: DebugInfo, entrypoint: ?Entrypoint, bindings: ?*const BindingTable, ) ![]const u8 { _ = debug_info; var storage = std.ArrayListUnmanaged(u8){}; var glsl = Glsl{ .air = air, .allocator = allocator, .storage = storage, .writer = storage.writer(allocator), .bindings = bindings orelse &.{}, }; defer { storage.deinit(allocator); } try glsl.writeAll("#version 450\n\n"); for (air.refToList(air.globals_index)) |inst_idx| { switch (air.getInst(inst_idx)) { .@"struct" => |inst| try glsl.emitStruct(inst), else => {}, } } // GLSL doesn't support multiple entrypoints, so we only generate // when `entrypoint` is specified OR there's only one entrypoint var entrypoint_name: ?[]const u8 = null; if (entrypoint != null) { entrypoint_name = std.mem.span(entrypoint.?.name); } else { const entrypoint_count = @as(u2, @intFromBool(air.vertex_stage != .none)) + @intFromBool(air.fragment_stage != .none) + @intFromBool(air.compute_stage != .none); if (entrypoint_count > 1) { return error.MultipleEntrypoints; } } for (air.refToList(air.globals_index)) |inst_idx| { switch (air.getInst(inst_idx)) { .@"var" => |inst| try glsl.emitGlobalVar(inst), .@"fn" => |inst| { const name = glsl.air.getStr(inst.name); if (entrypoint_name) |_| { if (std.mem.eql(u8, entrypoint_name.?, name)) { try glsl.emitFn(inst); } } else if (inst.stage != .none) { try glsl.emitFn(inst); } }, .@"struct" => {}, else => |inst| try glsl.print("TopLevel: {}\n", .{inst}), // TODO } } return storage.toOwnedSlice(allocator); } fn emitElemType(glsl: *Glsl, inst_idx: InstIndex) !void { switch (glsl.air.getInst(inst_idx)) { .bool => |inst| try glsl.emitBoolElemType(inst), .int => |inst| try glsl.emitIntElemType(inst), .float => |inst| try glsl.emitFloatElemType(inst), else => unreachable, } } fn emitBoolElemType(glsl: *Glsl, inst: Inst.Bool) !void { _ = inst; try glsl.writeAll("b"); } fn emitIntElemType(glsl: *Glsl, inst: Inst.Int) !void { try glsl.writeAll(switch (inst.type) { .u32 => "u", .i32 => "i", }); } fn emitFloatElemType(glsl: *Glsl, inst: Inst.Float) !void { try glsl.writeAll(switch (inst.type) { .f32 => "", .f16 => "", // TODO - extension for half support?
}); } fn emitType(glsl: *Glsl, inst_idx: InstIndex) error{OutOfMemory}!void { if (inst_idx == .none) { try glsl.writeAll("void"); } else { switch (glsl.air.getInst(inst_idx)) { .bool => |inst| try glsl.emitBoolType(inst), .int => |inst| try glsl.emitIntType(inst), .float => |inst| try glsl.emitFloatType(inst), .vector => |inst| try glsl.emitVectorType(inst), .matrix => |inst| try glsl.emitMatrixType(inst), .array => |inst| try glsl.emitType(inst.elem_type), .@"struct" => |inst| try glsl.writeName(inst.name), else => |inst| try glsl.print("Type: {}", .{inst}), // TODO } } } fn emitTypeSuffix(glsl: *Glsl, inst_idx: InstIndex) error{OutOfMemory}!void { if (inst_idx != .none) { switch (glsl.air.getInst(inst_idx)) { .array => |inst| try glsl.emitArrayTypeSuffix(inst), else => {}, } } } fn emitArrayTypeSuffix(glsl: *Glsl, inst: Inst.Array) !void { if (inst.len != .none) { if (glsl.air.resolveInt(inst.len)) |len| { try glsl.print("[{}]", .{len}); } } else { try glsl.writeAll("[]"); } try glsl.emitTypeSuffix(inst.elem_type); } fn emitBoolType(glsl: *Glsl, inst: Inst.Bool) !void { _ = inst; try glsl.writeAll("bool"); } fn emitIntType(glsl: *Glsl, inst: Inst.Int) !void { try glsl.writeAll(switch (inst.type) { .u32 => "uint", .i32 => "int", }); } fn emitFloatType(glsl: *Glsl, inst: Inst.Float) !void { try glsl.writeAll(switch (inst.type) { .f32 => "float", .f16 => "half", }); } fn emitVectorSize(glsl: *Glsl, size: Inst.Vector.Size) !void { try glsl.writeAll(switch (size) { .two => "2", .three => "3", .four => "4", }); } fn emitVectorType(glsl: *Glsl, inst: Inst.Vector) !void { try glsl.emitElemType(inst.elem_type); try glsl.writeAll("vec"); try glsl.emitVectorSize(inst.size); } fn emitMatrixType(glsl: *Glsl, inst: Inst.Matrix) !void { // TODO - verify dimension order try glsl.emitElemType(inst.elem_type); try glsl.writeAll("mat"); try glsl.emitVectorSize(inst.cols); try glsl.writeAll("x"); try glsl.emitVectorSize(inst.rows); } fn emitStruct(glsl: *Glsl, inst: Inst.Struct) !void { // Workaround - structures with runtime arrays are not generally supported but can exist directly // in a block context which we inline in emitGlobalVar for (glsl.air.refToList(inst.members)) |member_index| { const member = glsl.air.getInst(member_index).struct_member; switch (glsl.air.getInst(member.type)) { .array => |array_type| { if (array_type.len == .none) { return; } }, else => {}, } } try glsl.writeAll("struct "); try glsl.writeName(inst.name); try glsl.writeAll(" {\n"); glsl.enterScope(); defer glsl.exitScope(); for (glsl.air.refToList(inst.members)) |member_index| { const member = glsl.air.getInst(member_index).struct_member; try glsl.writeIndent(); try glsl.emitType(member.type); try glsl.writeAll(" "); try glsl.writeName(member.name); try glsl.emitTypeSuffix(member.type); try glsl.writeAll(";\n"); } try glsl.writeAll("};\n"); } fn emitBuiltin(glsl: *Glsl, builtin: Builtin) !void { const stage = glsl.entrypoint_inst.?.stage; try glsl.writeAll(switch (builtin) { .vertex_index => "gl_VertexID", .instance_index => "gl_InstanceID", .position => if (stage == .vertex) "gl_Position" else "gl_FragCoord", .front_facing => "gl_FrontFacing", .frag_depth => "gl_FragDepth", .local_invocation_id => "gl_LocalInvocationID", .local_invocation_index => "gl_LocalInvocationIndex", .global_invocation_id => "gl_GlobalInvocationID", .workgroup_id => "gl_WorkGroupID", .num_workgroups => "gl_NumWorkGroups", .sample_index => "gl_SampleID", .sample_mask => "gl_SampleMask", // TODO - gl_SampleMaskIn }); } fn emitGlobalVar(glsl: *Glsl, inst: 
Inst.Var) !void { const group = glsl.air.resolveInt(inst.group) orelse return error.ConstExpr; const binding = glsl.air.resolveInt(inst.binding) orelse return error.ConstExpr; const key = BindingPoint{ .group = @intCast(group), .binding = @intCast(binding) }; const slot = glsl.bindings.get(key) orelse return error.NoBinding; try glsl.print("layout(binding = {}, ", .{slot}); try glsl.writeAll(if (inst.addr_space == .uniform) "std140" else "std430"); try glsl.writeAll(") "); if (inst.access_mode == .read) try glsl.writeAll("readonly "); try glsl.writeAll(if (inst.addr_space == .uniform) "uniform" else "buffer"); try glsl.print(" Block{}", .{slot}); const var_type = glsl.air.getInst(inst.type); switch (var_type) { .@"struct" => |struct_inst| { // Inline struct to support runtime arrays try glsl.writeAll("\n"); try glsl.writeAll("{\n"); glsl.enterScope(); defer glsl.exitScope(); for (glsl.air.refToList(struct_inst.members)) |member_index| { const member = glsl.air.getInst(member_index).struct_member; try glsl.writeIndent(); try glsl.emitType(member.type); try glsl.writeAll(" "); try glsl.writeName(member.name); try glsl.emitTypeSuffix(member.type); try glsl.writeAll(";\n"); } try glsl.writeAll("} "); try glsl.writeName(inst.name); try glsl.writeAll(";\n"); }, else => { try glsl.writeAll(" { "); try glsl.emitType(inst.type); try glsl.writeAll(" "); try glsl.writeName(inst.name); try glsl.emitTypeSuffix(inst.type); try glsl.writeAll("; };\n"); }, } } fn emitGlobal(glsl: *Glsl, location: ?u16, in_out: []const u8, var_type: InstIndex, name: Air.StringIndex) !void { try glsl.print("layout(location = {}) {s} ", .{ location.?, in_out }); try glsl.emitType(var_type); try glsl.writeAll(" "); try glsl.writeName(name); try glsl.emitTypeSuffix(var_type); try glsl.writeAll(";\n"); } fn emitGlobalFnParam(glsl: *Glsl, inst_idx: InstIndex) !void { const inst = glsl.air.getInst(inst_idx).fn_param; if (inst.builtin == null) { try glsl.emitGlobal(inst.location, "in", inst.type, inst.name); } } fn emitGlobalStructOutputs(glsl: *Glsl, inst: Inst.Struct) !void { for (glsl.air.refToList(inst.members)) |member_index| { const member = glsl.air.getInst(member_index).struct_member; if (member.builtin == null) { try glsl.emitGlobal(member.location, "out", member.type, member.name); } } } fn emitGlobalScalarOutput(glsl: *Glsl, inst: Inst.Fn) !void { if (inst.return_attrs.builtin == null) { try glsl.print("layout(location = {}) out ", .{0}); try glsl.emitType(inst.return_type); try glsl.writeAll(" "); try glsl.writeAll("main_output"); try glsl.emitTypeSuffix(inst.return_type); try glsl.writeAll(";\n"); } } fn emitFn(glsl: *Glsl, inst: Inst.Fn) !void { if (inst.stage != .none) { glsl.entrypoint_inst = inst; if (inst.params != .none) { const param_list = glsl.air.refToList(inst.params); for (param_list) |param_inst_idx| { try glsl.emitGlobalFnParam(param_inst_idx); } } if (inst.return_type != .none) { switch (glsl.air.getInst(inst.return_type)) { .@"struct" => |struct_inst| try glsl.emitGlobalStructOutputs(struct_inst), else => try glsl.emitGlobalScalarOutput(inst), } } switch (inst.stage) { .compute => |workgroup_size| { try glsl.print("layout(local_size_x = {}, local_size_y = {}, local_size_z = {}) in;\n", .{ glsl.air.resolveInt(workgroup_size.x) orelse 1, glsl.air.resolveInt(workgroup_size.y) orelse 1, glsl.air.resolveInt(workgroup_size.z) orelse 1, }); }, else => {}, } try glsl.emitType(.none); } else { try glsl.emitType(inst.return_type); } try glsl.writeAll(" "); if (inst.stage != .none) { try glsl.writeEntrypoint(); } 
else { try glsl.writeName(inst.name); } try glsl.writeAll("("); if (inst.stage == .none) { glsl.enterScope(); defer glsl.exitScope(); var add_comma = false; if (inst.params != .none) { for (glsl.air.refToList(inst.params)) |param_inst_idx| { try glsl.writeAll(if (add_comma) ",\n" else "\n"); add_comma = true; try glsl.writeIndent(); try glsl.emitFnParam(param_inst_idx); } } } try glsl.writeAll(")\n"); const block = glsl.air.getInst(inst.block).block; try glsl.writeAll("{\n"); { glsl.enterScope(); defer glsl.exitScope(); for (glsl.air.refToList(block)) |statement| { try glsl.emitStatement(statement); } } try glsl.writeAll("}\n"); glsl.entrypoint_inst = null; } fn emitFnParam(glsl: *Glsl, inst_idx: InstIndex) !void { const inst = glsl.air.getInst(inst_idx).fn_param; try glsl.emitType(inst.type); try glsl.writeAll(" "); try glsl.writeName(inst.name); } fn emitStatement(glsl: *Glsl, inst_idx: InstIndex) error{OutOfMemory}!void { try glsl.writeIndent(); switch (glsl.air.getInst(inst_idx)) { .@"var" => |inst| try glsl.emitVar(inst), //.@"const" => |inst| try glsl.emitConst(inst), .block => |block| try glsl.emitBlock(block), // .loop => |inst| try glsl.emitLoop(inst), // .continuing .@"return" => |return_inst_idx| try glsl.emitReturn(return_inst_idx), // .break_if .@"if" => |inst| try glsl.emitIf(inst), // .@"while" => |inst| try glsl.emitWhile(inst), .@"for" => |inst| try glsl.emitFor(inst), // .switch //.discard => try glsl.emitDiscard(), // .@"break" => try glsl.emitBreak(), .@"continue" => try glsl.writeAll("continue;\n"), // .call => |inst| try glsl.emitCall(inst), .assign, .nil_intrinsic, .texture_store, => { try glsl.emitExpr(inst_idx); try glsl.writeAll(";\n"); }, //else => |inst| std.debug.panic("TODO: implement Air tag {s}", .{@tagName(inst)}), else => |inst| try glsl.print("Statement: {}\n", .{inst}), // TODO } } fn emitVar(glsl: *Glsl, inst: Inst.Var) !void { const t = if (inst.type != .none) inst.type else inst.init; try glsl.emitType(t); try glsl.writeAll(" "); try glsl.writeName(inst.name); try glsl.emitTypeSuffix(t); if (inst.init != .none) { try glsl.writeAll(" = "); try glsl.emitExpr(inst.init); } try glsl.writeAll(";\n"); } fn emitBlock(glsl: *Glsl, block: Air.RefIndex) !void { try glsl.writeAll("{\n"); { glsl.enterScope(); defer glsl.exitScope(); for (glsl.air.refToList(block)) |statement| { try glsl.emitStatement(statement); } } try glsl.writeIndent(); try glsl.writeAll("}\n"); } fn emitReturn(glsl: *Glsl, inst_idx: InstIndex) !void { if (glsl.entrypoint_inst) |fn_inst| { if (fn_inst.return_type != .none) { switch (glsl.air.getInst(fn_inst.return_type)) { .@"struct" => |struct_inst| try glsl.emitGlobalStructReturn(struct_inst, inst_idx), else => try glsl.emitGlobalScalarReturn(fn_inst, inst_idx), } try glsl.writeIndent(); } try glsl.writeAll("return;\n"); } else { try glsl.writeAll("return"); if (inst_idx != .none) { try glsl.writeAll(" "); try glsl.emitExpr(inst_idx); } try glsl.writeAll(";\n"); } } fn emitGlobalStructReturn(glsl: *Glsl, inst: Inst.Struct, inst_idx: InstIndex) !void { for (glsl.air.refToList(inst.members), 0..) 
|member_index, i| { const member = glsl.air.getInst(member_index).struct_member; if (i > 0) try glsl.writeIndent(); if (member.builtin) |builtin| { try glsl.emitBuiltin(builtin); } else { try glsl.writeName(member.name); } try glsl.writeAll(" = "); try glsl.emitExpr(inst_idx); try glsl.writeAll("."); try glsl.writeName(member.name); try glsl.writeAll(";\n"); } } fn emitGlobalScalarReturn(glsl: *Glsl, inst: Inst.Fn, inst_idx: InstIndex) !void { if (inst.return_attrs.builtin) |builtin| { try glsl.emitBuiltin(builtin); } else { try glsl.writeAll("main_output"); } if (inst_idx != .none) { try glsl.writeAll(" = "); try glsl.emitExpr(inst_idx); } try glsl.writeAll(";\n"); } fn emitIf(glsl: *Glsl, inst: Inst.If) !void { try glsl.writeAll("if ("); try glsl.emitExpr(inst.cond); try glsl.writeAll(")\n"); { const body_inst = glsl.air.getInst(inst.body); if (body_inst != .block) glsl.enterScope(); try glsl.emitStatement(inst.body); if (body_inst != .block) glsl.exitScope(); } if (inst.@"else" != .none) { try glsl.writeIndent(); try glsl.writeAll("else\n"); try glsl.emitStatement(inst.@"else"); } try glsl.writeAll("\n"); } fn emitFor(glsl: *Glsl, inst: Inst.For) !void { try glsl.writeAll("for (\n"); { glsl.enterScope(); defer glsl.exitScope(); try glsl.emitStatement(inst.init); try glsl.writeIndent(); try glsl.emitExpr(inst.cond); try glsl.writeAll(";\n"); try glsl.writeIndent(); try glsl.emitExpr(inst.update); try glsl.writeAll(")\n"); } try glsl.emitStatement(inst.body); } fn emitExpr(glsl: *Glsl, inst_idx: InstIndex) error{OutOfMemory}!void { switch (glsl.air.getInst(inst_idx)) { .var_ref => |inst| try glsl.emitVarRef(inst), .bool => |inst| try glsl.emitBool(inst), .int => |inst| try glsl.emitInt(inst), .float => |inst| try glsl.emitFloat(inst), .vector => |inst| try glsl.emitVector(inst), //.matrix => |inst| try glsl.emitMatrix(inst), .array => |inst| try glsl.emitArray(inst), //.nil_intrinsic => |inst| try glsl.emitNilIntrinsic(inst), .unary => |inst| try glsl.emitUnary(inst), .unary_intrinsic => |inst| try glsl.emitUnaryIntrinsic(inst), .binary => |inst| try glsl.emitBinary(inst), .binary_intrinsic => |inst| try glsl.emitBinaryIntrinsic(inst), .triple_intrinsic => |inst| try glsl.emitTripleIntrinsic(inst), .assign => |inst| try glsl.emitAssign(inst), .field_access => |inst| try glsl.emitFieldAccess(inst), .swizzle_access => |inst| try glsl.emitSwizzleAccess(inst), .index_access => |inst| try glsl.emitIndexAccess(inst), //.call => |inst| try glsl.emitCall(inst), //.struct_construct: StructConstruct, //.bitcast: Bitcast, //.texture_sample => |inst| try glsl.emitTextureSample(inst), //.texture_dimension => |inst| try glsl.emitTextureDimension(inst), //.texture_load => |inst| try glsl.emitTextureLoad(inst), //.texture_store => |inst| try glsl.emitTextureStore(inst), //else => |inst| std.debug.panic("TODO: implement Air tag {s}", .{@tagName(inst)}), else => |inst| std.debug.panic("Expr: {}", .{inst}), // TODO } } fn emitVarRef(glsl: *Glsl, inst_idx: InstIndex) !void { switch (glsl.air.getInst(inst_idx)) { .@"var" => |v| try glsl.writeName(v.name), .@"const" => |c| try glsl.writeName(c.name), .fn_param => |p| { if (p.builtin) |builtin| { try glsl.emitBuiltin(builtin); } else { try glsl.writeName(p.name); } }, else => |x| std.debug.panic("VarRef: {}", .{x}), // TODO } } fn emitBool(glsl: *Glsl, inst: Inst.Bool) !void { switch (inst.value.?) 
{ .literal => |lit| try glsl.print("{}", .{lit}), .cast => @panic("TODO"), } } fn emitInt(glsl: *Glsl, inst: Inst.Int) !void { switch (glsl.air.getValue(Inst.Int.Value, inst.value.?)) { .literal => |lit| try glsl.print("{}", .{lit}), .cast => |cast| try glsl.emitIntCast(inst, cast), } } fn emitIntCast(glsl: *Glsl, dest_type: Inst.Int, cast: Inst.ScalarCast) !void { try glsl.emitIntType(dest_type); try glsl.writeAll("("); try glsl.emitExpr(cast.value); try glsl.writeAll(")"); } fn emitFloat(glsl: *Glsl, inst: Inst.Float) !void { switch (glsl.air.getValue(Inst.Float.Value, inst.value.?)) { .literal => |lit| try glsl.print("{}", .{lit}), .cast => |cast| try glsl.emitFloatCast(inst, cast), } } fn emitFloatCast(glsl: *Glsl, dest_type: Inst.Float, cast: Inst.ScalarCast) !void { try glsl.emitFloatType(dest_type); try glsl.writeAll("("); try glsl.emitExpr(cast.value); try glsl.writeAll(")"); } fn emitVector(glsl: *Glsl, inst: Inst.Vector) !void { try glsl.emitVectorType(inst); try glsl.writeAll("("); const value = glsl.air.getValue(Inst.Vector.Value, inst.value.?); switch (value) { .literal => |literal| try glsl.emitVectorElems(inst.size, literal), .cast => |cast| try glsl.emitVectorElems(inst.size, cast.value), } try glsl.writeAll(")"); } fn emitVectorElems(glsl: *Glsl, size: Inst.Vector.Size, value: [4]InstIndex) !void { for (value[0..@intFromEnum(size)], 0..) |elem_inst, i| { try glsl.writeAll(if (i == 0) "" else ", "); try glsl.emitExpr(elem_inst); } } fn emitArray(glsl: *Glsl, inst: Inst.Array) !void { try glsl.emitType(inst.elem_type); try glsl.writeAll("[]("); { glsl.enterScope(); defer glsl.exitScope(); const value = glsl.air.refToList(inst.value.?); for (value, 0..) |elem_inst, i| { try glsl.writeAll(if (i == 0) "\n" else ",\n"); try glsl.writeIndent(); try glsl.emitExpr(elem_inst); } } try glsl.writeAll(")"); } fn emitUnary(glsl: *Glsl, inst: Inst.Unary) !void { try glsl.writeAll(switch (inst.op) { .not => "!", .negate => "-", .deref => "*", .addr_of => @panic("unsupported"), }); try glsl.emitExpr(inst.expr); } fn emitUnaryIntrinsic(glsl: *Glsl, inst: Inst.UnaryIntrinsic) !void { switch (inst.op) { .array_length => try glsl.emitArrayLength(inst), else => { try glsl.writeAll(switch (inst.op) { .array_length => unreachable, .degrees => "degrees", .radians => "radians", .all => "all", .any => "any", .abs => "abs", .acos => "acos", .acosh => "acosh", .asin => "asin", .asinh => "asinh", .atan => "atan", .atanh => "atanh", .ceil => "ceil", .cos => "cos", .cosh => "cosh", //.count_leading_zeros => "count_leading_zeros", .count_one_bits => "bitCount", //.count_trailing_zeros => "count_trailing_zeros", .exp => "exp", .exp2 => "exp2", //.first_leading_bit => "first_leading_bit", //.first_trailing_bit => "first_trailing_bit", .floor => "floor", .fract => "fract", .inverse_sqrt => "inversesqrt", .length => "length", .log => "log", .log2 => "log2", //.quantize_to_F16 => "quantize_to_F16", .reverseBits => "bitfieldReverse", .round => "round", //.saturate => "saturate", .sign => "sign", .sin => "sin", .sinh => "sinh", .sqrt => "sqrt", .tan => "tan", .tanh => "tanh", .trunc => "trunc", .dpdx => "dFdx", .dpdx_coarse => "dFdxCoarse", .dpdx_fine => "dFdxFine", .dpdy => "dFdy", .dpdy_coarse => "dFdyCoarse", .dpdy_fine => "dFdyFine", .fwidth => "fwidth", .fwidth_coarse => "fwidthCoarse", .fwidth_fine => "fwidthFine", .normalize => "normalize", else => std.debug.panic("TODO: implement Air tag {s}", .{@tagName(inst.op)}), }); try glsl.writeAll("("); try glsl.emitExpr(inst.expr); try glsl.writeAll(")"); }, } }
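// For example, a WGSL call like `arrayLength(&buffer.data)` reaches this backend
// as a unary intrinsic wrapping an `addr_of`; `emitArrayLength` below unwraps the
// address-of and `emitArrayLengthTarget` prints it as `(<expr>.length() - 0)`,
// GLSL's unsized-array length query on the inlined buffer block member.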
fn emitArrayLength(glsl: *Glsl, inst: Inst.UnaryIntrinsic) !void { switch (glsl.air.getInst(inst.expr)) { .unary => |un| switch (un.op) { .addr_of => try glsl.emitArrayLengthTarget(un.expr, 0), else => try glsl.print("ArrayLength (unary_op): {}", .{un.op}), }, else => |array_length_expr| try glsl.print("ArrayLength (array_length_expr): {}", .{array_length_expr}), } } fn emitArrayLengthTarget(glsl: *Glsl, inst_idx: InstIndex, offset: usize) error{OutOfMemory}!void { try glsl.writeAll("("); try glsl.emitExpr(inst_idx); try glsl.print(".length() - {}", .{offset}); try glsl.writeAll(")"); } fn emitBinary(glsl: *Glsl, inst: Inst.Binary) !void { try glsl.writeAll("("); try glsl.emitExpr(inst.lhs); try glsl.print(" {s} ", .{switch (inst.op) { .mul => "*", .div => "/", .mod => "%", .add => "+", .sub => "-", .shl => "<<", .shr => ">>", .@"and" => "&", .@"or" => "|", .xor => "^", .logical_and => "&&", .logical_or => "||", .equal => "==", .not_equal => "!=", .less_than => "<", .less_than_equal => "<=", .greater_than => ">", .greater_than_equal => ">=", }}); try glsl.emitExpr(inst.rhs); try glsl.writeAll(")"); } fn emitBinaryIntrinsic(glsl: *Glsl, inst: Inst.BinaryIntrinsic) !void { try glsl.writeAll(switch (inst.op) { .min => "min", .max => "max", .atan2 => "atan", .distance => "distance", .dot => "dot", .pow => "pow", .step => "step", }); try glsl.writeAll("("); try glsl.emitExpr(inst.lhs); try glsl.writeAll(", "); try glsl.emitExpr(inst.rhs); try glsl.writeAll(")"); } fn emitTripleIntrinsic(glsl: *Glsl, inst: Inst.TripleIntrinsic) !void { try glsl.writeAll(switch (inst.op) { .smoothstep => "smoothstep", .clamp => "clamp", .mix => "mix", }); try glsl.writeAll("("); try glsl.emitExpr(inst.a1); try glsl.writeAll(", "); try glsl.emitExpr(inst.a2); try glsl.writeAll(", "); try glsl.emitExpr(inst.a3); try glsl.writeAll(")"); } fn emitAssign(glsl: *Glsl, inst: Inst.Assign) !void { try glsl.emitExpr(inst.lhs); try glsl.print(" {s}= ", .{switch (inst.mod) { .none => "", .add => "+", .sub => "-", .mul => "*", .div => "/", .mod => "%", .@"and" => "&", .@"or" => "|", .xor => "^", .shl => "<<", .shr => ">>", }}); try glsl.emitExpr(inst.rhs); } fn emitFieldAccess(glsl: *Glsl, inst: Inst.FieldAccess) !void { try glsl.emitExpr(inst.base); try glsl.writeAll("."); try glsl.writeName(inst.name); } fn emitSwizzleAccess(glsl: *Glsl, inst: Inst.SwizzleAccess) !void { try glsl.emitExpr(inst.base); try glsl.writeAll("."); for (0..@intFromEnum(inst.size)) |i| { switch (inst.pattern[i]) { .x => try glsl.writeAll("x"), .y => try glsl.writeAll("y"), .z => try glsl.writeAll("z"), .w => try glsl.writeAll("w"), } } } fn emitIndexAccess(glsl: *Glsl, inst: Inst.IndexAccess) !void { try glsl.emitExpr(inst.base); try glsl.writeAll("["); try glsl.emitExpr(inst.index); try glsl.writeAll("]"); } fn enterScope(glsl: *Glsl) void { glsl.indent += 4; } fn exitScope(glsl: *Glsl) void { glsl.indent -= 4; } fn writeIndent(glsl: *Glsl) !void { try glsl.writer.writeByteNTimes(' ', glsl.indent); } fn writeEntrypoint(glsl: *Glsl) !void { try glsl.writeAll("main"); } fn writeName(glsl: *Glsl, name: Air.StringIndex) !void { // Suffix with index as WGSL has different scoping rules and to avoid conflicts with keywords const str = glsl.air.getStr(name); try glsl.print("{s}_{}", .{ str, @intFromEnum(name) }); } fn writeAll(glsl: *Glsl, bytes: []const u8) !void { try glsl.writer.writeAll(bytes); } fn print(glsl: *Glsl, comptime format: []const u8, args: anytype) !void { return std.fmt.format(glsl.writer, format, args); }
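// Illustrative output sketch (assumed WGSL input; names are hypothetical): for a
// fragment entrypoint returning `@location(0) vec4<f32>`, `gen` would print
// roughly the following, via emitGlobalScalarOutput, emitFn, and
// emitGlobalScalarReturn above:
//
//     #version 450
//
//     layout(location = 0) out vec4 main_output;
//     void main()
//     {
//         main_output = ...;
//         return;
//     }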
0
repos/mach-sysgpu/src/shader/codegen
repos/mach-sysgpu/src/shader/codegen/spirv/spec.zig
//! Borrowed from Zig compiler codebase with changes. //! Licensed under LICENSE-ZIG //! //! This file is auto-generated by tools/gen_spirv_spec.zig. pub const Version = packed struct(Word) { padding: u8 = 0, minor: u8, major: u8, padding0: u8 = 0, pub fn toWord(self: @This()) Word { return @bitCast(self); } }; pub const Word = u32; pub const IdResult = struct { id: Word, }; pub const IdResultType = IdResult; pub const IdRef = IdResult; pub const IdMemorySemantics = IdRef; pub const IdScope = IdRef; pub const LiteralInteger = Word; pub const LiteralString = []const u8; pub const LiteralContextDependentNumber = union(enum) { int32: i32, uint32: u32, int64: i64, uint64: u64, float32: f32, float64: f64, }; pub const LiteralExtInstInteger = struct { inst: Word }; pub const LiteralSpecConstantOpInteger = struct { opcode: Opcode }; pub const PairLiteralIntegerIdRef = struct { value: LiteralInteger, label: IdRef }; pub const PairIdRefLiteralInteger = struct { target: IdRef, member: LiteralInteger }; pub const PairIdRefIdRef = [2]IdRef; pub const Quantifier = enum { required, optional, variadic, }; pub const Operand = struct { kind: OperandKind, quantifier: Quantifier, }; pub const OperandCategory = enum { bit_enum, value_enum, id, literal, composite, }; pub const Enumerant = struct { name: []const u8, value: Word, parameters: []const OperandKind, }; pub const version = Version{ .major = 1, .minor = 6 }; pub const magic_number: Word = 0x07230203; pub const Class = enum { Miscellaneous, Debug, Extension, ModeSetting, TypeDeclaration, ConstantCreation, Function, Memory, Annotation, Composite, Image, Conversion, Arithmetic, RelationalAndLogical, Bit, Derivative, Primitive, Barrier, Atomic, ControlFlow, Group, Pipe, DeviceSideEnqueue, NonUniform, Reserved, }; pub const OperandKind = enum { ImageOperands, FPFastMathMode, SelectionControl, LoopControl, FunctionControl, MemorySemantics, MemoryAccess, KernelProfilingInfo, RayFlags, FragmentShadingRate, SourceLanguage, ExecutionModel, AddressingModel, MemoryModel, ExecutionMode, StorageClass, Dim, SamplerAddressingMode, SamplerFilterMode, ImageFormat, ImageChannelOrder, ImageChannelDataType, FPRoundingMode, FPDenormMode, QuantizationModes, FPOperationMode, OverflowModes, LinkageType, AccessQualifier, FunctionParameterAttribute, Decoration, BuiltIn, Scope, GroupOperation, KernelEnqueueFlags, Capability, RayQueryIntersection, RayQueryCommittedIntersectionType, RayQueryCandidateIntersectionType, PackedVectorFormat, IdResultType, IdResult, IdMemorySemantics, IdScope, IdRef, LiteralInteger, LiteralString, LiteralContextDependentNumber, LiteralExtInstInteger, LiteralSpecConstantOpInteger, PairLiteralIntegerIdRef, PairIdRefLiteralInteger, PairIdRefIdRef, pub fn category(self: OperandKind) OperandCategory { return switch (self) { .ImageOperands => .bit_enum, .FPFastMathMode => .bit_enum, .SelectionControl => .bit_enum, .LoopControl => .bit_enum, .FunctionControl => .bit_enum, .MemorySemantics => .bit_enum, .MemoryAccess => .bit_enum, .KernelProfilingInfo => .bit_enum, .RayFlags => .bit_enum, .FragmentShadingRate => .bit_enum, .SourceLanguage => .value_enum, .ExecutionModel => .value_enum, .AddressingModel => .value_enum, .MemoryModel => .value_enum, .ExecutionMode => .value_enum, .StorageClass => .value_enum, .Dim => .value_enum, .SamplerAddressingMode => .value_enum, .SamplerFilterMode => .value_enum, .ImageFormat => .value_enum, .ImageChannelOrder => .value_enum, .ImageChannelDataType => .value_enum, .FPRoundingMode => .value_enum, .FPDenormMode =>
.value_enum, .QuantizationModes => .value_enum, .FPOperationMode => .value_enum, .OverflowModes => .value_enum, .LinkageType => .value_enum, .AccessQualifier => .value_enum, .FunctionParameterAttribute => .value_enum, .Decoration => .value_enum, .BuiltIn => .value_enum, .Scope => .value_enum, .GroupOperation => .value_enum, .KernelEnqueueFlags => .value_enum, .Capability => .value_enum, .RayQueryIntersection => .value_enum, .RayQueryCommittedIntersectionType => .value_enum, .RayQueryCandidateIntersectionType => .value_enum, .PackedVectorFormat => .value_enum, .IdResultType => .id, .IdResult => .id, .IdMemorySemantics => .id, .IdScope => .id, .IdRef => .id, .LiteralInteger => .literal, .LiteralString => .literal, .LiteralContextDependentNumber => .literal, .LiteralExtInstInteger => .literal, .LiteralSpecConstantOpInteger => .literal, .PairLiteralIntegerIdRef => .composite, .PairIdRefLiteralInteger => .composite, .PairIdRefIdRef => .composite, }; } pub fn enumerants(self: OperandKind) []const Enumerant { return switch (self) { .ImageOperands => &[_]Enumerant{ .{ .name = "Bias", .value = 0x0001, .parameters = &[_]OperandKind{.IdRef} }, .{ .name = "Lod", .value = 0x0002, .parameters = &[_]OperandKind{.IdRef} }, .{ .name = "Grad", .value = 0x0004, .parameters = &[_]OperandKind{ .IdRef, .IdRef } }, .{ .name = "ConstOffset", .value = 0x0008, .parameters = &[_]OperandKind{.IdRef} }, .{ .name = "Offset", .value = 0x0010, .parameters = &[_]OperandKind{.IdRef} }, .{ .name = "ConstOffsets", .value = 0x0020, .parameters = &[_]OperandKind{.IdRef} }, .{ .name = "Sample", .value = 0x0040, .parameters = &[_]OperandKind{.IdRef} }, .{ .name = "MinLod", .value = 0x0080, .parameters = &[_]OperandKind{.IdRef} }, .{ .name = "MakeTexelAvailable", .value = 0x0100, .parameters = &[_]OperandKind{.IdScope} }, .{ .name = "MakeTexelAvailableKHR", .value = 0x0100, .parameters = &[_]OperandKind{.IdScope} }, .{ .name = "MakeTexelVisible", .value = 0x0200, .parameters = &[_]OperandKind{.IdScope} }, .{ .name = "MakeTexelVisibleKHR", .value = 0x0200, .parameters = &[_]OperandKind{.IdScope} }, .{ .name = "NonPrivateTexel", .value = 0x0400, .parameters = &[_]OperandKind{} }, .{ .name = "NonPrivateTexelKHR", .value = 0x0400, .parameters = &[_]OperandKind{} }, .{ .name = "VolatileTexel", .value = 0x0800, .parameters = &[_]OperandKind{} }, .{ .name = "VolatileTexelKHR", .value = 0x0800, .parameters = &[_]OperandKind{} }, .{ .name = "SignExtend", .value = 0x1000, .parameters = &[_]OperandKind{} }, .{ .name = "ZeroExtend", .value = 0x2000, .parameters = &[_]OperandKind{} }, .{ .name = "Nontemporal", .value = 0x4000, .parameters = &[_]OperandKind{} }, .{ .name = "Offsets", .value = 0x10000, .parameters = &[_]OperandKind{.IdRef} }, }, .FPFastMathMode => &[_]Enumerant{ .{ .name = "NotNaN", .value = 0x0001, .parameters = &[_]OperandKind{} }, .{ .name = "NotInf", .value = 0x0002, .parameters = &[_]OperandKind{} }, .{ .name = "NSZ", .value = 0x0004, .parameters = &[_]OperandKind{} }, .{ .name = "AllowRecip", .value = 0x0008, .parameters = &[_]OperandKind{} }, .{ .name = "Fast", .value = 0x0010, .parameters = &[_]OperandKind{} }, .{ .name = "AllowContractFastINTEL", .value = 0x10000, .parameters = &[_]OperandKind{} }, .{ .name = "AllowReassocINTEL", .value = 0x20000, .parameters = &[_]OperandKind{} }, }, .SelectionControl => &[_]Enumerant{ .{ .name = "Flatten", .value = 0x0001, .parameters = &[_]OperandKind{} }, .{ .name = "DontFlatten", .value = 0x0002, .parameters = &[_]OperandKind{} }, }, .LoopControl => &[_]Enumerant{ .{ .name = 
"Unroll", .value = 0x0001, .parameters = &[_]OperandKind{} }, .{ .name = "DontUnroll", .value = 0x0002, .parameters = &[_]OperandKind{} }, .{ .name = "DependencyInfinite", .value = 0x0004, .parameters = &[_]OperandKind{} }, .{ .name = "DependencyLength", .value = 0x0008, .parameters = &[_]OperandKind{.LiteralInteger} }, .{ .name = "MinIterations", .value = 0x0010, .parameters = &[_]OperandKind{.LiteralInteger} }, .{ .name = "MaxIterations", .value = 0x0020, .parameters = &[_]OperandKind{.LiteralInteger} }, .{ .name = "IterationMultiple", .value = 0x0040, .parameters = &[_]OperandKind{.LiteralInteger} }, .{ .name = "PeelCount", .value = 0x0080, .parameters = &[_]OperandKind{.LiteralInteger} }, .{ .name = "PartialCount", .value = 0x0100, .parameters = &[_]OperandKind{.LiteralInteger} }, .{ .name = "InitiationIntervalINTEL", .value = 0x10000, .parameters = &[_]OperandKind{.LiteralInteger} }, .{ .name = "MaxConcurrencyINTEL", .value = 0x20000, .parameters = &[_]OperandKind{.LiteralInteger} }, .{ .name = "DependencyArrayINTEL", .value = 0x40000, .parameters = &[_]OperandKind{.LiteralInteger} }, .{ .name = "PipelineEnableINTEL", .value = 0x80000, .parameters = &[_]OperandKind{.LiteralInteger} }, .{ .name = "LoopCoalesceINTEL", .value = 0x100000, .parameters = &[_]OperandKind{.LiteralInteger} }, .{ .name = "MaxInterleavingINTEL", .value = 0x200000, .parameters = &[_]OperandKind{.LiteralInteger} }, .{ .name = "SpeculatedIterationsINTEL", .value = 0x400000, .parameters = &[_]OperandKind{.LiteralInteger} }, .{ .name = "NoFusionINTEL", .value = 0x800000, .parameters = &[_]OperandKind{} }, .{ .name = "LoopCountINTEL", .value = 0x1000000, .parameters = &[_]OperandKind{.LiteralInteger} }, .{ .name = "MaxReinvocationDelayINTEL", .value = 0x2000000, .parameters = &[_]OperandKind{.LiteralInteger} }, }, .FunctionControl => &[_]Enumerant{ .{ .name = "Inline", .value = 0x0001, .parameters = &[_]OperandKind{} }, .{ .name = "DontInline", .value = 0x0002, .parameters = &[_]OperandKind{} }, .{ .name = "Pure", .value = 0x0004, .parameters = &[_]OperandKind{} }, .{ .name = "Const", .value = 0x0008, .parameters = &[_]OperandKind{} }, .{ .name = "OptNoneINTEL", .value = 0x10000, .parameters = &[_]OperandKind{} }, }, .MemorySemantics => &[_]Enumerant{ .{ .name = "Relaxed", .value = 0x0000, .parameters = &[_]OperandKind{} }, .{ .name = "Acquire", .value = 0x0002, .parameters = &[_]OperandKind{} }, .{ .name = "Release", .value = 0x0004, .parameters = &[_]OperandKind{} }, .{ .name = "AcquireRelease", .value = 0x0008, .parameters = &[_]OperandKind{} }, .{ .name = "SequentiallyConsistent", .value = 0x0010, .parameters = &[_]OperandKind{} }, .{ .name = "UniformMemory", .value = 0x0040, .parameters = &[_]OperandKind{} }, .{ .name = "SubgroupMemory", .value = 0x0080, .parameters = &[_]OperandKind{} }, .{ .name = "WorkgroupMemory", .value = 0x0100, .parameters = &[_]OperandKind{} }, .{ .name = "CrossWorkgroupMemory", .value = 0x0200, .parameters = &[_]OperandKind{} }, .{ .name = "AtomicCounterMemory", .value = 0x0400, .parameters = &[_]OperandKind{} }, .{ .name = "ImageMemory", .value = 0x0800, .parameters = &[_]OperandKind{} }, .{ .name = "OutputMemory", .value = 0x1000, .parameters = &[_]OperandKind{} }, .{ .name = "OutputMemoryKHR", .value = 0x1000, .parameters = &[_]OperandKind{} }, .{ .name = "MakeAvailable", .value = 0x2000, .parameters = &[_]OperandKind{} }, .{ .name = "MakeAvailableKHR", .value = 0x2000, .parameters = &[_]OperandKind{} }, .{ .name = "MakeVisible", .value = 0x4000, .parameters = &[_]OperandKind{} }, .{ 
.name = "MakeVisibleKHR", .value = 0x4000, .parameters = &[_]OperandKind{} }, .{ .name = "Volatile", .value = 0x8000, .parameters = &[_]OperandKind{} }, }, .MemoryAccess => &[_]Enumerant{ .{ .name = "Volatile", .value = 0x0001, .parameters = &[_]OperandKind{} }, .{ .name = "Aligned", .value = 0x0002, .parameters = &[_]OperandKind{.LiteralInteger} }, .{ .name = "Nontemporal", .value = 0x0004, .parameters = &[_]OperandKind{} }, .{ .name = "MakePointerAvailable", .value = 0x0008, .parameters = &[_]OperandKind{.IdScope} }, .{ .name = "MakePointerAvailableKHR", .value = 0x0008, .parameters = &[_]OperandKind{.IdScope} }, .{ .name = "MakePointerVisible", .value = 0x0010, .parameters = &[_]OperandKind{.IdScope} }, .{ .name = "MakePointerVisibleKHR", .value = 0x0010, .parameters = &[_]OperandKind{.IdScope} }, .{ .name = "NonPrivatePointer", .value = 0x0020, .parameters = &[_]OperandKind{} }, .{ .name = "NonPrivatePointerKHR", .value = 0x0020, .parameters = &[_]OperandKind{} }, .{ .name = "AliasScopeINTELMask", .value = 0x10000, .parameters = &[_]OperandKind{.IdRef} }, .{ .name = "NoAliasINTELMask", .value = 0x20000, .parameters = &[_]OperandKind{.IdRef} }, }, .KernelProfilingInfo => &[_]Enumerant{ .{ .name = "CmdExecTime", .value = 0x0001, .parameters = &[_]OperandKind{} }, }, .RayFlags => &[_]Enumerant{ .{ .name = "NoneKHR", .value = 0x0000, .parameters = &[_]OperandKind{} }, .{ .name = "OpaqueKHR", .value = 0x0001, .parameters = &[_]OperandKind{} }, .{ .name = "NoOpaqueKHR", .value = 0x0002, .parameters = &[_]OperandKind{} }, .{ .name = "TerminateOnFirstHitKHR", .value = 0x0004, .parameters = &[_]OperandKind{} }, .{ .name = "SkipClosestHitShaderKHR", .value = 0x0008, .parameters = &[_]OperandKind{} }, .{ .name = "CullBackFacingTrianglesKHR", .value = 0x0010, .parameters = &[_]OperandKind{} }, .{ .name = "CullFrontFacingTrianglesKHR", .value = 0x0020, .parameters = &[_]OperandKind{} }, .{ .name = "CullOpaqueKHR", .value = 0x0040, .parameters = &[_]OperandKind{} }, .{ .name = "CullNoOpaqueKHR", .value = 0x0080, .parameters = &[_]OperandKind{} }, .{ .name = "SkipTrianglesKHR", .value = 0x0100, .parameters = &[_]OperandKind{} }, .{ .name = "SkipAABBsKHR", .value = 0x0200, .parameters = &[_]OperandKind{} }, .{ .name = "ForceOpacityMicromap2StateEXT", .value = 0x0400, .parameters = &[_]OperandKind{} }, }, .FragmentShadingRate => &[_]Enumerant{ .{ .name = "Vertical2Pixels", .value = 0x0001, .parameters = &[_]OperandKind{} }, .{ .name = "Vertical4Pixels", .value = 0x0002, .parameters = &[_]OperandKind{} }, .{ .name = "Horizontal2Pixels", .value = 0x0004, .parameters = &[_]OperandKind{} }, .{ .name = "Horizontal4Pixels", .value = 0x0008, .parameters = &[_]OperandKind{} }, }, .SourceLanguage => &[_]Enumerant{ .{ .name = "Unknown", .value = 0, .parameters = &[_]OperandKind{} }, .{ .name = "ESSL", .value = 1, .parameters = &[_]OperandKind{} }, .{ .name = "GLSL", .value = 2, .parameters = &[_]OperandKind{} }, .{ .name = "OpenCL_C", .value = 3, .parameters = &[_]OperandKind{} }, .{ .name = "OpenCL_CPP", .value = 4, .parameters = &[_]OperandKind{} }, .{ .name = "HLSL", .value = 5, .parameters = &[_]OperandKind{} }, .{ .name = "CPP_for_OpenCL", .value = 6, .parameters = &[_]OperandKind{} }, .{ .name = "SYCL", .value = 7, .parameters = &[_]OperandKind{} }, .{ .name = "HERO_C", .value = 8, .parameters = &[_]OperandKind{} }, }, .ExecutionModel => &[_]Enumerant{ .{ .name = "Vertex", .value = 0, .parameters = &[_]OperandKind{} }, .{ .name = "TessellationControl", .value = 1, .parameters = &[_]OperandKind{} }, .{ 
.name = "TessellationEvaluation", .value = 2, .parameters = &[_]OperandKind{} }, .{ .name = "Geometry", .value = 3, .parameters = &[_]OperandKind{} }, .{ .name = "Fragment", .value = 4, .parameters = &[_]OperandKind{} }, .{ .name = "GLCompute", .value = 5, .parameters = &[_]OperandKind{} }, .{ .name = "Kernel", .value = 6, .parameters = &[_]OperandKind{} }, .{ .name = "TaskNV", .value = 5267, .parameters = &[_]OperandKind{} }, .{ .name = "MeshNV", .value = 5268, .parameters = &[_]OperandKind{} }, .{ .name = "RayGenerationNV", .value = 5313, .parameters = &[_]OperandKind{} }, .{ .name = "RayGenerationKHR", .value = 5313, .parameters = &[_]OperandKind{} }, .{ .name = "IntersectionNV", .value = 5314, .parameters = &[_]OperandKind{} }, .{ .name = "IntersectionKHR", .value = 5314, .parameters = &[_]OperandKind{} }, .{ .name = "AnyHitNV", .value = 5315, .parameters = &[_]OperandKind{} }, .{ .name = "AnyHitKHR", .value = 5315, .parameters = &[_]OperandKind{} }, .{ .name = "ClosestHitNV", .value = 5316, .parameters = &[_]OperandKind{} }, .{ .name = "ClosestHitKHR", .value = 5316, .parameters = &[_]OperandKind{} }, .{ .name = "MissNV", .value = 5317, .parameters = &[_]OperandKind{} }, .{ .name = "MissKHR", .value = 5317, .parameters = &[_]OperandKind{} }, .{ .name = "CallableNV", .value = 5318, .parameters = &[_]OperandKind{} }, .{ .name = "CallableKHR", .value = 5318, .parameters = &[_]OperandKind{} }, .{ .name = "TaskEXT", .value = 5364, .parameters = &[_]OperandKind{} }, .{ .name = "MeshEXT", .value = 5365, .parameters = &[_]OperandKind{} }, }, .AddressingModel => &[_]Enumerant{ .{ .name = "Logical", .value = 0, .parameters = &[_]OperandKind{} }, .{ .name = "Physical32", .value = 1, .parameters = &[_]OperandKind{} }, .{ .name = "Physical64", .value = 2, .parameters = &[_]OperandKind{} }, .{ .name = "PhysicalStorageBuffer64", .value = 5348, .parameters = &[_]OperandKind{} }, .{ .name = "PhysicalStorageBuffer64EXT", .value = 5348, .parameters = &[_]OperandKind{} }, }, .MemoryModel => &[_]Enumerant{ .{ .name = "Simple", .value = 0, .parameters = &[_]OperandKind{} }, .{ .name = "GLSL450", .value = 1, .parameters = &[_]OperandKind{} }, .{ .name = "OpenCL", .value = 2, .parameters = &[_]OperandKind{} }, .{ .name = "Vulkan", .value = 3, .parameters = &[_]OperandKind{} }, .{ .name = "VulkanKHR", .value = 3, .parameters = &[_]OperandKind{} }, }, .ExecutionMode => &[_]Enumerant{ .{ .name = "Invocations", .value = 0, .parameters = &[_]OperandKind{.LiteralInteger} }, .{ .name = "SpacingEqual", .value = 1, .parameters = &[_]OperandKind{} }, .{ .name = "SpacingFractionalEven", .value = 2, .parameters = &[_]OperandKind{} }, .{ .name = "SpacingFractionalOdd", .value = 3, .parameters = &[_]OperandKind{} }, .{ .name = "VertexOrderCw", .value = 4, .parameters = &[_]OperandKind{} }, .{ .name = "VertexOrderCcw", .value = 5, .parameters = &[_]OperandKind{} }, .{ .name = "PixelCenterInteger", .value = 6, .parameters = &[_]OperandKind{} }, .{ .name = "OriginUpperLeft", .value = 7, .parameters = &[_]OperandKind{} }, .{ .name = "OriginLowerLeft", .value = 8, .parameters = &[_]OperandKind{} }, .{ .name = "EarlyFragmentTests", .value = 9, .parameters = &[_]OperandKind{} }, .{ .name = "PointMode", .value = 10, .parameters = &[_]OperandKind{} }, .{ .name = "Xfb", .value = 11, .parameters = &[_]OperandKind{} }, .{ .name = "DepthReplacing", .value = 12, .parameters = &[_]OperandKind{} }, .{ .name = "DepthGreater", .value = 14, .parameters = &[_]OperandKind{} }, .{ .name = "DepthLess", .value = 15, .parameters = 
&[_]OperandKind{} }, .{ .name = "DepthUnchanged", .value = 16, .parameters = &[_]OperandKind{} }, .{ .name = "LocalSize", .value = 17, .parameters = &[_]OperandKind{ .LiteralInteger, .LiteralInteger, .LiteralInteger } }, .{ .name = "LocalSizeHint", .value = 18, .parameters = &[_]OperandKind{ .LiteralInteger, .LiteralInteger, .LiteralInteger } }, .{ .name = "InputPoints", .value = 19, .parameters = &[_]OperandKind{} }, .{ .name = "InputLines", .value = 20, .parameters = &[_]OperandKind{} }, .{ .name = "InputLinesAdjacency", .value = 21, .parameters = &[_]OperandKind{} }, .{ .name = "Triangles", .value = 22, .parameters = &[_]OperandKind{} }, .{ .name = "InputTrianglesAdjacency", .value = 23, .parameters = &[_]OperandKind{} }, .{ .name = "Quads", .value = 24, .parameters = &[_]OperandKind{} }, .{ .name = "Isolines", .value = 25, .parameters = &[_]OperandKind{} }, .{ .name = "OutputVertices", .value = 26, .parameters = &[_]OperandKind{.LiteralInteger} }, .{ .name = "OutputPoints", .value = 27, .parameters = &[_]OperandKind{} }, .{ .name = "OutputLineStrip", .value = 28, .parameters = &[_]OperandKind{} }, .{ .name = "OutputTriangleStrip", .value = 29, .parameters = &[_]OperandKind{} }, .{ .name = "VecTypeHint", .value = 30, .parameters = &[_]OperandKind{.LiteralInteger} }, .{ .name = "ContractionOff", .value = 31, .parameters = &[_]OperandKind{} }, .{ .name = "Initializer", .value = 33, .parameters = &[_]OperandKind{} }, .{ .name = "Finalizer", .value = 34, .parameters = &[_]OperandKind{} }, .{ .name = "SubgroupSize", .value = 35, .parameters = &[_]OperandKind{.LiteralInteger} }, .{ .name = "SubgroupsPerWorkgroup", .value = 36, .parameters = &[_]OperandKind{.LiteralInteger} }, .{ .name = "SubgroupsPerWorkgroupId", .value = 37, .parameters = &[_]OperandKind{.IdRef} }, .{ .name = "LocalSizeId", .value = 38, .parameters = &[_]OperandKind{ .IdRef, .IdRef, .IdRef } }, .{ .name = "LocalSizeHintId", .value = 39, .parameters = &[_]OperandKind{ .IdRef, .IdRef, .IdRef } }, .{ .name = "NonCoherentColorAttachmentReadEXT", .value = 4169, .parameters = &[_]OperandKind{} }, .{ .name = "NonCoherentDepthAttachmentReadEXT", .value = 4170, .parameters = &[_]OperandKind{} }, .{ .name = "NonCoherentStencilAttachmentReadEXT", .value = 4171, .parameters = &[_]OperandKind{} }, .{ .name = "SubgroupUniformControlFlowKHR", .value = 4421, .parameters = &[_]OperandKind{} }, .{ .name = "PostDepthCoverage", .value = 4446, .parameters = &[_]OperandKind{} }, .{ .name = "DenormPreserve", .value = 4459, .parameters = &[_]OperandKind{.LiteralInteger} }, .{ .name = "DenormFlushToZero", .value = 4460, .parameters = &[_]OperandKind{.LiteralInteger} }, .{ .name = "SignedZeroInfNanPreserve", .value = 4461, .parameters = &[_]OperandKind{.LiteralInteger} }, .{ .name = "RoundingModeRTE", .value = 4462, .parameters = &[_]OperandKind{.LiteralInteger} }, .{ .name = "RoundingModeRTZ", .value = 4463, .parameters = &[_]OperandKind{.LiteralInteger} }, .{ .name = "EarlyAndLateFragmentTestsAMD", .value = 5017, .parameters = &[_]OperandKind{} }, .{ .name = "StencilRefReplacingEXT", .value = 5027, .parameters = &[_]OperandKind{} }, .{ .name = "StencilRefUnchangedFrontAMD", .value = 5079, .parameters = &[_]OperandKind{} }, .{ .name = "StencilRefGreaterFrontAMD", .value = 5080, .parameters = &[_]OperandKind{} }, .{ .name = "StencilRefLessFrontAMD", .value = 5081, .parameters = &[_]OperandKind{} }, .{ .name = "StencilRefUnchangedBackAMD", .value = 5082, .parameters = &[_]OperandKind{} }, .{ .name = "StencilRefGreaterBackAMD", .value = 5083, 
.parameters = &[_]OperandKind{} }, .{ .name = "StencilRefLessBackAMD", .value = 5084, .parameters = &[_]OperandKind{} }, .{ .name = "OutputLinesNV", .value = 5269, .parameters = &[_]OperandKind{} }, .{ .name = "OutputLinesEXT", .value = 5269, .parameters = &[_]OperandKind{} }, .{ .name = "OutputPrimitivesNV", .value = 5270, .parameters = &[_]OperandKind{.LiteralInteger} }, .{ .name = "OutputPrimitivesEXT", .value = 5270, .parameters = &[_]OperandKind{.LiteralInteger} }, .{ .name = "DerivativeGroupQuadsNV", .value = 5289, .parameters = &[_]OperandKind{} }, .{ .name = "DerivativeGroupLinearNV", .value = 5290, .parameters = &[_]OperandKind{} }, .{ .name = "OutputTrianglesNV", .value = 5298, .parameters = &[_]OperandKind{} }, .{ .name = "OutputTrianglesEXT", .value = 5298, .parameters = &[_]OperandKind{} }, .{ .name = "PixelInterlockOrderedEXT", .value = 5366, .parameters = &[_]OperandKind{} }, .{ .name = "PixelInterlockUnorderedEXT", .value = 5367, .parameters = &[_]OperandKind{} }, .{ .name = "SampleInterlockOrderedEXT", .value = 5368, .parameters = &[_]OperandKind{} }, .{ .name = "SampleInterlockUnorderedEXT", .value = 5369, .parameters = &[_]OperandKind{} }, .{ .name = "ShadingRateInterlockOrderedEXT", .value = 5370, .parameters = &[_]OperandKind{} }, .{ .name = "ShadingRateInterlockUnorderedEXT", .value = 5371, .parameters = &[_]OperandKind{} }, .{ .name = "SharedLocalMemorySizeINTEL", .value = 5618, .parameters = &[_]OperandKind{.LiteralInteger} }, .{ .name = "RoundingModeRTPINTEL", .value = 5620, .parameters = &[_]OperandKind{.LiteralInteger} }, .{ .name = "RoundingModeRTNINTEL", .value = 5621, .parameters = &[_]OperandKind{.LiteralInteger} }, .{ .name = "FloatingPointModeALTINTEL", .value = 5622, .parameters = &[_]OperandKind{.LiteralInteger} }, .{ .name = "FloatingPointModeIEEEINTEL", .value = 5623, .parameters = &[_]OperandKind{.LiteralInteger} }, .{ .name = "MaxWorkgroupSizeINTEL", .value = 5893, .parameters = &[_]OperandKind{ .LiteralInteger, .LiteralInteger, .LiteralInteger } }, .{ .name = "MaxWorkDimINTEL", .value = 5894, .parameters = &[_]OperandKind{.LiteralInteger} }, .{ .name = "NoGlobalOffsetINTEL", .value = 5895, .parameters = &[_]OperandKind{} }, .{ .name = "NumSIMDWorkitemsINTEL", .value = 5896, .parameters = &[_]OperandKind{.LiteralInteger} }, .{ .name = "SchedulerTargetFmaxMhzINTEL", .value = 5903, .parameters = &[_]OperandKind{.LiteralInteger} }, .{ .name = "StreamingInterfaceINTEL", .value = 6154, .parameters = &[_]OperandKind{.LiteralInteger} }, .{ .name = "RegisterMapInterfaceINTEL", .value = 6160, .parameters = &[_]OperandKind{.LiteralInteger} }, .{ .name = "NamedBarrierCountINTEL", .value = 6417, .parameters = &[_]OperandKind{.LiteralInteger} }, }, .StorageClass => &[_]Enumerant{ .{ .name = "UniformConstant", .value = 0, .parameters = &[_]OperandKind{} }, .{ .name = "Input", .value = 1, .parameters = &[_]OperandKind{} }, .{ .name = "Uniform", .value = 2, .parameters = &[_]OperandKind{} }, .{ .name = "Output", .value = 3, .parameters = &[_]OperandKind{} }, .{ .name = "Workgroup", .value = 4, .parameters = &[_]OperandKind{} }, .{ .name = "CrossWorkgroup", .value = 5, .parameters = &[_]OperandKind{} }, .{ .name = "Private", .value = 6, .parameters = &[_]OperandKind{} }, .{ .name = "Function", .value = 7, .parameters = &[_]OperandKind{} }, .{ .name = "Generic", .value = 8, .parameters = &[_]OperandKind{} }, .{ .name = "PushConstant", .value = 9, .parameters = &[_]OperandKind{} }, .{ .name = "AtomicCounter", .value = 10, .parameters = &[_]OperandKind{} }, .{ .name = 
"Image", .value = 11, .parameters = &[_]OperandKind{} }, .{ .name = "StorageBuffer", .value = 12, .parameters = &[_]OperandKind{} }, .{ .name = "TileImageEXT", .value = 4172, .parameters = &[_]OperandKind{} }, .{ .name = "CallableDataNV", .value = 5328, .parameters = &[_]OperandKind{} }, .{ .name = "CallableDataKHR", .value = 5328, .parameters = &[_]OperandKind{} }, .{ .name = "IncomingCallableDataNV", .value = 5329, .parameters = &[_]OperandKind{} }, .{ .name = "IncomingCallableDataKHR", .value = 5329, .parameters = &[_]OperandKind{} }, .{ .name = "RayPayloadNV", .value = 5338, .parameters = &[_]OperandKind{} }, .{ .name = "RayPayloadKHR", .value = 5338, .parameters = &[_]OperandKind{} }, .{ .name = "HitAttributeNV", .value = 5339, .parameters = &[_]OperandKind{} }, .{ .name = "HitAttributeKHR", .value = 5339, .parameters = &[_]OperandKind{} }, .{ .name = "IncomingRayPayloadNV", .value = 5342, .parameters = &[_]OperandKind{} }, .{ .name = "IncomingRayPayloadKHR", .value = 5342, .parameters = &[_]OperandKind{} }, .{ .name = "ShaderRecordBufferNV", .value = 5343, .parameters = &[_]OperandKind{} }, .{ .name = "ShaderRecordBufferKHR", .value = 5343, .parameters = &[_]OperandKind{} }, .{ .name = "PhysicalStorageBuffer", .value = 5349, .parameters = &[_]OperandKind{} }, .{ .name = "PhysicalStorageBufferEXT", .value = 5349, .parameters = &[_]OperandKind{} }, .{ .name = "HitObjectAttributeNV", .value = 5385, .parameters = &[_]OperandKind{} }, .{ .name = "TaskPayloadWorkgroupEXT", .value = 5402, .parameters = &[_]OperandKind{} }, .{ .name = "CodeSectionINTEL", .value = 5605, .parameters = &[_]OperandKind{} }, .{ .name = "DeviceOnlyINTEL", .value = 5936, .parameters = &[_]OperandKind{} }, .{ .name = "HostOnlyINTEL", .value = 5937, .parameters = &[_]OperandKind{} }, }, .Dim => &[_]Enumerant{ .{ .name = "1D", .value = 0, .parameters = &[_]OperandKind{} }, .{ .name = "2D", .value = 1, .parameters = &[_]OperandKind{} }, .{ .name = "3D", .value = 2, .parameters = &[_]OperandKind{} }, .{ .name = "Cube", .value = 3, .parameters = &[_]OperandKind{} }, .{ .name = "Rect", .value = 4, .parameters = &[_]OperandKind{} }, .{ .name = "Buffer", .value = 5, .parameters = &[_]OperandKind{} }, .{ .name = "SubpassData", .value = 6, .parameters = &[_]OperandKind{} }, .{ .name = "TileImageDataEXT", .value = 4173, .parameters = &[_]OperandKind{} }, }, .SamplerAddressingMode => &[_]Enumerant{ .{ .name = "None", .value = 0, .parameters = &[_]OperandKind{} }, .{ .name = "ClampToEdge", .value = 1, .parameters = &[_]OperandKind{} }, .{ .name = "Clamp", .value = 2, .parameters = &[_]OperandKind{} }, .{ .name = "Repeat", .value = 3, .parameters = &[_]OperandKind{} }, .{ .name = "RepeatMirrored", .value = 4, .parameters = &[_]OperandKind{} }, }, .SamplerFilterMode => &[_]Enumerant{ .{ .name = "Nearest", .value = 0, .parameters = &[_]OperandKind{} }, .{ .name = "Linear", .value = 1, .parameters = &[_]OperandKind{} }, }, .ImageFormat => &[_]Enumerant{ .{ .name = "Unknown", .value = 0, .parameters = &[_]OperandKind{} }, .{ .name = "Rgba32f", .value = 1, .parameters = &[_]OperandKind{} }, .{ .name = "Rgba16f", .value = 2, .parameters = &[_]OperandKind{} }, .{ .name = "R32f", .value = 3, .parameters = &[_]OperandKind{} }, .{ .name = "Rgba8", .value = 4, .parameters = &[_]OperandKind{} }, .{ .name = "Rgba8Snorm", .value = 5, .parameters = &[_]OperandKind{} }, .{ .name = "Rg32f", .value = 6, .parameters = &[_]OperandKind{} }, .{ .name = "Rg16f", .value = 7, .parameters = &[_]OperandKind{} }, .{ .name = "R11fG11fB10f", .value = 8, 
.parameters = &[_]OperandKind{} }, .{ .name = "R16f", .value = 9, .parameters = &[_]OperandKind{} }, .{ .name = "Rgba16", .value = 10, .parameters = &[_]OperandKind{} }, .{ .name = "Rgb10A2", .value = 11, .parameters = &[_]OperandKind{} }, .{ .name = "Rg16", .value = 12, .parameters = &[_]OperandKind{} }, .{ .name = "Rg8", .value = 13, .parameters = &[_]OperandKind{} }, .{ .name = "R16", .value = 14, .parameters = &[_]OperandKind{} }, .{ .name = "R8", .value = 15, .parameters = &[_]OperandKind{} }, .{ .name = "Rgba16Snorm", .value = 16, .parameters = &[_]OperandKind{} }, .{ .name = "Rg16Snorm", .value = 17, .parameters = &[_]OperandKind{} }, .{ .name = "Rg8Snorm", .value = 18, .parameters = &[_]OperandKind{} }, .{ .name = "R16Snorm", .value = 19, .parameters = &[_]OperandKind{} }, .{ .name = "R8Snorm", .value = 20, .parameters = &[_]OperandKind{} }, .{ .name = "Rgba32i", .value = 21, .parameters = &[_]OperandKind{} }, .{ .name = "Rgba16i", .value = 22, .parameters = &[_]OperandKind{} }, .{ .name = "Rgba8i", .value = 23, .parameters = &[_]OperandKind{} }, .{ .name = "R32i", .value = 24, .parameters = &[_]OperandKind{} }, .{ .name = "Rg32i", .value = 25, .parameters = &[_]OperandKind{} }, .{ .name = "Rg16i", .value = 26, .parameters = &[_]OperandKind{} }, .{ .name = "Rg8i", .value = 27, .parameters = &[_]OperandKind{} }, .{ .name = "R16i", .value = 28, .parameters = &[_]OperandKind{} }, .{ .name = "R8i", .value = 29, .parameters = &[_]OperandKind{} }, .{ .name = "Rgba32ui", .value = 30, .parameters = &[_]OperandKind{} }, .{ .name = "Rgba16ui", .value = 31, .parameters = &[_]OperandKind{} }, .{ .name = "Rgba8ui", .value = 32, .parameters = &[_]OperandKind{} }, .{ .name = "R32ui", .value = 33, .parameters = &[_]OperandKind{} }, .{ .name = "Rgb10a2ui", .value = 34, .parameters = &[_]OperandKind{} }, .{ .name = "Rg32ui", .value = 35, .parameters = &[_]OperandKind{} }, .{ .name = "Rg16ui", .value = 36, .parameters = &[_]OperandKind{} }, .{ .name = "Rg8ui", .value = 37, .parameters = &[_]OperandKind{} }, .{ .name = "R16ui", .value = 38, .parameters = &[_]OperandKind{} }, .{ .name = "R8ui", .value = 39, .parameters = &[_]OperandKind{} }, .{ .name = "R64ui", .value = 40, .parameters = &[_]OperandKind{} }, .{ .name = "R64i", .value = 41, .parameters = &[_]OperandKind{} }, }, .ImageChannelOrder => &[_]Enumerant{ .{ .name = "R", .value = 0, .parameters = &[_]OperandKind{} }, .{ .name = "A", .value = 1, .parameters = &[_]OperandKind{} }, .{ .name = "RG", .value = 2, .parameters = &[_]OperandKind{} }, .{ .name = "RA", .value = 3, .parameters = &[_]OperandKind{} }, .{ .name = "RGB", .value = 4, .parameters = &[_]OperandKind{} }, .{ .name = "RGBA", .value = 5, .parameters = &[_]OperandKind{} }, .{ .name = "BGRA", .value = 6, .parameters = &[_]OperandKind{} }, .{ .name = "ARGB", .value = 7, .parameters = &[_]OperandKind{} }, .{ .name = "Intensity", .value = 8, .parameters = &[_]OperandKind{} }, .{ .name = "Luminance", .value = 9, .parameters = &[_]OperandKind{} }, .{ .name = "Rx", .value = 10, .parameters = &[_]OperandKind{} }, .{ .name = "RGx", .value = 11, .parameters = &[_]OperandKind{} }, .{ .name = "RGBx", .value = 12, .parameters = &[_]OperandKind{} }, .{ .name = "Depth", .value = 13, .parameters = &[_]OperandKind{} }, .{ .name = "DepthStencil", .value = 14, .parameters = &[_]OperandKind{} }, .{ .name = "sRGB", .value = 15, .parameters = &[_]OperandKind{} }, .{ .name = "sRGBx", .value = 16, .parameters = &[_]OperandKind{} }, .{ .name = "sRGBA", .value = 17, .parameters = &[_]OperandKind{} }, .{ .name 
= "sBGRA", .value = 18, .parameters = &[_]OperandKind{} }, .{ .name = "ABGR", .value = 19, .parameters = &[_]OperandKind{} }, }, .ImageChannelDataType => &[_]Enumerant{ .{ .name = "SnormInt8", .value = 0, .parameters = &[_]OperandKind{} }, .{ .name = "SnormInt16", .value = 1, .parameters = &[_]OperandKind{} }, .{ .name = "UnormInt8", .value = 2, .parameters = &[_]OperandKind{} }, .{ .name = "UnormInt16", .value = 3, .parameters = &[_]OperandKind{} }, .{ .name = "UnormShort565", .value = 4, .parameters = &[_]OperandKind{} }, .{ .name = "UnormShort555", .value = 5, .parameters = &[_]OperandKind{} }, .{ .name = "UnormInt101010", .value = 6, .parameters = &[_]OperandKind{} }, .{ .name = "SignedInt8", .value = 7, .parameters = &[_]OperandKind{} }, .{ .name = "SignedInt16", .value = 8, .parameters = &[_]OperandKind{} }, .{ .name = "SignedInt32", .value = 9, .parameters = &[_]OperandKind{} }, .{ .name = "UnsignedInt8", .value = 10, .parameters = &[_]OperandKind{} }, .{ .name = "UnsignedInt16", .value = 11, .parameters = &[_]OperandKind{} }, .{ .name = "UnsignedInt32", .value = 12, .parameters = &[_]OperandKind{} }, .{ .name = "HalfFloat", .value = 13, .parameters = &[_]OperandKind{} }, .{ .name = "Float", .value = 14, .parameters = &[_]OperandKind{} }, .{ .name = "UnormInt24", .value = 15, .parameters = &[_]OperandKind{} }, .{ .name = "UnormInt101010_2", .value = 16, .parameters = &[_]OperandKind{} }, }, .FPRoundingMode => &[_]Enumerant{ .{ .name = "RTE", .value = 0, .parameters = &[_]OperandKind{} }, .{ .name = "RTZ", .value = 1, .parameters = &[_]OperandKind{} }, .{ .name = "RTP", .value = 2, .parameters = &[_]OperandKind{} }, .{ .name = "RTN", .value = 3, .parameters = &[_]OperandKind{} }, }, .FPDenormMode => &[_]Enumerant{ .{ .name = "Preserve", .value = 0, .parameters = &[_]OperandKind{} }, .{ .name = "FlushToZero", .value = 1, .parameters = &[_]OperandKind{} }, }, .QuantizationModes => &[_]Enumerant{ .{ .name = "TRN", .value = 0, .parameters = &[_]OperandKind{} }, .{ .name = "TRN_ZERO", .value = 1, .parameters = &[_]OperandKind{} }, .{ .name = "RND", .value = 2, .parameters = &[_]OperandKind{} }, .{ .name = "RND_ZERO", .value = 3, .parameters = &[_]OperandKind{} }, .{ .name = "RND_INF", .value = 4, .parameters = &[_]OperandKind{} }, .{ .name = "RND_MIN_INF", .value = 5, .parameters = &[_]OperandKind{} }, .{ .name = "RND_CONV", .value = 6, .parameters = &[_]OperandKind{} }, .{ .name = "RND_CONV_ODD", .value = 7, .parameters = &[_]OperandKind{} }, }, .FPOperationMode => &[_]Enumerant{ .{ .name = "IEEE", .value = 0, .parameters = &[_]OperandKind{} }, .{ .name = "ALT", .value = 1, .parameters = &[_]OperandKind{} }, }, .OverflowModes => &[_]Enumerant{ .{ .name = "WRAP", .value = 0, .parameters = &[_]OperandKind{} }, .{ .name = "SAT", .value = 1, .parameters = &[_]OperandKind{} }, .{ .name = "SAT_ZERO", .value = 2, .parameters = &[_]OperandKind{} }, .{ .name = "SAT_SYM", .value = 3, .parameters = &[_]OperandKind{} }, }, .LinkageType => &[_]Enumerant{ .{ .name = "Export", .value = 0, .parameters = &[_]OperandKind{} }, .{ .name = "Import", .value = 1, .parameters = &[_]OperandKind{} }, .{ .name = "LinkOnceODR", .value = 2, .parameters = &[_]OperandKind{} }, }, .AccessQualifier => &[_]Enumerant{ .{ .name = "ReadOnly", .value = 0, .parameters = &[_]OperandKind{} }, .{ .name = "WriteOnly", .value = 1, .parameters = &[_]OperandKind{} }, .{ .name = "ReadWrite", .value = 2, .parameters = &[_]OperandKind{} }, }, .FunctionParameterAttribute => &[_]Enumerant{ .{ .name = "Zext", .value = 0, .parameters = 
&[_]OperandKind{} }, .{ .name = "Sext", .value = 1, .parameters = &[_]OperandKind{} }, .{ .name = "ByVal", .value = 2, .parameters = &[_]OperandKind{} }, .{ .name = "Sret", .value = 3, .parameters = &[_]OperandKind{} }, .{ .name = "NoAlias", .value = 4, .parameters = &[_]OperandKind{} }, .{ .name = "NoCapture", .value = 5, .parameters = &[_]OperandKind{} }, .{ .name = "NoWrite", .value = 6, .parameters = &[_]OperandKind{} }, .{ .name = "NoReadWrite", .value = 7, .parameters = &[_]OperandKind{} }, .{ .name = "RuntimeAlignedINTEL", .value = 5940, .parameters = &[_]OperandKind{} }, }, .Decoration => &[_]Enumerant{ .{ .name = "RelaxedPrecision", .value = 0, .parameters = &[_]OperandKind{} }, .{ .name = "SpecId", .value = 1, .parameters = &[_]OperandKind{.LiteralInteger} }, .{ .name = "Block", .value = 2, .parameters = &[_]OperandKind{} }, .{ .name = "BufferBlock", .value = 3, .parameters = &[_]OperandKind{} }, .{ .name = "RowMajor", .value = 4, .parameters = &[_]OperandKind{} }, .{ .name = "ColMajor", .value = 5, .parameters = &[_]OperandKind{} }, .{ .name = "ArrayStride", .value = 6, .parameters = &[_]OperandKind{.LiteralInteger} }, .{ .name = "MatrixStride", .value = 7, .parameters = &[_]OperandKind{.LiteralInteger} }, .{ .name = "GLSLShared", .value = 8, .parameters = &[_]OperandKind{} }, .{ .name = "GLSLPacked", .value = 9, .parameters = &[_]OperandKind{} }, .{ .name = "CPacked", .value = 10, .parameters = &[_]OperandKind{} }, .{ .name = "BuiltIn", .value = 11, .parameters = &[_]OperandKind{.BuiltIn} }, .{ .name = "NoPerspective", .value = 13, .parameters = &[_]OperandKind{} }, .{ .name = "Flat", .value = 14, .parameters = &[_]OperandKind{} }, .{ .name = "Patch", .value = 15, .parameters = &[_]OperandKind{} }, .{ .name = "Centroid", .value = 16, .parameters = &[_]OperandKind{} }, .{ .name = "Sample", .value = 17, .parameters = &[_]OperandKind{} }, .{ .name = "Invariant", .value = 18, .parameters = &[_]OperandKind{} }, .{ .name = "Restrict", .value = 19, .parameters = &[_]OperandKind{} }, .{ .name = "Aliased", .value = 20, .parameters = &[_]OperandKind{} }, .{ .name = "Volatile", .value = 21, .parameters = &[_]OperandKind{} }, .{ .name = "Constant", .value = 22, .parameters = &[_]OperandKind{} }, .{ .name = "Coherent", .value = 23, .parameters = &[_]OperandKind{} }, .{ .name = "NonWritable", .value = 24, .parameters = &[_]OperandKind{} }, .{ .name = "NonReadable", .value = 25, .parameters = &[_]OperandKind{} }, .{ .name = "Uniform", .value = 26, .parameters = &[_]OperandKind{} }, .{ .name = "UniformId", .value = 27, .parameters = &[_]OperandKind{.IdScope} }, .{ .name = "SaturatedConversion", .value = 28, .parameters = &[_]OperandKind{} }, .{ .name = "Stream", .value = 29, .parameters = &[_]OperandKind{.LiteralInteger} }, .{ .name = "Location", .value = 30, .parameters = &[_]OperandKind{.LiteralInteger} }, .{ .name = "Component", .value = 31, .parameters = &[_]OperandKind{.LiteralInteger} }, .{ .name = "Index", .value = 32, .parameters = &[_]OperandKind{.LiteralInteger} }, .{ .name = "Binding", .value = 33, .parameters = &[_]OperandKind{.LiteralInteger} }, .{ .name = "DescriptorSet", .value = 34, .parameters = &[_]OperandKind{.LiteralInteger} }, .{ .name = "Offset", .value = 35, .parameters = &[_]OperandKind{.LiteralInteger} }, .{ .name = "XfbBuffer", .value = 36, .parameters = &[_]OperandKind{.LiteralInteger} }, .{ .name = "XfbStride", .value = 37, .parameters = &[_]OperandKind{.LiteralInteger} }, .{ .name = "FuncParamAttr", .value = 38, .parameters = 
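    // A descriptive note (not in the generated source): for parameterized
    // enumerants, `.parameters` lists the OperandKinds that follow the
    // enumerant word in the instruction stream — e.g. `SpecId` above takes
    // one LiteralInteger, while `LinkageAttributes` takes a LiteralString
    // and a LinkageType.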
&[_]OperandKind{.FunctionParameterAttribute} }, .{ .name = "FPRoundingMode", .value = 39, .parameters = &[_]OperandKind{.FPRoundingMode} }, .{ .name = "FPFastMathMode", .value = 40, .parameters = &[_]OperandKind{.FPFastMathMode} }, .{ .name = "LinkageAttributes", .value = 41, .parameters = &[_]OperandKind{ .LiteralString, .LinkageType } }, .{ .name = "NoContraction", .value = 42, .parameters = &[_]OperandKind{} }, .{ .name = "InputAttachmentIndex", .value = 43, .parameters = &[_]OperandKind{.LiteralInteger} }, .{ .name = "Alignment", .value = 44, .parameters = &[_]OperandKind{.LiteralInteger} }, .{ .name = "MaxByteOffset", .value = 45, .parameters = &[_]OperandKind{.LiteralInteger} }, .{ .name = "AlignmentId", .value = 46, .parameters = &[_]OperandKind{.IdRef} }, .{ .name = "MaxByteOffsetId", .value = 47, .parameters = &[_]OperandKind{.IdRef} }, .{ .name = "NoSignedWrap", .value = 4469, .parameters = &[_]OperandKind{} }, .{ .name = "NoUnsignedWrap", .value = 4470, .parameters = &[_]OperandKind{} }, .{ .name = "WeightTextureQCOM", .value = 4487, .parameters = &[_]OperandKind{} }, .{ .name = "BlockMatchTextureQCOM", .value = 4488, .parameters = &[_]OperandKind{} }, .{ .name = "ExplicitInterpAMD", .value = 4999, .parameters = &[_]OperandKind{} }, .{ .name = "OverrideCoverageNV", .value = 5248, .parameters = &[_]OperandKind{} }, .{ .name = "PassthroughNV", .value = 5250, .parameters = &[_]OperandKind{} }, .{ .name = "ViewportRelativeNV", .value = 5252, .parameters = &[_]OperandKind{} }, .{ .name = "SecondaryViewportRelativeNV", .value = 5256, .parameters = &[_]OperandKind{.LiteralInteger} }, .{ .name = "PerPrimitiveNV", .value = 5271, .parameters = &[_]OperandKind{} }, .{ .name = "PerPrimitiveEXT", .value = 5271, .parameters = &[_]OperandKind{} }, .{ .name = "PerViewNV", .value = 5272, .parameters = &[_]OperandKind{} }, .{ .name = "PerTaskNV", .value = 5273, .parameters = &[_]OperandKind{} }, .{ .name = "PerVertexKHR", .value = 5285, .parameters = &[_]OperandKind{} }, .{ .name = "PerVertexNV", .value = 5285, .parameters = &[_]OperandKind{} }, .{ .name = "NonUniform", .value = 5300, .parameters = &[_]OperandKind{} }, .{ .name = "NonUniformEXT", .value = 5300, .parameters = &[_]OperandKind{} }, .{ .name = "RestrictPointer", .value = 5355, .parameters = &[_]OperandKind{} }, .{ .name = "RestrictPointerEXT", .value = 5355, .parameters = &[_]OperandKind{} }, .{ .name = "AliasedPointer", .value = 5356, .parameters = &[_]OperandKind{} }, .{ .name = "AliasedPointerEXT", .value = 5356, .parameters = &[_]OperandKind{} }, .{ .name = "HitObjectShaderRecordBufferNV", .value = 5386, .parameters = &[_]OperandKind{} }, .{ .name = "BindlessSamplerNV", .value = 5398, .parameters = &[_]OperandKind{} }, .{ .name = "BindlessImageNV", .value = 5399, .parameters = &[_]OperandKind{} }, .{ .name = "BoundSamplerNV", .value = 5400, .parameters = &[_]OperandKind{} }, .{ .name = "BoundImageNV", .value = 5401, .parameters = &[_]OperandKind{} }, .{ .name = "SIMTCallINTEL", .value = 5599, .parameters = &[_]OperandKind{.LiteralInteger} }, .{ .name = "ReferencedIndirectlyINTEL", .value = 5602, .parameters = &[_]OperandKind{} }, .{ .name = "ClobberINTEL", .value = 5607, .parameters = &[_]OperandKind{.LiteralString} }, .{ .name = "SideEffectsINTEL", .value = 5608, .parameters = &[_]OperandKind{} }, .{ .name = "VectorComputeVariableINTEL", .value = 5624, .parameters = &[_]OperandKind{} }, .{ .name = "FuncParamIOKindINTEL", .value = 5625, .parameters = &[_]OperandKind{.LiteralInteger} }, .{ .name = "VectorComputeFunctionINTEL", 
.value = 5626, .parameters = &[_]OperandKind{} }, .{ .name = "StackCallINTEL", .value = 5627, .parameters = &[_]OperandKind{} }, .{ .name = "GlobalVariableOffsetINTEL", .value = 5628, .parameters = &[_]OperandKind{.LiteralInteger} }, .{ .name = "CounterBuffer", .value = 5634, .parameters = &[_]OperandKind{.IdRef} }, .{ .name = "HlslCounterBufferGOOGLE", .value = 5634, .parameters = &[_]OperandKind{.IdRef} }, .{ .name = "UserSemantic", .value = 5635, .parameters = &[_]OperandKind{.LiteralString} }, .{ .name = "HlslSemanticGOOGLE", .value = 5635, .parameters = &[_]OperandKind{.LiteralString} }, .{ .name = "UserTypeGOOGLE", .value = 5636, .parameters = &[_]OperandKind{.LiteralString} }, .{ .name = "FunctionRoundingModeINTEL", .value = 5822, .parameters = &[_]OperandKind{ .LiteralInteger, .FPRoundingMode } }, .{ .name = "FunctionDenormModeINTEL", .value = 5823, .parameters = &[_]OperandKind{ .LiteralInteger, .FPDenormMode } }, .{ .name = "RegisterINTEL", .value = 5825, .parameters = &[_]OperandKind{} }, .{ .name = "MemoryINTEL", .value = 5826, .parameters = &[_]OperandKind{.LiteralString} }, .{ .name = "NumbanksINTEL", .value = 5827, .parameters = &[_]OperandKind{.LiteralInteger} }, .{ .name = "BankwidthINTEL", .value = 5828, .parameters = &[_]OperandKind{.LiteralInteger} }, .{ .name = "MaxPrivateCopiesINTEL", .value = 5829, .parameters = &[_]OperandKind{.LiteralInteger} }, .{ .name = "SinglepumpINTEL", .value = 5830, .parameters = &[_]OperandKind{} }, .{ .name = "DoublepumpINTEL", .value = 5831, .parameters = &[_]OperandKind{} }, .{ .name = "MaxReplicatesINTEL", .value = 5832, .parameters = &[_]OperandKind{.LiteralInteger} }, .{ .name = "SimpleDualPortINTEL", .value = 5833, .parameters = &[_]OperandKind{} }, .{ .name = "MergeINTEL", .value = 5834, .parameters = &[_]OperandKind{ .LiteralString, .LiteralString } }, .{ .name = "BankBitsINTEL", .value = 5835, .parameters = &[_]OperandKind{.LiteralInteger} }, .{ .name = "ForcePow2DepthINTEL", .value = 5836, .parameters = &[_]OperandKind{.LiteralInteger} }, .{ .name = "BurstCoalesceINTEL", .value = 5899, .parameters = &[_]OperandKind{} }, .{ .name = "CacheSizeINTEL", .value = 5900, .parameters = &[_]OperandKind{.LiteralInteger} }, .{ .name = "DontStaticallyCoalesceINTEL", .value = 5901, .parameters = &[_]OperandKind{} }, .{ .name = "PrefetchINTEL", .value = 5902, .parameters = &[_]OperandKind{.LiteralInteger} }, .{ .name = "StallEnableINTEL", .value = 5905, .parameters = &[_]OperandKind{} }, .{ .name = "FuseLoopsInFunctionINTEL", .value = 5907, .parameters = &[_]OperandKind{} }, .{ .name = "MathOpDSPModeINTEL", .value = 5909, .parameters = &[_]OperandKind{ .LiteralInteger, .LiteralInteger } }, .{ .name = "AliasScopeINTEL", .value = 5914, .parameters = &[_]OperandKind{.IdRef} }, .{ .name = "NoAliasINTEL", .value = 5915, .parameters = &[_]OperandKind{.IdRef} }, .{ .name = "InitiationIntervalINTEL", .value = 5917, .parameters = &[_]OperandKind{.LiteralInteger} }, .{ .name = "MaxConcurrencyINTEL", .value = 5918, .parameters = &[_]OperandKind{.LiteralInteger} }, .{ .name = "PipelineEnableINTEL", .value = 5919, .parameters = &[_]OperandKind{.LiteralInteger} }, .{ .name = "BufferLocationINTEL", .value = 5921, .parameters = &[_]OperandKind{.LiteralInteger} }, .{ .name = "IOPipeStorageINTEL", .value = 5944, .parameters = &[_]OperandKind{.LiteralInteger} }, .{ .name = "FunctionFloatingPointModeINTEL", .value = 6080, .parameters = &[_]OperandKind{ .LiteralInteger, .FPOperationMode } }, .{ .name = "SingleElementVectorINTEL", .value = 6085, .parameters = 
&[_]OperandKind{} }, .{ .name = "VectorComputeCallableFunctionINTEL", .value = 6087, .parameters = &[_]OperandKind{} }, .{ .name = "MediaBlockIOINTEL", .value = 6140, .parameters = &[_]OperandKind{} }, .{ .name = "LatencyControlLabelINTEL", .value = 6172, .parameters = &[_]OperandKind{.LiteralInteger} }, .{ .name = "LatencyControlConstraintINTEL", .value = 6173, .parameters = &[_]OperandKind{ .LiteralInteger, .LiteralInteger, .LiteralInteger } }, .{ .name = "ConduitKernelArgumentINTEL", .value = 6175, .parameters = &[_]OperandKind{} }, .{ .name = "RegisterMapKernelArgumentINTEL", .value = 6176, .parameters = &[_]OperandKind{} }, .{ .name = "MMHostInterfaceAddressWidthINTEL", .value = 6177, .parameters = &[_]OperandKind{.LiteralInteger} }, .{ .name = "MMHostInterfaceDataWidthINTEL", .value = 6178, .parameters = &[_]OperandKind{.LiteralInteger} }, .{ .name = "MMHostInterfaceLatencyINTEL", .value = 6179, .parameters = &[_]OperandKind{.LiteralInteger} }, .{ .name = "MMHostInterfaceReadWriteModeINTEL", .value = 6180, .parameters = &[_]OperandKind{.AccessQualifier} }, .{ .name = "MMHostInterfaceMaxBurstINTEL", .value = 6181, .parameters = &[_]OperandKind{.LiteralInteger} }, .{ .name = "MMHostInterfaceWaitRequestINTEL", .value = 6182, .parameters = &[_]OperandKind{.LiteralInteger} }, .{ .name = "StableKernelArgumentINTEL", .value = 6183, .parameters = &[_]OperandKind{} }, }, .BuiltIn => &[_]Enumerant{ .{ .name = "Position", .value = 0, .parameters = &[_]OperandKind{} }, .{ .name = "PointSize", .value = 1, .parameters = &[_]OperandKind{} }, .{ .name = "ClipDistance", .value = 3, .parameters = &[_]OperandKind{} }, .{ .name = "CullDistance", .value = 4, .parameters = &[_]OperandKind{} }, .{ .name = "VertexId", .value = 5, .parameters = &[_]OperandKind{} }, .{ .name = "InstanceId", .value = 6, .parameters = &[_]OperandKind{} }, .{ .name = "PrimitiveId", .value = 7, .parameters = &[_]OperandKind{} }, .{ .name = "InvocationId", .value = 8, .parameters = &[_]OperandKind{} }, .{ .name = "Layer", .value = 9, .parameters = &[_]OperandKind{} }, .{ .name = "ViewportIndex", .value = 10, .parameters = &[_]OperandKind{} }, .{ .name = "TessLevelOuter", .value = 11, .parameters = &[_]OperandKind{} }, .{ .name = "TessLevelInner", .value = 12, .parameters = &[_]OperandKind{} }, .{ .name = "TessCoord", .value = 13, .parameters = &[_]OperandKind{} }, .{ .name = "PatchVertices", .value = 14, .parameters = &[_]OperandKind{} }, .{ .name = "FragCoord", .value = 15, .parameters = &[_]OperandKind{} }, .{ .name = "PointCoord", .value = 16, .parameters = &[_]OperandKind{} }, .{ .name = "FrontFacing", .value = 17, .parameters = &[_]OperandKind{} }, .{ .name = "SampleId", .value = 18, .parameters = &[_]OperandKind{} }, .{ .name = "SamplePosition", .value = 19, .parameters = &[_]OperandKind{} }, .{ .name = "SampleMask", .value = 20, .parameters = &[_]OperandKind{} }, .{ .name = "FragDepth", .value = 22, .parameters = &[_]OperandKind{} }, .{ .name = "HelperInvocation", .value = 23, .parameters = &[_]OperandKind{} }, .{ .name = "NumWorkgroups", .value = 24, .parameters = &[_]OperandKind{} }, .{ .name = "WorkgroupSize", .value = 25, .parameters = &[_]OperandKind{} }, .{ .name = "WorkgroupId", .value = 26, .parameters = &[_]OperandKind{} }, .{ .name = "LocalInvocationId", .value = 27, .parameters = &[_]OperandKind{} }, .{ .name = "GlobalInvocationId", .value = 28, .parameters = &[_]OperandKind{} }, .{ .name = "LocalInvocationIndex", .value = 29, .parameters = &[_]OperandKind{} }, .{ .name = "WorkDim", .value = 30, .parameters = 
&[_]OperandKind{} }, .{ .name = "GlobalSize", .value = 31, .parameters = &[_]OperandKind{} }, .{ .name = "EnqueuedWorkgroupSize", .value = 32, .parameters = &[_]OperandKind{} }, .{ .name = "GlobalOffset", .value = 33, .parameters = &[_]OperandKind{} }, .{ .name = "GlobalLinearId", .value = 34, .parameters = &[_]OperandKind{} }, .{ .name = "SubgroupSize", .value = 36, .parameters = &[_]OperandKind{} }, .{ .name = "SubgroupMaxSize", .value = 37, .parameters = &[_]OperandKind{} }, .{ .name = "NumSubgroups", .value = 38, .parameters = &[_]OperandKind{} }, .{ .name = "NumEnqueuedSubgroups", .value = 39, .parameters = &[_]OperandKind{} }, .{ .name = "SubgroupId", .value = 40, .parameters = &[_]OperandKind{} }, .{ .name = "SubgroupLocalInvocationId", .value = 41, .parameters = &[_]OperandKind{} }, .{ .name = "VertexIndex", .value = 42, .parameters = &[_]OperandKind{} }, .{ .name = "InstanceIndex", .value = 43, .parameters = &[_]OperandKind{} }, .{ .name = "CoreIDARM", .value = 4160, .parameters = &[_]OperandKind{} }, .{ .name = "CoreCountARM", .value = 4161, .parameters = &[_]OperandKind{} }, .{ .name = "CoreMaxIDARM", .value = 4162, .parameters = &[_]OperandKind{} }, .{ .name = "WarpIDARM", .value = 4163, .parameters = &[_]OperandKind{} }, .{ .name = "WarpMaxIDARM", .value = 4164, .parameters = &[_]OperandKind{} }, .{ .name = "SubgroupEqMask", .value = 4416, .parameters = &[_]OperandKind{} }, .{ .name = "SubgroupEqMaskKHR", .value = 4416, .parameters = &[_]OperandKind{} }, .{ .name = "SubgroupGeMask", .value = 4417, .parameters = &[_]OperandKind{} }, .{ .name = "SubgroupGeMaskKHR", .value = 4417, .parameters = &[_]OperandKind{} }, .{ .name = "SubgroupGtMask", .value = 4418, .parameters = &[_]OperandKind{} }, .{ .name = "SubgroupGtMaskKHR", .value = 4418, .parameters = &[_]OperandKind{} }, .{ .name = "SubgroupLeMask", .value = 4419, .parameters = &[_]OperandKind{} }, .{ .name = "SubgroupLeMaskKHR", .value = 4419, .parameters = &[_]OperandKind{} }, .{ .name = "SubgroupLtMask", .value = 4420, .parameters = &[_]OperandKind{} }, .{ .name = "SubgroupLtMaskKHR", .value = 4420, .parameters = &[_]OperandKind{} }, .{ .name = "BaseVertex", .value = 4424, .parameters = &[_]OperandKind{} }, .{ .name = "BaseInstance", .value = 4425, .parameters = &[_]OperandKind{} }, .{ .name = "DrawIndex", .value = 4426, .parameters = &[_]OperandKind{} }, .{ .name = "PrimitiveShadingRateKHR", .value = 4432, .parameters = &[_]OperandKind{} }, .{ .name = "DeviceIndex", .value = 4438, .parameters = &[_]OperandKind{} }, .{ .name = "ViewIndex", .value = 4440, .parameters = &[_]OperandKind{} }, .{ .name = "ShadingRateKHR", .value = 4444, .parameters = &[_]OperandKind{} }, .{ .name = "BaryCoordNoPerspAMD", .value = 4992, .parameters = &[_]OperandKind{} }, .{ .name = "BaryCoordNoPerspCentroidAMD", .value = 4993, .parameters = &[_]OperandKind{} }, .{ .name = "BaryCoordNoPerspSampleAMD", .value = 4994, .parameters = &[_]OperandKind{} }, .{ .name = "BaryCoordSmoothAMD", .value = 4995, .parameters = &[_]OperandKind{} }, .{ .name = "BaryCoordSmoothCentroidAMD", .value = 4996, .parameters = &[_]OperandKind{} }, .{ .name = "BaryCoordSmoothSampleAMD", .value = 4997, .parameters = &[_]OperandKind{} }, .{ .name = "BaryCoordPullModelAMD", .value = 4998, .parameters = &[_]OperandKind{} }, .{ .name = "FragStencilRefEXT", .value = 5014, .parameters = &[_]OperandKind{} }, .{ .name = "ViewportMaskNV", .value = 5253, .parameters = &[_]OperandKind{} }, .{ .name = "SecondaryPositionNV", .value = 5257, .parameters = &[_]OperandKind{} }, .{ .name = 
"SecondaryViewportMaskNV", .value = 5258, .parameters = &[_]OperandKind{} }, .{ .name = "PositionPerViewNV", .value = 5261, .parameters = &[_]OperandKind{} }, .{ .name = "ViewportMaskPerViewNV", .value = 5262, .parameters = &[_]OperandKind{} }, .{ .name = "FullyCoveredEXT", .value = 5264, .parameters = &[_]OperandKind{} }, .{ .name = "TaskCountNV", .value = 5274, .parameters = &[_]OperandKind{} }, .{ .name = "PrimitiveCountNV", .value = 5275, .parameters = &[_]OperandKind{} }, .{ .name = "PrimitiveIndicesNV", .value = 5276, .parameters = &[_]OperandKind{} }, .{ .name = "ClipDistancePerViewNV", .value = 5277, .parameters = &[_]OperandKind{} }, .{ .name = "CullDistancePerViewNV", .value = 5278, .parameters = &[_]OperandKind{} }, .{ .name = "LayerPerViewNV", .value = 5279, .parameters = &[_]OperandKind{} }, .{ .name = "MeshViewCountNV", .value = 5280, .parameters = &[_]OperandKind{} }, .{ .name = "MeshViewIndicesNV", .value = 5281, .parameters = &[_]OperandKind{} }, .{ .name = "BaryCoordKHR", .value = 5286, .parameters = &[_]OperandKind{} }, .{ .name = "BaryCoordNV", .value = 5286, .parameters = &[_]OperandKind{} }, .{ .name = "BaryCoordNoPerspKHR", .value = 5287, .parameters = &[_]OperandKind{} }, .{ .name = "BaryCoordNoPerspNV", .value = 5287, .parameters = &[_]OperandKind{} }, .{ .name = "FragSizeEXT", .value = 5292, .parameters = &[_]OperandKind{} }, .{ .name = "FragmentSizeNV", .value = 5292, .parameters = &[_]OperandKind{} }, .{ .name = "FragInvocationCountEXT", .value = 5293, .parameters = &[_]OperandKind{} }, .{ .name = "InvocationsPerPixelNV", .value = 5293, .parameters = &[_]OperandKind{} }, .{ .name = "PrimitivePointIndicesEXT", .value = 5294, .parameters = &[_]OperandKind{} }, .{ .name = "PrimitiveLineIndicesEXT", .value = 5295, .parameters = &[_]OperandKind{} }, .{ .name = "PrimitiveTriangleIndicesEXT", .value = 5296, .parameters = &[_]OperandKind{} }, .{ .name = "CullPrimitiveEXT", .value = 5299, .parameters = &[_]OperandKind{} }, .{ .name = "LaunchIdNV", .value = 5319, .parameters = &[_]OperandKind{} }, .{ .name = "LaunchIdKHR", .value = 5319, .parameters = &[_]OperandKind{} }, .{ .name = "LaunchSizeNV", .value = 5320, .parameters = &[_]OperandKind{} }, .{ .name = "LaunchSizeKHR", .value = 5320, .parameters = &[_]OperandKind{} }, .{ .name = "WorldRayOriginNV", .value = 5321, .parameters = &[_]OperandKind{} }, .{ .name = "WorldRayOriginKHR", .value = 5321, .parameters = &[_]OperandKind{} }, .{ .name = "WorldRayDirectionNV", .value = 5322, .parameters = &[_]OperandKind{} }, .{ .name = "WorldRayDirectionKHR", .value = 5322, .parameters = &[_]OperandKind{} }, .{ .name = "ObjectRayOriginNV", .value = 5323, .parameters = &[_]OperandKind{} }, .{ .name = "ObjectRayOriginKHR", .value = 5323, .parameters = &[_]OperandKind{} }, .{ .name = "ObjectRayDirectionNV", .value = 5324, .parameters = &[_]OperandKind{} }, .{ .name = "ObjectRayDirectionKHR", .value = 5324, .parameters = &[_]OperandKind{} }, .{ .name = "RayTminNV", .value = 5325, .parameters = &[_]OperandKind{} }, .{ .name = "RayTminKHR", .value = 5325, .parameters = &[_]OperandKind{} }, .{ .name = "RayTmaxNV", .value = 5326, .parameters = &[_]OperandKind{} }, .{ .name = "RayTmaxKHR", .value = 5326, .parameters = &[_]OperandKind{} }, .{ .name = "InstanceCustomIndexNV", .value = 5327, .parameters = &[_]OperandKind{} }, .{ .name = "InstanceCustomIndexKHR", .value = 5327, .parameters = &[_]OperandKind{} }, .{ .name = "ObjectToWorldNV", .value = 5330, .parameters = &[_]OperandKind{} }, .{ .name = "ObjectToWorldKHR", .value = 5330, 
.parameters = &[_]OperandKind{} }, .{ .name = "WorldToObjectNV", .value = 5331, .parameters = &[_]OperandKind{} }, .{ .name = "WorldToObjectKHR", .value = 5331, .parameters = &[_]OperandKind{} }, .{ .name = "HitTNV", .value = 5332, .parameters = &[_]OperandKind{} }, .{ .name = "HitKindNV", .value = 5333, .parameters = &[_]OperandKind{} }, .{ .name = "HitKindKHR", .value = 5333, .parameters = &[_]OperandKind{} }, .{ .name = "CurrentRayTimeNV", .value = 5334, .parameters = &[_]OperandKind{} }, .{ .name = "HitTriangleVertexPositionsKHR", .value = 5335, .parameters = &[_]OperandKind{} }, .{ .name = "IncomingRayFlagsNV", .value = 5351, .parameters = &[_]OperandKind{} }, .{ .name = "IncomingRayFlagsKHR", .value = 5351, .parameters = &[_]OperandKind{} }, .{ .name = "RayGeometryIndexKHR", .value = 5352, .parameters = &[_]OperandKind{} }, .{ .name = "WarpsPerSMNV", .value = 5374, .parameters = &[_]OperandKind{} }, .{ .name = "SMCountNV", .value = 5375, .parameters = &[_]OperandKind{} }, .{ .name = "WarpIDNV", .value = 5376, .parameters = &[_]OperandKind{} }, .{ .name = "SMIDNV", .value = 5377, .parameters = &[_]OperandKind{} }, .{ .name = "CullMaskKHR", .value = 6021, .parameters = &[_]OperandKind{} }, }, .Scope => &[_]Enumerant{ .{ .name = "CrossDevice", .value = 0, .parameters = &[_]OperandKind{} }, .{ .name = "Device", .value = 1, .parameters = &[_]OperandKind{} }, .{ .name = "Workgroup", .value = 2, .parameters = &[_]OperandKind{} }, .{ .name = "Subgroup", .value = 3, .parameters = &[_]OperandKind{} }, .{ .name = "Invocation", .value = 4, .parameters = &[_]OperandKind{} }, .{ .name = "QueueFamily", .value = 5, .parameters = &[_]OperandKind{} }, .{ .name = "QueueFamilyKHR", .value = 5, .parameters = &[_]OperandKind{} }, .{ .name = "ShaderCallKHR", .value = 6, .parameters = &[_]OperandKind{} }, }, .GroupOperation => &[_]Enumerant{ .{ .name = "Reduce", .value = 0, .parameters = &[_]OperandKind{} }, .{ .name = "InclusiveScan", .value = 1, .parameters = &[_]OperandKind{} }, .{ .name = "ExclusiveScan", .value = 2, .parameters = &[_]OperandKind{} }, .{ .name = "ClusteredReduce", .value = 3, .parameters = &[_]OperandKind{} }, .{ .name = "PartitionedReduceNV", .value = 6, .parameters = &[_]OperandKind{} }, .{ .name = "PartitionedInclusiveScanNV", .value = 7, .parameters = &[_]OperandKind{} }, .{ .name = "PartitionedExclusiveScanNV", .value = 8, .parameters = &[_]OperandKind{} }, }, .KernelEnqueueFlags => &[_]Enumerant{ .{ .name = "NoWait", .value = 0, .parameters = &[_]OperandKind{} }, .{ .name = "WaitKernel", .value = 1, .parameters = &[_]OperandKind{} }, .{ .name = "WaitWorkGroup", .value = 2, .parameters = &[_]OperandKind{} }, }, .Capability => &[_]Enumerant{ .{ .name = "Matrix", .value = 0, .parameters = &[_]OperandKind{} }, .{ .name = "Shader", .value = 1, .parameters = &[_]OperandKind{} }, .{ .name = "Geometry", .value = 2, .parameters = &[_]OperandKind{} }, .{ .name = "Tessellation", .value = 3, .parameters = &[_]OperandKind{} }, .{ .name = "Addresses", .value = 4, .parameters = &[_]OperandKind{} }, .{ .name = "Linkage", .value = 5, .parameters = &[_]OperandKind{} }, .{ .name = "Kernel", .value = 6, .parameters = &[_]OperandKind{} }, .{ .name = "Vector16", .value = 7, .parameters = &[_]OperandKind{} }, .{ .name = "Float16Buffer", .value = 8, .parameters = &[_]OperandKind{} }, .{ .name = "Float16", .value = 9, .parameters = &[_]OperandKind{} }, .{ .name = "Float64", .value = 10, .parameters = &[_]OperandKind{} }, .{ .name = "Int64", .value = 11, .parameters = &[_]OperandKind{} }, .{ .name = 
"Int64Atomics", .value = 12, .parameters = &[_]OperandKind{} }, .{ .name = "ImageBasic", .value = 13, .parameters = &[_]OperandKind{} }, .{ .name = "ImageReadWrite", .value = 14, .parameters = &[_]OperandKind{} }, .{ .name = "ImageMipmap", .value = 15, .parameters = &[_]OperandKind{} }, .{ .name = "Pipes", .value = 17, .parameters = &[_]OperandKind{} }, .{ .name = "Groups", .value = 18, .parameters = &[_]OperandKind{} }, .{ .name = "DeviceEnqueue", .value = 19, .parameters = &[_]OperandKind{} }, .{ .name = "LiteralSampler", .value = 20, .parameters = &[_]OperandKind{} }, .{ .name = "AtomicStorage", .value = 21, .parameters = &[_]OperandKind{} }, .{ .name = "Int16", .value = 22, .parameters = &[_]OperandKind{} }, .{ .name = "TessellationPointSize", .value = 23, .parameters = &[_]OperandKind{} }, .{ .name = "GeometryPointSize", .value = 24, .parameters = &[_]OperandKind{} }, .{ .name = "ImageGatherExtended", .value = 25, .parameters = &[_]OperandKind{} }, .{ .name = "StorageImageMultisample", .value = 27, .parameters = &[_]OperandKind{} }, .{ .name = "UniformBufferArrayDynamicIndexing", .value = 28, .parameters = &[_]OperandKind{} }, .{ .name = "SampledImageArrayDynamicIndexing", .value = 29, .parameters = &[_]OperandKind{} }, .{ .name = "StorageBufferArrayDynamicIndexing", .value = 30, .parameters = &[_]OperandKind{} }, .{ .name = "StorageImageArrayDynamicIndexing", .value = 31, .parameters = &[_]OperandKind{} }, .{ .name = "ClipDistance", .value = 32, .parameters = &[_]OperandKind{} }, .{ .name = "CullDistance", .value = 33, .parameters = &[_]OperandKind{} }, .{ .name = "ImageCubeArray", .value = 34, .parameters = &[_]OperandKind{} }, .{ .name = "SampleRateShading", .value = 35, .parameters = &[_]OperandKind{} }, .{ .name = "ImageRect", .value = 36, .parameters = &[_]OperandKind{} }, .{ .name = "SampledRect", .value = 37, .parameters = &[_]OperandKind{} }, .{ .name = "GenericPointer", .value = 38, .parameters = &[_]OperandKind{} }, .{ .name = "Int8", .value = 39, .parameters = &[_]OperandKind{} }, .{ .name = "InputAttachment", .value = 40, .parameters = &[_]OperandKind{} }, .{ .name = "SparseResidency", .value = 41, .parameters = &[_]OperandKind{} }, .{ .name = "MinLod", .value = 42, .parameters = &[_]OperandKind{} }, .{ .name = "Sampled1D", .value = 43, .parameters = &[_]OperandKind{} }, .{ .name = "Image1D", .value = 44, .parameters = &[_]OperandKind{} }, .{ .name = "SampledCubeArray", .value = 45, .parameters = &[_]OperandKind{} }, .{ .name = "SampledBuffer", .value = 46, .parameters = &[_]OperandKind{} }, .{ .name = "ImageBuffer", .value = 47, .parameters = &[_]OperandKind{} }, .{ .name = "ImageMSArray", .value = 48, .parameters = &[_]OperandKind{} }, .{ .name = "StorageImageExtendedFormats", .value = 49, .parameters = &[_]OperandKind{} }, .{ .name = "ImageQuery", .value = 50, .parameters = &[_]OperandKind{} }, .{ .name = "DerivativeControl", .value = 51, .parameters = &[_]OperandKind{} }, .{ .name = "InterpolationFunction", .value = 52, .parameters = &[_]OperandKind{} }, .{ .name = "TransformFeedback", .value = 53, .parameters = &[_]OperandKind{} }, .{ .name = "GeometryStreams", .value = 54, .parameters = &[_]OperandKind{} }, .{ .name = "StorageImageReadWithoutFormat", .value = 55, .parameters = &[_]OperandKind{} }, .{ .name = "StorageImageWriteWithoutFormat", .value = 56, .parameters = &[_]OperandKind{} }, .{ .name = "MultiViewport", .value = 57, .parameters = &[_]OperandKind{} }, .{ .name = "SubgroupDispatch", .value = 58, .parameters = &[_]OperandKind{} }, .{ .name = 
"NamedBarrier", .value = 59, .parameters = &[_]OperandKind{} }, .{ .name = "PipeStorage", .value = 60, .parameters = &[_]OperandKind{} }, .{ .name = "GroupNonUniform", .value = 61, .parameters = &[_]OperandKind{} }, .{ .name = "GroupNonUniformVote", .value = 62, .parameters = &[_]OperandKind{} }, .{ .name = "GroupNonUniformArithmetic", .value = 63, .parameters = &[_]OperandKind{} }, .{ .name = "GroupNonUniformBallot", .value = 64, .parameters = &[_]OperandKind{} }, .{ .name = "GroupNonUniformShuffle", .value = 65, .parameters = &[_]OperandKind{} }, .{ .name = "GroupNonUniformShuffleRelative", .value = 66, .parameters = &[_]OperandKind{} }, .{ .name = "GroupNonUniformClustered", .value = 67, .parameters = &[_]OperandKind{} }, .{ .name = "GroupNonUniformQuad", .value = 68, .parameters = &[_]OperandKind{} }, .{ .name = "ShaderLayer", .value = 69, .parameters = &[_]OperandKind{} }, .{ .name = "ShaderViewportIndex", .value = 70, .parameters = &[_]OperandKind{} }, .{ .name = "UniformDecoration", .value = 71, .parameters = &[_]OperandKind{} }, .{ .name = "CoreBuiltinsARM", .value = 4165, .parameters = &[_]OperandKind{} }, .{ .name = "TileImageColorReadAccessEXT", .value = 4166, .parameters = &[_]OperandKind{} }, .{ .name = "TileImageDepthReadAccessEXT", .value = 4167, .parameters = &[_]OperandKind{} }, .{ .name = "TileImageStencilReadAccessEXT", .value = 4168, .parameters = &[_]OperandKind{} }, .{ .name = "FragmentShadingRateKHR", .value = 4422, .parameters = &[_]OperandKind{} }, .{ .name = "SubgroupBallotKHR", .value = 4423, .parameters = &[_]OperandKind{} }, .{ .name = "DrawParameters", .value = 4427, .parameters = &[_]OperandKind{} }, .{ .name = "WorkgroupMemoryExplicitLayoutKHR", .value = 4428, .parameters = &[_]OperandKind{} }, .{ .name = "WorkgroupMemoryExplicitLayout8BitAccessKHR", .value = 4429, .parameters = &[_]OperandKind{} }, .{ .name = "WorkgroupMemoryExplicitLayout16BitAccessKHR", .value = 4430, .parameters = &[_]OperandKind{} }, .{ .name = "SubgroupVoteKHR", .value = 4431, .parameters = &[_]OperandKind{} }, .{ .name = "StorageBuffer16BitAccess", .value = 4433, .parameters = &[_]OperandKind{} }, .{ .name = "StorageUniformBufferBlock16", .value = 4433, .parameters = &[_]OperandKind{} }, .{ .name = "UniformAndStorageBuffer16BitAccess", .value = 4434, .parameters = &[_]OperandKind{} }, .{ .name = "StorageUniform16", .value = 4434, .parameters = &[_]OperandKind{} }, .{ .name = "StoragePushConstant16", .value = 4435, .parameters = &[_]OperandKind{} }, .{ .name = "StorageInputOutput16", .value = 4436, .parameters = &[_]OperandKind{} }, .{ .name = "DeviceGroup", .value = 4437, .parameters = &[_]OperandKind{} }, .{ .name = "MultiView", .value = 4439, .parameters = &[_]OperandKind{} }, .{ .name = "VariablePointersStorageBuffer", .value = 4441, .parameters = &[_]OperandKind{} }, .{ .name = "VariablePointers", .value = 4442, .parameters = &[_]OperandKind{} }, .{ .name = "AtomicStorageOps", .value = 4445, .parameters = &[_]OperandKind{} }, .{ .name = "SampleMaskPostDepthCoverage", .value = 4447, .parameters = &[_]OperandKind{} }, .{ .name = "StorageBuffer8BitAccess", .value = 4448, .parameters = &[_]OperandKind{} }, .{ .name = "UniformAndStorageBuffer8BitAccess", .value = 4449, .parameters = &[_]OperandKind{} }, .{ .name = "StoragePushConstant8", .value = 4450, .parameters = &[_]OperandKind{} }, .{ .name = "DenormPreserve", .value = 4464, .parameters = &[_]OperandKind{} }, .{ .name = "DenormFlushToZero", .value = 4465, .parameters = &[_]OperandKind{} }, .{ .name = "SignedZeroInfNanPreserve", 
.value = 4466, .parameters = &[_]OperandKind{} }, .{ .name = "RoundingModeRTE", .value = 4467, .parameters = &[_]OperandKind{} }, .{ .name = "RoundingModeRTZ", .value = 4468, .parameters = &[_]OperandKind{} }, .{ .name = "RayQueryProvisionalKHR", .value = 4471, .parameters = &[_]OperandKind{} }, .{ .name = "RayQueryKHR", .value = 4472, .parameters = &[_]OperandKind{} }, .{ .name = "RayTraversalPrimitiveCullingKHR", .value = 4478, .parameters = &[_]OperandKind{} }, .{ .name = "RayTracingKHR", .value = 4479, .parameters = &[_]OperandKind{} }, .{ .name = "TextureSampleWeightedQCOM", .value = 4484, .parameters = &[_]OperandKind{} }, .{ .name = "TextureBoxFilterQCOM", .value = 4485, .parameters = &[_]OperandKind{} }, .{ .name = "TextureBlockMatchQCOM", .value = 4486, .parameters = &[_]OperandKind{} }, .{ .name = "Float16ImageAMD", .value = 5008, .parameters = &[_]OperandKind{} }, .{ .name = "ImageGatherBiasLodAMD", .value = 5009, .parameters = &[_]OperandKind{} }, .{ .name = "FragmentMaskAMD", .value = 5010, .parameters = &[_]OperandKind{} }, .{ .name = "StencilExportEXT", .value = 5013, .parameters = &[_]OperandKind{} }, .{ .name = "ImageReadWriteLodAMD", .value = 5015, .parameters = &[_]OperandKind{} }, .{ .name = "Int64ImageEXT", .value = 5016, .parameters = &[_]OperandKind{} }, .{ .name = "ShaderClockKHR", .value = 5055, .parameters = &[_]OperandKind{} }, .{ .name = "SampleMaskOverrideCoverageNV", .value = 5249, .parameters = &[_]OperandKind{} }, .{ .name = "GeometryShaderPassthroughNV", .value = 5251, .parameters = &[_]OperandKind{} }, .{ .name = "ShaderViewportIndexLayerEXT", .value = 5254, .parameters = &[_]OperandKind{} }, .{ .name = "ShaderViewportIndexLayerNV", .value = 5254, .parameters = &[_]OperandKind{} }, .{ .name = "ShaderViewportMaskNV", .value = 5255, .parameters = &[_]OperandKind{} }, .{ .name = "ShaderStereoViewNV", .value = 5259, .parameters = &[_]OperandKind{} }, .{ .name = "PerViewAttributesNV", .value = 5260, .parameters = &[_]OperandKind{} }, .{ .name = "FragmentFullyCoveredEXT", .value = 5265, .parameters = &[_]OperandKind{} }, .{ .name = "MeshShadingNV", .value = 5266, .parameters = &[_]OperandKind{} }, .{ .name = "ImageFootprintNV", .value = 5282, .parameters = &[_]OperandKind{} }, .{ .name = "MeshShadingEXT", .value = 5283, .parameters = &[_]OperandKind{} }, .{ .name = "FragmentBarycentricKHR", .value = 5284, .parameters = &[_]OperandKind{} }, .{ .name = "FragmentBarycentricNV", .value = 5284, .parameters = &[_]OperandKind{} }, .{ .name = "ComputeDerivativeGroupQuadsNV", .value = 5288, .parameters = &[_]OperandKind{} }, .{ .name = "FragmentDensityEXT", .value = 5291, .parameters = &[_]OperandKind{} }, .{ .name = "ShadingRateNV", .value = 5291, .parameters = &[_]OperandKind{} }, .{ .name = "GroupNonUniformPartitionedNV", .value = 5297, .parameters = &[_]OperandKind{} }, .{ .name = "ShaderNonUniform", .value = 5301, .parameters = &[_]OperandKind{} }, .{ .name = "ShaderNonUniformEXT", .value = 5301, .parameters = &[_]OperandKind{} }, .{ .name = "RuntimeDescriptorArray", .value = 5302, .parameters = &[_]OperandKind{} }, .{ .name = "RuntimeDescriptorArrayEXT", .value = 5302, .parameters = &[_]OperandKind{} }, .{ .name = "InputAttachmentArrayDynamicIndexing", .value = 5303, .parameters = &[_]OperandKind{} }, .{ .name = "InputAttachmentArrayDynamicIndexingEXT", .value = 5303, .parameters = &[_]OperandKind{} }, .{ .name = "UniformTexelBufferArrayDynamicIndexing", .value = 5304, .parameters = &[_]OperandKind{} }, .{ .name = "UniformTexelBufferArrayDynamicIndexingEXT", .value 
= 5304, .parameters = &[_]OperandKind{} }, .{ .name = "StorageTexelBufferArrayDynamicIndexing", .value = 5305, .parameters = &[_]OperandKind{} }, .{ .name = "StorageTexelBufferArrayDynamicIndexingEXT", .value = 5305, .parameters = &[_]OperandKind{} }, .{ .name = "UniformBufferArrayNonUniformIndexing", .value = 5306, .parameters = &[_]OperandKind{} }, .{ .name = "UniformBufferArrayNonUniformIndexingEXT", .value = 5306, .parameters = &[_]OperandKind{} }, .{ .name = "SampledImageArrayNonUniformIndexing", .value = 5307, .parameters = &[_]OperandKind{} }, .{ .name = "SampledImageArrayNonUniformIndexingEXT", .value = 5307, .parameters = &[_]OperandKind{} }, .{ .name = "StorageBufferArrayNonUniformIndexing", .value = 5308, .parameters = &[_]OperandKind{} }, .{ .name = "StorageBufferArrayNonUniformIndexingEXT", .value = 5308, .parameters = &[_]OperandKind{} }, .{ .name = "StorageImageArrayNonUniformIndexing", .value = 5309, .parameters = &[_]OperandKind{} }, .{ .name = "StorageImageArrayNonUniformIndexingEXT", .value = 5309, .parameters = &[_]OperandKind{} }, .{ .name = "InputAttachmentArrayNonUniformIndexing", .value = 5310, .parameters = &[_]OperandKind{} }, .{ .name = "InputAttachmentArrayNonUniformIndexingEXT", .value = 5310, .parameters = &[_]OperandKind{} }, .{ .name = "UniformTexelBufferArrayNonUniformIndexing", .value = 5311, .parameters = &[_]OperandKind{} }, .{ .name = "UniformTexelBufferArrayNonUniformIndexingEXT", .value = 5311, .parameters = &[_]OperandKind{} }, .{ .name = "StorageTexelBufferArrayNonUniformIndexing", .value = 5312, .parameters = &[_]OperandKind{} }, .{ .name = "StorageTexelBufferArrayNonUniformIndexingEXT", .value = 5312, .parameters = &[_]OperandKind{} }, .{ .name = "RayTracingPositionFetchKHR", .value = 5336, .parameters = &[_]OperandKind{} }, .{ .name = "RayTracingNV", .value = 5340, .parameters = &[_]OperandKind{} }, .{ .name = "RayTracingMotionBlurNV", .value = 5341, .parameters = &[_]OperandKind{} }, .{ .name = "VulkanMemoryModel", .value = 5345, .parameters = &[_]OperandKind{} }, .{ .name = "VulkanMemoryModelKHR", .value = 5345, .parameters = &[_]OperandKind{} }, .{ .name = "VulkanMemoryModelDeviceScope", .value = 5346, .parameters = &[_]OperandKind{} }, .{ .name = "VulkanMemoryModelDeviceScopeKHR", .value = 5346, .parameters = &[_]OperandKind{} }, .{ .name = "PhysicalStorageBufferAddresses", .value = 5347, .parameters = &[_]OperandKind{} }, .{ .name = "PhysicalStorageBufferAddressesEXT", .value = 5347, .parameters = &[_]OperandKind{} }, .{ .name = "ComputeDerivativeGroupLinearNV", .value = 5350, .parameters = &[_]OperandKind{} }, .{ .name = "RayTracingProvisionalKHR", .value = 5353, .parameters = &[_]OperandKind{} }, .{ .name = "CooperativeMatrixNV", .value = 5357, .parameters = &[_]OperandKind{} }, .{ .name = "FragmentShaderSampleInterlockEXT", .value = 5363, .parameters = &[_]OperandKind{} }, .{ .name = "FragmentShaderShadingRateInterlockEXT", .value = 5372, .parameters = &[_]OperandKind{} }, .{ .name = "ShaderSMBuiltinsNV", .value = 5373, .parameters = &[_]OperandKind{} }, .{ .name = "FragmentShaderPixelInterlockEXT", .value = 5378, .parameters = &[_]OperandKind{} }, .{ .name = "DemoteToHelperInvocation", .value = 5379, .parameters = &[_]OperandKind{} }, .{ .name = "DemoteToHelperInvocationEXT", .value = 5379, .parameters = &[_]OperandKind{} }, .{ .name = "RayTracingOpacityMicromapEXT", .value = 5381, .parameters = &[_]OperandKind{} }, .{ .name = "ShaderInvocationReorderNV", .value = 5383, .parameters = &[_]OperandKind{} }, .{ .name = "BindlessTextureNV", 
.value = 5390, .parameters = &[_]OperandKind{} }, .{ .name = "RayQueryPositionFetchKHR", .value = 5391, .parameters = &[_]OperandKind{} }, .{ .name = "SubgroupShuffleINTEL", .value = 5568, .parameters = &[_]OperandKind{} }, .{ .name = "SubgroupBufferBlockIOINTEL", .value = 5569, .parameters = &[_]OperandKind{} }, .{ .name = "SubgroupImageBlockIOINTEL", .value = 5570, .parameters = &[_]OperandKind{} }, .{ .name = "SubgroupImageMediaBlockIOINTEL", .value = 5579, .parameters = &[_]OperandKind{} }, .{ .name = "RoundToInfinityINTEL", .value = 5582, .parameters = &[_]OperandKind{} }, .{ .name = "FloatingPointModeINTEL", .value = 5583, .parameters = &[_]OperandKind{} }, .{ .name = "IntegerFunctions2INTEL", .value = 5584, .parameters = &[_]OperandKind{} }, .{ .name = "FunctionPointersINTEL", .value = 5603, .parameters = &[_]OperandKind{} }, .{ .name = "IndirectReferencesINTEL", .value = 5604, .parameters = &[_]OperandKind{} }, .{ .name = "AsmINTEL", .value = 5606, .parameters = &[_]OperandKind{} }, .{ .name = "AtomicFloat32MinMaxEXT", .value = 5612, .parameters = &[_]OperandKind{} }, .{ .name = "AtomicFloat64MinMaxEXT", .value = 5613, .parameters = &[_]OperandKind{} }, .{ .name = "AtomicFloat16MinMaxEXT", .value = 5616, .parameters = &[_]OperandKind{} }, .{ .name = "VectorComputeINTEL", .value = 5617, .parameters = &[_]OperandKind{} }, .{ .name = "VectorAnyINTEL", .value = 5619, .parameters = &[_]OperandKind{} }, .{ .name = "ExpectAssumeKHR", .value = 5629, .parameters = &[_]OperandKind{} }, .{ .name = "SubgroupAvcMotionEstimationINTEL", .value = 5696, .parameters = &[_]OperandKind{} }, .{ .name = "SubgroupAvcMotionEstimationIntraINTEL", .value = 5697, .parameters = &[_]OperandKind{} }, .{ .name = "SubgroupAvcMotionEstimationChromaINTEL", .value = 5698, .parameters = &[_]OperandKind{} }, .{ .name = "VariableLengthArrayINTEL", .value = 5817, .parameters = &[_]OperandKind{} }, .{ .name = "FunctionFloatControlINTEL", .value = 5821, .parameters = &[_]OperandKind{} }, .{ .name = "FPGAMemoryAttributesINTEL", .value = 5824, .parameters = &[_]OperandKind{} }, .{ .name = "FPFastMathModeINTEL", .value = 5837, .parameters = &[_]OperandKind{} }, .{ .name = "ArbitraryPrecisionIntegersINTEL", .value = 5844, .parameters = &[_]OperandKind{} }, .{ .name = "ArbitraryPrecisionFloatingPointINTEL", .value = 5845, .parameters = &[_]OperandKind{} }, .{ .name = "UnstructuredLoopControlsINTEL", .value = 5886, .parameters = &[_]OperandKind{} }, .{ .name = "FPGALoopControlsINTEL", .value = 5888, .parameters = &[_]OperandKind{} }, .{ .name = "KernelAttributesINTEL", .value = 5892, .parameters = &[_]OperandKind{} }, .{ .name = "FPGAKernelAttributesINTEL", .value = 5897, .parameters = &[_]OperandKind{} }, .{ .name = "FPGAMemoryAccessesINTEL", .value = 5898, .parameters = &[_]OperandKind{} }, .{ .name = "FPGAClusterAttributesINTEL", .value = 5904, .parameters = &[_]OperandKind{} }, .{ .name = "LoopFuseINTEL", .value = 5906, .parameters = &[_]OperandKind{} }, .{ .name = "FPGADSPControlINTEL", .value = 5908, .parameters = &[_]OperandKind{} }, .{ .name = "MemoryAccessAliasingINTEL", .value = 5910, .parameters = &[_]OperandKind{} }, .{ .name = "FPGAInvocationPipeliningAttributesINTEL", .value = 5916, .parameters = &[_]OperandKind{} }, .{ .name = "FPGABufferLocationINTEL", .value = 5920, .parameters = &[_]OperandKind{} }, .{ .name = "ArbitraryPrecisionFixedPointINTEL", .value = 5922, .parameters = &[_]OperandKind{} }, .{ .name = "USMStorageClassesINTEL", .value = 5935, .parameters = &[_]OperandKind{} }, .{ .name = 
"RuntimeAlignedAttributeINTEL", .value = 5939, .parameters = &[_]OperandKind{} }, .{ .name = "IOPipesINTEL", .value = 5943, .parameters = &[_]OperandKind{} }, .{ .name = "BlockingPipesINTEL", .value = 5945, .parameters = &[_]OperandKind{} }, .{ .name = "FPGARegINTEL", .value = 5948, .parameters = &[_]OperandKind{} }, .{ .name = "DotProductInputAll", .value = 6016, .parameters = &[_]OperandKind{} }, .{ .name = "DotProductInputAllKHR", .value = 6016, .parameters = &[_]OperandKind{} }, .{ .name = "DotProductInput4x8Bit", .value = 6017, .parameters = &[_]OperandKind{} }, .{ .name = "DotProductInput4x8BitKHR", .value = 6017, .parameters = &[_]OperandKind{} }, .{ .name = "DotProductInput4x8BitPacked", .value = 6018, .parameters = &[_]OperandKind{} }, .{ .name = "DotProductInput4x8BitPackedKHR", .value = 6018, .parameters = &[_]OperandKind{} }, .{ .name = "DotProduct", .value = 6019, .parameters = &[_]OperandKind{} }, .{ .name = "DotProductKHR", .value = 6019, .parameters = &[_]OperandKind{} }, .{ .name = "RayCullMaskKHR", .value = 6020, .parameters = &[_]OperandKind{} }, .{ .name = "BitInstructions", .value = 6025, .parameters = &[_]OperandKind{} }, .{ .name = "GroupNonUniformRotateKHR", .value = 6026, .parameters = &[_]OperandKind{} }, .{ .name = "AtomicFloat32AddEXT", .value = 6033, .parameters = &[_]OperandKind{} }, .{ .name = "AtomicFloat64AddEXT", .value = 6034, .parameters = &[_]OperandKind{} }, .{ .name = "LongConstantCompositeINTEL", .value = 6089, .parameters = &[_]OperandKind{} }, .{ .name = "OptNoneINTEL", .value = 6094, .parameters = &[_]OperandKind{} }, .{ .name = "AtomicFloat16AddEXT", .value = 6095, .parameters = &[_]OperandKind{} }, .{ .name = "DebugInfoModuleINTEL", .value = 6114, .parameters = &[_]OperandKind{} }, .{ .name = "BFloat16ConversionINTEL", .value = 6115, .parameters = &[_]OperandKind{} }, .{ .name = "SplitBarrierINTEL", .value = 6141, .parameters = &[_]OperandKind{} }, .{ .name = "FPGAKernelAttributesv2INTEL", .value = 6161, .parameters = &[_]OperandKind{} }, .{ .name = "FPGALatencyControlINTEL", .value = 6171, .parameters = &[_]OperandKind{} }, .{ .name = "FPGAArgumentInterfacesINTEL", .value = 6174, .parameters = &[_]OperandKind{} }, .{ .name = "GroupUniformArithmeticKHR", .value = 6400, .parameters = &[_]OperandKind{} }, }, .RayQueryIntersection => &[_]Enumerant{ .{ .name = "RayQueryCandidateIntersectionKHR", .value = 0, .parameters = &[_]OperandKind{} }, .{ .name = "RayQueryCommittedIntersectionKHR", .value = 1, .parameters = &[_]OperandKind{} }, }, .RayQueryCommittedIntersectionType => &[_]Enumerant{ .{ .name = "RayQueryCommittedIntersectionNoneKHR", .value = 0, .parameters = &[_]OperandKind{} }, .{ .name = "RayQueryCommittedIntersectionTriangleKHR", .value = 1, .parameters = &[_]OperandKind{} }, .{ .name = "RayQueryCommittedIntersectionGeneratedKHR", .value = 2, .parameters = &[_]OperandKind{} }, }, .RayQueryCandidateIntersectionType => &[_]Enumerant{ .{ .name = "RayQueryCandidateIntersectionTriangleKHR", .value = 0, .parameters = &[_]OperandKind{} }, .{ .name = "RayQueryCandidateIntersectionAABBKHR", .value = 1, .parameters = &[_]OperandKind{} }, }, .PackedVectorFormat => &[_]Enumerant{ .{ .name = "PackedVectorFormat4x8Bit", .value = 0, .parameters = &[_]OperandKind{} }, .{ .name = "PackedVectorFormat4x8BitKHR", .value = 0, .parameters = &[_]OperandKind{} }, }, .IdResultType => unreachable, .IdResult => unreachable, .IdMemorySemantics => unreachable, .IdScope => unreachable, .IdRef => unreachable, .LiteralInteger => unreachable, .LiteralString => unreachable, 
            .LiteralContextDependentNumber => unreachable,
            .LiteralExtInstInteger => unreachable,
            .LiteralSpecConstantOpInteger => unreachable,
            .PairLiteralIntegerIdRef => unreachable,
            .PairIdRefLiteralInteger => unreachable,
            .PairIdRefIdRef => unreachable,
        };
    }
};

pub const Opcode = enum(u16) {
    OpNop = 0, OpUndef = 1, OpSourceContinued = 2, OpSource = 3, OpSourceExtension = 4, OpName = 5, OpMemberName = 6, OpString = 7, OpLine = 8,
    OpExtension = 10, OpExtInstImport = 11, OpExtInst = 12,
    OpMemoryModel = 14, OpEntryPoint = 15, OpExecutionMode = 16, OpCapability = 17,
    OpTypeVoid = 19, OpTypeBool = 20, OpTypeInt = 21, OpTypeFloat = 22, OpTypeVector = 23, OpTypeMatrix = 24, OpTypeImage = 25, OpTypeSampler = 26, OpTypeSampledImage = 27, OpTypeArray = 28, OpTypeRuntimeArray = 29, OpTypeStruct = 30, OpTypeOpaque = 31, OpTypePointer = 32, OpTypeFunction = 33, OpTypeEvent = 34, OpTypeDeviceEvent = 35, OpTypeReserveId = 36, OpTypeQueue = 37, OpTypePipe = 38, OpTypeForwardPointer = 39,
    OpConstantTrue = 41, OpConstantFalse = 42, OpConstant = 43, OpConstantComposite = 44, OpConstantSampler = 45, OpConstantNull = 46,
    OpSpecConstantTrue = 48, OpSpecConstantFalse = 49, OpSpecConstant = 50, OpSpecConstantComposite = 51, OpSpecConstantOp = 52,
    OpFunction = 54, OpFunctionParameter = 55, OpFunctionEnd = 56, OpFunctionCall = 57,
    OpVariable = 59, OpImageTexelPointer = 60, OpLoad = 61, OpStore = 62, OpCopyMemory = 63, OpCopyMemorySized = 64, OpAccessChain = 65, OpInBoundsAccessChain = 66, OpPtrAccessChain = 67, OpArrayLength = 68, OpGenericPtrMemSemantics = 69, OpInBoundsPtrAccessChain = 70,
    OpDecorate = 71, OpMemberDecorate = 72, OpDecorationGroup = 73, OpGroupDecorate = 74, OpGroupMemberDecorate = 75,
    OpVectorExtractDynamic = 77, OpVectorInsertDynamic = 78, OpVectorShuffle = 79, OpCompositeConstruct = 80, OpCompositeExtract = 81, OpCompositeInsert = 82, OpCopyObject = 83, OpTranspose = 84,
    OpSampledImage = 86, OpImageSampleImplicitLod = 87, OpImageSampleExplicitLod = 88, OpImageSampleDrefImplicitLod = 89, OpImageSampleDrefExplicitLod = 90, OpImageSampleProjImplicitLod = 91, OpImageSampleProjExplicitLod = 92, OpImageSampleProjDrefImplicitLod = 93, OpImageSampleProjDrefExplicitLod = 94,
    OpImageFetch = 95, OpImageGather = 96, OpImageDrefGather = 97, OpImageRead = 98, OpImageWrite = 99, OpImage = 100,
    OpImageQueryFormat = 101, OpImageQueryOrder = 102, OpImageQuerySizeLod = 103, OpImageQuerySize = 104, OpImageQueryLod = 105, OpImageQueryLevels = 106, OpImageQuerySamples = 107,
    OpConvertFToU = 109, OpConvertFToS = 110, OpConvertSToF = 111, OpConvertUToF = 112, OpUConvert = 113, OpSConvert = 114, OpFConvert = 115, OpQuantizeToF16 = 116, OpConvertPtrToU = 117, OpSatConvertSToU = 118, OpSatConvertUToS = 119, OpConvertUToPtr = 120, OpPtrCastToGeneric = 121, OpGenericCastToPtr = 122, OpGenericCastToPtrExplicit = 123, OpBitcast = 124,
    OpSNegate = 126, OpFNegate = 127, OpIAdd = 128, OpFAdd = 129, OpISub = 130, OpFSub = 131, OpIMul = 132, OpFMul = 133, OpUDiv = 134, OpSDiv = 135, OpFDiv = 136, OpUMod = 137, OpSRem = 138, OpSMod = 139, OpFRem = 140, OpFMod = 141,
    OpVectorTimesScalar = 142, OpMatrixTimesScalar = 143, OpVectorTimesMatrix = 144, OpMatrixTimesVector = 145, OpMatrixTimesMatrix = 146, OpOuterProduct = 147, OpDot = 148, OpIAddCarry = 149, OpISubBorrow = 150, OpUMulExtended = 151, OpSMulExtended = 152,
    OpAny = 154, OpAll = 155, OpIsNan = 156, OpIsInf = 157, OpIsFinite = 158, OpIsNormal = 159, OpSignBitSet = 160, OpLessOrGreater = 161, OpOrdered = 162, OpUnordered = 163,
    OpLogicalEqual = 164, OpLogicalNotEqual = 165, OpLogicalOr
= 166, OpLogicalAnd = 167, OpLogicalNot = 168, OpSelect = 169, OpIEqual = 170, OpINotEqual = 171, OpUGreaterThan = 172, OpSGreaterThan = 173, OpUGreaterThanEqual = 174, OpSGreaterThanEqual = 175, OpULessThan = 176, OpSLessThan = 177, OpULessThanEqual = 178, OpSLessThanEqual = 179, OpFOrdEqual = 180, OpFUnordEqual = 181, OpFOrdNotEqual = 182, OpFUnordNotEqual = 183, OpFOrdLessThan = 184, OpFUnordLessThan = 185, OpFOrdGreaterThan = 186, OpFUnordGreaterThan = 187, OpFOrdLessThanEqual = 188, OpFUnordLessThanEqual = 189, OpFOrdGreaterThanEqual = 190, OpFUnordGreaterThanEqual = 191, OpShiftRightLogical = 194, OpShiftRightArithmetic = 195, OpShiftLeftLogical = 196, OpBitwiseOr = 197, OpBitwiseXor = 198, OpBitwiseAnd = 199, OpNot = 200, OpBitFieldInsert = 201, OpBitFieldSExtract = 202, OpBitFieldUExtract = 203, OpBitReverse = 204, OpBitCount = 205, OpDPdx = 207, OpDPdy = 208, OpFwidth = 209, OpDPdxFine = 210, OpDPdyFine = 211, OpFwidthFine = 212, OpDPdxCoarse = 213, OpDPdyCoarse = 214, OpFwidthCoarse = 215, OpEmitVertex = 218, OpEndPrimitive = 219, OpEmitStreamVertex = 220, OpEndStreamPrimitive = 221, OpControlBarrier = 224, OpMemoryBarrier = 225, OpAtomicLoad = 227, OpAtomicStore = 228, OpAtomicExchange = 229, OpAtomicCompareExchange = 230, OpAtomicCompareExchangeWeak = 231, OpAtomicIIncrement = 232, OpAtomicIDecrement = 233, OpAtomicIAdd = 234, OpAtomicISub = 235, OpAtomicSMin = 236, OpAtomicUMin = 237, OpAtomicSMax = 238, OpAtomicUMax = 239, OpAtomicAnd = 240, OpAtomicOr = 241, OpAtomicXor = 242, OpPhi = 245, OpLoopMerge = 246, OpSelectionMerge = 247, OpLabel = 248, OpBranch = 249, OpBranchConditional = 250, OpSwitch = 251, OpKill = 252, OpReturn = 253, OpReturnValue = 254, OpUnreachable = 255, OpLifetimeStart = 256, OpLifetimeStop = 257, OpGroupAsyncCopy = 259, OpGroupWaitEvents = 260, OpGroupAll = 261, OpGroupAny = 262, OpGroupBroadcast = 263, OpGroupIAdd = 264, OpGroupFAdd = 265, OpGroupFMin = 266, OpGroupUMin = 267, OpGroupSMin = 268, OpGroupFMax = 269, OpGroupUMax = 270, OpGroupSMax = 271, OpReadPipe = 274, OpWritePipe = 275, OpReservedReadPipe = 276, OpReservedWritePipe = 277, OpReserveReadPipePackets = 278, OpReserveWritePipePackets = 279, OpCommitReadPipe = 280, OpCommitWritePipe = 281, OpIsValidReserveId = 282, OpGetNumPipePackets = 283, OpGetMaxPipePackets = 284, OpGroupReserveReadPipePackets = 285, OpGroupReserveWritePipePackets = 286, OpGroupCommitReadPipe = 287, OpGroupCommitWritePipe = 288, OpEnqueueMarker = 291, OpEnqueueKernel = 292, OpGetKernelNDrangeSubGroupCount = 293, OpGetKernelNDrangeMaxSubGroupSize = 294, OpGetKernelWorkGroupSize = 295, OpGetKernelPreferredWorkGroupSizeMultiple = 296, OpRetainEvent = 297, OpReleaseEvent = 298, OpCreateUserEvent = 299, OpIsValidEvent = 300, OpSetUserEventStatus = 301, OpCaptureEventProfilingInfo = 302, OpGetDefaultQueue = 303, OpBuildNDRange = 304, OpImageSparseSampleImplicitLod = 305, OpImageSparseSampleExplicitLod = 306, OpImageSparseSampleDrefImplicitLod = 307, OpImageSparseSampleDrefExplicitLod = 308, OpImageSparseSampleProjImplicitLod = 309, OpImageSparseSampleProjExplicitLod = 310, OpImageSparseSampleProjDrefImplicitLod = 311, OpImageSparseSampleProjDrefExplicitLod = 312, OpImageSparseFetch = 313, OpImageSparseGather = 314, OpImageSparseDrefGather = 315, OpImageSparseTexelsResident = 316, OpNoLine = 317, OpAtomicFlagTestAndSet = 318, OpAtomicFlagClear = 319, OpImageSparseRead = 320, OpSizeOf = 321, OpTypePipeStorage = 322, OpConstantPipeStorage = 323, OpCreatePipeFromPipeStorage = 324, OpGetKernelLocalSizeForSubgroupCount = 325, 
OpGetKernelMaxNumSubgroups = 326, OpTypeNamedBarrier = 327, OpNamedBarrierInitialize = 328, OpMemoryNamedBarrier = 329, OpModuleProcessed = 330, OpExecutionModeId = 331, OpDecorateId = 332, OpGroupNonUniformElect = 333, OpGroupNonUniformAll = 334, OpGroupNonUniformAny = 335, OpGroupNonUniformAllEqual = 336, OpGroupNonUniformBroadcast = 337, OpGroupNonUniformBroadcastFirst = 338, OpGroupNonUniformBallot = 339, OpGroupNonUniformInverseBallot = 340, OpGroupNonUniformBallotBitExtract = 341, OpGroupNonUniformBallotBitCount = 342, OpGroupNonUniformBallotFindLSB = 343, OpGroupNonUniformBallotFindMSB = 344, OpGroupNonUniformShuffle = 345, OpGroupNonUniformShuffleXor = 346, OpGroupNonUniformShuffleUp = 347, OpGroupNonUniformShuffleDown = 348, OpGroupNonUniformIAdd = 349, OpGroupNonUniformFAdd = 350, OpGroupNonUniformIMul = 351, OpGroupNonUniformFMul = 352, OpGroupNonUniformSMin = 353, OpGroupNonUniformUMin = 354, OpGroupNonUniformFMin = 355, OpGroupNonUniformSMax = 356, OpGroupNonUniformUMax = 357, OpGroupNonUniformFMax = 358, OpGroupNonUniformBitwiseAnd = 359, OpGroupNonUniformBitwiseOr = 360, OpGroupNonUniformBitwiseXor = 361, OpGroupNonUniformLogicalAnd = 362, OpGroupNonUniformLogicalOr = 363, OpGroupNonUniformLogicalXor = 364, OpGroupNonUniformQuadBroadcast = 365, OpGroupNonUniformQuadSwap = 366, OpCopyLogical = 400, OpPtrEqual = 401, OpPtrNotEqual = 402, OpPtrDiff = 403, OpColorAttachmentReadEXT = 4160, OpDepthAttachmentReadEXT = 4161, OpStencilAttachmentReadEXT = 4162, OpTerminateInvocation = 4416, OpSubgroupBallotKHR = 4421, OpSubgroupFirstInvocationKHR = 4422, OpSubgroupAllKHR = 4428, OpSubgroupAnyKHR = 4429, OpSubgroupAllEqualKHR = 4430, OpGroupNonUniformRotateKHR = 4431, OpSubgroupReadInvocationKHR = 4432, OpTraceRayKHR = 4445, OpExecuteCallableKHR = 4446, OpConvertUToAccelerationStructureKHR = 4447, OpIgnoreIntersectionKHR = 4448, OpTerminateRayKHR = 4449, OpSDot = 4450, OpUDot = 4451, OpSUDot = 4452, OpSDotAccSat = 4453, OpUDotAccSat = 4454, OpSUDotAccSat = 4455, OpTypeRayQueryKHR = 4472, OpRayQueryInitializeKHR = 4473, OpRayQueryTerminateKHR = 4474, OpRayQueryGenerateIntersectionKHR = 4475, OpRayQueryConfirmIntersectionKHR = 4476, OpRayQueryProceedKHR = 4477, OpRayQueryGetIntersectionTypeKHR = 4479, OpImageSampleWeightedQCOM = 4480, OpImageBoxFilterQCOM = 4481, OpImageBlockMatchSSDQCOM = 4482, OpImageBlockMatchSADQCOM = 4483, OpGroupIAddNonUniformAMD = 5000, OpGroupFAddNonUniformAMD = 5001, OpGroupFMinNonUniformAMD = 5002, OpGroupUMinNonUniformAMD = 5003, OpGroupSMinNonUniformAMD = 5004, OpGroupFMaxNonUniformAMD = 5005, OpGroupUMaxNonUniformAMD = 5006, OpGroupSMaxNonUniformAMD = 5007, OpFragmentMaskFetchAMD = 5011, OpFragmentFetchAMD = 5012, OpReadClockKHR = 5056, OpHitObjectRecordHitMotionNV = 5249, OpHitObjectRecordHitWithIndexMotionNV = 5250, OpHitObjectRecordMissMotionNV = 5251, OpHitObjectGetWorldToObjectNV = 5252, OpHitObjectGetObjectToWorldNV = 5253, OpHitObjectGetObjectRayDirectionNV = 5254, OpHitObjectGetObjectRayOriginNV = 5255, OpHitObjectTraceRayMotionNV = 5256, OpHitObjectGetShaderRecordBufferHandleNV = 5257, OpHitObjectGetShaderBindingTableRecordIndexNV = 5258, OpHitObjectRecordEmptyNV = 5259, OpHitObjectTraceRayNV = 5260, OpHitObjectRecordHitNV = 5261, OpHitObjectRecordHitWithIndexNV = 5262, OpHitObjectRecordMissNV = 5263, OpHitObjectExecuteShaderNV = 5264, OpHitObjectGetCurrentTimeNV = 5265, OpHitObjectGetAttributesNV = 5266, OpHitObjectGetHitKindNV = 5267, OpHitObjectGetPrimitiveIndexNV = 5268, OpHitObjectGetGeometryIndexNV = 5269, OpHitObjectGetInstanceIdNV = 5270, 
OpHitObjectGetInstanceCustomIndexNV = 5271, OpHitObjectGetWorldRayDirectionNV = 5272, OpHitObjectGetWorldRayOriginNV = 5273, OpHitObjectGetRayTMaxNV = 5274, OpHitObjectGetRayTMinNV = 5275, OpHitObjectIsEmptyNV = 5276, OpHitObjectIsHitNV = 5277, OpHitObjectIsMissNV = 5278, OpReorderThreadWithHitObjectNV = 5279, OpReorderThreadWithHintNV = 5280, OpTypeHitObjectNV = 5281, OpImageSampleFootprintNV = 5283, OpEmitMeshTasksEXT = 5294, OpSetMeshOutputsEXT = 5295, OpGroupNonUniformPartitionNV = 5296, OpWritePackedPrimitiveIndices4x8NV = 5299, OpReportIntersectionKHR = 5334, OpIgnoreIntersectionNV = 5335, OpTerminateRayNV = 5336, OpTraceNV = 5337, OpTraceMotionNV = 5338, OpTraceRayMotionNV = 5339, OpRayQueryGetIntersectionTriangleVertexPositionsKHR = 5340, OpTypeAccelerationStructureKHR = 5341, OpExecuteCallableNV = 5344, OpTypeCooperativeMatrixNV = 5358, OpCooperativeMatrixLoadNV = 5359, OpCooperativeMatrixStoreNV = 5360, OpCooperativeMatrixMulAddNV = 5361, OpCooperativeMatrixLengthNV = 5362, OpBeginInvocationInterlockEXT = 5364, OpEndInvocationInterlockEXT = 5365, OpDemoteToHelperInvocation = 5380, OpIsHelperInvocationEXT = 5381, OpConvertUToImageNV = 5391, OpConvertUToSamplerNV = 5392, OpConvertImageToUNV = 5393, OpConvertSamplerToUNV = 5394, OpConvertUToSampledImageNV = 5395, OpConvertSampledImageToUNV = 5396, OpSamplerImageAddressingModeNV = 5397, OpSubgroupShuffleINTEL = 5571, OpSubgroupShuffleDownINTEL = 5572, OpSubgroupShuffleUpINTEL = 5573, OpSubgroupShuffleXorINTEL = 5574, OpSubgroupBlockReadINTEL = 5575, OpSubgroupBlockWriteINTEL = 5576, OpSubgroupImageBlockReadINTEL = 5577, OpSubgroupImageBlockWriteINTEL = 5578, OpSubgroupImageMediaBlockReadINTEL = 5580, OpSubgroupImageMediaBlockWriteINTEL = 5581, OpUCountLeadingZerosINTEL = 5585, OpUCountTrailingZerosINTEL = 5586, OpAbsISubINTEL = 5587, OpAbsUSubINTEL = 5588, OpIAddSatINTEL = 5589, OpUAddSatINTEL = 5590, OpIAverageINTEL = 5591, OpUAverageINTEL = 5592, OpIAverageRoundedINTEL = 5593, OpUAverageRoundedINTEL = 5594, OpISubSatINTEL = 5595, OpUSubSatINTEL = 5596, OpIMul32x16INTEL = 5597, OpUMul32x16INTEL = 5598, OpAtomicFMinEXT = 5614, OpAtomicFMaxEXT = 5615, OpAssumeTrueKHR = 5630, OpExpectKHR = 5631, OpDecorateString = 5632, OpMemberDecorateString = 5633, OpLoopControlINTEL = 5887, OpReadPipeBlockingINTEL = 5946, OpWritePipeBlockingINTEL = 5947, OpFPGARegINTEL = 5949, OpRayQueryGetRayTMinKHR = 6016, OpRayQueryGetRayFlagsKHR = 6017, OpRayQueryGetIntersectionTKHR = 6018, OpRayQueryGetIntersectionInstanceCustomIndexKHR = 6019, OpRayQueryGetIntersectionInstanceIdKHR = 6020, OpRayQueryGetIntersectionInstanceShaderBindingTableRecordOffsetKHR = 6021, OpRayQueryGetIntersectionGeometryIndexKHR = 6022, OpRayQueryGetIntersectionPrimitiveIndexKHR = 6023, OpRayQueryGetIntersectionBarycentricsKHR = 6024, OpRayQueryGetIntersectionFrontFaceKHR = 6025, OpRayQueryGetIntersectionCandidateAABBOpaqueKHR = 6026, OpRayQueryGetIntersectionObjectRayDirectionKHR = 6027, OpRayQueryGetIntersectionObjectRayOriginKHR = 6028, OpRayQueryGetWorldRayDirectionKHR = 6029, OpRayQueryGetWorldRayOriginKHR = 6030, OpRayQueryGetIntersectionObjectToWorldKHR = 6031, OpRayQueryGetIntersectionWorldToObjectKHR = 6032, OpAtomicFAddEXT = 6035, OpTypeBufferSurfaceINTEL = 6086, OpTypeStructContinuedINTEL = 6090, OpConstantCompositeContinuedINTEL = 6091, OpSpecConstantCompositeContinuedINTEL = 6092, OpConvertFToBF16INTEL = 6116, OpConvertBF16ToFINTEL = 6117, OpControlBarrierArriveINTEL = 6142, OpControlBarrierWaitINTEL = 6143, OpGroupIMulKHR = 6401, OpGroupFMulKHR = 6402, 
    OpGroupBitwiseAndKHR = 6403,
    OpGroupBitwiseOrKHR = 6404,
    OpGroupBitwiseXorKHR = 6405,
    OpGroupLogicalAndKHR = 6406,
    OpGroupLogicalOrKHR = 6407,
    OpGroupLogicalXorKHR = 6408,

    // Aliases for opcodes that were renamed or promoted (to core or to KHR)
    // in later SPIR-V revisions; kept so older spellings keep working.
    pub const OpSDotKHR = Opcode.OpSDot;
    pub const OpUDotKHR = Opcode.OpUDot;
    pub const OpSUDotKHR = Opcode.OpSUDot;
    pub const OpSDotAccSatKHR = Opcode.OpSDotAccSat;
    pub const OpUDotAccSatKHR = Opcode.OpUDotAccSat;
    pub const OpSUDotAccSatKHR = Opcode.OpSUDotAccSat;
    pub const OpReportIntersectionNV = Opcode.OpReportIntersectionKHR;
    pub const OpTypeAccelerationStructureNV = Opcode.OpTypeAccelerationStructureKHR;
    pub const OpDemoteToHelperInvocationEXT = Opcode.OpDemoteToHelperInvocation;
    pub const OpDecorateStringGOOGLE = Opcode.OpDecorateString;
    pub const OpMemberDecorateStringGOOGLE = Opcode.OpMemberDecorateString;

    /// Returns a struct type whose fields describe, in declaration order, the
    /// operands of the given opcode. `void` means the instruction takes no
    /// operands; optional fields (`?T = null`) model optional operands and
    /// slice fields (`[]const T = &.{}`) model variadic tails.
    pub fn Operands(comptime self: Opcode) type {
        return switch (self) {
            .OpNop => void,
            .OpUndef => struct { id_result_type: IdResultType, id_result: IdResult },
            .OpSourceContinued => struct { continued_source: LiteralString },
            .OpSource => struct { source_language: SourceLanguage, version: LiteralInteger, file: ?IdRef = null, source: ?LiteralString = null },
            .OpSourceExtension => struct { extension: LiteralString },
            .OpName => struct { target: IdRef, name: LiteralString },
            .OpMemberName => struct { type: IdRef, member: LiteralInteger, name: LiteralString },
            .OpString => struct { id_result: IdResult, string: LiteralString },
            .OpLine => struct { file: IdRef, line: LiteralInteger, column: LiteralInteger },
            .OpExtension => struct { name: LiteralString },
            .OpExtInstImport => struct { id_result: IdResult, name: LiteralString },
            .OpExtInst => struct { id_result_type: IdResultType, id_result: IdResult, set: IdRef, instruction: LiteralExtInstInteger, id_ref_4: []const IdRef = &.{} },
            .OpMemoryModel => struct { addressing_model: AddressingModel, memory_model: MemoryModel },
            .OpEntryPoint => struct { execution_model: ExecutionModel, entry_point: IdRef, name: LiteralString, interface: []const IdRef = &.{} },
            .OpExecutionMode => struct { entry_point: IdRef, mode: ExecutionMode.Extended },
            .OpCapability => struct { capability: Capability },
            .OpTypeVoid => struct { id_result: IdResult },
            .OpTypeBool => struct { id_result: IdResult },
            .OpTypeInt => struct { id_result: IdResult, width: LiteralInteger, signedness: LiteralInteger },
            .OpTypeFloat => struct { id_result: IdResult, width: LiteralInteger },
            .OpTypeVector => struct { id_result: IdResult, component_type: IdRef, component_count: LiteralInteger },
            .OpTypeMatrix => struct { id_result: IdResult, column_type: IdRef, column_count: LiteralInteger },
            .OpTypeImage => struct { id_result: IdResult, sampled_type: IdRef, dim: Dim, depth: LiteralInteger, arrayed: LiteralInteger, ms: LiteralInteger, sampled: LiteralInteger, image_format: ImageFormat, access_qualifier: ?AccessQualifier = null },
            .OpTypeSampler => struct { id_result: IdResult },
            .OpTypeSampledImage => struct { id_result: IdResult, image_type: IdRef },
            .OpTypeArray => struct { id_result: IdResult, element_type: IdRef, length: IdRef },
            .OpTypeRuntimeArray => struct { id_result: IdResult, element_type: IdRef },
            .OpTypeStruct => struct { id_result: IdResult, id_ref: []const IdRef = &.{} },
            .OpTypeOpaque => struct { id_result: IdResult, literal_string: LiteralString },
            .OpTypePointer => struct { id_result: IdResult, storage_class: StorageClass, type: IdRef },
            .OpTypeFunction => struct { id_result: IdResult, return_type: IdRef, id_ref_2: []const IdRef = &.{} },
            .OpTypeEvent => struct { id_result: IdResult },
            .OpTypeDeviceEvent =>
struct { id_result: IdResult }, .OpTypeReserveId => struct { id_result: IdResult }, .OpTypeQueue => struct { id_result: IdResult }, .OpTypePipe => struct { id_result: IdResult, qualifier: AccessQualifier }, .OpTypeForwardPointer => struct { pointer_type: IdRef, storage_class: StorageClass }, .OpConstantTrue => struct { id_result_type: IdResultType, id_result: IdResult }, .OpConstantFalse => struct { id_result_type: IdResultType, id_result: IdResult }, .OpConstant => struct { id_result_type: IdResultType, id_result: IdResult, value: LiteralContextDependentNumber }, .OpConstantComposite => struct { id_result_type: IdResultType, id_result: IdResult, constituents: []const IdRef = &.{} }, .OpConstantSampler => struct { id_result_type: IdResultType, id_result: IdResult, sampler_addressing_mode: SamplerAddressingMode, param: LiteralInteger, sampler_filter_mode: SamplerFilterMode }, .OpConstantNull => struct { id_result_type: IdResultType, id_result: IdResult }, .OpSpecConstantTrue => struct { id_result_type: IdResultType, id_result: IdResult }, .OpSpecConstantFalse => struct { id_result_type: IdResultType, id_result: IdResult }, .OpSpecConstant => struct { id_result_type: IdResultType, id_result: IdResult, value: LiteralContextDependentNumber }, .OpSpecConstantComposite => struct { id_result_type: IdResultType, id_result: IdResult, constituents: []const IdRef = &.{} }, .OpSpecConstantOp => struct { id_result_type: IdResultType, id_result: IdResult, opcode: LiteralSpecConstantOpInteger }, .OpFunction => struct { id_result_type: IdResultType, id_result: IdResult, function_control: FunctionControl, function_type: IdRef }, .OpFunctionParameter => struct { id_result_type: IdResultType, id_result: IdResult }, .OpFunctionEnd => void, .OpFunctionCall => struct { id_result_type: IdResultType, id_result: IdResult, function: IdRef, id_ref_3: []const IdRef = &.{} }, .OpVariable => struct { id_result_type: IdResultType, id_result: IdResult, storage_class: StorageClass, initializer: ?IdRef = null }, .OpImageTexelPointer => struct { id_result_type: IdResultType, id_result: IdResult, image: IdRef, coordinate: IdRef, sample: IdRef }, .OpLoad => struct { id_result_type: IdResultType, id_result: IdResult, pointer: IdRef, memory_access: ?MemoryAccess.Extended = null }, .OpStore => struct { pointer: IdRef, object: IdRef, memory_access: ?MemoryAccess.Extended = null }, .OpCopyMemory => struct { target: IdRef, source: IdRef, memory_access_2: ?MemoryAccess.Extended = null, memory_access_3: ?MemoryAccess.Extended = null }, .OpCopyMemorySized => struct { target: IdRef, source: IdRef, size: IdRef, memory_access_3: ?MemoryAccess.Extended = null, memory_access_4: ?MemoryAccess.Extended = null }, .OpAccessChain => struct { id_result_type: IdResultType, id_result: IdResult, base: IdRef, indexes: []const IdRef = &.{} }, .OpInBoundsAccessChain => struct { id_result_type: IdResultType, id_result: IdResult, base: IdRef, indexes: []const IdRef = &.{} }, .OpPtrAccessChain => struct { id_result_type: IdResultType, id_result: IdResult, base: IdRef, element: IdRef, indexes: []const IdRef = &.{} }, .OpArrayLength => struct { id_result_type: IdResultType, id_result: IdResult, structure: IdRef, array_member: LiteralInteger }, .OpGenericPtrMemSemantics => struct { id_result_type: IdResultType, id_result: IdResult, pointer: IdRef }, .OpInBoundsPtrAccessChain => struct { id_result_type: IdResultType, id_result: IdResult, base: IdRef, element: IdRef, indexes: []const IdRef = &.{} }, .OpDecorate => struct { target: IdRef, decoration: 
Decoration.Extended }, .OpMemberDecorate => struct { structure_type: IdRef, member: LiteralInteger, decoration: Decoration.Extended }, .OpDecorationGroup => struct { id_result: IdResult }, .OpGroupDecorate => struct { decoration_group: IdRef, targets: []const IdRef = &.{} }, .OpGroupMemberDecorate => struct { decoration_group: IdRef, targets: []const PairIdRefLiteralInteger = &.{} }, .OpVectorExtractDynamic => struct { id_result_type: IdResultType, id_result: IdResult, vector: IdRef, index: IdRef }, .OpVectorInsertDynamic => struct { id_result_type: IdResultType, id_result: IdResult, vector: IdRef, component: IdRef, index: IdRef }, .OpVectorShuffle => struct { id_result_type: IdResultType, id_result: IdResult, vector_1: IdRef, vector_2: IdRef, components: []const LiteralInteger = &.{} }, .OpCompositeConstruct => struct { id_result_type: IdResultType, id_result: IdResult, constituents: []const IdRef = &.{} }, .OpCompositeExtract => struct { id_result_type: IdResultType, id_result: IdResult, composite: IdRef, indexes: []const LiteralInteger = &.{} }, .OpCompositeInsert => struct { id_result_type: IdResultType, id_result: IdResult, object: IdRef, composite: IdRef, indexes: []const LiteralInteger = &.{} }, .OpCopyObject => struct { id_result_type: IdResultType, id_result: IdResult, operand: IdRef }, .OpTranspose => struct { id_result_type: IdResultType, id_result: IdResult, matrix: IdRef }, .OpSampledImage => struct { id_result_type: IdResultType, id_result: IdResult, image: IdRef, sampler: IdRef }, .OpImageSampleImplicitLod => struct { id_result_type: IdResultType, id_result: IdResult, sampled_image: IdRef, coordinate: IdRef, image_operands: ?ImageOperands.Extended = null }, .OpImageSampleExplicitLod => struct { id_result_type: IdResultType, id_result: IdResult, sampled_image: IdRef, coordinate: IdRef, image_operands: ImageOperands.Extended }, .OpImageSampleDrefImplicitLod => struct { id_result_type: IdResultType, id_result: IdResult, sampled_image: IdRef, coordinate: IdRef, d_ref: IdRef, image_operands: ?ImageOperands.Extended = null }, .OpImageSampleDrefExplicitLod => struct { id_result_type: IdResultType, id_result: IdResult, sampled_image: IdRef, coordinate: IdRef, d_ref: IdRef, image_operands: ImageOperands.Extended }, .OpImageSampleProjImplicitLod => struct { id_result_type: IdResultType, id_result: IdResult, sampled_image: IdRef, coordinate: IdRef, image_operands: ?ImageOperands.Extended = null }, .OpImageSampleProjExplicitLod => struct { id_result_type: IdResultType, id_result: IdResult, sampled_image: IdRef, coordinate: IdRef, image_operands: ImageOperands.Extended }, .OpImageSampleProjDrefImplicitLod => struct { id_result_type: IdResultType, id_result: IdResult, sampled_image: IdRef, coordinate: IdRef, d_ref: IdRef, image_operands: ?ImageOperands.Extended = null }, .OpImageSampleProjDrefExplicitLod => struct { id_result_type: IdResultType, id_result: IdResult, sampled_image: IdRef, coordinate: IdRef, d_ref: IdRef, image_operands: ImageOperands.Extended }, .OpImageFetch => struct { id_result_type: IdResultType, id_result: IdResult, image: IdRef, coordinate: IdRef, image_operands: ?ImageOperands.Extended = null }, .OpImageGather => struct { id_result_type: IdResultType, id_result: IdResult, sampled_image: IdRef, coordinate: IdRef, component: IdRef, image_operands: ?ImageOperands.Extended = null }, .OpImageDrefGather => struct { id_result_type: IdResultType, id_result: IdResult, sampled_image: IdRef, coordinate: IdRef, d_ref: IdRef, image_operands: ?ImageOperands.Extended = null }, 
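// A minimal usage sketch (editor's illustration, not part of the generated
// spec; `result_id` stands for some previously allocated IdResult): the
// prongs above and below give each opcode a typed operand struct, so a caller
// can spell out an instruction's operands and have the compiler check them:
//
//     const operands: Opcode.Operands(.OpTypeInt) = .{
//         .id_result = result_id,
//         .width = 32,
//         .signedness = 0,
//     };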
.OpImageRead => struct { id_result_type: IdResultType, id_result: IdResult, image: IdRef, coordinate: IdRef, image_operands: ?ImageOperands.Extended = null }, .OpImageWrite => struct { image: IdRef, coordinate: IdRef, texel: IdRef, image_operands: ?ImageOperands.Extended = null }, .OpImage => struct { id_result_type: IdResultType, id_result: IdResult, sampled_image: IdRef }, .OpImageQueryFormat => struct { id_result_type: IdResultType, id_result: IdResult, image: IdRef }, .OpImageQueryOrder => struct { id_result_type: IdResultType, id_result: IdResult, image: IdRef }, .OpImageQuerySizeLod => struct { id_result_type: IdResultType, id_result: IdResult, image: IdRef, level_of_detail: IdRef }, .OpImageQuerySize => struct { id_result_type: IdResultType, id_result: IdResult, image: IdRef }, .OpImageQueryLod => struct { id_result_type: IdResultType, id_result: IdResult, sampled_image: IdRef, coordinate: IdRef }, .OpImageQueryLevels => struct { id_result_type: IdResultType, id_result: IdResult, image: IdRef }, .OpImageQuerySamples => struct { id_result_type: IdResultType, id_result: IdResult, image: IdRef }, .OpConvertFToU => struct { id_result_type: IdResultType, id_result: IdResult, float_value: IdRef }, .OpConvertFToS => struct { id_result_type: IdResultType, id_result: IdResult, float_value: IdRef }, .OpConvertSToF => struct { id_result_type: IdResultType, id_result: IdResult, signed_value: IdRef }, .OpConvertUToF => struct { id_result_type: IdResultType, id_result: IdResult, unsigned_value: IdRef }, .OpUConvert => struct { id_result_type: IdResultType, id_result: IdResult, unsigned_value: IdRef }, .OpSConvert => struct { id_result_type: IdResultType, id_result: IdResult, signed_value: IdRef }, .OpFConvert => struct { id_result_type: IdResultType, id_result: IdResult, float_value: IdRef }, .OpQuantizeToF16 => struct { id_result_type: IdResultType, id_result: IdResult, value: IdRef }, .OpConvertPtrToU => struct { id_result_type: IdResultType, id_result: IdResult, pointer: IdRef }, .OpSatConvertSToU => struct { id_result_type: IdResultType, id_result: IdResult, signed_value: IdRef }, .OpSatConvertUToS => struct { id_result_type: IdResultType, id_result: IdResult, unsigned_value: IdRef }, .OpConvertUToPtr => struct { id_result_type: IdResultType, id_result: IdResult, integer_value: IdRef }, .OpPtrCastToGeneric => struct { id_result_type: IdResultType, id_result: IdResult, pointer: IdRef }, .OpGenericCastToPtr => struct { id_result_type: IdResultType, id_result: IdResult, pointer: IdRef }, .OpGenericCastToPtrExplicit => struct { id_result_type: IdResultType, id_result: IdResult, pointer: IdRef, storage: StorageClass }, .OpBitcast => struct { id_result_type: IdResultType, id_result: IdResult, operand: IdRef }, .OpSNegate => struct { id_result_type: IdResultType, id_result: IdResult, operand: IdRef }, .OpFNegate => struct { id_result_type: IdResultType, id_result: IdResult, operand: IdRef }, .OpIAdd => struct { id_result_type: IdResultType, id_result: IdResult, operand_1: IdRef, operand_2: IdRef }, .OpFAdd => struct { id_result_type: IdResultType, id_result: IdResult, operand_1: IdRef, operand_2: IdRef }, .OpISub => struct { id_result_type: IdResultType, id_result: IdResult, operand_1: IdRef, operand_2: IdRef }, .OpFSub => struct { id_result_type: IdResultType, id_result: IdResult, operand_1: IdRef, operand_2: IdRef }, .OpIMul => struct { id_result_type: IdResultType, id_result: IdResult, operand_1: IdRef, operand_2: IdRef }, .OpFMul => struct { id_result_type: IdResultType, id_result: IdResult, 
operand_1: IdRef, operand_2: IdRef }, .OpUDiv => struct { id_result_type: IdResultType, id_result: IdResult, operand_1: IdRef, operand_2: IdRef }, .OpSDiv => struct { id_result_type: IdResultType, id_result: IdResult, operand_1: IdRef, operand_2: IdRef }, .OpFDiv => struct { id_result_type: IdResultType, id_result: IdResult, operand_1: IdRef, operand_2: IdRef }, .OpUMod => struct { id_result_type: IdResultType, id_result: IdResult, operand_1: IdRef, operand_2: IdRef }, .OpSRem => struct { id_result_type: IdResultType, id_result: IdResult, operand_1: IdRef, operand_2: IdRef }, .OpSMod => struct { id_result_type: IdResultType, id_result: IdResult, operand_1: IdRef, operand_2: IdRef }, .OpFRem => struct { id_result_type: IdResultType, id_result: IdResult, operand_1: IdRef, operand_2: IdRef }, .OpFMod => struct { id_result_type: IdResultType, id_result: IdResult, operand_1: IdRef, operand_2: IdRef }, .OpVectorTimesScalar => struct { id_result_type: IdResultType, id_result: IdResult, vector: IdRef, scalar: IdRef }, .OpMatrixTimesScalar => struct { id_result_type: IdResultType, id_result: IdResult, matrix: IdRef, scalar: IdRef }, .OpVectorTimesMatrix => struct { id_result_type: IdResultType, id_result: IdResult, vector: IdRef, matrix: IdRef }, .OpMatrixTimesVector => struct { id_result_type: IdResultType, id_result: IdResult, matrix: IdRef, vector: IdRef }, .OpMatrixTimesMatrix => struct { id_result_type: IdResultType, id_result: IdResult, leftmatrix: IdRef, rightmatrix: IdRef }, .OpOuterProduct => struct { id_result_type: IdResultType, id_result: IdResult, vector_1: IdRef, vector_2: IdRef }, .OpDot => struct { id_result_type: IdResultType, id_result: IdResult, vector_1: IdRef, vector_2: IdRef }, .OpIAddCarry => struct { id_result_type: IdResultType, id_result: IdResult, operand_1: IdRef, operand_2: IdRef }, .OpISubBorrow => struct { id_result_type: IdResultType, id_result: IdResult, operand_1: IdRef, operand_2: IdRef }, .OpUMulExtended => struct { id_result_type: IdResultType, id_result: IdResult, operand_1: IdRef, operand_2: IdRef }, .OpSMulExtended => struct { id_result_type: IdResultType, id_result: IdResult, operand_1: IdRef, operand_2: IdRef }, .OpAny => struct { id_result_type: IdResultType, id_result: IdResult, vector: IdRef }, .OpAll => struct { id_result_type: IdResultType, id_result: IdResult, vector: IdRef }, .OpIsNan => struct { id_result_type: IdResultType, id_result: IdResult, x: IdRef }, .OpIsInf => struct { id_result_type: IdResultType, id_result: IdResult, x: IdRef }, .OpIsFinite => struct { id_result_type: IdResultType, id_result: IdResult, x: IdRef }, .OpIsNormal => struct { id_result_type: IdResultType, id_result: IdResult, x: IdRef }, .OpSignBitSet => struct { id_result_type: IdResultType, id_result: IdResult, x: IdRef }, .OpLessOrGreater => struct { id_result_type: IdResultType, id_result: IdResult, x: IdRef, y: IdRef }, .OpOrdered => struct { id_result_type: IdResultType, id_result: IdResult, x: IdRef, y: IdRef }, .OpUnordered => struct { id_result_type: IdResultType, id_result: IdResult, x: IdRef, y: IdRef }, .OpLogicalEqual => struct { id_result_type: IdResultType, id_result: IdResult, operand_1: IdRef, operand_2: IdRef }, .OpLogicalNotEqual => struct { id_result_type: IdResultType, id_result: IdResult, operand_1: IdRef, operand_2: IdRef }, .OpLogicalOr => struct { id_result_type: IdResultType, id_result: IdResult, operand_1: IdRef, operand_2: IdRef }, .OpLogicalAnd => struct { id_result_type: IdResultType, id_result: IdResult, operand_1: IdRef, operand_2: IdRef }, 
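// The prongs below keep the same pattern: apart from the unary .OpLogicalNot
// and .OpNot and the ternary .OpSelect, each comparison, shift, and bitwise
// instruction takes a result type, a result id, and two operands.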
.OpLogicalNot => struct { id_result_type: IdResultType, id_result: IdResult, operand: IdRef }, .OpSelect => struct { id_result_type: IdResultType, id_result: IdResult, condition: IdRef, object_1: IdRef, object_2: IdRef }, .OpIEqual => struct { id_result_type: IdResultType, id_result: IdResult, operand_1: IdRef, operand_2: IdRef }, .OpINotEqual => struct { id_result_type: IdResultType, id_result: IdResult, operand_1: IdRef, operand_2: IdRef }, .OpUGreaterThan => struct { id_result_type: IdResultType, id_result: IdResult, operand_1: IdRef, operand_2: IdRef }, .OpSGreaterThan => struct { id_result_type: IdResultType, id_result: IdResult, operand_1: IdRef, operand_2: IdRef }, .OpUGreaterThanEqual => struct { id_result_type: IdResultType, id_result: IdResult, operand_1: IdRef, operand_2: IdRef }, .OpSGreaterThanEqual => struct { id_result_type: IdResultType, id_result: IdResult, operand_1: IdRef, operand_2: IdRef }, .OpULessThan => struct { id_result_type: IdResultType, id_result: IdResult, operand_1: IdRef, operand_2: IdRef }, .OpSLessThan => struct { id_result_type: IdResultType, id_result: IdResult, operand_1: IdRef, operand_2: IdRef }, .OpULessThanEqual => struct { id_result_type: IdResultType, id_result: IdResult, operand_1: IdRef, operand_2: IdRef }, .OpSLessThanEqual => struct { id_result_type: IdResultType, id_result: IdResult, operand_1: IdRef, operand_2: IdRef }, .OpFOrdEqual => struct { id_result_type: IdResultType, id_result: IdResult, operand_1: IdRef, operand_2: IdRef }, .OpFUnordEqual => struct { id_result_type: IdResultType, id_result: IdResult, operand_1: IdRef, operand_2: IdRef }, .OpFOrdNotEqual => struct { id_result_type: IdResultType, id_result: IdResult, operand_1: IdRef, operand_2: IdRef }, .OpFUnordNotEqual => struct { id_result_type: IdResultType, id_result: IdResult, operand_1: IdRef, operand_2: IdRef }, .OpFOrdLessThan => struct { id_result_type: IdResultType, id_result: IdResult, operand_1: IdRef, operand_2: IdRef }, .OpFUnordLessThan => struct { id_result_type: IdResultType, id_result: IdResult, operand_1: IdRef, operand_2: IdRef }, .OpFOrdGreaterThan => struct { id_result_type: IdResultType, id_result: IdResult, operand_1: IdRef, operand_2: IdRef }, .OpFUnordGreaterThan => struct { id_result_type: IdResultType, id_result: IdResult, operand_1: IdRef, operand_2: IdRef }, .OpFOrdLessThanEqual => struct { id_result_type: IdResultType, id_result: IdResult, operand_1: IdRef, operand_2: IdRef }, .OpFUnordLessThanEqual => struct { id_result_type: IdResultType, id_result: IdResult, operand_1: IdRef, operand_2: IdRef }, .OpFOrdGreaterThanEqual => struct { id_result_type: IdResultType, id_result: IdResult, operand_1: IdRef, operand_2: IdRef }, .OpFUnordGreaterThanEqual => struct { id_result_type: IdResultType, id_result: IdResult, operand_1: IdRef, operand_2: IdRef }, .OpShiftRightLogical => struct { id_result_type: IdResultType, id_result: IdResult, base: IdRef, shift: IdRef }, .OpShiftRightArithmetic => struct { id_result_type: IdResultType, id_result: IdResult, base: IdRef, shift: IdRef }, .OpShiftLeftLogical => struct { id_result_type: IdResultType, id_result: IdResult, base: IdRef, shift: IdRef }, .OpBitwiseOr => struct { id_result_type: IdResultType, id_result: IdResult, operand_1: IdRef, operand_2: IdRef }, .OpBitwiseXor => struct { id_result_type: IdResultType, id_result: IdResult, operand_1: IdRef, operand_2: IdRef }, .OpBitwiseAnd => struct { id_result_type: IdResultType, id_result: IdResult, operand_1: IdRef, operand_2: IdRef }, .OpNot => struct { id_result_type: 
IdResultType, id_result: IdResult, operand: IdRef }, .OpBitFieldInsert => struct { id_result_type: IdResultType, id_result: IdResult, base: IdRef, insert: IdRef, offset: IdRef, count: IdRef }, .OpBitFieldSExtract => struct { id_result_type: IdResultType, id_result: IdResult, base: IdRef, offset: IdRef, count: IdRef }, .OpBitFieldUExtract => struct { id_result_type: IdResultType, id_result: IdResult, base: IdRef, offset: IdRef, count: IdRef }, .OpBitReverse => struct { id_result_type: IdResultType, id_result: IdResult, base: IdRef }, .OpBitCount => struct { id_result_type: IdResultType, id_result: IdResult, base: IdRef }, .OpDPdx => struct { id_result_type: IdResultType, id_result: IdResult, p: IdRef }, .OpDPdy => struct { id_result_type: IdResultType, id_result: IdResult, p: IdRef }, .OpFwidth => struct { id_result_type: IdResultType, id_result: IdResult, p: IdRef }, .OpDPdxFine => struct { id_result_type: IdResultType, id_result: IdResult, p: IdRef }, .OpDPdyFine => struct { id_result_type: IdResultType, id_result: IdResult, p: IdRef }, .OpFwidthFine => struct { id_result_type: IdResultType, id_result: IdResult, p: IdRef }, .OpDPdxCoarse => struct { id_result_type: IdResultType, id_result: IdResult, p: IdRef }, .OpDPdyCoarse => struct { id_result_type: IdResultType, id_result: IdResult, p: IdRef }, .OpFwidthCoarse => struct { id_result_type: IdResultType, id_result: IdResult, p: IdRef }, .OpEmitVertex => void, .OpEndPrimitive => void, .OpEmitStreamVertex => struct { stream: IdRef }, .OpEndStreamPrimitive => struct { stream: IdRef }, .OpControlBarrier => struct { execution: IdScope, memory: IdScope, semantics: IdMemorySemantics }, .OpMemoryBarrier => struct { memory: IdScope, semantics: IdMemorySemantics }, .OpAtomicLoad => struct { id_result_type: IdResultType, id_result: IdResult, pointer: IdRef, memory: IdScope, semantics: IdMemorySemantics }, .OpAtomicStore => struct { pointer: IdRef, memory: IdScope, semantics: IdMemorySemantics, value: IdRef }, .OpAtomicExchange => struct { id_result_type: IdResultType, id_result: IdResult, pointer: IdRef, memory: IdScope, semantics: IdMemorySemantics, value: IdRef }, .OpAtomicCompareExchange => struct { id_result_type: IdResultType, id_result: IdResult, pointer: IdRef, memory: IdScope, equal: IdMemorySemantics, unequal: IdMemorySemantics, value: IdRef, comparator: IdRef }, .OpAtomicCompareExchangeWeak => struct { id_result_type: IdResultType, id_result: IdResult, pointer: IdRef, memory: IdScope, equal: IdMemorySemantics, unequal: IdMemorySemantics, value: IdRef, comparator: IdRef }, .OpAtomicIIncrement => struct { id_result_type: IdResultType, id_result: IdResult, pointer: IdRef, memory: IdScope, semantics: IdMemorySemantics }, .OpAtomicIDecrement => struct { id_result_type: IdResultType, id_result: IdResult, pointer: IdRef, memory: IdScope, semantics: IdMemorySemantics }, .OpAtomicIAdd => struct { id_result_type: IdResultType, id_result: IdResult, pointer: IdRef, memory: IdScope, semantics: IdMemorySemantics, value: IdRef }, .OpAtomicISub => struct { id_result_type: IdResultType, id_result: IdResult, pointer: IdRef, memory: IdScope, semantics: IdMemorySemantics, value: IdRef }, .OpAtomicSMin => struct { id_result_type: IdResultType, id_result: IdResult, pointer: IdRef, memory: IdScope, semantics: IdMemorySemantics, value: IdRef }, .OpAtomicUMin => struct { id_result_type: IdResultType, id_result: IdResult, pointer: IdRef, memory: IdScope, semantics: IdMemorySemantics, value: IdRef }, .OpAtomicSMax => struct { id_result_type: IdResultType, 
id_result: IdResult, pointer: IdRef, memory: IdScope, semantics: IdMemorySemantics, value: IdRef }, .OpAtomicUMax => struct { id_result_type: IdResultType, id_result: IdResult, pointer: IdRef, memory: IdScope, semantics: IdMemorySemantics, value: IdRef }, .OpAtomicAnd => struct { id_result_type: IdResultType, id_result: IdResult, pointer: IdRef, memory: IdScope, semantics: IdMemorySemantics, value: IdRef }, .OpAtomicOr => struct { id_result_type: IdResultType, id_result: IdResult, pointer: IdRef, memory: IdScope, semantics: IdMemorySemantics, value: IdRef }, .OpAtomicXor => struct { id_result_type: IdResultType, id_result: IdResult, pointer: IdRef, memory: IdScope, semantics: IdMemorySemantics, value: IdRef }, .OpPhi => struct { id_result_type: IdResultType, id_result: IdResult, pair_id_ref_id_ref: []const PairIdRefIdRef = &.{} }, .OpLoopMerge => struct { merge_block: IdRef, continue_target: IdRef, loop_control: LoopControl.Extended }, .OpSelectionMerge => struct { merge_block: IdRef, selection_control: SelectionControl }, .OpLabel => struct { id_result: IdResult }, .OpBranch => struct { target_label: IdRef }, .OpBranchConditional => struct { condition: IdRef, true_label: IdRef, false_label: IdRef, branch_weights: []const LiteralInteger = &.{} }, .OpSwitch => struct { selector: IdRef, default: IdRef, target: []const PairLiteralIntegerIdRef = &.{} }, .OpKill => void, .OpReturn => void, .OpReturnValue => struct { value: IdRef }, .OpUnreachable => void, .OpLifetimeStart => struct { pointer: IdRef, size: LiteralInteger }, .OpLifetimeStop => struct { pointer: IdRef, size: LiteralInteger }, .OpGroupAsyncCopy => struct { id_result_type: IdResultType, id_result: IdResult, execution: IdScope, destination: IdRef, source: IdRef, num_elements: IdRef, stride: IdRef, event: IdRef }, .OpGroupWaitEvents => struct { execution: IdScope, num_events: IdRef, events_list: IdRef }, .OpGroupAll => struct { id_result_type: IdResultType, id_result: IdResult, execution: IdScope, predicate: IdRef }, .OpGroupAny => struct { id_result_type: IdResultType, id_result: IdResult, execution: IdScope, predicate: IdRef }, .OpGroupBroadcast => struct { id_result_type: IdResultType, id_result: IdResult, execution: IdScope, value: IdRef, localid: IdRef }, .OpGroupIAdd => struct { id_result_type: IdResultType, id_result: IdResult, execution: IdScope, operation: GroupOperation, x: IdRef }, .OpGroupFAdd => struct { id_result_type: IdResultType, id_result: IdResult, execution: IdScope, operation: GroupOperation, x: IdRef }, .OpGroupFMin => struct { id_result_type: IdResultType, id_result: IdResult, execution: IdScope, operation: GroupOperation, x: IdRef }, .OpGroupUMin => struct { id_result_type: IdResultType, id_result: IdResult, execution: IdScope, operation: GroupOperation, x: IdRef }, .OpGroupSMin => struct { id_result_type: IdResultType, id_result: IdResult, execution: IdScope, operation: GroupOperation, x: IdRef }, .OpGroupFMax => struct { id_result_type: IdResultType, id_result: IdResult, execution: IdScope, operation: GroupOperation, x: IdRef }, .OpGroupUMax => struct { id_result_type: IdResultType, id_result: IdResult, execution: IdScope, operation: GroupOperation, x: IdRef }, .OpGroupSMax => struct { id_result_type: IdResultType, id_result: IdResult, execution: IdScope, operation: GroupOperation, x: IdRef }, .OpReadPipe => struct { id_result_type: IdResultType, id_result: IdResult, pipe: IdRef, pointer: IdRef, packet_size: IdRef, packet_alignment: IdRef }, .OpWritePipe => struct { id_result_type: IdResultType, id_result: 
IdResult, pipe: IdRef, pointer: IdRef, packet_size: IdRef, packet_alignment: IdRef }, .OpReservedReadPipe => struct { id_result_type: IdResultType, id_result: IdResult, pipe: IdRef, reserve_id: IdRef, index: IdRef, pointer: IdRef, packet_size: IdRef, packet_alignment: IdRef }, .OpReservedWritePipe => struct { id_result_type: IdResultType, id_result: IdResult, pipe: IdRef, reserve_id: IdRef, index: IdRef, pointer: IdRef, packet_size: IdRef, packet_alignment: IdRef }, .OpReserveReadPipePackets => struct { id_result_type: IdResultType, id_result: IdResult, pipe: IdRef, num_packets: IdRef, packet_size: IdRef, packet_alignment: IdRef }, .OpReserveWritePipePackets => struct { id_result_type: IdResultType, id_result: IdResult, pipe: IdRef, num_packets: IdRef, packet_size: IdRef, packet_alignment: IdRef }, .OpCommitReadPipe => struct { pipe: IdRef, reserve_id: IdRef, packet_size: IdRef, packet_alignment: IdRef }, .OpCommitWritePipe => struct { pipe: IdRef, reserve_id: IdRef, packet_size: IdRef, packet_alignment: IdRef }, .OpIsValidReserveId => struct { id_result_type: IdResultType, id_result: IdResult, reserve_id: IdRef }, .OpGetNumPipePackets => struct { id_result_type: IdResultType, id_result: IdResult, pipe: IdRef, packet_size: IdRef, packet_alignment: IdRef }, .OpGetMaxPipePackets => struct { id_result_type: IdResultType, id_result: IdResult, pipe: IdRef, packet_size: IdRef, packet_alignment: IdRef }, .OpGroupReserveReadPipePackets => struct { id_result_type: IdResultType, id_result: IdResult, execution: IdScope, pipe: IdRef, num_packets: IdRef, packet_size: IdRef, packet_alignment: IdRef }, .OpGroupReserveWritePipePackets => struct { id_result_type: IdResultType, id_result: IdResult, execution: IdScope, pipe: IdRef, num_packets: IdRef, packet_size: IdRef, packet_alignment: IdRef }, .OpGroupCommitReadPipe => struct { execution: IdScope, pipe: IdRef, reserve_id: IdRef, packet_size: IdRef, packet_alignment: IdRef }, .OpGroupCommitWritePipe => struct { execution: IdScope, pipe: IdRef, reserve_id: IdRef, packet_size: IdRef, packet_alignment: IdRef }, .OpEnqueueMarker => struct { id_result_type: IdResultType, id_result: IdResult, queue: IdRef, num_events: IdRef, wait_events: IdRef, ret_event: IdRef }, .OpEnqueueKernel => struct { id_result_type: IdResultType, id_result: IdResult, queue: IdRef, flags: IdRef, nd_range: IdRef, num_events: IdRef, wait_events: IdRef, ret_event: IdRef, invoke: IdRef, param: IdRef, param_size: IdRef, param_align: IdRef, local_size: []const IdRef = &.{} }, .OpGetKernelNDrangeSubGroupCount => struct { id_result_type: IdResultType, id_result: IdResult, nd_range: IdRef, invoke: IdRef, param: IdRef, param_size: IdRef, param_align: IdRef }, .OpGetKernelNDrangeMaxSubGroupSize => struct { id_result_type: IdResultType, id_result: IdResult, nd_range: IdRef, invoke: IdRef, param: IdRef, param_size: IdRef, param_align: IdRef }, .OpGetKernelWorkGroupSize => struct { id_result_type: IdResultType, id_result: IdResult, invoke: IdRef, param: IdRef, param_size: IdRef, param_align: IdRef }, .OpGetKernelPreferredWorkGroupSizeMultiple => struct { id_result_type: IdResultType, id_result: IdResult, invoke: IdRef, param: IdRef, param_size: IdRef, param_align: IdRef }, .OpRetainEvent => struct { event: IdRef }, .OpReleaseEvent => struct { event: IdRef }, .OpCreateUserEvent => struct { id_result_type: IdResultType, id_result: IdResult }, .OpIsValidEvent => struct { id_result_type: IdResultType, id_result: IdResult, event: IdRef }, .OpSetUserEventStatus => struct { event: IdRef, status: IdRef }, 
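// A second sketch (editor's illustration; `writeOperand`, `opcode`, and
// `inst` are hypothetical): because every non-void prong is a plain struct,
// an encoder can serialize any instruction generically by walking the fields
// at comptime, in declaration order:
//
//     inline for (@typeInfo(Opcode.Operands(opcode)).Struct.fields) |field| {
//         try writeOperand(field.type, @field(inst, field.name));
//     }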
.OpCaptureEventProfilingInfo => struct { event: IdRef, profiling_info: IdRef, value: IdRef }, .OpGetDefaultQueue => struct { id_result_type: IdResultType, id_result: IdResult }, .OpBuildNDRange => struct { id_result_type: IdResultType, id_result: IdResult, globalworksize: IdRef, localworksize: IdRef, globalworkoffset: IdRef }, .OpImageSparseSampleImplicitLod => struct { id_result_type: IdResultType, id_result: IdResult, sampled_image: IdRef, coordinate: IdRef, image_operands: ?ImageOperands.Extended = null }, .OpImageSparseSampleExplicitLod => struct { id_result_type: IdResultType, id_result: IdResult, sampled_image: IdRef, coordinate: IdRef, image_operands: ImageOperands.Extended }, .OpImageSparseSampleDrefImplicitLod => struct { id_result_type: IdResultType, id_result: IdResult, sampled_image: IdRef, coordinate: IdRef, d_ref: IdRef, image_operands: ?ImageOperands.Extended = null }, .OpImageSparseSampleDrefExplicitLod => struct { id_result_type: IdResultType, id_result: IdResult, sampled_image: IdRef, coordinate: IdRef, d_ref: IdRef, image_operands: ImageOperands.Extended }, .OpImageSparseSampleProjImplicitLod => struct { id_result_type: IdResultType, id_result: IdResult, sampled_image: IdRef, coordinate: IdRef, image_operands: ?ImageOperands.Extended = null }, .OpImageSparseSampleProjExplicitLod => struct { id_result_type: IdResultType, id_result: IdResult, sampled_image: IdRef, coordinate: IdRef, image_operands: ImageOperands.Extended }, .OpImageSparseSampleProjDrefImplicitLod => struct { id_result_type: IdResultType, id_result: IdResult, sampled_image: IdRef, coordinate: IdRef, d_ref: IdRef, image_operands: ?ImageOperands.Extended = null }, .OpImageSparseSampleProjDrefExplicitLod => struct { id_result_type: IdResultType, id_result: IdResult, sampled_image: IdRef, coordinate: IdRef, d_ref: IdRef, image_operands: ImageOperands.Extended }, .OpImageSparseFetch => struct { id_result_type: IdResultType, id_result: IdResult, image: IdRef, coordinate: IdRef, image_operands: ?ImageOperands.Extended = null }, .OpImageSparseGather => struct { id_result_type: IdResultType, id_result: IdResult, sampled_image: IdRef, coordinate: IdRef, component: IdRef, image_operands: ?ImageOperands.Extended = null }, .OpImageSparseDrefGather => struct { id_result_type: IdResultType, id_result: IdResult, sampled_image: IdRef, coordinate: IdRef, d_ref: IdRef, image_operands: ?ImageOperands.Extended = null }, .OpImageSparseTexelsResident => struct { id_result_type: IdResultType, id_result: IdResult, resident_code: IdRef }, .OpNoLine => void, .OpAtomicFlagTestAndSet => struct { id_result_type: IdResultType, id_result: IdResult, pointer: IdRef, memory: IdScope, semantics: IdMemorySemantics }, .OpAtomicFlagClear => struct { pointer: IdRef, memory: IdScope, semantics: IdMemorySemantics }, .OpImageSparseRead => struct { id_result_type: IdResultType, id_result: IdResult, image: IdRef, coordinate: IdRef, image_operands: ?ImageOperands.Extended = null }, .OpSizeOf => struct { id_result_type: IdResultType, id_result: IdResult, pointer: IdRef }, .OpTypePipeStorage => struct { id_result: IdResult }, .OpConstantPipeStorage => struct { id_result_type: IdResultType, id_result: IdResult, packet_size: LiteralInteger, packet_alignment: LiteralInteger, capacity: LiteralInteger }, .OpCreatePipeFromPipeStorage => struct { id_result_type: IdResultType, id_result: IdResult, pipe_storage: IdRef }, .OpGetKernelLocalSizeForSubgroupCount => struct { id_result_type: IdResultType, id_result: IdResult, subgroup_count: IdRef, invoke: IdRef, 
param: IdRef, param_size: IdRef, param_align: IdRef }, .OpGetKernelMaxNumSubgroups => struct { id_result_type: IdResultType, id_result: IdResult, invoke: IdRef, param: IdRef, param_size: IdRef, param_align: IdRef }, .OpTypeNamedBarrier => struct { id_result: IdResult }, .OpNamedBarrierInitialize => struct { id_result_type: IdResultType, id_result: IdResult, subgroup_count: IdRef }, .OpMemoryNamedBarrier => struct { named_barrier: IdRef, memory: IdScope, semantics: IdMemorySemantics }, .OpModuleProcessed => struct { process: LiteralString }, .OpExecutionModeId => struct { entry_point: IdRef, mode: ExecutionMode.Extended }, .OpDecorateId => struct { target: IdRef, decoration: Decoration.Extended }, .OpGroupNonUniformElect => struct { id_result_type: IdResultType, id_result: IdResult, execution: IdScope }, .OpGroupNonUniformAll => struct { id_result_type: IdResultType, id_result: IdResult, execution: IdScope, predicate: IdRef }, .OpGroupNonUniformAny => struct { id_result_type: IdResultType, id_result: IdResult, execution: IdScope, predicate: IdRef }, .OpGroupNonUniformAllEqual => struct { id_result_type: IdResultType, id_result: IdResult, execution: IdScope, value: IdRef }, .OpGroupNonUniformBroadcast => struct { id_result_type: IdResultType, id_result: IdResult, execution: IdScope, value: IdRef, id: IdRef }, .OpGroupNonUniformBroadcastFirst => struct { id_result_type: IdResultType, id_result: IdResult, execution: IdScope, value: IdRef }, .OpGroupNonUniformBallot => struct { id_result_type: IdResultType, id_result: IdResult, execution: IdScope, predicate: IdRef }, .OpGroupNonUniformInverseBallot => struct { id_result_type: IdResultType, id_result: IdResult, execution: IdScope, value: IdRef }, .OpGroupNonUniformBallotBitExtract => struct { id_result_type: IdResultType, id_result: IdResult, execution: IdScope, value: IdRef, index: IdRef }, .OpGroupNonUniformBallotBitCount => struct { id_result_type: IdResultType, id_result: IdResult, execution: IdScope, operation: GroupOperation, value: IdRef }, .OpGroupNonUniformBallotFindLSB => struct { id_result_type: IdResultType, id_result: IdResult, execution: IdScope, value: IdRef }, .OpGroupNonUniformBallotFindMSB => struct { id_result_type: IdResultType, id_result: IdResult, execution: IdScope, value: IdRef }, .OpGroupNonUniformShuffle => struct { id_result_type: IdResultType, id_result: IdResult, execution: IdScope, value: IdRef, id: IdRef }, .OpGroupNonUniformShuffleXor => struct { id_result_type: IdResultType, id_result: IdResult, execution: IdScope, value: IdRef, mask: IdRef }, .OpGroupNonUniformShuffleUp => struct { id_result_type: IdResultType, id_result: IdResult, execution: IdScope, value: IdRef, delta: IdRef }, .OpGroupNonUniformShuffleDown => struct { id_result_type: IdResultType, id_result: IdResult, execution: IdScope, value: IdRef, delta: IdRef }, .OpGroupNonUniformIAdd => struct { id_result_type: IdResultType, id_result: IdResult, execution: IdScope, operation: GroupOperation, value: IdRef, clustersize: ?IdRef = null }, .OpGroupNonUniformFAdd => struct { id_result_type: IdResultType, id_result: IdResult, execution: IdScope, operation: GroupOperation, value: IdRef, clustersize: ?IdRef = null }, .OpGroupNonUniformIMul => struct { id_result_type: IdResultType, id_result: IdResult, execution: IdScope, operation: GroupOperation, value: IdRef, clustersize: ?IdRef = null }, .OpGroupNonUniformFMul => struct { id_result_type: IdResultType, id_result: IdResult, execution: IdScope, operation: GroupOperation, value: IdRef, clustersize: ?IdRef = null 
}, .OpGroupNonUniformSMin => struct { id_result_type: IdResultType, id_result: IdResult, execution: IdScope, operation: GroupOperation, value: IdRef, clustersize: ?IdRef = null }, .OpGroupNonUniformUMin => struct { id_result_type: IdResultType, id_result: IdResult, execution: IdScope, operation: GroupOperation, value: IdRef, clustersize: ?IdRef = null }, .OpGroupNonUniformFMin => struct { id_result_type: IdResultType, id_result: IdResult, execution: IdScope, operation: GroupOperation, value: IdRef, clustersize: ?IdRef = null }, .OpGroupNonUniformSMax => struct { id_result_type: IdResultType, id_result: IdResult, execution: IdScope, operation: GroupOperation, value: IdRef, clustersize: ?IdRef = null }, .OpGroupNonUniformUMax => struct { id_result_type: IdResultType, id_result: IdResult, execution: IdScope, operation: GroupOperation, value: IdRef, clustersize: ?IdRef = null }, .OpGroupNonUniformFMax => struct { id_result_type: IdResultType, id_result: IdResult, execution: IdScope, operation: GroupOperation, value: IdRef, clustersize: ?IdRef = null }, .OpGroupNonUniformBitwiseAnd => struct { id_result_type: IdResultType, id_result: IdResult, execution: IdScope, operation: GroupOperation, value: IdRef, clustersize: ?IdRef = null }, .OpGroupNonUniformBitwiseOr => struct { id_result_type: IdResultType, id_result: IdResult, execution: IdScope, operation: GroupOperation, value: IdRef, clustersize: ?IdRef = null }, .OpGroupNonUniformBitwiseXor => struct { id_result_type: IdResultType, id_result: IdResult, execution: IdScope, operation: GroupOperation, value: IdRef, clustersize: ?IdRef = null }, .OpGroupNonUniformLogicalAnd => struct { id_result_type: IdResultType, id_result: IdResult, execution: IdScope, operation: GroupOperation, value: IdRef, clustersize: ?IdRef = null }, .OpGroupNonUniformLogicalOr => struct { id_result_type: IdResultType, id_result: IdResult, execution: IdScope, operation: GroupOperation, value: IdRef, clustersize: ?IdRef = null }, .OpGroupNonUniformLogicalXor => struct { id_result_type: IdResultType, id_result: IdResult, execution: IdScope, operation: GroupOperation, value: IdRef, clustersize: ?IdRef = null }, .OpGroupNonUniformQuadBroadcast => struct { id_result_type: IdResultType, id_result: IdResult, execution: IdScope, value: IdRef, index: IdRef }, .OpGroupNonUniformQuadSwap => struct { id_result_type: IdResultType, id_result: IdResult, execution: IdScope, value: IdRef, direction: IdRef }, .OpCopyLogical => struct { id_result_type: IdResultType, id_result: IdResult, operand: IdRef }, .OpPtrEqual => struct { id_result_type: IdResultType, id_result: IdResult, operand_1: IdRef, operand_2: IdRef }, .OpPtrNotEqual => struct { id_result_type: IdResultType, id_result: IdResult, operand_1: IdRef, operand_2: IdRef }, .OpPtrDiff => struct { id_result_type: IdResultType, id_result: IdResult, operand_1: IdRef, operand_2: IdRef }, .OpColorAttachmentReadEXT => struct { id_result_type: IdResultType, id_result: IdResult, attachment: IdRef, sample: ?IdRef = null }, .OpDepthAttachmentReadEXT => struct { id_result_type: IdResultType, id_result: IdResult, sample: ?IdRef = null }, .OpStencilAttachmentReadEXT => struct { id_result_type: IdResultType, id_result: IdResult, sample: ?IdRef = null }, .OpTerminateInvocation => void, .OpSubgroupBallotKHR => struct { id_result_type: IdResultType, id_result: IdResult, predicate: IdRef }, .OpSubgroupFirstInvocationKHR => struct { id_result_type: IdResultType, id_result: IdResult, value: IdRef }, .OpSubgroupAllKHR => struct { id_result_type: IdResultType, 
id_result: IdResult, predicate: IdRef }, .OpSubgroupAnyKHR => struct { id_result_type: IdResultType, id_result: IdResult, predicate: IdRef }, .OpSubgroupAllEqualKHR => struct { id_result_type: IdResultType, id_result: IdResult, predicate: IdRef }, .OpGroupNonUniformRotateKHR => struct { id_result_type: IdResultType, id_result: IdResult, execution: IdScope, value: IdRef, delta: IdRef, clustersize: ?IdRef = null }, .OpSubgroupReadInvocationKHR => struct { id_result_type: IdResultType, id_result: IdResult, value: IdRef, index: IdRef }, .OpTraceRayKHR => struct { accel: IdRef, ray_flags: IdRef, cull_mask: IdRef, sbt_offset: IdRef, sbt_stride: IdRef, miss_index: IdRef, ray_origin: IdRef, ray_tmin: IdRef, ray_direction: IdRef, ray_tmax: IdRef, payload: IdRef }, .OpExecuteCallableKHR => struct { sbt_index: IdRef, callable_data: IdRef }, .OpConvertUToAccelerationStructureKHR => struct { id_result_type: IdResultType, id_result: IdResult, accel: IdRef }, .OpIgnoreIntersectionKHR => void, .OpTerminateRayKHR => void, .OpSDot => struct { id_result_type: IdResultType, id_result: IdResult, vector_1: IdRef, vector_2: IdRef, packed_vector_format: ?PackedVectorFormat = null }, .OpUDot => struct { id_result_type: IdResultType, id_result: IdResult, vector_1: IdRef, vector_2: IdRef, packed_vector_format: ?PackedVectorFormat = null }, .OpSUDot => struct { id_result_type: IdResultType, id_result: IdResult, vector_1: IdRef, vector_2: IdRef, packed_vector_format: ?PackedVectorFormat = null }, .OpSDotAccSat => struct { id_result_type: IdResultType, id_result: IdResult, vector_1: IdRef, vector_2: IdRef, accumulator: IdRef, packed_vector_format: ?PackedVectorFormat = null }, .OpUDotAccSat => struct { id_result_type: IdResultType, id_result: IdResult, vector_1: IdRef, vector_2: IdRef, accumulator: IdRef, packed_vector_format: ?PackedVectorFormat = null }, .OpSUDotAccSat => struct { id_result_type: IdResultType, id_result: IdResult, vector_1: IdRef, vector_2: IdRef, accumulator: IdRef, packed_vector_format: ?PackedVectorFormat = null }, .OpTypeRayQueryKHR => struct { id_result: IdResult }, .OpRayQueryInitializeKHR => struct { rayquery: IdRef, accel: IdRef, rayflags: IdRef, cullmask: IdRef, rayorigin: IdRef, raytmin: IdRef, raydirection: IdRef, raytmax: IdRef }, .OpRayQueryTerminateKHR => struct { rayquery: IdRef }, .OpRayQueryGenerateIntersectionKHR => struct { rayquery: IdRef, hitt: IdRef }, .OpRayQueryConfirmIntersectionKHR => struct { rayquery: IdRef }, .OpRayQueryProceedKHR => struct { id_result_type: IdResultType, id_result: IdResult, rayquery: IdRef }, .OpRayQueryGetIntersectionTypeKHR => struct { id_result_type: IdResultType, id_result: IdResult, rayquery: IdRef, intersection: IdRef }, .OpImageSampleWeightedQCOM => struct { id_result_type: IdResultType, id_result: IdResult, texture: IdRef, coordinates: IdRef, weights: IdRef }, .OpImageBoxFilterQCOM => struct { id_result_type: IdResultType, id_result: IdResult, texture: IdRef, coordinates: IdRef, box_size: IdRef }, .OpImageBlockMatchSSDQCOM => struct { id_result_type: IdResultType, id_result: IdResult, target: IdRef, target_coordinates: IdRef, reference: IdRef, reference_coordinates: IdRef, block_size: IdRef }, .OpImageBlockMatchSADQCOM => struct { id_result_type: IdResultType, id_result: IdResult, target: IdRef, target_coordinates: IdRef, reference: IdRef, reference_coordinates: IdRef, block_size: IdRef }, .OpGroupIAddNonUniformAMD => struct { id_result_type: IdResultType, id_result: IdResult, execution: IdScope, operation: GroupOperation, x: IdRef }, 
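// Vendor-suffixed instructions (QCOM above; AMD, NV, and INTEL below) use the
// same operand-struct scheme, but a module may only use them after declaring
// the corresponding SPIR-V extension and capability.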
.OpGroupFAddNonUniformAMD => struct { id_result_type: IdResultType, id_result: IdResult, execution: IdScope, operation: GroupOperation, x: IdRef }, .OpGroupFMinNonUniformAMD => struct { id_result_type: IdResultType, id_result: IdResult, execution: IdScope, operation: GroupOperation, x: IdRef }, .OpGroupUMinNonUniformAMD => struct { id_result_type: IdResultType, id_result: IdResult, execution: IdScope, operation: GroupOperation, x: IdRef }, .OpGroupSMinNonUniformAMD => struct { id_result_type: IdResultType, id_result: IdResult, execution: IdScope, operation: GroupOperation, x: IdRef }, .OpGroupFMaxNonUniformAMD => struct { id_result_type: IdResultType, id_result: IdResult, execution: IdScope, operation: GroupOperation, x: IdRef }, .OpGroupUMaxNonUniformAMD => struct { id_result_type: IdResultType, id_result: IdResult, execution: IdScope, operation: GroupOperation, x: IdRef }, .OpGroupSMaxNonUniformAMD => struct { id_result_type: IdResultType, id_result: IdResult, execution: IdScope, operation: GroupOperation, x: IdRef }, .OpFragmentMaskFetchAMD => struct { id_result_type: IdResultType, id_result: IdResult, image: IdRef, coordinate: IdRef }, .OpFragmentFetchAMD => struct { id_result_type: IdResultType, id_result: IdResult, image: IdRef, coordinate: IdRef, fragment_index: IdRef }, .OpReadClockKHR => struct { id_result_type: IdResultType, id_result: IdResult, scope: IdScope }, .OpHitObjectRecordHitMotionNV => struct { hit_object: IdRef, acceleration_structure: IdRef, instanceid: IdRef, primitiveid: IdRef, geometryindex: IdRef, hit_kind: IdRef, sbt_record_offset: IdRef, sbt_record_stride: IdRef, origin: IdRef, tmin: IdRef, direction: IdRef, tmax: IdRef, current_time: IdRef, hitobject_attributes: IdRef }, .OpHitObjectRecordHitWithIndexMotionNV => struct { hit_object: IdRef, acceleration_structure: IdRef, instanceid: IdRef, primitiveid: IdRef, geometryindex: IdRef, hit_kind: IdRef, sbt_record_index: IdRef, origin: IdRef, tmin: IdRef, direction: IdRef, tmax: IdRef, current_time: IdRef, hitobject_attributes: IdRef }, .OpHitObjectRecordMissMotionNV => struct { hit_object: IdRef, sbt_index: IdRef, origin: IdRef, tmin: IdRef, direction: IdRef, tmax: IdRef, current_time: IdRef }, .OpHitObjectGetWorldToObjectNV => struct { id_result_type: IdResultType, id_result: IdResult, hit_object: IdRef }, .OpHitObjectGetObjectToWorldNV => struct { id_result_type: IdResultType, id_result: IdResult, hit_object: IdRef }, .OpHitObjectGetObjectRayDirectionNV => struct { id_result_type: IdResultType, id_result: IdResult, hit_object: IdRef }, .OpHitObjectGetObjectRayOriginNV => struct { id_result_type: IdResultType, id_result: IdResult, hit_object: IdRef }, .OpHitObjectTraceRayMotionNV => struct { hit_object: IdRef, acceleration_structure: IdRef, rayflags: IdRef, cullmask: IdRef, sbt_record_offset: IdRef, sbt_record_stride: IdRef, miss_index: IdRef, origin: IdRef, tmin: IdRef, direction: IdRef, tmax: IdRef, time: IdRef, payload: IdRef }, .OpHitObjectGetShaderRecordBufferHandleNV => struct { id_result_type: IdResultType, id_result: IdResult, hit_object: IdRef }, .OpHitObjectGetShaderBindingTableRecordIndexNV => struct { id_result_type: IdResultType, id_result: IdResult, hit_object: IdRef }, .OpHitObjectRecordEmptyNV => struct { hit_object: IdRef }, .OpHitObjectTraceRayNV => struct { hit_object: IdRef, acceleration_structure: IdRef, rayflags: IdRef, cullmask: IdRef, sbt_record_offset: IdRef, sbt_record_stride: IdRef, miss_index: IdRef, origin: IdRef, tmin: IdRef, direction: IdRef, tmax: IdRef, payload: IdRef }, 
.OpHitObjectRecordHitNV => struct { hit_object: IdRef, acceleration_structure: IdRef, instanceid: IdRef, primitiveid: IdRef, geometryindex: IdRef, hit_kind: IdRef, sbt_record_offset: IdRef, sbt_record_stride: IdRef, origin: IdRef, tmin: IdRef, direction: IdRef, tmax: IdRef, hitobject_attributes: IdRef }, .OpHitObjectRecordHitWithIndexNV => struct { hit_object: IdRef, acceleration_structure: IdRef, instanceid: IdRef, primitiveid: IdRef, geometryindex: IdRef, hit_kind: IdRef, sbt_record_index: IdRef, origin: IdRef, tmin: IdRef, direction: IdRef, tmax: IdRef, hitobject_attributes: IdRef }, .OpHitObjectRecordMissNV => struct { hit_object: IdRef, sbt_index: IdRef, origin: IdRef, tmin: IdRef, direction: IdRef, tmax: IdRef }, .OpHitObjectExecuteShaderNV => struct { hit_object: IdRef, payload: IdRef }, .OpHitObjectGetCurrentTimeNV => struct { id_result_type: IdResultType, id_result: IdResult, hit_object: IdRef }, .OpHitObjectGetAttributesNV => struct { hit_object: IdRef, hit_object_attribute: IdRef }, .OpHitObjectGetHitKindNV => struct { id_result_type: IdResultType, id_result: IdResult, hit_object: IdRef }, .OpHitObjectGetPrimitiveIndexNV => struct { id_result_type: IdResultType, id_result: IdResult, hit_object: IdRef }, .OpHitObjectGetGeometryIndexNV => struct { id_result_type: IdResultType, id_result: IdResult, hit_object: IdRef }, .OpHitObjectGetInstanceIdNV => struct { id_result_type: IdResultType, id_result: IdResult, hit_object: IdRef }, .OpHitObjectGetInstanceCustomIndexNV => struct { id_result_type: IdResultType, id_result: IdResult, hit_object: IdRef }, .OpHitObjectGetWorldRayDirectionNV => struct { id_result_type: IdResultType, id_result: IdResult, hit_object: IdRef }, .OpHitObjectGetWorldRayOriginNV => struct { id_result_type: IdResultType, id_result: IdResult, hit_object: IdRef }, .OpHitObjectGetRayTMaxNV => struct { id_result_type: IdResultType, id_result: IdResult, hit_object: IdRef }, .OpHitObjectGetRayTMinNV => struct { id_result_type: IdResultType, id_result: IdResult, hit_object: IdRef }, .OpHitObjectIsEmptyNV => struct { id_result_type: IdResultType, id_result: IdResult, hit_object: IdRef }, .OpHitObjectIsHitNV => struct { id_result_type: IdResultType, id_result: IdResult, hit_object: IdRef }, .OpHitObjectIsMissNV => struct { id_result_type: IdResultType, id_result: IdResult, hit_object: IdRef }, .OpReorderThreadWithHitObjectNV => struct { hit_object: IdRef, hint: ?IdRef = null, bits: ?IdRef = null }, .OpReorderThreadWithHintNV => struct { hint: IdRef, bits: IdRef }, .OpTypeHitObjectNV => struct { id_result: IdResult }, .OpImageSampleFootprintNV => struct { id_result_type: IdResultType, id_result: IdResult, sampled_image: IdRef, coordinate: IdRef, granularity: IdRef, coarse: IdRef, image_operands: ?ImageOperands.Extended = null }, .OpEmitMeshTasksEXT => struct { group_count_x: IdRef, group_count_y: IdRef, group_count_z: IdRef, payload: ?IdRef = null }, .OpSetMeshOutputsEXT => struct { vertex_count: IdRef, primitive_count: IdRef }, .OpGroupNonUniformPartitionNV => struct { id_result_type: IdResultType, id_result: IdResult, value: IdRef }, .OpWritePackedPrimitiveIndices4x8NV => struct { index_offset: IdRef, packed_indices: IdRef }, .OpReportIntersectionKHR => struct { id_result_type: IdResultType, id_result: IdResult, hit: IdRef, hitkind: IdRef }, .OpIgnoreIntersectionNV => void, .OpTerminateRayNV => void, .OpTraceNV => struct { accel: IdRef, ray_flags: IdRef, cull_mask: IdRef, sbt_offset: IdRef, sbt_stride: IdRef, miss_index: IdRef, ray_origin: IdRef, ray_tmin: IdRef, 
ray_direction: IdRef, ray_tmax: IdRef, payloadid: IdRef }, .OpTraceMotionNV => struct { accel: IdRef, ray_flags: IdRef, cull_mask: IdRef, sbt_offset: IdRef, sbt_stride: IdRef, miss_index: IdRef, ray_origin: IdRef, ray_tmin: IdRef, ray_direction: IdRef, ray_tmax: IdRef, time: IdRef, payloadid: IdRef }, .OpTraceRayMotionNV => struct { accel: IdRef, ray_flags: IdRef, cull_mask: IdRef, sbt_offset: IdRef, sbt_stride: IdRef, miss_index: IdRef, ray_origin: IdRef, ray_tmin: IdRef, ray_direction: IdRef, ray_tmax: IdRef, time: IdRef, payload: IdRef }, .OpRayQueryGetIntersectionTriangleVertexPositionsKHR => struct { id_result_type: IdResultType, id_result: IdResult, rayquery: IdRef, intersection: IdRef }, .OpTypeAccelerationStructureKHR => struct { id_result: IdResult }, .OpExecuteCallableNV => struct { sbt_index: IdRef, callable_dataid: IdRef }, .OpTypeCooperativeMatrixNV => struct { id_result: IdResult, component_type: IdRef, execution: IdScope, rows: IdRef, columns: IdRef }, .OpCooperativeMatrixLoadNV => struct { id_result_type: IdResultType, id_result: IdResult, pointer: IdRef, stride: IdRef, column_major: IdRef, memory_access: ?MemoryAccess.Extended = null }, .OpCooperativeMatrixStoreNV => struct { pointer: IdRef, object: IdRef, stride: IdRef, column_major: IdRef, memory_access: ?MemoryAccess.Extended = null }, .OpCooperativeMatrixMulAddNV => struct { id_result_type: IdResultType, id_result: IdResult, a: IdRef, b: IdRef, c: IdRef }, .OpCooperativeMatrixLengthNV => struct { id_result_type: IdResultType, id_result: IdResult, type: IdRef }, .OpBeginInvocationInterlockEXT => void, .OpEndInvocationInterlockEXT => void, .OpDemoteToHelperInvocation => void, .OpIsHelperInvocationEXT => struct { id_result_type: IdResultType, id_result: IdResult }, .OpConvertUToImageNV => struct { id_result_type: IdResultType, id_result: IdResult, operand: IdRef }, .OpConvertUToSamplerNV => struct { id_result_type: IdResultType, id_result: IdResult, operand: IdRef }, .OpConvertImageToUNV => struct { id_result_type: IdResultType, id_result: IdResult, operand: IdRef }, .OpConvertSamplerToUNV => struct { id_result_type: IdResultType, id_result: IdResult, operand: IdRef }, .OpConvertUToSampledImageNV => struct { id_result_type: IdResultType, id_result: IdResult, operand: IdRef }, .OpConvertSampledImageToUNV => struct { id_result_type: IdResultType, id_result: IdResult, operand: IdRef }, .OpSamplerImageAddressingModeNV => struct { bit_width: LiteralInteger }, .OpSubgroupShuffleINTEL => struct { id_result_type: IdResultType, id_result: IdResult, data: IdRef, invocationid: IdRef }, .OpSubgroupShuffleDownINTEL => struct { id_result_type: IdResultType, id_result: IdResult, current: IdRef, next: IdRef, delta: IdRef }, .OpSubgroupShuffleUpINTEL => struct { id_result_type: IdResultType, id_result: IdResult, previous: IdRef, current: IdRef, delta: IdRef }, .OpSubgroupShuffleXorINTEL => struct { id_result_type: IdResultType, id_result: IdResult, data: IdRef, value: IdRef }, .OpSubgroupBlockReadINTEL => struct { id_result_type: IdResultType, id_result: IdResult, ptr: IdRef }, .OpSubgroupBlockWriteINTEL => struct { ptr: IdRef, data: IdRef }, .OpSubgroupImageBlockReadINTEL => struct { id_result_type: IdResultType, id_result: IdResult, image: IdRef, coordinate: IdRef }, .OpSubgroupImageBlockWriteINTEL => struct { image: IdRef, coordinate: IdRef, data: IdRef }, .OpSubgroupImageMediaBlockReadINTEL => struct { id_result_type: IdResultType, id_result: IdResult, image: IdRef, coordinate: IdRef, width: IdRef, height: IdRef }, 
.OpSubgroupImageMediaBlockWriteINTEL => struct { image: IdRef, coordinate: IdRef, width: IdRef, height: IdRef, data: IdRef }, .OpUCountLeadingZerosINTEL => struct { id_result_type: IdResultType, id_result: IdResult, operand: IdRef }, .OpUCountTrailingZerosINTEL => struct { id_result_type: IdResultType, id_result: IdResult, operand: IdRef }, .OpAbsISubINTEL => struct { id_result_type: IdResultType, id_result: IdResult, operand_1: IdRef, operand_2: IdRef }, .OpAbsUSubINTEL => struct { id_result_type: IdResultType, id_result: IdResult, operand_1: IdRef, operand_2: IdRef }, .OpIAddSatINTEL => struct { id_result_type: IdResultType, id_result: IdResult, operand_1: IdRef, operand_2: IdRef }, .OpUAddSatINTEL => struct { id_result_type: IdResultType, id_result: IdResult, operand_1: IdRef, operand_2: IdRef }, .OpIAverageINTEL => struct { id_result_type: IdResultType, id_result: IdResult, operand_1: IdRef, operand_2: IdRef }, .OpUAverageINTEL => struct { id_result_type: IdResultType, id_result: IdResult, operand_1: IdRef, operand_2: IdRef }, .OpIAverageRoundedINTEL => struct { id_result_type: IdResultType, id_result: IdResult, operand_1: IdRef, operand_2: IdRef }, .OpUAverageRoundedINTEL => struct { id_result_type: IdResultType, id_result: IdResult, operand_1: IdRef, operand_2: IdRef }, .OpISubSatINTEL => struct { id_result_type: IdResultType, id_result: IdResult, operand_1: IdRef, operand_2: IdRef }, .OpUSubSatINTEL => struct { id_result_type: IdResultType, id_result: IdResult, operand_1: IdRef, operand_2: IdRef }, .OpIMul32x16INTEL => struct { id_result_type: IdResultType, id_result: IdResult, operand_1: IdRef, operand_2: IdRef }, .OpUMul32x16INTEL => struct { id_result_type: IdResultType, id_result: IdResult, operand_1: IdRef, operand_2: IdRef }, .OpAtomicFMinEXT => struct { id_result_type: IdResultType, id_result: IdResult, pointer: IdRef, memory: IdScope, semantics: IdMemorySemantics, value: IdRef }, .OpAtomicFMaxEXT => struct { id_result_type: IdResultType, id_result: IdResult, pointer: IdRef, memory: IdScope, semantics: IdMemorySemantics, value: IdRef }, .OpAssumeTrueKHR => struct { condition: IdRef }, .OpExpectKHR => struct { id_result_type: IdResultType, id_result: IdResult, value: IdRef, expectedvalue: IdRef }, .OpDecorateString => struct { target: IdRef, decoration: Decoration.Extended }, .OpMemberDecorateString => struct { struct_type: IdRef, member: LiteralInteger, decoration: Decoration.Extended }, .OpLoopControlINTEL => struct { loop_control_parameters: []const LiteralInteger = &.{} }, .OpReadPipeBlockingINTEL => struct { id_result_type: IdResultType, id_result: IdResult, packet_size: IdRef, packet_alignment: IdRef }, .OpWritePipeBlockingINTEL => struct { id_result_type: IdResultType, id_result: IdResult, packet_size: IdRef, packet_alignment: IdRef }, .OpFPGARegINTEL => struct { id_result_type: IdResultType, id_result: IdResult, result: IdRef, input: IdRef }, .OpRayQueryGetRayTMinKHR => struct { id_result_type: IdResultType, id_result: IdResult, rayquery: IdRef }, .OpRayQueryGetRayFlagsKHR => struct { id_result_type: IdResultType, id_result: IdResult, rayquery: IdRef }, .OpRayQueryGetIntersectionTKHR => struct { id_result_type: IdResultType, id_result: IdResult, rayquery: IdRef, intersection: IdRef }, .OpRayQueryGetIntersectionInstanceCustomIndexKHR => struct { id_result_type: IdResultType, id_result: IdResult, rayquery: IdRef, intersection: IdRef }, .OpRayQueryGetIntersectionInstanceIdKHR => struct { id_result_type: IdResultType, id_result: IdResult, rayquery: IdRef, intersection: 
IdRef },
            .OpRayQueryGetIntersectionInstanceShaderBindingTableRecordOffsetKHR => struct { id_result_type: IdResultType, id_result: IdResult, rayquery: IdRef, intersection: IdRef },
            .OpRayQueryGetIntersectionGeometryIndexKHR => struct { id_result_type: IdResultType, id_result: IdResult, rayquery: IdRef, intersection: IdRef },
            .OpRayQueryGetIntersectionPrimitiveIndexKHR => struct { id_result_type: IdResultType, id_result: IdResult, rayquery: IdRef, intersection: IdRef },
            .OpRayQueryGetIntersectionBarycentricsKHR => struct { id_result_type: IdResultType, id_result: IdResult, rayquery: IdRef, intersection: IdRef },
            .OpRayQueryGetIntersectionFrontFaceKHR => struct { id_result_type: IdResultType, id_result: IdResult, rayquery: IdRef, intersection: IdRef },
            .OpRayQueryGetIntersectionCandidateAABBOpaqueKHR => struct { id_result_type: IdResultType, id_result: IdResult, rayquery: IdRef },
            .OpRayQueryGetIntersectionObjectRayDirectionKHR => struct { id_result_type: IdResultType, id_result: IdResult, rayquery: IdRef, intersection: IdRef },
            .OpRayQueryGetIntersectionObjectRayOriginKHR => struct { id_result_type: IdResultType, id_result: IdResult, rayquery: IdRef, intersection: IdRef },
            .OpRayQueryGetWorldRayDirectionKHR => struct { id_result_type: IdResultType, id_result: IdResult, rayquery: IdRef },
            .OpRayQueryGetWorldRayOriginKHR => struct { id_result_type: IdResultType, id_result: IdResult, rayquery: IdRef },
            .OpRayQueryGetIntersectionObjectToWorldKHR => struct { id_result_type: IdResultType, id_result: IdResult, rayquery: IdRef, intersection: IdRef },
            .OpRayQueryGetIntersectionWorldToObjectKHR => struct { id_result_type: IdResultType, id_result: IdResult, rayquery: IdRef, intersection: IdRef },
            .OpAtomicFAddEXT => struct { id_result_type: IdResultType, id_result: IdResult, pointer: IdRef, memory: IdScope, semantics: IdMemorySemantics, value: IdRef },
            .OpTypeBufferSurfaceINTEL => struct { id_result: IdResult, accessqualifier: AccessQualifier },
            .OpTypeStructContinuedINTEL => struct { id_ref: []const IdRef = &.{} },
            .OpConstantCompositeContinuedINTEL => struct { constituents: []const IdRef = &.{} },
            .OpSpecConstantCompositeContinuedINTEL => struct { constituents: []const IdRef = &.{} },
            .OpConvertFToBF16INTEL => struct { id_result_type: IdResultType, id_result: IdResult, float_value: IdRef },
            .OpConvertBF16ToFINTEL => struct { id_result_type: IdResultType, id_result: IdResult, bfloat16_value: IdRef },
            .OpControlBarrierArriveINTEL => struct { execution: IdScope, memory: IdScope, semantics: IdMemorySemantics },
            .OpControlBarrierWaitINTEL => struct { execution: IdScope, memory: IdScope, semantics: IdMemorySemantics },
            .OpGroupIMulKHR => struct { id_result_type: IdResultType, id_result: IdResult, execution: IdScope, operation: GroupOperation, x: IdRef },
            .OpGroupFMulKHR => struct { id_result_type: IdResultType, id_result: IdResult, execution: IdScope, operation: GroupOperation, x: IdRef },
            .OpGroupBitwiseAndKHR => struct { id_result_type: IdResultType, id_result: IdResult, execution: IdScope, operation: GroupOperation, x: IdRef },
            .OpGroupBitwiseOrKHR => struct { id_result_type: IdResultType, id_result: IdResult, execution: IdScope, operation: GroupOperation, x: IdRef },
            .OpGroupBitwiseXorKHR => struct { id_result_type: IdResultType, id_result: IdResult, execution: IdScope, operation: GroupOperation, x: IdRef },
            .OpGroupLogicalAndKHR => struct { id_result_type: IdResultType, id_result: IdResult, execution: IdScope, operation: GroupOperation, x: IdRef },
            .OpGroupLogicalOrKHR => struct { id_result_type: IdResultType, id_result: IdResult, execution: IdScope, operation: GroupOperation, x: IdRef },
            .OpGroupLogicalXorKHR => struct { id_result_type: IdResultType, id_result: IdResult, execution: IdScope, operation: GroupOperation, x: IdRef },
        };
    }

    pub fn operands(self: Opcode) []const Operand {
        return switch (self) {
            .OpNop => &[_]Operand{},
            .OpUndef => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, },
            .OpSourceContinued => &[_]Operand{ .{ .kind = .LiteralString, .quantifier = .required }, },
            .OpSource => &[_]Operand{ .{ .kind = .SourceLanguage, .quantifier = .required }, .{ .kind = .LiteralInteger, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .optional }, .{ .kind = .LiteralString, .quantifier = .optional }, },
            .OpSourceExtension => &[_]Operand{ .{ .kind = .LiteralString, .quantifier = .required }, },
            .OpName => &[_]Operand{ .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .LiteralString, .quantifier = .required }, },
            .OpMemberName => &[_]Operand{ .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .LiteralInteger, .quantifier = .required }, .{ .kind = .LiteralString, .quantifier = .required }, },
            .OpString => &[_]Operand{ .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .LiteralString, .quantifier = .required }, },
            .OpLine => &[_]Operand{ .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .LiteralInteger, .quantifier = .required }, .{ .kind = .LiteralInteger, .quantifier = .required }, },
            .OpExtension => &[_]Operand{ .{ .kind = .LiteralString, .quantifier = .required }, },
            .OpExtInstImport => &[_]Operand{ .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .LiteralString, .quantifier = .required }, },
            .OpExtInst => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .LiteralExtInstInteger, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .variadic }, },
            .OpMemoryModel => &[_]Operand{ .{ .kind = .AddressingModel, .quantifier = .required }, .{ .kind = .MemoryModel, .quantifier = .required }, },
            .OpEntryPoint => &[_]Operand{ .{ .kind = .ExecutionModel, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .LiteralString, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .variadic }, },
            .OpExecutionMode => &[_]Operand{ .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .ExecutionMode, .quantifier = .required }, },
            .OpCapability => &[_]Operand{ .{ .kind = .Capability, .quantifier = .required }, },
            .OpTypeVoid => &[_]Operand{ .{ .kind = .IdResult, .quantifier = .required }, },
            .OpTypeBool => &[_]Operand{ .{ .kind = .IdResult, .quantifier = .required }, },
            .OpTypeInt => &[_]Operand{ .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .LiteralInteger, .quantifier = .required }, .{ .kind = .LiteralInteger, .quantifier = .required }, },
            .OpTypeFloat => &[_]Operand{ .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .LiteralInteger, .quantifier = .required }, },
            .OpTypeVector => &[_]Operand{ .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .LiteralInteger, .quantifier = .required }, },
            .OpTypeMatrix => &[_]Operand{ .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .LiteralInteger, .quantifier = .required }, },
            .OpTypeImage => &[_]Operand{ .{ .kind = .IdResult, .quantifier = .required
}, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .Dim, .quantifier = .required }, .{ .kind = .LiteralInteger, .quantifier = .required }, .{ .kind = .LiteralInteger, .quantifier = .required }, .{ .kind = .LiteralInteger, .quantifier = .required }, .{ .kind = .LiteralInteger, .quantifier = .required }, .{ .kind = .ImageFormat, .quantifier = .required }, .{ .kind = .AccessQualifier, .quantifier = .optional }, }, .OpTypeSampler => &[_]Operand{ .{ .kind = .IdResult, .quantifier = .required }, }, .OpTypeSampledImage => &[_]Operand{ .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpTypeArray => &[_]Operand{ .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpTypeRuntimeArray => &[_]Operand{ .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpTypeStruct => &[_]Operand{ .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .variadic }, }, .OpTypeOpaque => &[_]Operand{ .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .LiteralString, .quantifier = .required }, }, .OpTypePointer => &[_]Operand{ .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .StorageClass, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpTypeFunction => &[_]Operand{ .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .variadic }, }, .OpTypeEvent => &[_]Operand{ .{ .kind = .IdResult, .quantifier = .required }, }, .OpTypeDeviceEvent => &[_]Operand{ .{ .kind = .IdResult, .quantifier = .required }, }, .OpTypeReserveId => &[_]Operand{ .{ .kind = .IdResult, .quantifier = .required }, }, .OpTypeQueue => &[_]Operand{ .{ .kind = .IdResult, .quantifier = .required }, }, .OpTypePipe => &[_]Operand{ .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .AccessQualifier, .quantifier = .required }, }, .OpTypeForwardPointer => &[_]Operand{ .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .StorageClass, .quantifier = .required }, }, .OpConstantTrue => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, }, .OpConstantFalse => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, }, .OpConstant => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .LiteralContextDependentNumber, .quantifier = .required }, }, .OpConstantComposite => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .variadic }, }, .OpConstantSampler => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .SamplerAddressingMode, .quantifier = .required }, .{ .kind = .LiteralInteger, .quantifier = .required }, .{ .kind = .SamplerFilterMode, .quantifier = .required }, }, .OpConstantNull => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, }, .OpSpecConstantTrue => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, }, .OpSpecConstantFalse => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, 
.quantifier = .required }, }, .OpSpecConstant => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .LiteralContextDependentNumber, .quantifier = .required }, }, .OpSpecConstantComposite => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .variadic }, }, .OpSpecConstantOp => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .LiteralSpecConstantOpInteger, .quantifier = .required }, }, .OpFunction => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .FunctionControl, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpFunctionParameter => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, }, .OpFunctionEnd => &[_]Operand{}, .OpFunctionCall => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .variadic }, }, .OpVariable => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .StorageClass, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .optional }, }, .OpImageTexelPointer => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpLoad => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .MemoryAccess, .quantifier = .optional }, }, .OpStore => &[_]Operand{ .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .MemoryAccess, .quantifier = .optional }, }, .OpCopyMemory => &[_]Operand{ .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .MemoryAccess, .quantifier = .optional }, .{ .kind = .MemoryAccess, .quantifier = .optional }, }, .OpCopyMemorySized => &[_]Operand{ .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .MemoryAccess, .quantifier = .optional }, .{ .kind = .MemoryAccess, .quantifier = .optional }, }, .OpAccessChain => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .variadic }, }, .OpInBoundsAccessChain => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .variadic }, }, .OpPtrAccessChain => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .variadic }, }, .OpArrayLength => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, 
.quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .LiteralInteger, .quantifier = .required }, }, .OpGenericPtrMemSemantics => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpInBoundsPtrAccessChain => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .variadic }, }, .OpDecorate => &[_]Operand{ .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .Decoration, .quantifier = .required }, }, .OpMemberDecorate => &[_]Operand{ .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .LiteralInteger, .quantifier = .required }, .{ .kind = .Decoration, .quantifier = .required }, }, .OpDecorationGroup => &[_]Operand{ .{ .kind = .IdResult, .quantifier = .required }, }, .OpGroupDecorate => &[_]Operand{ .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .variadic }, }, .OpGroupMemberDecorate => &[_]Operand{ .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .PairIdRefLiteralInteger, .quantifier = .variadic }, }, .OpVectorExtractDynamic => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpVectorInsertDynamic => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpVectorShuffle => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .LiteralInteger, .quantifier = .variadic }, }, .OpCompositeConstruct => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .variadic }, }, .OpCompositeExtract => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .LiteralInteger, .quantifier = .variadic }, }, .OpCompositeInsert => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .LiteralInteger, .quantifier = .variadic }, }, .OpCopyObject => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpTranspose => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpSampledImage => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpImageSampleImplicitLod => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ 
.kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .ImageOperands, .quantifier = .optional }, }, .OpImageSampleExplicitLod => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .ImageOperands, .quantifier = .required }, }, .OpImageSampleDrefImplicitLod => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .ImageOperands, .quantifier = .optional }, }, .OpImageSampleDrefExplicitLod => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .ImageOperands, .quantifier = .required }, }, .OpImageSampleProjImplicitLod => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .ImageOperands, .quantifier = .optional }, }, .OpImageSampleProjExplicitLod => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .ImageOperands, .quantifier = .required }, }, .OpImageSampleProjDrefImplicitLod => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .ImageOperands, .quantifier = .optional }, }, .OpImageSampleProjDrefExplicitLod => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .ImageOperands, .quantifier = .required }, }, .OpImageFetch => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .ImageOperands, .quantifier = .optional }, }, .OpImageGather => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .ImageOperands, .quantifier = .optional }, }, .OpImageDrefGather => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .ImageOperands, .quantifier = .optional }, }, .OpImageRead => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, 
.quantifier = .required }, .{ .kind = .ImageOperands, .quantifier = .optional }, }, .OpImageWrite => &[_]Operand{ .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .ImageOperands, .quantifier = .optional }, }, .OpImage => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpImageQueryFormat => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpImageQueryOrder => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpImageQuerySizeLod => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpImageQuerySize => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpImageQueryLod => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpImageQueryLevels => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpImageQuerySamples => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpConvertFToU => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpConvertFToS => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpConvertSToF => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpConvertUToF => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpUConvert => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpSConvert => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpFConvert => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpQuantizeToF16 => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpConvertPtrToU => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpSatConvertSToU => &[_]Operand{ .{ 
.kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpSatConvertUToS => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpConvertUToPtr => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpPtrCastToGeneric => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpGenericCastToPtr => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpGenericCastToPtrExplicit => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .StorageClass, .quantifier = .required }, }, .OpBitcast => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpSNegate => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpFNegate => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpIAdd => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpFAdd => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpISub => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpFSub => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpIMul => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpFMul => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpUDiv => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpSDiv => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpFDiv => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = 
.required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpUMod => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpSRem => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpSMod => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpFRem => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpFMod => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpVectorTimesScalar => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpMatrixTimesScalar => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpVectorTimesMatrix => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpMatrixTimesVector => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpMatrixTimesMatrix => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpOuterProduct => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpDot => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpIAddCarry => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpISubBorrow => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpUMulExtended => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpSMulExtended => &[_]Operand{ .{ .kind 
= .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpAny => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpAll => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpIsNan => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpIsInf => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpIsFinite => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpIsNormal => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpSignBitSet => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpLessOrGreater => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpOrdered => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpUnordered => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpLogicalEqual => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpLogicalNotEqual => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpLogicalOr => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpLogicalAnd => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpLogicalNot => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpSelect => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpIEqual => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ 
.kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpINotEqual => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpUGreaterThan => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpSGreaterThan => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpUGreaterThanEqual => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpSGreaterThanEqual => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpULessThan => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpSLessThan => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpULessThanEqual => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpSLessThanEqual => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpFOrdEqual => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpFUnordEqual => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpFOrdNotEqual => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpFUnordNotEqual => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpFOrdLessThan => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpFUnordLessThan => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, 
.quantifier = .required }, }, .OpFOrdGreaterThan => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpFUnordGreaterThan => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpFOrdLessThanEqual => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpFUnordLessThanEqual => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpFOrdGreaterThanEqual => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpFUnordGreaterThanEqual => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpShiftRightLogical => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpShiftRightArithmetic => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpShiftLeftLogical => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpBitwiseOr => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpBitwiseXor => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpBitwiseAnd => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpNot => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpBitFieldInsert => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpBitFieldSExtract => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, 
.{ .kind = .IdRef, .quantifier = .required }, }, .OpBitFieldUExtract => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpBitReverse => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpBitCount => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpDPdx => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpDPdy => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpFwidth => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpDPdxFine => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpDPdyFine => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpFwidthFine => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpDPdxCoarse => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpDPdyCoarse => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpFwidthCoarse => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpEmitVertex => &[_]Operand{}, .OpEndPrimitive => &[_]Operand{}, .OpEmitStreamVertex => &[_]Operand{ .{ .kind = .IdRef, .quantifier = .required }, }, .OpEndStreamPrimitive => &[_]Operand{ .{ .kind = .IdRef, .quantifier = .required }, }, .OpControlBarrier => &[_]Operand{ .{ .kind = .IdScope, .quantifier = .required }, .{ .kind = .IdScope, .quantifier = .required }, .{ .kind = .IdMemorySemantics, .quantifier = .required }, }, .OpMemoryBarrier => &[_]Operand{ .{ .kind = .IdScope, .quantifier = .required }, .{ .kind = .IdMemorySemantics, .quantifier = .required }, }, .OpAtomicLoad => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdScope, .quantifier = .required }, .{ .kind = .IdMemorySemantics, .quantifier = .required }, }, .OpAtomicStore => &[_]Operand{ .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdScope, .quantifier = .required }, .{ .kind = .IdMemorySemantics, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpAtomicExchange => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, 
.quantifier = .required }, .{ .kind = .IdScope, .quantifier = .required }, .{ .kind = .IdMemorySemantics, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpAtomicCompareExchange => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdScope, .quantifier = .required }, .{ .kind = .IdMemorySemantics, .quantifier = .required }, .{ .kind = .IdMemorySemantics, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpAtomicCompareExchangeWeak => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdScope, .quantifier = .required }, .{ .kind = .IdMemorySemantics, .quantifier = .required }, .{ .kind = .IdMemorySemantics, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpAtomicIIncrement => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdScope, .quantifier = .required }, .{ .kind = .IdMemorySemantics, .quantifier = .required }, }, .OpAtomicIDecrement => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdScope, .quantifier = .required }, .{ .kind = .IdMemorySemantics, .quantifier = .required }, }, .OpAtomicIAdd => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdScope, .quantifier = .required }, .{ .kind = .IdMemorySemantics, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpAtomicISub => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdScope, .quantifier = .required }, .{ .kind = .IdMemorySemantics, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpAtomicSMin => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdScope, .quantifier = .required }, .{ .kind = .IdMemorySemantics, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpAtomicUMin => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdScope, .quantifier = .required }, .{ .kind = .IdMemorySemantics, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpAtomicSMax => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdScope, .quantifier = .required }, .{ .kind = .IdMemorySemantics, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpAtomicUMax => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdScope, .quantifier = 
.required }, .{ .kind = .IdMemorySemantics, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpAtomicAnd => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdScope, .quantifier = .required }, .{ .kind = .IdMemorySemantics, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpAtomicOr => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdScope, .quantifier = .required }, .{ .kind = .IdMemorySemantics, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpAtomicXor => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdScope, .quantifier = .required }, .{ .kind = .IdMemorySemantics, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpPhi => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .PairIdRefIdRef, .quantifier = .variadic }, }, .OpLoopMerge => &[_]Operand{ .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .LoopControl, .quantifier = .required }, }, .OpSelectionMerge => &[_]Operand{ .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .SelectionControl, .quantifier = .required }, }, .OpLabel => &[_]Operand{ .{ .kind = .IdResult, .quantifier = .required }, }, .OpBranch => &[_]Operand{ .{ .kind = .IdRef, .quantifier = .required }, }, .OpBranchConditional => &[_]Operand{ .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .LiteralInteger, .quantifier = .variadic }, }, .OpSwitch => &[_]Operand{ .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .PairLiteralIntegerIdRef, .quantifier = .variadic }, }, .OpKill => &[_]Operand{}, .OpReturn => &[_]Operand{}, .OpReturnValue => &[_]Operand{ .{ .kind = .IdRef, .quantifier = .required }, }, .OpUnreachable => &[_]Operand{}, .OpLifetimeStart => &[_]Operand{ .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .LiteralInteger, .quantifier = .required }, }, .OpLifetimeStop => &[_]Operand{ .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .LiteralInteger, .quantifier = .required }, }, .OpGroupAsyncCopy => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdScope, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpGroupWaitEvents => &[_]Operand{ .{ .kind = .IdScope, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpGroupAll => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdScope, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpGroupAny => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, 
.quantifier = .required }, .{ .kind = .IdScope, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpGroupBroadcast => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdScope, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpGroupIAdd => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdScope, .quantifier = .required }, .{ .kind = .GroupOperation, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpGroupFAdd => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdScope, .quantifier = .required }, .{ .kind = .GroupOperation, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpGroupFMin => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdScope, .quantifier = .required }, .{ .kind = .GroupOperation, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpGroupUMin => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdScope, .quantifier = .required }, .{ .kind = .GroupOperation, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpGroupSMin => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdScope, .quantifier = .required }, .{ .kind = .GroupOperation, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpGroupFMax => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdScope, .quantifier = .required }, .{ .kind = .GroupOperation, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpGroupUMax => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdScope, .quantifier = .required }, .{ .kind = .GroupOperation, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpGroupSMax => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdScope, .quantifier = .required }, .{ .kind = .GroupOperation, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpReadPipe => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpWritePipe => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpReservedReadPipe => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, 
.quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpReservedWritePipe => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpReserveReadPipePackets => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpReserveWritePipePackets => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpCommitReadPipe => &[_]Operand{ .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpCommitWritePipe => &[_]Operand{ .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpIsValidReserveId => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpGetNumPipePackets => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpGetMaxPipePackets => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpGroupReserveReadPipePackets => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdScope, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpGroupReserveWritePipePackets => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdScope, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpGroupCommitReadPipe => &[_]Operand{ .{ .kind = .IdScope, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpGroupCommitWritePipe => &[_]Operand{ .{ .kind = .IdScope, .quantifier 
= .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpEnqueueMarker => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpEnqueueKernel => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .variadic }, }, .OpGetKernelNDrangeSubGroupCount => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpGetKernelNDrangeMaxSubGroupSize => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpGetKernelWorkGroupSize => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpGetKernelPreferredWorkGroupSizeMultiple => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpRetainEvent => &[_]Operand{ .{ .kind = .IdRef, .quantifier = .required }, }, .OpReleaseEvent => &[_]Operand{ .{ .kind = .IdRef, .quantifier = .required }, }, .OpCreateUserEvent => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, }, .OpIsValidEvent => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpSetUserEventStatus => &[_]Operand{ .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpCaptureEventProfilingInfo => &[_]Operand{ .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpGetDefaultQueue => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required 
}, }, .OpBuildNDRange => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpImageSparseSampleImplicitLod => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .ImageOperands, .quantifier = .optional }, }, .OpImageSparseSampleExplicitLod => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .ImageOperands, .quantifier = .required }, }, .OpImageSparseSampleDrefImplicitLod => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .ImageOperands, .quantifier = .optional }, }, .OpImageSparseSampleDrefExplicitLod => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .ImageOperands, .quantifier = .required }, }, .OpImageSparseSampleProjImplicitLod => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .ImageOperands, .quantifier = .optional }, }, .OpImageSparseSampleProjExplicitLod => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .ImageOperands, .quantifier = .required }, }, .OpImageSparseSampleProjDrefImplicitLod => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .ImageOperands, .quantifier = .optional }, }, .OpImageSparseSampleProjDrefExplicitLod => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .ImageOperands, .quantifier = .required }, }, .OpImageSparseFetch => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .ImageOperands, .quantifier = .optional }, }, .OpImageSparseGather => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .ImageOperands, .quantifier = .optional }, }, .OpImageSparseDrefGather => &[_]Operand{ .{ 
.kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .ImageOperands, .quantifier = .optional }, }, .OpImageSparseTexelsResident => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpNoLine => &[_]Operand{}, .OpAtomicFlagTestAndSet => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdScope, .quantifier = .required }, .{ .kind = .IdMemorySemantics, .quantifier = .required }, }, .OpAtomicFlagClear => &[_]Operand{ .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdScope, .quantifier = .required }, .{ .kind = .IdMemorySemantics, .quantifier = .required }, }, .OpImageSparseRead => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .ImageOperands, .quantifier = .optional }, }, .OpSizeOf => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpTypePipeStorage => &[_]Operand{ .{ .kind = .IdResult, .quantifier = .required }, }, .OpConstantPipeStorage => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .LiteralInteger, .quantifier = .required }, .{ .kind = .LiteralInteger, .quantifier = .required }, .{ .kind = .LiteralInteger, .quantifier = .required }, }, .OpCreatePipeFromPipeStorage => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpGetKernelLocalSizeForSubgroupCount => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpGetKernelMaxNumSubgroups => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpTypeNamedBarrier => &[_]Operand{ .{ .kind = .IdResult, .quantifier = .required }, }, .OpNamedBarrierInitialize => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpMemoryNamedBarrier => &[_]Operand{ .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdScope, .quantifier = .required }, .{ .kind = .IdMemorySemantics, .quantifier = .required }, }, .OpModuleProcessed => &[_]Operand{ .{ .kind = .LiteralString, .quantifier = .required }, }, .OpExecutionModeId => &[_]Operand{ .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .ExecutionMode, .quantifier = .required }, }, .OpDecorateId => &[_]Operand{ .{ .kind = .IdRef, .quantifier = 
.required }, .{ .kind = .Decoration, .quantifier = .required }, }, .OpGroupNonUniformElect => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdScope, .quantifier = .required }, }, .OpGroupNonUniformAll => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdScope, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpGroupNonUniformAny => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdScope, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpGroupNonUniformAllEqual => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdScope, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpGroupNonUniformBroadcast => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdScope, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpGroupNonUniformBroadcastFirst => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdScope, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpGroupNonUniformBallot => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdScope, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpGroupNonUniformInverseBallot => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdScope, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpGroupNonUniformBallotBitExtract => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdScope, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpGroupNonUniformBallotBitCount => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdScope, .quantifier = .required }, .{ .kind = .GroupOperation, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpGroupNonUniformBallotFindLSB => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdScope, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpGroupNonUniformBallotFindMSB => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdScope, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpGroupNonUniformShuffle => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdScope, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpGroupNonUniformShuffleXor => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdScope, 
.quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpGroupNonUniformShuffleUp => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdScope, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpGroupNonUniformShuffleDown => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdScope, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpGroupNonUniformIAdd => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdScope, .quantifier = .required }, .{ .kind = .GroupOperation, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .optional }, }, .OpGroupNonUniformFAdd => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdScope, .quantifier = .required }, .{ .kind = .GroupOperation, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .optional }, }, .OpGroupNonUniformIMul => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdScope, .quantifier = .required }, .{ .kind = .GroupOperation, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .optional }, }, .OpGroupNonUniformFMul => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdScope, .quantifier = .required }, .{ .kind = .GroupOperation, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .optional }, }, .OpGroupNonUniformSMin => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdScope, .quantifier = .required }, .{ .kind = .GroupOperation, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .optional }, }, .OpGroupNonUniformUMin => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdScope, .quantifier = .required }, .{ .kind = .GroupOperation, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .optional }, }, .OpGroupNonUniformFMin => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdScope, .quantifier = .required }, .{ .kind = .GroupOperation, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .optional }, }, .OpGroupNonUniformSMax => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdScope, .quantifier = .required }, .{ .kind = .GroupOperation, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .optional }, }, .OpGroupNonUniformUMax => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, 
.{ .kind = .IdScope, .quantifier = .required }, .{ .kind = .GroupOperation, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .optional }, }, .OpGroupNonUniformFMax => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdScope, .quantifier = .required }, .{ .kind = .GroupOperation, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .optional }, }, .OpGroupNonUniformBitwiseAnd => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdScope, .quantifier = .required }, .{ .kind = .GroupOperation, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .optional }, }, .OpGroupNonUniformBitwiseOr => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdScope, .quantifier = .required }, .{ .kind = .GroupOperation, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .optional }, }, .OpGroupNonUniformBitwiseXor => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdScope, .quantifier = .required }, .{ .kind = .GroupOperation, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .optional }, }, .OpGroupNonUniformLogicalAnd => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdScope, .quantifier = .required }, .{ .kind = .GroupOperation, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .optional }, }, .OpGroupNonUniformLogicalOr => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdScope, .quantifier = .required }, .{ .kind = .GroupOperation, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .optional }, }, .OpGroupNonUniformLogicalXor => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdScope, .quantifier = .required }, .{ .kind = .GroupOperation, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .optional }, }, .OpGroupNonUniformQuadBroadcast => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdScope, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpGroupNonUniformQuadSwap => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdScope, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpCopyLogical => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpPtrEqual => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = 
.IdRef, .quantifier = .required }, }, .OpPtrNotEqual => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpPtrDiff => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpColorAttachmentReadEXT => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .optional }, }, .OpDepthAttachmentReadEXT => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .optional }, }, .OpStencilAttachmentReadEXT => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .optional }, }, .OpTerminateInvocation => &[_]Operand{}, .OpSubgroupBallotKHR => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpSubgroupFirstInvocationKHR => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpSubgroupAllKHR => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpSubgroupAnyKHR => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpSubgroupAllEqualKHR => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpGroupNonUniformRotateKHR => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdScope, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .optional }, }, .OpSubgroupReadInvocationKHR => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpTraceRayKHR => &[_]Operand{ .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpExecuteCallableKHR => &[_]Operand{ .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpConvertUToAccelerationStructureKHR => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, 
}, .OpIgnoreIntersectionKHR => &[_]Operand{}, .OpTerminateRayKHR => &[_]Operand{}, .OpSDot => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .PackedVectorFormat, .quantifier = .optional }, }, .OpUDot => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .PackedVectorFormat, .quantifier = .optional }, }, .OpSUDot => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .PackedVectorFormat, .quantifier = .optional }, }, .OpSDotAccSat => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .PackedVectorFormat, .quantifier = .optional }, }, .OpUDotAccSat => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .PackedVectorFormat, .quantifier = .optional }, }, .OpSUDotAccSat => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .PackedVectorFormat, .quantifier = .optional }, }, .OpTypeRayQueryKHR => &[_]Operand{ .{ .kind = .IdResult, .quantifier = .required }, }, .OpRayQueryInitializeKHR => &[_]Operand{ .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpRayQueryTerminateKHR => &[_]Operand{ .{ .kind = .IdRef, .quantifier = .required }, }, .OpRayQueryGenerateIntersectionKHR => &[_]Operand{ .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpRayQueryConfirmIntersectionKHR => &[_]Operand{ .{ .kind = .IdRef, .quantifier = .required }, }, .OpRayQueryProceedKHR => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpRayQueryGetIntersectionTypeKHR => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpImageSampleWeightedQCOM => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpImageBoxFilterQCOM => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = 
.required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpImageBlockMatchSSDQCOM => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpImageBlockMatchSADQCOM => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpGroupIAddNonUniformAMD => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdScope, .quantifier = .required }, .{ .kind = .GroupOperation, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpGroupFAddNonUniformAMD => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdScope, .quantifier = .required }, .{ .kind = .GroupOperation, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpGroupFMinNonUniformAMD => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdScope, .quantifier = .required }, .{ .kind = .GroupOperation, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpGroupUMinNonUniformAMD => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdScope, .quantifier = .required }, .{ .kind = .GroupOperation, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpGroupSMinNonUniformAMD => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdScope, .quantifier = .required }, .{ .kind = .GroupOperation, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpGroupFMaxNonUniformAMD => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdScope, .quantifier = .required }, .{ .kind = .GroupOperation, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpGroupUMaxNonUniformAMD => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdScope, .quantifier = .required }, .{ .kind = .GroupOperation, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpGroupSMaxNonUniformAMD => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdScope, .quantifier = .required }, .{ .kind = .GroupOperation, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpFragmentMaskFetchAMD => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = 
.required }, }, .OpFragmentFetchAMD => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpReadClockKHR => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdScope, .quantifier = .required }, }, .OpHitObjectRecordHitMotionNV => &[_]Operand{ .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpHitObjectRecordHitWithIndexMotionNV => &[_]Operand{ .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpHitObjectRecordMissMotionNV => &[_]Operand{ .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpHitObjectGetWorldToObjectNV => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpHitObjectGetObjectToWorldNV => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpHitObjectGetObjectRayDirectionNV => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpHitObjectGetObjectRayOriginNV => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpHitObjectTraceRayMotionNV => &[_]Operand{ .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ 
.kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpHitObjectGetShaderRecordBufferHandleNV => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpHitObjectGetShaderBindingTableRecordIndexNV => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpHitObjectRecordEmptyNV => &[_]Operand{ .{ .kind = .IdRef, .quantifier = .required }, }, .OpHitObjectTraceRayNV => &[_]Operand{ .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpHitObjectRecordHitNV => &[_]Operand{ .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpHitObjectRecordHitWithIndexNV => &[_]Operand{ .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpHitObjectRecordMissNV => &[_]Operand{ .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpHitObjectExecuteShaderNV => &[_]Operand{ .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpHitObjectGetCurrentTimeNV => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpHitObjectGetAttributesNV => &[_]Operand{ .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpHitObjectGetHitKindNV => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpHitObjectGetPrimitiveIndexNV => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, 
.quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpHitObjectGetGeometryIndexNV => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpHitObjectGetInstanceIdNV => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpHitObjectGetInstanceCustomIndexNV => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpHitObjectGetWorldRayDirectionNV => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpHitObjectGetWorldRayOriginNV => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpHitObjectGetRayTMaxNV => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpHitObjectGetRayTMinNV => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpHitObjectIsEmptyNV => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpHitObjectIsHitNV => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpHitObjectIsMissNV => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpReorderThreadWithHitObjectNV => &[_]Operand{ .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .optional }, .{ .kind = .IdRef, .quantifier = .optional }, }, .OpReorderThreadWithHintNV => &[_]Operand{ .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpTypeHitObjectNV => &[_]Operand{ .{ .kind = .IdResult, .quantifier = .required }, }, .OpImageSampleFootprintNV => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .ImageOperands, .quantifier = .optional }, }, .OpEmitMeshTasksEXT => &[_]Operand{ .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .optional }, }, .OpSetMeshOutputsEXT => &[_]Operand{ .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpGroupNonUniformPartitionNV => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpWritePackedPrimitiveIndices4x8NV => &[_]Operand{ .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpReportIntersectionKHR => 
&[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpIgnoreIntersectionNV => &[_]Operand{}, .OpTerminateRayNV => &[_]Operand{}, .OpTraceNV => &[_]Operand{ .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpTraceMotionNV => &[_]Operand{ .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpTraceRayMotionNV => &[_]Operand{ .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpRayQueryGetIntersectionTriangleVertexPositionsKHR => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpTypeAccelerationStructureKHR => &[_]Operand{ .{ .kind = .IdResult, .quantifier = .required }, }, .OpExecuteCallableNV => &[_]Operand{ .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpTypeCooperativeMatrixNV => &[_]Operand{ .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdScope, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpCooperativeMatrixLoadNV => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .MemoryAccess, .quantifier = .optional }, }, .OpCooperativeMatrixStoreNV => &[_]Operand{ .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .MemoryAccess, .quantifier = .optional }, }, .OpCooperativeMatrixMulAddNV => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ 
.kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpCooperativeMatrixLengthNV => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpBeginInvocationInterlockEXT => &[_]Operand{}, .OpEndInvocationInterlockEXT => &[_]Operand{}, .OpDemoteToHelperInvocation => &[_]Operand{}, .OpIsHelperInvocationEXT => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, }, .OpConvertUToImageNV => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpConvertUToSamplerNV => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpConvertImageToUNV => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpConvertSamplerToUNV => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpConvertUToSampledImageNV => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpConvertSampledImageToUNV => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpSamplerImageAddressingModeNV => &[_]Operand{ .{ .kind = .LiteralInteger, .quantifier = .required }, }, .OpSubgroupShuffleINTEL => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpSubgroupShuffleDownINTEL => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpSubgroupShuffleUpINTEL => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpSubgroupShuffleXorINTEL => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpSubgroupBlockReadINTEL => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpSubgroupBlockWriteINTEL => &[_]Operand{ .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpSubgroupImageBlockReadINTEL => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpSubgroupImageBlockWriteINTEL => &[_]Operand{ .{ .kind 
= .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpSubgroupImageMediaBlockReadINTEL => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpSubgroupImageMediaBlockWriteINTEL => &[_]Operand{ .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpUCountLeadingZerosINTEL => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpUCountTrailingZerosINTEL => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpAbsISubINTEL => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpAbsUSubINTEL => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpIAddSatINTEL => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpUAddSatINTEL => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpIAverageINTEL => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpUAverageINTEL => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpIAverageRoundedINTEL => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpUAverageRoundedINTEL => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpISubSatINTEL => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpUSubSatINTEL => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpIMul32x16INTEL => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = 
.IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpUMul32x16INTEL => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpAtomicFMinEXT => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdScope, .quantifier = .required }, .{ .kind = .IdMemorySemantics, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpAtomicFMaxEXT => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdScope, .quantifier = .required }, .{ .kind = .IdMemorySemantics, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpAssumeTrueKHR => &[_]Operand{ .{ .kind = .IdRef, .quantifier = .required }, }, .OpExpectKHR => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpDecorateString => &[_]Operand{ .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .Decoration, .quantifier = .required }, }, .OpMemberDecorateString => &[_]Operand{ .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .LiteralInteger, .quantifier = .required }, .{ .kind = .Decoration, .quantifier = .required }, }, .OpLoopControlINTEL => &[_]Operand{ .{ .kind = .LiteralInteger, .quantifier = .variadic }, }, .OpReadPipeBlockingINTEL => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpWritePipeBlockingINTEL => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpFPGARegINTEL => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpRayQueryGetRayTMinKHR => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpRayQueryGetRayFlagsKHR => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpRayQueryGetIntersectionTKHR => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpRayQueryGetIntersectionInstanceCustomIndexKHR => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpRayQueryGetIntersectionInstanceIdKHR => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = 
.required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpRayQueryGetIntersectionInstanceShaderBindingTableRecordOffsetKHR => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpRayQueryGetIntersectionGeometryIndexKHR => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpRayQueryGetIntersectionPrimitiveIndexKHR => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpRayQueryGetIntersectionBarycentricsKHR => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpRayQueryGetIntersectionFrontFaceKHR => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpRayQueryGetIntersectionCandidateAABBOpaqueKHR => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpRayQueryGetIntersectionObjectRayDirectionKHR => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpRayQueryGetIntersectionObjectRayOriginKHR => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpRayQueryGetWorldRayDirectionKHR => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpRayQueryGetWorldRayOriginKHR => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpRayQueryGetIntersectionObjectToWorldKHR => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpRayQueryGetIntersectionWorldToObjectKHR => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpAtomicFAddEXT => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, .{ .kind = .IdScope, .quantifier = .required }, .{ .kind = .IdMemorySemantics, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpTypeBufferSurfaceINTEL => &[_]Operand{ .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .AccessQualifier, .quantifier = .required }, }, .OpTypeStructContinuedINTEL => &[_]Operand{ .{ .kind = 
.IdRef, .quantifier = .variadic }, }, .OpConstantCompositeContinuedINTEL => &[_]Operand{ .{ .kind = .IdRef, .quantifier = .variadic }, }, .OpSpecConstantCompositeContinuedINTEL => &[_]Operand{ .{ .kind = .IdRef, .quantifier = .variadic }, }, .OpConvertFToBF16INTEL => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpConvertBF16ToFINTEL => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpControlBarrierArriveINTEL => &[_]Operand{ .{ .kind = .IdScope, .quantifier = .required }, .{ .kind = .IdScope, .quantifier = .required }, .{ .kind = .IdMemorySemantics, .quantifier = .required }, }, .OpControlBarrierWaitINTEL => &[_]Operand{ .{ .kind = .IdScope, .quantifier = .required }, .{ .kind = .IdScope, .quantifier = .required }, .{ .kind = .IdMemorySemantics, .quantifier = .required }, }, .OpGroupIMulKHR => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdScope, .quantifier = .required }, .{ .kind = .GroupOperation, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpGroupFMulKHR => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdScope, .quantifier = .required }, .{ .kind = .GroupOperation, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpGroupBitwiseAndKHR => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdScope, .quantifier = .required }, .{ .kind = .GroupOperation, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpGroupBitwiseOrKHR => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdScope, .quantifier = .required }, .{ .kind = .GroupOperation, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpGroupBitwiseXorKHR => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdScope, .quantifier = .required }, .{ .kind = .GroupOperation, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpGroupLogicalAndKHR => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdScope, .quantifier = .required }, .{ .kind = .GroupOperation, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpGroupLogicalOrKHR => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdScope, .quantifier = .required }, .{ .kind = .GroupOperation, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, .OpGroupLogicalXorKHR => &[_]Operand{ .{ .kind = .IdResultType, .quantifier = .required }, .{ .kind = .IdResult, .quantifier = .required }, .{ .kind = .IdScope, .quantifier = .required }, .{ .kind = .GroupOperation, .quantifier = .required }, .{ .kind = .IdRef, .quantifier = .required }, }, }; } pub fn class(self: Opcode) Class { return switch (self) { .OpNop => .Miscellaneous, .OpUndef => .Miscellaneous, .OpSourceContinued => .Debug, .OpSource => .Debug, .OpSourceExtension => .Debug, .OpName => 
.Debug, .OpMemberName => .Debug, .OpString => .Debug, .OpLine => .Debug, .OpExtension => .Extension, .OpExtInstImport => .Extension, .OpExtInst => .Extension, .OpMemoryModel => .ModeSetting, .OpEntryPoint => .ModeSetting, .OpExecutionMode => .ModeSetting, .OpCapability => .ModeSetting, .OpTypeVoid => .TypeDeclaration, .OpTypeBool => .TypeDeclaration, .OpTypeInt => .TypeDeclaration, .OpTypeFloat => .TypeDeclaration, .OpTypeVector => .TypeDeclaration, .OpTypeMatrix => .TypeDeclaration, .OpTypeImage => .TypeDeclaration, .OpTypeSampler => .TypeDeclaration, .OpTypeSampledImage => .TypeDeclaration, .OpTypeArray => .TypeDeclaration, .OpTypeRuntimeArray => .TypeDeclaration, .OpTypeStruct => .TypeDeclaration, .OpTypeOpaque => .TypeDeclaration, .OpTypePointer => .TypeDeclaration, .OpTypeFunction => .TypeDeclaration, .OpTypeEvent => .TypeDeclaration, .OpTypeDeviceEvent => .TypeDeclaration, .OpTypeReserveId => .TypeDeclaration, .OpTypeQueue => .TypeDeclaration, .OpTypePipe => .TypeDeclaration, .OpTypeForwardPointer => .TypeDeclaration, .OpConstantTrue => .ConstantCreation, .OpConstantFalse => .ConstantCreation, .OpConstant => .ConstantCreation, .OpConstantComposite => .ConstantCreation, .OpConstantSampler => .ConstantCreation, .OpConstantNull => .ConstantCreation, .OpSpecConstantTrue => .ConstantCreation, .OpSpecConstantFalse => .ConstantCreation, .OpSpecConstant => .ConstantCreation, .OpSpecConstantComposite => .ConstantCreation, .OpSpecConstantOp => .ConstantCreation, .OpFunction => .Function, .OpFunctionParameter => .Function, .OpFunctionEnd => .Function, .OpFunctionCall => .Function, .OpVariable => .Memory, .OpImageTexelPointer => .Memory, .OpLoad => .Memory, .OpStore => .Memory, .OpCopyMemory => .Memory, .OpCopyMemorySized => .Memory, .OpAccessChain => .Memory, .OpInBoundsAccessChain => .Memory, .OpPtrAccessChain => .Memory, .OpArrayLength => .Memory, .OpGenericPtrMemSemantics => .Memory, .OpInBoundsPtrAccessChain => .Memory, .OpDecorate => .Annotation, .OpMemberDecorate => .Annotation, .OpDecorationGroup => .Annotation, .OpGroupDecorate => .Annotation, .OpGroupMemberDecorate => .Annotation, .OpVectorExtractDynamic => .Composite, .OpVectorInsertDynamic => .Composite, .OpVectorShuffle => .Composite, .OpCompositeConstruct => .Composite, .OpCompositeExtract => .Composite, .OpCompositeInsert => .Composite, .OpCopyObject => .Composite, .OpTranspose => .Composite, .OpSampledImage => .Image, .OpImageSampleImplicitLod => .Image, .OpImageSampleExplicitLod => .Image, .OpImageSampleDrefImplicitLod => .Image, .OpImageSampleDrefExplicitLod => .Image, .OpImageSampleProjImplicitLod => .Image, .OpImageSampleProjExplicitLod => .Image, .OpImageSampleProjDrefImplicitLod => .Image, .OpImageSampleProjDrefExplicitLod => .Image, .OpImageFetch => .Image, .OpImageGather => .Image, .OpImageDrefGather => .Image, .OpImageRead => .Image, .OpImageWrite => .Image, .OpImage => .Image, .OpImageQueryFormat => .Image, .OpImageQueryOrder => .Image, .OpImageQuerySizeLod => .Image, .OpImageQuerySize => .Image, .OpImageQueryLod => .Image, .OpImageQueryLevels => .Image, .OpImageQuerySamples => .Image, .OpConvertFToU => .Conversion, .OpConvertFToS => .Conversion, .OpConvertSToF => .Conversion, .OpConvertUToF => .Conversion, .OpUConvert => .Conversion, .OpSConvert => .Conversion, .OpFConvert => .Conversion, .OpQuantizeToF16 => .Conversion, .OpConvertPtrToU => .Conversion, .OpSatConvertSToU => .Conversion, .OpSatConvertUToS => .Conversion, .OpConvertUToPtr => .Conversion, .OpPtrCastToGeneric => .Conversion, .OpGenericCastToPtr => 
.Conversion, .OpGenericCastToPtrExplicit => .Conversion, .OpBitcast => .Conversion, .OpSNegate => .Arithmetic, .OpFNegate => .Arithmetic, .OpIAdd => .Arithmetic, .OpFAdd => .Arithmetic, .OpISub => .Arithmetic, .OpFSub => .Arithmetic, .OpIMul => .Arithmetic, .OpFMul => .Arithmetic, .OpUDiv => .Arithmetic, .OpSDiv => .Arithmetic, .OpFDiv => .Arithmetic, .OpUMod => .Arithmetic, .OpSRem => .Arithmetic, .OpSMod => .Arithmetic, .OpFRem => .Arithmetic, .OpFMod => .Arithmetic, .OpVectorTimesScalar => .Arithmetic, .OpMatrixTimesScalar => .Arithmetic, .OpVectorTimesMatrix => .Arithmetic, .OpMatrixTimesVector => .Arithmetic, .OpMatrixTimesMatrix => .Arithmetic, .OpOuterProduct => .Arithmetic, .OpDot => .Arithmetic, .OpIAddCarry => .Arithmetic, .OpISubBorrow => .Arithmetic, .OpUMulExtended => .Arithmetic, .OpSMulExtended => .Arithmetic, .OpAny => .RelationalAndLogical, .OpAll => .RelationalAndLogical, .OpIsNan => .RelationalAndLogical, .OpIsInf => .RelationalAndLogical, .OpIsFinite => .RelationalAndLogical, .OpIsNormal => .RelationalAndLogical, .OpSignBitSet => .RelationalAndLogical, .OpLessOrGreater => .RelationalAndLogical, .OpOrdered => .RelationalAndLogical, .OpUnordered => .RelationalAndLogical, .OpLogicalEqual => .RelationalAndLogical, .OpLogicalNotEqual => .RelationalAndLogical, .OpLogicalOr => .RelationalAndLogical, .OpLogicalAnd => .RelationalAndLogical, .OpLogicalNot => .RelationalAndLogical, .OpSelect => .RelationalAndLogical, .OpIEqual => .RelationalAndLogical, .OpINotEqual => .RelationalAndLogical, .OpUGreaterThan => .RelationalAndLogical, .OpSGreaterThan => .RelationalAndLogical, .OpUGreaterThanEqual => .RelationalAndLogical, .OpSGreaterThanEqual => .RelationalAndLogical, .OpULessThan => .RelationalAndLogical, .OpSLessThan => .RelationalAndLogical, .OpULessThanEqual => .RelationalAndLogical, .OpSLessThanEqual => .RelationalAndLogical, .OpFOrdEqual => .RelationalAndLogical, .OpFUnordEqual => .RelationalAndLogical, .OpFOrdNotEqual => .RelationalAndLogical, .OpFUnordNotEqual => .RelationalAndLogical, .OpFOrdLessThan => .RelationalAndLogical, .OpFUnordLessThan => .RelationalAndLogical, .OpFOrdGreaterThan => .RelationalAndLogical, .OpFUnordGreaterThan => .RelationalAndLogical, .OpFOrdLessThanEqual => .RelationalAndLogical, .OpFUnordLessThanEqual => .RelationalAndLogical, .OpFOrdGreaterThanEqual => .RelationalAndLogical, .OpFUnordGreaterThanEqual => .RelationalAndLogical, .OpShiftRightLogical => .Bit, .OpShiftRightArithmetic => .Bit, .OpShiftLeftLogical => .Bit, .OpBitwiseOr => .Bit, .OpBitwiseXor => .Bit, .OpBitwiseAnd => .Bit, .OpNot => .Bit, .OpBitFieldInsert => .Bit, .OpBitFieldSExtract => .Bit, .OpBitFieldUExtract => .Bit, .OpBitReverse => .Bit, .OpBitCount => .Bit, .OpDPdx => .Derivative, .OpDPdy => .Derivative, .OpFwidth => .Derivative, .OpDPdxFine => .Derivative, .OpDPdyFine => .Derivative, .OpFwidthFine => .Derivative, .OpDPdxCoarse => .Derivative, .OpDPdyCoarse => .Derivative, .OpFwidthCoarse => .Derivative, .OpEmitVertex => .Primitive, .OpEndPrimitive => .Primitive, .OpEmitStreamVertex => .Primitive, .OpEndStreamPrimitive => .Primitive, .OpControlBarrier => .Barrier, .OpMemoryBarrier => .Barrier, .OpAtomicLoad => .Atomic, .OpAtomicStore => .Atomic, .OpAtomicExchange => .Atomic, .OpAtomicCompareExchange => .Atomic, .OpAtomicCompareExchangeWeak => .Atomic, .OpAtomicIIncrement => .Atomic, .OpAtomicIDecrement => .Atomic, .OpAtomicIAdd => .Atomic, .OpAtomicISub => .Atomic, .OpAtomicSMin => .Atomic, .OpAtomicUMin => .Atomic, .OpAtomicSMax => .Atomic, .OpAtomicUMax => .Atomic, .OpAtomicAnd 
=> .Atomic, .OpAtomicOr => .Atomic, .OpAtomicXor => .Atomic, .OpPhi => .ControlFlow, .OpLoopMerge => .ControlFlow, .OpSelectionMerge => .ControlFlow, .OpLabel => .ControlFlow, .OpBranch => .ControlFlow, .OpBranchConditional => .ControlFlow, .OpSwitch => .ControlFlow, .OpKill => .ControlFlow, .OpReturn => .ControlFlow, .OpReturnValue => .ControlFlow, .OpUnreachable => .ControlFlow, .OpLifetimeStart => .ControlFlow, .OpLifetimeStop => .ControlFlow, .OpGroupAsyncCopy => .Group, .OpGroupWaitEvents => .Group, .OpGroupAll => .Group, .OpGroupAny => .Group, .OpGroupBroadcast => .Group, .OpGroupIAdd => .Group, .OpGroupFAdd => .Group, .OpGroupFMin => .Group, .OpGroupUMin => .Group, .OpGroupSMin => .Group, .OpGroupFMax => .Group, .OpGroupUMax => .Group, .OpGroupSMax => .Group, .OpReadPipe => .Pipe, .OpWritePipe => .Pipe, .OpReservedReadPipe => .Pipe, .OpReservedWritePipe => .Pipe, .OpReserveReadPipePackets => .Pipe, .OpReserveWritePipePackets => .Pipe, .OpCommitReadPipe => .Pipe, .OpCommitWritePipe => .Pipe, .OpIsValidReserveId => .Pipe, .OpGetNumPipePackets => .Pipe, .OpGetMaxPipePackets => .Pipe, .OpGroupReserveReadPipePackets => .Pipe, .OpGroupReserveWritePipePackets => .Pipe, .OpGroupCommitReadPipe => .Pipe, .OpGroupCommitWritePipe => .Pipe, .OpEnqueueMarker => .DeviceSideEnqueue, .OpEnqueueKernel => .DeviceSideEnqueue, .OpGetKernelNDrangeSubGroupCount => .DeviceSideEnqueue, .OpGetKernelNDrangeMaxSubGroupSize => .DeviceSideEnqueue, .OpGetKernelWorkGroupSize => .DeviceSideEnqueue, .OpGetKernelPreferredWorkGroupSizeMultiple => .DeviceSideEnqueue, .OpRetainEvent => .DeviceSideEnqueue, .OpReleaseEvent => .DeviceSideEnqueue, .OpCreateUserEvent => .DeviceSideEnqueue, .OpIsValidEvent => .DeviceSideEnqueue, .OpSetUserEventStatus => .DeviceSideEnqueue, .OpCaptureEventProfilingInfo => .DeviceSideEnqueue, .OpGetDefaultQueue => .DeviceSideEnqueue, .OpBuildNDRange => .DeviceSideEnqueue, .OpImageSparseSampleImplicitLod => .Image, .OpImageSparseSampleExplicitLod => .Image, .OpImageSparseSampleDrefImplicitLod => .Image, .OpImageSparseSampleDrefExplicitLod => .Image, .OpImageSparseSampleProjImplicitLod => .Image, .OpImageSparseSampleProjExplicitLod => .Image, .OpImageSparseSampleProjDrefImplicitLod => .Image, .OpImageSparseSampleProjDrefExplicitLod => .Image, .OpImageSparseFetch => .Image, .OpImageSparseGather => .Image, .OpImageSparseDrefGather => .Image, .OpImageSparseTexelsResident => .Image, .OpNoLine => .Debug, .OpAtomicFlagTestAndSet => .Atomic, .OpAtomicFlagClear => .Atomic, .OpImageSparseRead => .Image, .OpSizeOf => .Miscellaneous, .OpTypePipeStorage => .TypeDeclaration, .OpConstantPipeStorage => .Pipe, .OpCreatePipeFromPipeStorage => .Pipe, .OpGetKernelLocalSizeForSubgroupCount => .DeviceSideEnqueue, .OpGetKernelMaxNumSubgroups => .DeviceSideEnqueue, .OpTypeNamedBarrier => .TypeDeclaration, .OpNamedBarrierInitialize => .Barrier, .OpMemoryNamedBarrier => .Barrier, .OpModuleProcessed => .Debug, .OpExecutionModeId => .ModeSetting, .OpDecorateId => .Annotation, .OpGroupNonUniformElect => .NonUniform, .OpGroupNonUniformAll => .NonUniform, .OpGroupNonUniformAny => .NonUniform, .OpGroupNonUniformAllEqual => .NonUniform, .OpGroupNonUniformBroadcast => .NonUniform, .OpGroupNonUniformBroadcastFirst => .NonUniform, .OpGroupNonUniformBallot => .NonUniform, .OpGroupNonUniformInverseBallot => .NonUniform, .OpGroupNonUniformBallotBitExtract => .NonUniform, .OpGroupNonUniformBallotBitCount => .NonUniform, .OpGroupNonUniformBallotFindLSB => .NonUniform, .OpGroupNonUniformBallotFindMSB => .NonUniform, 
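            // (generated table continues) Every OpGroupNonUniform* opcode below maps
            // to .NonUniform, mirroring the instruction-class column of the SPIR-V
            // grammar. Illustrative use of this table via `class` declared above:
            //   const c = Opcode.OpGroupNonUniformIAdd.class(); // == .NonUniform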
.OpGroupNonUniformShuffle => .NonUniform, .OpGroupNonUniformShuffleXor => .NonUniform, .OpGroupNonUniformShuffleUp => .NonUniform, .OpGroupNonUniformShuffleDown => .NonUniform, .OpGroupNonUniformIAdd => .NonUniform, .OpGroupNonUniformFAdd => .NonUniform, .OpGroupNonUniformIMul => .NonUniform, .OpGroupNonUniformFMul => .NonUniform, .OpGroupNonUniformSMin => .NonUniform, .OpGroupNonUniformUMin => .NonUniform, .OpGroupNonUniformFMin => .NonUniform, .OpGroupNonUniformSMax => .NonUniform, .OpGroupNonUniformUMax => .NonUniform, .OpGroupNonUniformFMax => .NonUniform, .OpGroupNonUniformBitwiseAnd => .NonUniform, .OpGroupNonUniformBitwiseOr => .NonUniform, .OpGroupNonUniformBitwiseXor => .NonUniform, .OpGroupNonUniformLogicalAnd => .NonUniform, .OpGroupNonUniformLogicalOr => .NonUniform, .OpGroupNonUniformLogicalXor => .NonUniform, .OpGroupNonUniformQuadBroadcast => .NonUniform, .OpGroupNonUniformQuadSwap => .NonUniform, .OpCopyLogical => .Composite, .OpPtrEqual => .Memory, .OpPtrNotEqual => .Memory, .OpPtrDiff => .Memory, .OpColorAttachmentReadEXT => .Image, .OpDepthAttachmentReadEXT => .Image, .OpStencilAttachmentReadEXT => .Image, .OpTerminateInvocation => .ControlFlow, .OpSubgroupBallotKHR => .Group, .OpSubgroupFirstInvocationKHR => .Group, .OpSubgroupAllKHR => .Group, .OpSubgroupAnyKHR => .Group, .OpSubgroupAllEqualKHR => .Group, .OpGroupNonUniformRotateKHR => .Group, .OpSubgroupReadInvocationKHR => .Group, .OpTraceRayKHR => .Reserved, .OpExecuteCallableKHR => .Reserved, .OpConvertUToAccelerationStructureKHR => .Reserved, .OpIgnoreIntersectionKHR => .Reserved, .OpTerminateRayKHR => .Reserved, .OpSDot => .Arithmetic, .OpUDot => .Arithmetic, .OpSUDot => .Arithmetic, .OpSDotAccSat => .Arithmetic, .OpUDotAccSat => .Arithmetic, .OpSUDotAccSat => .Arithmetic, .OpTypeRayQueryKHR => .Reserved, .OpRayQueryInitializeKHR => .Reserved, .OpRayQueryTerminateKHR => .Reserved, .OpRayQueryGenerateIntersectionKHR => .Reserved, .OpRayQueryConfirmIntersectionKHR => .Reserved, .OpRayQueryProceedKHR => .Reserved, .OpRayQueryGetIntersectionTypeKHR => .Reserved, .OpImageSampleWeightedQCOM => .Image, .OpImageBoxFilterQCOM => .Image, .OpImageBlockMatchSSDQCOM => .Image, .OpImageBlockMatchSADQCOM => .Image, .OpGroupIAddNonUniformAMD => .Group, .OpGroupFAddNonUniformAMD => .Group, .OpGroupFMinNonUniformAMD => .Group, .OpGroupUMinNonUniformAMD => .Group, .OpGroupSMinNonUniformAMD => .Group, .OpGroupFMaxNonUniformAMD => .Group, .OpGroupUMaxNonUniformAMD => .Group, .OpGroupSMaxNonUniformAMD => .Group, .OpFragmentMaskFetchAMD => .Reserved, .OpFragmentFetchAMD => .Reserved, .OpReadClockKHR => .Reserved, .OpHitObjectRecordHitMotionNV => .Reserved, .OpHitObjectRecordHitWithIndexMotionNV => .Reserved, .OpHitObjectRecordMissMotionNV => .Reserved, .OpHitObjectGetWorldToObjectNV => .Reserved, .OpHitObjectGetObjectToWorldNV => .Reserved, .OpHitObjectGetObjectRayDirectionNV => .Reserved, .OpHitObjectGetObjectRayOriginNV => .Reserved, .OpHitObjectTraceRayMotionNV => .Reserved, .OpHitObjectGetShaderRecordBufferHandleNV => .Reserved, .OpHitObjectGetShaderBindingTableRecordIndexNV => .Reserved, .OpHitObjectRecordEmptyNV => .Reserved, .OpHitObjectTraceRayNV => .Reserved, .OpHitObjectRecordHitNV => .Reserved, .OpHitObjectRecordHitWithIndexNV => .Reserved, .OpHitObjectRecordMissNV => .Reserved, .OpHitObjectExecuteShaderNV => .Reserved, .OpHitObjectGetCurrentTimeNV => .Reserved, .OpHitObjectGetAttributesNV => .Reserved, .OpHitObjectGetHitKindNV => .Reserved, .OpHitObjectGetPrimitiveIndexNV => .Reserved, .OpHitObjectGetGeometryIndexNV => 
.Reserved, .OpHitObjectGetInstanceIdNV => .Reserved, .OpHitObjectGetInstanceCustomIndexNV => .Reserved, .OpHitObjectGetWorldRayDirectionNV => .Reserved, .OpHitObjectGetWorldRayOriginNV => .Reserved, .OpHitObjectGetRayTMaxNV => .Reserved, .OpHitObjectGetRayTMinNV => .Reserved, .OpHitObjectIsEmptyNV => .Reserved, .OpHitObjectIsHitNV => .Reserved, .OpHitObjectIsMissNV => .Reserved, .OpReorderThreadWithHitObjectNV => .Reserved, .OpReorderThreadWithHintNV => .Reserved, .OpTypeHitObjectNV => .Reserved, .OpImageSampleFootprintNV => .Image, .OpEmitMeshTasksEXT => .Reserved, .OpSetMeshOutputsEXT => .Reserved, .OpGroupNonUniformPartitionNV => .NonUniform, .OpWritePackedPrimitiveIndices4x8NV => .Reserved, .OpReportIntersectionKHR => .Reserved, .OpIgnoreIntersectionNV => .Reserved, .OpTerminateRayNV => .Reserved, .OpTraceNV => .Reserved, .OpTraceMotionNV => .Reserved, .OpTraceRayMotionNV => .Reserved, .OpRayQueryGetIntersectionTriangleVertexPositionsKHR => .Reserved, .OpTypeAccelerationStructureKHR => .Reserved, .OpExecuteCallableNV => .Reserved, .OpTypeCooperativeMatrixNV => .Reserved, .OpCooperativeMatrixLoadNV => .Reserved, .OpCooperativeMatrixStoreNV => .Reserved, .OpCooperativeMatrixMulAddNV => .Reserved, .OpCooperativeMatrixLengthNV => .Reserved, .OpBeginInvocationInterlockEXT => .Reserved, .OpEndInvocationInterlockEXT => .Reserved, .OpDemoteToHelperInvocation => .ControlFlow, .OpIsHelperInvocationEXT => .Reserved, .OpConvertUToImageNV => .Reserved, .OpConvertUToSamplerNV => .Reserved, .OpConvertImageToUNV => .Reserved, .OpConvertSamplerToUNV => .Reserved, .OpConvertUToSampledImageNV => .Reserved, .OpConvertSampledImageToUNV => .Reserved, .OpSamplerImageAddressingModeNV => .Reserved, .OpSubgroupShuffleINTEL => .Group, .OpSubgroupShuffleDownINTEL => .Group, .OpSubgroupShuffleUpINTEL => .Group, .OpSubgroupShuffleXorINTEL => .Group, .OpSubgroupBlockReadINTEL => .Group, .OpSubgroupBlockWriteINTEL => .Group, .OpSubgroupImageBlockReadINTEL => .Group, .OpSubgroupImageBlockWriteINTEL => .Group, .OpSubgroupImageMediaBlockReadINTEL => .Group, .OpSubgroupImageMediaBlockWriteINTEL => .Group, .OpUCountLeadingZerosINTEL => .Reserved, .OpUCountTrailingZerosINTEL => .Reserved, .OpAbsISubINTEL => .Reserved, .OpAbsUSubINTEL => .Reserved, .OpIAddSatINTEL => .Reserved, .OpUAddSatINTEL => .Reserved, .OpIAverageINTEL => .Reserved, .OpUAverageINTEL => .Reserved, .OpIAverageRoundedINTEL => .Reserved, .OpUAverageRoundedINTEL => .Reserved, .OpISubSatINTEL => .Reserved, .OpUSubSatINTEL => .Reserved, .OpIMul32x16INTEL => .Reserved, .OpUMul32x16INTEL => .Reserved, .OpAtomicFMinEXT => .Atomic, .OpAtomicFMaxEXT => .Atomic, .OpAssumeTrueKHR => .Miscellaneous, .OpExpectKHR => .Miscellaneous, .OpDecorateString => .Annotation, .OpMemberDecorateString => .Annotation, .OpLoopControlINTEL => .Reserved, .OpReadPipeBlockingINTEL => .Pipe, .OpWritePipeBlockingINTEL => .Pipe, .OpFPGARegINTEL => .Reserved, .OpRayQueryGetRayTMinKHR => .Reserved, .OpRayQueryGetRayFlagsKHR => .Reserved, .OpRayQueryGetIntersectionTKHR => .Reserved, .OpRayQueryGetIntersectionInstanceCustomIndexKHR => .Reserved, .OpRayQueryGetIntersectionInstanceIdKHR => .Reserved, .OpRayQueryGetIntersectionInstanceShaderBindingTableRecordOffsetKHR => .Reserved, .OpRayQueryGetIntersectionGeometryIndexKHR => .Reserved, .OpRayQueryGetIntersectionPrimitiveIndexKHR => .Reserved, .OpRayQueryGetIntersectionBarycentricsKHR => .Reserved, .OpRayQueryGetIntersectionFrontFaceKHR => .Reserved, .OpRayQueryGetIntersectionCandidateAABBOpaqueKHR => .Reserved, 
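            // The ray-query and hit-object getters above and below all map to
            // .Reserved, the class these instructions carry in the SPIR-V grammar.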
            .OpRayQueryGetIntersectionObjectRayDirectionKHR => .Reserved,
            .OpRayQueryGetIntersectionObjectRayOriginKHR => .Reserved,
            .OpRayQueryGetWorldRayDirectionKHR => .Reserved,
            .OpRayQueryGetWorldRayOriginKHR => .Reserved,
            .OpRayQueryGetIntersectionObjectToWorldKHR => .Reserved,
            .OpRayQueryGetIntersectionWorldToObjectKHR => .Reserved,
            .OpAtomicFAddEXT => .Atomic,
            .OpTypeBufferSurfaceINTEL => .TypeDeclaration,
            .OpTypeStructContinuedINTEL => .TypeDeclaration,
            .OpConstantCompositeContinuedINTEL => .ConstantCreation,
            .OpSpecConstantCompositeContinuedINTEL => .ConstantCreation,
            .OpConvertFToBF16INTEL => .Conversion,
            .OpConvertBF16ToFINTEL => .Conversion,
            .OpControlBarrierArriveINTEL => .Barrier,
            .OpControlBarrierWaitINTEL => .Barrier,
            .OpGroupIMulKHR => .Group,
            .OpGroupFMulKHR => .Group,
            .OpGroupBitwiseAndKHR => .Group,
            .OpGroupBitwiseOrKHR => .Group,
            .OpGroupBitwiseXorKHR => .Group,
            .OpGroupLogicalAndKHR => .Group,
            .OpGroupLogicalOrKHR => .Group,
            .OpGroupLogicalXorKHR => .Group,
        };
    }
};

pub const ImageOperands = packed struct {
    Bias: bool = false,
    Lod: bool = false,
    Grad: bool = false,
    ConstOffset: bool = false,
    Offset: bool = false,
    ConstOffsets: bool = false,
    Sample: bool = false,
    MinLod: bool = false,
    MakeTexelAvailable: bool = false,
    MakeTexelVisible: bool = false,
    NonPrivateTexel: bool = false,
    VolatileTexel: bool = false,
    SignExtend: bool = false,
    ZeroExtend: bool = false,
    Nontemporal: bool = false,
    _reserved_bit_15: bool = false,
    Offsets: bool = false,
    _reserved_bit_17: bool = false,
    _reserved_bit_18: bool = false,
    _reserved_bit_19: bool = false,
    _reserved_bit_20: bool = false,
    _reserved_bit_21: bool = false,
    _reserved_bit_22: bool = false,
    _reserved_bit_23: bool = false,
    _reserved_bit_24: bool = false,
    _reserved_bit_25: bool = false,
    _reserved_bit_26: bool = false,
    _reserved_bit_27: bool = false,
    _reserved_bit_28: bool = false,
    _reserved_bit_29: bool = false,
    _reserved_bit_30: bool = false,
    _reserved_bit_31: bool = false,

    pub const MakeTexelAvailableKHR: ImageOperands = .{ .MakeTexelAvailable = true };
    pub const MakeTexelVisibleKHR: ImageOperands = .{ .MakeTexelVisible = true };
    pub const NonPrivateTexelKHR: ImageOperands = .{ .NonPrivateTexel = true };
    pub const VolatileTexelKHR: ImageOperands = .{ .VolatileTexel = true };

    pub const Extended = struct {
        Bias: ?struct { id_ref: IdRef } = null,
        Lod: ?struct { id_ref: IdRef } = null,
        Grad: ?struct { id_ref_0: IdRef, id_ref_1: IdRef } = null,
        ConstOffset: ?struct { id_ref: IdRef } = null,
        Offset: ?struct { id_ref: IdRef } = null,
        ConstOffsets: ?struct { id_ref: IdRef } = null,
        Sample: ?struct { id_ref: IdRef } = null,
        MinLod: ?struct { id_ref: IdRef } = null,
        MakeTexelAvailable: ?struct { id_scope: IdScope } = null,
        MakeTexelVisible: ?struct { id_scope: IdScope } = null,
        NonPrivateTexel: bool = false,
        VolatileTexel: bool = false,
        SignExtend: bool = false,
        ZeroExtend: bool = false,
        Nontemporal: bool = false,
        _reserved_bit_15: bool = false,
        Offsets: ?struct { id_ref: IdRef } = null,
        _reserved_bit_17: bool = false,
        _reserved_bit_18: bool = false,
        _reserved_bit_19: bool = false,
        _reserved_bit_20: bool = false,
        _reserved_bit_21: bool = false,
        _reserved_bit_22: bool = false,
        _reserved_bit_23: bool = false,
        _reserved_bit_24: bool = false,
        _reserved_bit_25: bool = false,
        _reserved_bit_26: bool = false,
        _reserved_bit_27: bool = false,
        _reserved_bit_28: bool = false,
        _reserved_bit_29: bool = false,
        _reserved_bit_30: bool = false,
        _reserved_bit_31: bool = false,
    };
};

pub const FPFastMathMode = packed struct {
    NotNaN: bool = false,
    NotInf: bool = false,
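    // Remaining fast-math bits per the SPIR-V FPFastMathMode bitmask: NSZ means
    // "no signed zeros", AllowRecip permits reciprocal-instead-of-divide, and
    // Fast enables all relaxations; bits 16/17 are INTEL fast-math extensions.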
NSZ: bool = false, AllowRecip: bool = false, Fast: bool = false, _reserved_bit_5: bool = false, _reserved_bit_6: bool = false, _reserved_bit_7: bool = false, _reserved_bit_8: bool = false, _reserved_bit_9: bool = false, _reserved_bit_10: bool = false, _reserved_bit_11: bool = false, _reserved_bit_12: bool = false, _reserved_bit_13: bool = false, _reserved_bit_14: bool = false, _reserved_bit_15: bool = false, AllowContractFastINTEL: bool = false, AllowReassocINTEL: bool = false, _reserved_bit_18: bool = false, _reserved_bit_19: bool = false, _reserved_bit_20: bool = false, _reserved_bit_21: bool = false, _reserved_bit_22: bool = false, _reserved_bit_23: bool = false, _reserved_bit_24: bool = false, _reserved_bit_25: bool = false, _reserved_bit_26: bool = false, _reserved_bit_27: bool = false, _reserved_bit_28: bool = false, _reserved_bit_29: bool = false, _reserved_bit_30: bool = false, _reserved_bit_31: bool = false, }; pub const SelectionControl = packed struct { Flatten: bool = false, DontFlatten: bool = false, _reserved_bit_2: bool = false, _reserved_bit_3: bool = false, _reserved_bit_4: bool = false, _reserved_bit_5: bool = false, _reserved_bit_6: bool = false, _reserved_bit_7: bool = false, _reserved_bit_8: bool = false, _reserved_bit_9: bool = false, _reserved_bit_10: bool = false, _reserved_bit_11: bool = false, _reserved_bit_12: bool = false, _reserved_bit_13: bool = false, _reserved_bit_14: bool = false, _reserved_bit_15: bool = false, _reserved_bit_16: bool = false, _reserved_bit_17: bool = false, _reserved_bit_18: bool = false, _reserved_bit_19: bool = false, _reserved_bit_20: bool = false, _reserved_bit_21: bool = false, _reserved_bit_22: bool = false, _reserved_bit_23: bool = false, _reserved_bit_24: bool = false, _reserved_bit_25: bool = false, _reserved_bit_26: bool = false, _reserved_bit_27: bool = false, _reserved_bit_28: bool = false, _reserved_bit_29: bool = false, _reserved_bit_30: bool = false, _reserved_bit_31: bool = false, }; pub const LoopControl = packed struct { Unroll: bool = false, DontUnroll: bool = false, DependencyInfinite: bool = false, DependencyLength: bool = false, MinIterations: bool = false, MaxIterations: bool = false, IterationMultiple: bool = false, PeelCount: bool = false, PartialCount: bool = false, _reserved_bit_9: bool = false, _reserved_bit_10: bool = false, _reserved_bit_11: bool = false, _reserved_bit_12: bool = false, _reserved_bit_13: bool = false, _reserved_bit_14: bool = false, _reserved_bit_15: bool = false, InitiationIntervalINTEL: bool = false, MaxConcurrencyINTEL: bool = false, DependencyArrayINTEL: bool = false, PipelineEnableINTEL: bool = false, LoopCoalesceINTEL: bool = false, MaxInterleavingINTEL: bool = false, SpeculatedIterationsINTEL: bool = false, NoFusionINTEL: bool = false, LoopCountINTEL: bool = false, MaxReinvocationDelayINTEL: bool = false, _reserved_bit_26: bool = false, _reserved_bit_27: bool = false, _reserved_bit_28: bool = false, _reserved_bit_29: bool = false, _reserved_bit_30: bool = false, _reserved_bit_31: bool = false, pub const Extended = struct { Unroll: bool = false, DontUnroll: bool = false, DependencyInfinite: bool = false, DependencyLength: ?struct { literal_integer: LiteralInteger } = null, MinIterations: ?struct { literal_integer: LiteralInteger } = null, MaxIterations: ?struct { literal_integer: LiteralInteger } = null, IterationMultiple: ?struct { literal_integer: LiteralInteger } = null, PeelCount: ?struct { literal_integer: LiteralInteger } = null, PartialCount: ?struct { literal_integer: 
LiteralInteger } = null, _reserved_bit_9: bool = false, _reserved_bit_10: bool = false, _reserved_bit_11: bool = false, _reserved_bit_12: bool = false, _reserved_bit_13: bool = false, _reserved_bit_14: bool = false, _reserved_bit_15: bool = false, InitiationIntervalINTEL: ?struct { literal_integer: LiteralInteger } = null, MaxConcurrencyINTEL: ?struct { literal_integer: LiteralInteger } = null, DependencyArrayINTEL: ?struct { literal_integer: LiteralInteger } = null, PipelineEnableINTEL: ?struct { literal_integer: LiteralInteger } = null, LoopCoalesceINTEL: ?struct { literal_integer: LiteralInteger } = null, MaxInterleavingINTEL: ?struct { literal_integer: LiteralInteger } = null, SpeculatedIterationsINTEL: ?struct { literal_integer: LiteralInteger } = null, NoFusionINTEL: bool = false, LoopCountINTEL: ?struct { literal_integer: LiteralInteger } = null, MaxReinvocationDelayINTEL: ?struct { literal_integer: LiteralInteger } = null, _reserved_bit_26: bool = false, _reserved_bit_27: bool = false, _reserved_bit_28: bool = false, _reserved_bit_29: bool = false, _reserved_bit_30: bool = false, _reserved_bit_31: bool = false, }; }; pub const FunctionControl = packed struct { Inline: bool = false, DontInline: bool = false, Pure: bool = false, Const: bool = false, _reserved_bit_4: bool = false, _reserved_bit_5: bool = false, _reserved_bit_6: bool = false, _reserved_bit_7: bool = false, _reserved_bit_8: bool = false, _reserved_bit_9: bool = false, _reserved_bit_10: bool = false, _reserved_bit_11: bool = false, _reserved_bit_12: bool = false, _reserved_bit_13: bool = false, _reserved_bit_14: bool = false, _reserved_bit_15: bool = false, OptNoneINTEL: bool = false, _reserved_bit_17: bool = false, _reserved_bit_18: bool = false, _reserved_bit_19: bool = false, _reserved_bit_20: bool = false, _reserved_bit_21: bool = false, _reserved_bit_22: bool = false, _reserved_bit_23: bool = false, _reserved_bit_24: bool = false, _reserved_bit_25: bool = false, _reserved_bit_26: bool = false, _reserved_bit_27: bool = false, _reserved_bit_28: bool = false, _reserved_bit_29: bool = false, _reserved_bit_30: bool = false, _reserved_bit_31: bool = false, }; pub const MemorySemantics = packed struct { _reserved_bit_0: bool = false, Acquire: bool = false, Release: bool = false, AcquireRelease: bool = false, SequentiallyConsistent: bool = false, _reserved_bit_5: bool = false, UniformMemory: bool = false, SubgroupMemory: bool = false, WorkgroupMemory: bool = false, CrossWorkgroupMemory: bool = false, AtomicCounterMemory: bool = false, ImageMemory: bool = false, OutputMemory: bool = false, MakeAvailable: bool = false, MakeVisible: bool = false, Volatile: bool = false, _reserved_bit_16: bool = false, _reserved_bit_17: bool = false, _reserved_bit_18: bool = false, _reserved_bit_19: bool = false, _reserved_bit_20: bool = false, _reserved_bit_21: bool = false, _reserved_bit_22: bool = false, _reserved_bit_23: bool = false, _reserved_bit_24: bool = false, _reserved_bit_25: bool = false, _reserved_bit_26: bool = false, _reserved_bit_27: bool = false, _reserved_bit_28: bool = false, _reserved_bit_29: bool = false, _reserved_bit_30: bool = false, _reserved_bit_31: bool = false, pub const OutputMemoryKHR: MemorySemantics = .{ .OutputMemory = true }; pub const MakeAvailableKHR: MemorySemantics = .{ .MakeAvailable = true }; pub const MakeVisibleKHR: MemorySemantics = .{ .MakeVisible = true }; }; pub const MemoryAccess = packed struct { Volatile: bool = false, Aligned: bool = false, Nontemporal: bool = false, MakePointerAvailable: bool 
= false, MakePointerVisible: bool = false, NonPrivatePointer: bool = false, _reserved_bit_6: bool = false, _reserved_bit_7: bool = false, _reserved_bit_8: bool = false, _reserved_bit_9: bool = false, _reserved_bit_10: bool = false, _reserved_bit_11: bool = false, _reserved_bit_12: bool = false, _reserved_bit_13: bool = false, _reserved_bit_14: bool = false, _reserved_bit_15: bool = false, AliasScopeINTELMask: bool = false, NoAliasINTELMask: bool = false, _reserved_bit_18: bool = false, _reserved_bit_19: bool = false, _reserved_bit_20: bool = false, _reserved_bit_21: bool = false, _reserved_bit_22: bool = false, _reserved_bit_23: bool = false, _reserved_bit_24: bool = false, _reserved_bit_25: bool = false, _reserved_bit_26: bool = false, _reserved_bit_27: bool = false, _reserved_bit_28: bool = false, _reserved_bit_29: bool = false, _reserved_bit_30: bool = false, _reserved_bit_31: bool = false, pub const MakePointerAvailableKHR: MemoryAccess = .{ .MakePointerAvailable = true }; pub const MakePointerVisibleKHR: MemoryAccess = .{ .MakePointerVisible = true }; pub const NonPrivatePointerKHR: MemoryAccess = .{ .NonPrivatePointer = true }; pub const Extended = struct { Volatile: bool = false, Aligned: ?struct { literal_integer: LiteralInteger } = null, Nontemporal: bool = false, MakePointerAvailable: ?struct { id_scope: IdScope } = null, MakePointerVisible: ?struct { id_scope: IdScope } = null, NonPrivatePointer: bool = false, _reserved_bit_6: bool = false, _reserved_bit_7: bool = false, _reserved_bit_8: bool = false, _reserved_bit_9: bool = false, _reserved_bit_10: bool = false, _reserved_bit_11: bool = false, _reserved_bit_12: bool = false, _reserved_bit_13: bool = false, _reserved_bit_14: bool = false, _reserved_bit_15: bool = false, AliasScopeINTELMask: ?struct { id_ref: IdRef } = null, NoAliasINTELMask: ?struct { id_ref: IdRef } = null, _reserved_bit_18: bool = false, _reserved_bit_19: bool = false, _reserved_bit_20: bool = false, _reserved_bit_21: bool = false, _reserved_bit_22: bool = false, _reserved_bit_23: bool = false, _reserved_bit_24: bool = false, _reserved_bit_25: bool = false, _reserved_bit_26: bool = false, _reserved_bit_27: bool = false, _reserved_bit_28: bool = false, _reserved_bit_29: bool = false, _reserved_bit_30: bool = false, _reserved_bit_31: bool = false, }; }; pub const KernelProfilingInfo = packed struct { CmdExecTime: bool = false, _reserved_bit_1: bool = false, _reserved_bit_2: bool = false, _reserved_bit_3: bool = false, _reserved_bit_4: bool = false, _reserved_bit_5: bool = false, _reserved_bit_6: bool = false, _reserved_bit_7: bool = false, _reserved_bit_8: bool = false, _reserved_bit_9: bool = false, _reserved_bit_10: bool = false, _reserved_bit_11: bool = false, _reserved_bit_12: bool = false, _reserved_bit_13: bool = false, _reserved_bit_14: bool = false, _reserved_bit_15: bool = false, _reserved_bit_16: bool = false, _reserved_bit_17: bool = false, _reserved_bit_18: bool = false, _reserved_bit_19: bool = false, _reserved_bit_20: bool = false, _reserved_bit_21: bool = false, _reserved_bit_22: bool = false, _reserved_bit_23: bool = false, _reserved_bit_24: bool = false, _reserved_bit_25: bool = false, _reserved_bit_26: bool = false, _reserved_bit_27: bool = false, _reserved_bit_28: bool = false, _reserved_bit_29: bool = false, _reserved_bit_30: bool = false, _reserved_bit_31: bool = false, }; pub const RayFlags = packed struct { OpaqueKHR: bool = false, NoOpaqueKHR: bool = false, TerminateOnFirstHitKHR: bool = false, SkipClosestHitShaderKHR: bool = false, 
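    // Flag types in this file are packed structs with one bool per bit, so a
    // mask is built with plain struct-literal syntax. A minimal sketch
    // (illustrative only, not part of the generated table):
    //   const flags = RayFlags{ .OpaqueKHR = true, .TerminateOnFirstHitKHR = true };
    //   // bits 0 and 2 set, i.e. bitcasting to u32 yields 0x5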
    CullBackFacingTrianglesKHR: bool = false,
    CullFrontFacingTrianglesKHR: bool = false,
    CullOpaqueKHR: bool = false,
    CullNoOpaqueKHR: bool = false,
    SkipTrianglesKHR: bool = false,
    SkipAABBsKHR: bool = false,
    ForceOpacityMicromap2StateEXT: bool = false,
    _reserved_bit_11: bool = false,
    _reserved_bit_12: bool = false,
    _reserved_bit_13: bool = false,
    _reserved_bit_14: bool = false,
    _reserved_bit_15: bool = false,
    _reserved_bit_16: bool = false,
    _reserved_bit_17: bool = false,
    _reserved_bit_18: bool = false,
    _reserved_bit_19: bool = false,
    _reserved_bit_20: bool = false,
    _reserved_bit_21: bool = false,
    _reserved_bit_22: bool = false,
    _reserved_bit_23: bool = false,
    _reserved_bit_24: bool = false,
    _reserved_bit_25: bool = false,
    _reserved_bit_26: bool = false,
    _reserved_bit_27: bool = false,
    _reserved_bit_28: bool = false,
    _reserved_bit_29: bool = false,
    _reserved_bit_30: bool = false,
    _reserved_bit_31: bool = false,
};

pub const FragmentShadingRate = packed struct {
    Vertical2Pixels: bool = false,
    Vertical4Pixels: bool = false,
    Horizontal2Pixels: bool = false,
    Horizontal4Pixels: bool = false,
    _reserved_bit_4: bool = false,
    _reserved_bit_5: bool = false,
    _reserved_bit_6: bool = false,
    _reserved_bit_7: bool = false,
    _reserved_bit_8: bool = false,
    _reserved_bit_9: bool = false,
    _reserved_bit_10: bool = false,
    _reserved_bit_11: bool = false,
    _reserved_bit_12: bool = false,
    _reserved_bit_13: bool = false,
    _reserved_bit_14: bool = false,
    _reserved_bit_15: bool = false,
    _reserved_bit_16: bool = false,
    _reserved_bit_17: bool = false,
    _reserved_bit_18: bool = false,
    _reserved_bit_19: bool = false,
    _reserved_bit_20: bool = false,
    _reserved_bit_21: bool = false,
    _reserved_bit_22: bool = false,
    _reserved_bit_23: bool = false,
    _reserved_bit_24: bool = false,
    _reserved_bit_25: bool = false,
    _reserved_bit_26: bool = false,
    _reserved_bit_27: bool = false,
    _reserved_bit_28: bool = false,
    _reserved_bit_29: bool = false,
    _reserved_bit_30: bool = false,
    _reserved_bit_31: bool = false,
};

pub const SourceLanguage = enum(u32) {
    Unknown = 0,
    ESSL = 1,
    GLSL = 2,
    OpenCL_C = 3,
    OpenCL_CPP = 4,
    HLSL = 5,
    CPP_for_OpenCL = 6,
    SYCL = 7,
    HERO_C = 8,
};

pub const ExecutionModel = enum(u32) {
    Vertex = 0,
    TessellationControl = 1,
    TessellationEvaluation = 2,
    Geometry = 3,
    Fragment = 4,
    GLCompute = 5,
    Kernel = 6,
    TaskNV = 5267,
    MeshNV = 5268,
    RayGenerationKHR = 5313,
    IntersectionKHR = 5314,
    AnyHitKHR = 5315,
    ClosestHitKHR = 5316,
    MissKHR = 5317,
    CallableKHR = 5318,
    TaskEXT = 5364,
    MeshEXT = 5365,

    pub const RayGenerationNV = ExecutionModel.RayGenerationKHR;
    pub const IntersectionNV = ExecutionModel.IntersectionKHR;
    pub const AnyHitNV = ExecutionModel.AnyHitKHR;
    pub const ClosestHitNV = ExecutionModel.ClosestHitKHR;
    pub const MissNV = ExecutionModel.MissKHR;
    pub const CallableNV = ExecutionModel.CallableKHR;
};

pub const AddressingModel = enum(u32) {
    Logical = 0,
    Physical32 = 1,
    Physical64 = 2,
    PhysicalStorageBuffer64 = 5348,

    pub const PhysicalStorageBuffer64EXT = AddressingModel.PhysicalStorageBuffer64;
};

pub const MemoryModel = enum(u32) {
    Simple = 0,
    GLSL450 = 1,
    OpenCL = 2,
    Vulkan = 3,

    pub const VulkanKHR = MemoryModel.Vulkan;
};

pub const ExecutionMode = enum(u32) {
    Invocations = 0,
    SpacingEqual = 1,
    SpacingFractionalEven = 2,
    SpacingFractionalOdd = 3,
    VertexOrderCw = 4,
    VertexOrderCcw = 5,
    PixelCenterInteger = 6,
    OriginUpperLeft = 7,
    OriginLowerLeft = 8,
    EarlyFragmentTests = 9,
    PointMode = 10,
    Xfb = 11,
    DepthReplacing = 12,
    DepthGreater = 14,
    DepthLess = 15,
    DepthUnchanged = 16,
    LocalSize = 17,
    LocalSizeHint = 18,
    InputPoints =
19, InputLines = 20, InputLinesAdjacency = 21, Triangles = 22, InputTrianglesAdjacency = 23, Quads = 24, Isolines = 25, OutputVertices = 26, OutputPoints = 27, OutputLineStrip = 28, OutputTriangleStrip = 29, VecTypeHint = 30, ContractionOff = 31, Initializer = 33, Finalizer = 34, SubgroupSize = 35, SubgroupsPerWorkgroup = 36, SubgroupsPerWorkgroupId = 37, LocalSizeId = 38, LocalSizeHintId = 39, NonCoherentColorAttachmentReadEXT = 4169, NonCoherentDepthAttachmentReadEXT = 4170, NonCoherentStencilAttachmentReadEXT = 4171, SubgroupUniformControlFlowKHR = 4421, PostDepthCoverage = 4446, DenormPreserve = 4459, DenormFlushToZero = 4460, SignedZeroInfNanPreserve = 4461, RoundingModeRTE = 4462, RoundingModeRTZ = 4463, EarlyAndLateFragmentTestsAMD = 5017, StencilRefReplacingEXT = 5027, StencilRefUnchangedFrontAMD = 5079, StencilRefGreaterFrontAMD = 5080, StencilRefLessFrontAMD = 5081, StencilRefUnchangedBackAMD = 5082, StencilRefGreaterBackAMD = 5083, StencilRefLessBackAMD = 5084, OutputLinesEXT = 5269, OutputPrimitivesEXT = 5270, DerivativeGroupQuadsNV = 5289, DerivativeGroupLinearNV = 5290, OutputTrianglesEXT = 5298, PixelInterlockOrderedEXT = 5366, PixelInterlockUnorderedEXT = 5367, SampleInterlockOrderedEXT = 5368, SampleInterlockUnorderedEXT = 5369, ShadingRateInterlockOrderedEXT = 5370, ShadingRateInterlockUnorderedEXT = 5371, SharedLocalMemorySizeINTEL = 5618, RoundingModeRTPINTEL = 5620, RoundingModeRTNINTEL = 5621, FloatingPointModeALTINTEL = 5622, FloatingPointModeIEEEINTEL = 5623, MaxWorkgroupSizeINTEL = 5893, MaxWorkDimINTEL = 5894, NoGlobalOffsetINTEL = 5895, NumSIMDWorkitemsINTEL = 5896, SchedulerTargetFmaxMhzINTEL = 5903, StreamingInterfaceINTEL = 6154, RegisterMapInterfaceINTEL = 6160, NamedBarrierCountINTEL = 6417, pub const OutputLinesNV = ExecutionMode.OutputLinesEXT; pub const OutputPrimitivesNV = ExecutionMode.OutputPrimitivesEXT; pub const OutputTrianglesNV = ExecutionMode.OutputTrianglesEXT; pub const Extended = union(ExecutionMode) { Invocations: struct { literal_integer: LiteralInteger }, SpacingEqual, SpacingFractionalEven, SpacingFractionalOdd, VertexOrderCw, VertexOrderCcw, PixelCenterInteger, OriginUpperLeft, OriginLowerLeft, EarlyFragmentTests, PointMode, Xfb, DepthReplacing, DepthGreater, DepthLess, DepthUnchanged, LocalSize: struct { x_size: LiteralInteger, y_size: LiteralInteger, z_size: LiteralInteger }, LocalSizeHint: struct { x_size: LiteralInteger, y_size: LiteralInteger, z_size: LiteralInteger }, InputPoints, InputLines, InputLinesAdjacency, Triangles, InputTrianglesAdjacency, Quads, Isolines, OutputVertices: struct { vertex_count: LiteralInteger }, OutputPoints, OutputLineStrip, OutputTriangleStrip, VecTypeHint: struct { vector_type: LiteralInteger }, ContractionOff, Initializer, Finalizer, SubgroupSize: struct { subgroup_size: LiteralInteger }, SubgroupsPerWorkgroup: struct { subgroups_per_workgroup: LiteralInteger }, SubgroupsPerWorkgroupId: struct { subgroups_per_workgroup: IdRef }, LocalSizeId: struct { x_size: IdRef, y_size: IdRef, z_size: IdRef }, LocalSizeHintId: struct { x_size_hint: IdRef, y_size_hint: IdRef, z_size_hint: IdRef }, NonCoherentColorAttachmentReadEXT, NonCoherentDepthAttachmentReadEXT, NonCoherentStencilAttachmentReadEXT, SubgroupUniformControlFlowKHR, PostDepthCoverage, DenormPreserve: struct { target_width: LiteralInteger }, DenormFlushToZero: struct { target_width: LiteralInteger }, SignedZeroInfNanPreserve: struct { target_width: LiteralInteger }, RoundingModeRTE: struct { target_width: LiteralInteger }, RoundingModeRTZ: struct { 
        target_width: LiteralInteger },
        EarlyAndLateFragmentTestsAMD,
        StencilRefReplacingEXT,
        StencilRefUnchangedFrontAMD,
        StencilRefGreaterFrontAMD,
        StencilRefLessFrontAMD,
        StencilRefUnchangedBackAMD,
        StencilRefGreaterBackAMD,
        StencilRefLessBackAMD,
        OutputLinesEXT,
        OutputPrimitivesEXT: struct { primitive_count: LiteralInteger },
        DerivativeGroupQuadsNV,
        DerivativeGroupLinearNV,
        OutputTrianglesEXT,
        PixelInterlockOrderedEXT,
        PixelInterlockUnorderedEXT,
        SampleInterlockOrderedEXT,
        SampleInterlockUnorderedEXT,
        ShadingRateInterlockOrderedEXT,
        ShadingRateInterlockUnorderedEXT,
        SharedLocalMemorySizeINTEL: struct { size: LiteralInteger },
        RoundingModeRTPINTEL: struct { target_width: LiteralInteger },
        RoundingModeRTNINTEL: struct { target_width: LiteralInteger },
        FloatingPointModeALTINTEL: struct { target_width: LiteralInteger },
        FloatingPointModeIEEEINTEL: struct { target_width: LiteralInteger },
        MaxWorkgroupSizeINTEL: struct { literal_integer_0: LiteralInteger, literal_integer_1: LiteralInteger, literal_integer_2: LiteralInteger },
        MaxWorkDimINTEL: struct { literal_integer: LiteralInteger },
        NoGlobalOffsetINTEL,
        NumSIMDWorkitemsINTEL: struct { literal_integer: LiteralInteger },
        SchedulerTargetFmaxMhzINTEL: struct { literal_integer: LiteralInteger },
        StreamingInterfaceINTEL: struct { stallfreereturn: LiteralInteger },
        RegisterMapInterfaceINTEL: struct { waitfordonewrite: LiteralInteger },
        NamedBarrierCountINTEL: struct { barrier_count: LiteralInteger },
    };
};

pub const StorageClass = enum(u32) {
    UniformConstant = 0,
    Input = 1,
    Uniform = 2,
    Output = 3,
    Workgroup = 4,
    CrossWorkgroup = 5,
    Private = 6,
    Function = 7,
    Generic = 8,
    PushConstant = 9,
    AtomicCounter = 10,
    Image = 11,
    StorageBuffer = 12,
    TileImageEXT = 4172,
    CallableDataKHR = 5328,
    IncomingCallableDataKHR = 5329,
    RayPayloadKHR = 5338,
    HitAttributeKHR = 5339,
    IncomingRayPayloadKHR = 5342,
    ShaderRecordBufferKHR = 5343,
    PhysicalStorageBuffer = 5349,
    HitObjectAttributeNV = 5385,
    TaskPayloadWorkgroupEXT = 5402,
    CodeSectionINTEL = 5605,
    DeviceOnlyINTEL = 5936,
    HostOnlyINTEL = 5937,

    pub const CallableDataNV = StorageClass.CallableDataKHR;
    pub const IncomingCallableDataNV = StorageClass.IncomingCallableDataKHR;
    pub const RayPayloadNV = StorageClass.RayPayloadKHR;
    pub const HitAttributeNV = StorageClass.HitAttributeKHR;
    pub const IncomingRayPayloadNV = StorageClass.IncomingRayPayloadKHR;
    pub const ShaderRecordBufferNV = StorageClass.ShaderRecordBufferKHR;
    pub const PhysicalStorageBufferEXT = StorageClass.PhysicalStorageBuffer;
};

pub const Dim = enum(u32) {
    @"1D" = 0,
    @"2D" = 1,
    @"3D" = 2,
    Cube = 3,
    Rect = 4,
    Buffer = 5,
    SubpassData = 6,
    TileImageDataEXT = 4173,
};

pub const SamplerAddressingMode = enum(u32) {
    None = 0,
    ClampToEdge = 1,
    Clamp = 2,
    Repeat = 3,
    RepeatMirrored = 4,
};

pub const SamplerFilterMode = enum(u32) {
    Nearest = 0,
    Linear = 1,
};

pub const ImageFormat = enum(u32) {
    Unknown = 0,
    Rgba32f = 1,
    Rgba16f = 2,
    R32f = 3,
    Rgba8 = 4,
    Rgba8Snorm = 5,
    Rg32f = 6,
    Rg16f = 7,
    R11fG11fB10f = 8,
    R16f = 9,
    Rgba16 = 10,
    Rgb10A2 = 11,
    Rg16 = 12,
    Rg8 = 13,
    R16 = 14,
    R8 = 15,
    Rgba16Snorm = 16,
    Rg16Snorm = 17,
    Rg8Snorm = 18,
    R16Snorm = 19,
    R8Snorm = 20,
    Rgba32i = 21,
    Rgba16i = 22,
    Rgba8i = 23,
    R32i = 24,
    Rg32i = 25,
    Rg16i = 26,
    Rg8i = 27,
    R16i = 28,
    R8i = 29,
    Rgba32ui = 30,
    Rgba16ui = 31,
    Rgba8ui = 32,
    R32ui = 33,
    Rgb10a2ui = 34,
    Rg32ui = 35,
    Rg16ui = 36,
    Rg8ui = 37,
    R16ui = 38,
    R8ui = 39,
    R64ui = 40,
    R64i = 41,
};

pub const ImageChannelOrder = enum(u32) {
    R = 0,
    A = 1,
    RG = 2,
    RA = 3,
    RGB = 4,
    RGBA = 5,
    BGRA = 6,
    ARGB = 7,
    Intensity = 8,
    Luminance = 9,
    Rx =
10, RGx = 11, RGBx = 12, Depth = 13, DepthStencil = 14, sRGB = 15, sRGBx = 16, sRGBA = 17, sBGRA = 18, ABGR = 19, }; pub const ImageChannelDataType = enum(u32) { SnormInt8 = 0, SnormInt16 = 1, UnormInt8 = 2, UnormInt16 = 3, UnormShort565 = 4, UnormShort555 = 5, UnormInt101010 = 6, SignedInt8 = 7, SignedInt16 = 8, SignedInt32 = 9, UnsignedInt8 = 10, UnsignedInt16 = 11, UnsignedInt32 = 12, HalfFloat = 13, Float = 14, UnormInt24 = 15, UnormInt101010_2 = 16, }; pub const FPRoundingMode = enum(u32) { RTE = 0, RTZ = 1, RTP = 2, RTN = 3, }; pub const FPDenormMode = enum(u32) { Preserve = 0, FlushToZero = 1, }; pub const QuantizationModes = enum(u32) { TRN = 0, TRN_ZERO = 1, RND = 2, RND_ZERO = 3, RND_INF = 4, RND_MIN_INF = 5, RND_CONV = 6, RND_CONV_ODD = 7, }; pub const FPOperationMode = enum(u32) { IEEE = 0, ALT = 1, }; pub const OverflowModes = enum(u32) { WRAP = 0, SAT = 1, SAT_ZERO = 2, SAT_SYM = 3, }; pub const LinkageType = enum(u32) { Export = 0, Import = 1, LinkOnceODR = 2, }; pub const AccessQualifier = enum(u32) { ReadOnly = 0, WriteOnly = 1, ReadWrite = 2, }; pub const FunctionParameterAttribute = enum(u32) { Zext = 0, Sext = 1, ByVal = 2, Sret = 3, NoAlias = 4, NoCapture = 5, NoWrite = 6, NoReadWrite = 7, RuntimeAlignedINTEL = 5940, }; pub const Decoration = enum(u32) { RelaxedPrecision = 0, SpecId = 1, Block = 2, BufferBlock = 3, RowMajor = 4, ColMajor = 5, ArrayStride = 6, MatrixStride = 7, GLSLShared = 8, GLSLPacked = 9, CPacked = 10, BuiltIn = 11, NoPerspective = 13, Flat = 14, Patch = 15, Centroid = 16, Sample = 17, Invariant = 18, Restrict = 19, Aliased = 20, Volatile = 21, Constant = 22, Coherent = 23, NonWritable = 24, NonReadable = 25, Uniform = 26, UniformId = 27, SaturatedConversion = 28, Stream = 29, Location = 30, Component = 31, Index = 32, Binding = 33, DescriptorSet = 34, Offset = 35, XfbBuffer = 36, XfbStride = 37, FuncParamAttr = 38, FPRoundingMode = 39, FPFastMathMode = 40, LinkageAttributes = 41, NoContraction = 42, InputAttachmentIndex = 43, Alignment = 44, MaxByteOffset = 45, AlignmentId = 46, MaxByteOffsetId = 47, NoSignedWrap = 4469, NoUnsignedWrap = 4470, WeightTextureQCOM = 4487, BlockMatchTextureQCOM = 4488, ExplicitInterpAMD = 4999, OverrideCoverageNV = 5248, PassthroughNV = 5250, ViewportRelativeNV = 5252, SecondaryViewportRelativeNV = 5256, PerPrimitiveEXT = 5271, PerViewNV = 5272, PerTaskNV = 5273, PerVertexKHR = 5285, NonUniform = 5300, RestrictPointer = 5355, AliasedPointer = 5356, HitObjectShaderRecordBufferNV = 5386, BindlessSamplerNV = 5398, BindlessImageNV = 5399, BoundSamplerNV = 5400, BoundImageNV = 5401, SIMTCallINTEL = 5599, ReferencedIndirectlyINTEL = 5602, ClobberINTEL = 5607, SideEffectsINTEL = 5608, VectorComputeVariableINTEL = 5624, FuncParamIOKindINTEL = 5625, VectorComputeFunctionINTEL = 5626, StackCallINTEL = 5627, GlobalVariableOffsetINTEL = 5628, CounterBuffer = 5634, UserSemantic = 5635, UserTypeGOOGLE = 5636, FunctionRoundingModeINTEL = 5822, FunctionDenormModeINTEL = 5823, RegisterINTEL = 5825, MemoryINTEL = 5826, NumbanksINTEL = 5827, BankwidthINTEL = 5828, MaxPrivateCopiesINTEL = 5829, SinglepumpINTEL = 5830, DoublepumpINTEL = 5831, MaxReplicatesINTEL = 5832, SimpleDualPortINTEL = 5833, MergeINTEL = 5834, BankBitsINTEL = 5835, ForcePow2DepthINTEL = 5836, BurstCoalesceINTEL = 5899, CacheSizeINTEL = 5900, DontStaticallyCoalesceINTEL = 5901, PrefetchINTEL = 5902, StallEnableINTEL = 5905, FuseLoopsInFunctionINTEL = 5907, MathOpDSPModeINTEL = 5909, AliasScopeINTEL = 5914, NoAliasINTEL = 5915, InitiationIntervalINTEL = 5917, 
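    // Decoration values are sparse by design: each extension keeps the numeric
    // token registered in the official SPIR-V headers (the INTEL FPGA memory
    // decorations above occupy the 58xx/59xx block).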
MaxConcurrencyINTEL = 5918, PipelineEnableINTEL = 5919, BufferLocationINTEL = 5921, IOPipeStorageINTEL = 5944, FunctionFloatingPointModeINTEL = 6080, SingleElementVectorINTEL = 6085, VectorComputeCallableFunctionINTEL = 6087, MediaBlockIOINTEL = 6140, LatencyControlLabelINTEL = 6172, LatencyControlConstraintINTEL = 6173, ConduitKernelArgumentINTEL = 6175, RegisterMapKernelArgumentINTEL = 6176, MMHostInterfaceAddressWidthINTEL = 6177, MMHostInterfaceDataWidthINTEL = 6178, MMHostInterfaceLatencyINTEL = 6179, MMHostInterfaceReadWriteModeINTEL = 6180, MMHostInterfaceMaxBurstINTEL = 6181, MMHostInterfaceWaitRequestINTEL = 6182, StableKernelArgumentINTEL = 6183, pub const PerPrimitiveNV = Decoration.PerPrimitiveEXT; pub const PerVertexNV = Decoration.PerVertexKHR; pub const NonUniformEXT = Decoration.NonUniform; pub const RestrictPointerEXT = Decoration.RestrictPointer; pub const AliasedPointerEXT = Decoration.AliasedPointer; pub const HlslCounterBufferGOOGLE = Decoration.CounterBuffer; pub const HlslSemanticGOOGLE = Decoration.UserSemantic; pub const Extended = union(Decoration) { RelaxedPrecision, SpecId: struct { specialization_constant_id: LiteralInteger }, Block, BufferBlock, RowMajor, ColMajor, ArrayStride: struct { array_stride: LiteralInteger }, MatrixStride: struct { matrix_stride: LiteralInteger }, GLSLShared, GLSLPacked, CPacked, BuiltIn: struct { built_in: BuiltIn }, NoPerspective, Flat, Patch, Centroid, Sample, Invariant, Restrict, Aliased, Volatile, Constant, Coherent, NonWritable, NonReadable, Uniform, UniformId: struct { execution: IdScope }, SaturatedConversion, Stream: struct { stream_number: LiteralInteger }, Location: struct { location: LiteralInteger }, Component: struct { component: LiteralInteger }, Index: struct { index: LiteralInteger }, Binding: struct { binding_point: LiteralInteger }, DescriptorSet: struct { descriptor_set: LiteralInteger }, Offset: struct { byte_offset: LiteralInteger }, XfbBuffer: struct { xfb_buffer_number: LiteralInteger }, XfbStride: struct { xfb_stride: LiteralInteger }, FuncParamAttr: struct { function_parameter_attribute: FunctionParameterAttribute }, FPRoundingMode: struct { fprounding_mode: FPRoundingMode }, FPFastMathMode: struct { fpfast_math_mode: FPFastMathMode }, LinkageAttributes: struct { name: LiteralString, linkage_type: LinkageType }, NoContraction, InputAttachmentIndex: struct { attachment_index: LiteralInteger }, Alignment: struct { alignment: LiteralInteger }, MaxByteOffset: struct { max_byte_offset: LiteralInteger }, AlignmentId: struct { alignment: IdRef }, MaxByteOffsetId: struct { max_byte_offset: IdRef }, NoSignedWrap, NoUnsignedWrap, WeightTextureQCOM, BlockMatchTextureQCOM, ExplicitInterpAMD, OverrideCoverageNV, PassthroughNV, ViewportRelativeNV, SecondaryViewportRelativeNV: struct { offset: LiteralInteger }, PerPrimitiveEXT, PerViewNV, PerTaskNV, PerVertexKHR, NonUniform, RestrictPointer, AliasedPointer, HitObjectShaderRecordBufferNV, BindlessSamplerNV, BindlessImageNV, BoundSamplerNV, BoundImageNV, SIMTCallINTEL: struct { n: LiteralInteger }, ReferencedIndirectlyINTEL, ClobberINTEL: struct { register: LiteralString }, SideEffectsINTEL, VectorComputeVariableINTEL, FuncParamIOKindINTEL: struct { kind: LiteralInteger }, VectorComputeFunctionINTEL, StackCallINTEL, GlobalVariableOffsetINTEL: struct { offset: LiteralInteger }, CounterBuffer: struct { counter_buffer: IdRef }, UserSemantic: struct { semantic: LiteralString }, UserTypeGOOGLE: struct { user_type: LiteralString }, FunctionRoundingModeINTEL: struct { target_width: 
LiteralInteger, fp_rounding_mode: FPRoundingMode },
    FunctionDenormModeINTEL: struct { target_width: LiteralInteger, fp_denorm_mode: FPDenormMode },
    RegisterINTEL,
    MemoryINTEL: struct { memory_type: LiteralString },
    NumbanksINTEL: struct { banks: LiteralInteger },
    BankwidthINTEL: struct { bank_width: LiteralInteger },
    MaxPrivateCopiesINTEL: struct { maximum_copies: LiteralInteger },
    SinglepumpINTEL,
    DoublepumpINTEL,
    MaxReplicatesINTEL: struct { maximum_replicates: LiteralInteger },
    SimpleDualPortINTEL,
    MergeINTEL: struct { merge_key: LiteralString, merge_type: LiteralString },
    BankBitsINTEL: struct { bank_bits: []const LiteralInteger = &.{} },
    ForcePow2DepthINTEL: struct { force_key: LiteralInteger },
    BurstCoalesceINTEL,
    CacheSizeINTEL: struct { cache_size_in_bytes: LiteralInteger },
    DontStaticallyCoalesceINTEL,
    PrefetchINTEL: struct { prefetcher_size_in_bytes: LiteralInteger },
    StallEnableINTEL,
    FuseLoopsInFunctionINTEL,
    MathOpDSPModeINTEL: struct { mode: LiteralInteger, propagate: LiteralInteger },
    AliasScopeINTEL: struct { aliasing_scopes_list: IdRef },
    NoAliasINTEL: struct { aliasing_scopes_list: IdRef },
    InitiationIntervalINTEL: struct { cycles: LiteralInteger },
    MaxConcurrencyINTEL: struct { invocations: LiteralInteger },
    PipelineEnableINTEL: struct { enable: LiteralInteger },
    BufferLocationINTEL: struct { buffer_location_id: LiteralInteger },
    IOPipeStorageINTEL: struct { io_pipe_id: LiteralInteger },
    FunctionFloatingPointModeINTEL: struct { target_width: LiteralInteger, fp_operation_mode: FPOperationMode },
    SingleElementVectorINTEL,
    VectorComputeCallableFunctionINTEL,
    MediaBlockIOINTEL,
    LatencyControlLabelINTEL: struct { latency_label: LiteralInteger },
    LatencyControlConstraintINTEL: struct { relative_to: LiteralInteger, control_type: LiteralInteger, relative_cycle: LiteralInteger },
    ConduitKernelArgumentINTEL,
    RegisterMapKernelArgumentINTEL,
    MMHostInterfaceAddressWidthINTEL: struct { addresswidth: LiteralInteger },
    MMHostInterfaceDataWidthINTEL: struct { datawidth: LiteralInteger },
    MMHostInterfaceLatencyINTEL: struct { latency: LiteralInteger },
    MMHostInterfaceReadWriteModeINTEL: struct { readwritemode: AccessQualifier },
    MMHostInterfaceMaxBurstINTEL: struct { maxburstcount: LiteralInteger },
    MMHostInterfaceWaitRequestINTEL: struct { waitrequest: LiteralInteger },
    StableKernelArgumentINTEL,
    };
};

pub const BuiltIn = enum(u32) {
    Position = 0, PointSize = 1, ClipDistance = 3, CullDistance = 4, VertexId = 5,
    InstanceId = 6, PrimitiveId = 7, InvocationId = 8, Layer = 9, ViewportIndex = 10,
    TessLevelOuter = 11, TessLevelInner = 12, TessCoord = 13, PatchVertices = 14, FragCoord = 15,
    PointCoord = 16, FrontFacing = 17, SampleId = 18, SamplePosition = 19, SampleMask = 20,
    FragDepth = 22, HelperInvocation = 23, NumWorkgroups = 24, WorkgroupSize = 25, WorkgroupId = 26,
    LocalInvocationId = 27, GlobalInvocationId = 28, LocalInvocationIndex = 29, WorkDim = 30, GlobalSize = 31,
    EnqueuedWorkgroupSize = 32, GlobalOffset = 33, GlobalLinearId = 34, SubgroupSize = 36, SubgroupMaxSize = 37,
    NumSubgroups = 38, NumEnqueuedSubgroups = 39, SubgroupId = 40, SubgroupLocalInvocationId = 41,
    VertexIndex = 42, InstanceIndex = 43,
    CoreIDARM = 4160, CoreCountARM = 4161, CoreMaxIDARM = 4162, WarpIDARM = 4163, WarpMaxIDARM = 4164,
    SubgroupEqMask = 4416, SubgroupGeMask = 4417, SubgroupGtMask = 4418, SubgroupLeMask = 4419, SubgroupLtMask = 4420,
    BaseVertex = 4424, BaseInstance = 4425, DrawIndex = 4426, PrimitiveShadingRateKHR = 4432, DeviceIndex = 4438,
    ViewIndex = 4440, ShadingRateKHR = 4444,
    BaryCoordNoPerspAMD = 4992, BaryCoordNoPerspCentroidAMD = 4993, BaryCoordNoPerspSampleAMD = 4994,
    BaryCoordSmoothAMD = 4995, BaryCoordSmoothCentroidAMD = 4996, BaryCoordSmoothSampleAMD = 4997,
    BaryCoordPullModelAMD = 4998, FragStencilRefEXT = 5014, ViewportMaskNV = 5253, SecondaryPositionNV = 5257,
    SecondaryViewportMaskNV = 5258, PositionPerViewNV = 5261, ViewportMaskPerViewNV = 5262, FullyCoveredEXT = 5264,
    TaskCountNV = 5274, PrimitiveCountNV = 5275, PrimitiveIndicesNV = 5276, ClipDistancePerViewNV = 5277,
    CullDistancePerViewNV = 5278, LayerPerViewNV = 5279, MeshViewCountNV = 5280, MeshViewIndicesNV = 5281,
    BaryCoordKHR = 5286, BaryCoordNoPerspKHR = 5287, FragSizeEXT = 5292, FragInvocationCountEXT = 5293,
    PrimitivePointIndicesEXT = 5294, PrimitiveLineIndicesEXT = 5295, PrimitiveTriangleIndicesEXT = 5296,
    CullPrimitiveEXT = 5299, LaunchIdKHR = 5319, LaunchSizeKHR = 5320, WorldRayOriginKHR = 5321,
    WorldRayDirectionKHR = 5322, ObjectRayOriginKHR = 5323, ObjectRayDirectionKHR = 5324, RayTminKHR = 5325,
    RayTmaxKHR = 5326, InstanceCustomIndexKHR = 5327, ObjectToWorldKHR = 5330, WorldToObjectKHR = 5331,
    HitTNV = 5332, HitKindKHR = 5333, CurrentRayTimeNV = 5334, HitTriangleVertexPositionsKHR = 5335,
    IncomingRayFlagsKHR = 5351, RayGeometryIndexKHR = 5352, WarpsPerSMNV = 5374, SMCountNV = 5375,
    WarpIDNV = 5376, SMIDNV = 5377, CullMaskKHR = 6021,

    pub const SubgroupEqMaskKHR = BuiltIn.SubgroupEqMask;
    pub const SubgroupGeMaskKHR = BuiltIn.SubgroupGeMask;
    pub const SubgroupGtMaskKHR = BuiltIn.SubgroupGtMask;
    pub const SubgroupLeMaskKHR = BuiltIn.SubgroupLeMask;
    pub const SubgroupLtMaskKHR = BuiltIn.SubgroupLtMask;
    pub const BaryCoordNV = BuiltIn.BaryCoordKHR;
    pub const BaryCoordNoPerspNV = BuiltIn.BaryCoordNoPerspKHR;
    pub const FragmentSizeNV = BuiltIn.FragSizeEXT;
    pub const InvocationsPerPixelNV = BuiltIn.FragInvocationCountEXT;
    pub const LaunchIdNV = BuiltIn.LaunchIdKHR;
    pub const LaunchSizeNV = BuiltIn.LaunchSizeKHR;
    pub const WorldRayOriginNV = BuiltIn.WorldRayOriginKHR;
    pub const WorldRayDirectionNV = BuiltIn.WorldRayDirectionKHR;
    pub const ObjectRayOriginNV = BuiltIn.ObjectRayOriginKHR;
    pub const ObjectRayDirectionNV = BuiltIn.ObjectRayDirectionKHR;
    pub const RayTminNV = BuiltIn.RayTminKHR;
    pub const RayTmaxNV = BuiltIn.RayTmaxKHR;
    pub const InstanceCustomIndexNV = BuiltIn.InstanceCustomIndexKHR;
    pub const ObjectToWorldNV = BuiltIn.ObjectToWorldKHR;
    pub const WorldToObjectNV = BuiltIn.WorldToObjectKHR;
    pub const HitKindNV = BuiltIn.HitKindKHR;
    pub const IncomingRayFlagsNV = BuiltIn.IncomingRayFlagsKHR;
};

pub const Scope = enum(u32) {
    CrossDevice = 0,
    Device = 1,
    Workgroup = 2,
    Subgroup = 3,
    Invocation = 4,
    QueueFamily = 5,
    ShaderCallKHR = 6,

    pub const QueueFamilyKHR = Scope.QueueFamily;
};

pub const GroupOperation = enum(u32) {
    Reduce = 0,
    InclusiveScan = 1,
    ExclusiveScan = 2,
    ClusteredReduce = 3,
    PartitionedReduceNV = 6,
    PartitionedInclusiveScanNV = 7,
    PartitionedExclusiveScanNV = 8,
};

pub const KernelEnqueueFlags = enum(u32) {
    NoWait = 0,
    WaitKernel = 1,
    WaitWorkGroup = 2,
};

pub const Capability = enum(u32) {
    Matrix = 0, Shader = 1, Geometry = 2, Tessellation = 3, Addresses = 4,
    Linkage = 5, Kernel = 6, Vector16 = 7, Float16Buffer = 8, Float16 = 9,
    Float64 = 10, Int64 = 11, Int64Atomics = 12, ImageBasic = 13, ImageReadWrite = 14,
    ImageMipmap = 15, Pipes = 17, Groups = 18, DeviceEnqueue = 19, LiteralSampler = 20,
    AtomicStorage = 21, Int16 = 22, TessellationPointSize = 23, GeometryPointSize = 24,
    ImageGatherExtended = 25, StorageImageMultisample = 27, UniformBufferArrayDynamicIndexing = 28,
    SampledImageArrayDynamicIndexing = 29, StorageBufferArrayDynamicIndexing = 30,
    StorageImageArrayDynamicIndexing = 31, ClipDistance = 32, CullDistance = 33, ImageCubeArray = 34,
    SampleRateShading = 35, ImageRect = 36, SampledRect = 37, GenericPointer = 38, Int8 = 39,
    InputAttachment = 40, SparseResidency = 41, MinLod = 42, Sampled1D = 43, Image1D = 44,
    SampledCubeArray = 45, SampledBuffer = 46, ImageBuffer = 47, ImageMSArray = 48,
    StorageImageExtendedFormats = 49, ImageQuery = 50, DerivativeControl = 51, InterpolationFunction = 52,
    TransformFeedback = 53, GeometryStreams = 54, StorageImageReadWithoutFormat = 55,
    StorageImageWriteWithoutFormat = 56, MultiViewport = 57, SubgroupDispatch = 58, NamedBarrier = 59,
    PipeStorage = 60, GroupNonUniform = 61, GroupNonUniformVote = 62, GroupNonUniformArithmetic = 63,
    GroupNonUniformBallot = 64, GroupNonUniformShuffle = 65, GroupNonUniformShuffleRelative = 66,
    GroupNonUniformClustered = 67, GroupNonUniformQuad = 68, ShaderLayer = 69, ShaderViewportIndex = 70,
    UniformDecoration = 71,
    CoreBuiltinsARM = 4165, TileImageColorReadAccessEXT = 4166, TileImageDepthReadAccessEXT = 4167,
    TileImageStencilReadAccessEXT = 4168, FragmentShadingRateKHR = 4422, SubgroupBallotKHR = 4423,
    DrawParameters = 4427, WorkgroupMemoryExplicitLayoutKHR = 4428,
    WorkgroupMemoryExplicitLayout8BitAccessKHR = 4429, WorkgroupMemoryExplicitLayout16BitAccessKHR = 4430,
    SubgroupVoteKHR = 4431, StorageBuffer16BitAccess = 4433, UniformAndStorageBuffer16BitAccess = 4434,
    StoragePushConstant16 = 4435, StorageInputOutput16 = 4436, DeviceGroup = 4437, MultiView = 4439,
    VariablePointersStorageBuffer = 4441, VariablePointers = 4442, AtomicStorageOps = 4445,
    SampleMaskPostDepthCoverage = 4447, StorageBuffer8BitAccess = 4448,
    UniformAndStorageBuffer8BitAccess = 4449, StoragePushConstant8 = 4450, DenormPreserve = 4464,
    DenormFlushToZero = 4465, SignedZeroInfNanPreserve = 4466, RoundingModeRTE = 4467,
    RoundingModeRTZ = 4468, RayQueryProvisionalKHR = 4471, RayQueryKHR = 4472,
    RayTraversalPrimitiveCullingKHR = 4478, RayTracingKHR = 4479, TextureSampleWeightedQCOM = 4484,
    TextureBoxFilterQCOM = 4485, TextureBlockMatchQCOM = 4486,
    Float16ImageAMD = 5008, ImageGatherBiasLodAMD = 5009, FragmentMaskAMD = 5010, StencilExportEXT = 5013,
    ImageReadWriteLodAMD = 5015, Int64ImageEXT = 5016, ShaderClockKHR = 5055,
    SampleMaskOverrideCoverageNV = 5249, GeometryShaderPassthroughNV = 5251,
    ShaderViewportIndexLayerEXT = 5254, ShaderViewportMaskNV = 5255, ShaderStereoViewNV = 5259,
    PerViewAttributesNV = 5260, FragmentFullyCoveredEXT = 5265, MeshShadingNV = 5266,
    ImageFootprintNV = 5282, MeshShadingEXT = 5283, FragmentBarycentricKHR = 5284,
    ComputeDerivativeGroupQuadsNV = 5288, FragmentDensityEXT = 5291, GroupNonUniformPartitionedNV = 5297,
    ShaderNonUniform = 5301, RuntimeDescriptorArray = 5302, InputAttachmentArrayDynamicIndexing = 5303,
    UniformTexelBufferArrayDynamicIndexing = 5304, StorageTexelBufferArrayDynamicIndexing = 5305,
    UniformBufferArrayNonUniformIndexing = 5306, SampledImageArrayNonUniformIndexing = 5307,
    StorageBufferArrayNonUniformIndexing = 5308, StorageImageArrayNonUniformIndexing = 5309,
    InputAttachmentArrayNonUniformIndexing = 5310, UniformTexelBufferArrayNonUniformIndexing = 5311,
    StorageTexelBufferArrayNonUniformIndexing = 5312, RayTracingPositionFetchKHR = 5336,
    RayTracingNV = 5340, RayTracingMotionBlurNV = 5341, VulkanMemoryModel = 5345,
    VulkanMemoryModelDeviceScope = 5346, PhysicalStorageBufferAddresses = 5347,
    ComputeDerivativeGroupLinearNV = 5350, RayTracingProvisionalKHR = 5353, CooperativeMatrixNV = 5357,
    FragmentShaderSampleInterlockEXT = 5363, FragmentShaderShadingRateInterlockEXT = 5372,
    ShaderSMBuiltinsNV = 5373, FragmentShaderPixelInterlockEXT = 5378, DemoteToHelperInvocation = 5379,
    RayTracingOpacityMicromapEXT = 5381, ShaderInvocationReorderNV = 5383, BindlessTextureNV = 5390,
    RayQueryPositionFetchKHR = 5391,
    SubgroupShuffleINTEL = 5568, SubgroupBufferBlockIOINTEL = 5569, SubgroupImageBlockIOINTEL = 5570,
    SubgroupImageMediaBlockIOINTEL = 5579, RoundToInfinityINTEL = 5582, FloatingPointModeINTEL = 5583,
    IntegerFunctions2INTEL = 5584, FunctionPointersINTEL = 5603, IndirectReferencesINTEL = 5604,
    AsmINTEL = 5606, AtomicFloat32MinMaxEXT = 5612, AtomicFloat64MinMaxEXT = 5613,
    AtomicFloat16MinMaxEXT = 5616, VectorComputeINTEL = 5617, VectorAnyINTEL = 5619,
    ExpectAssumeKHR = 5629, SubgroupAvcMotionEstimationINTEL = 5696,
    SubgroupAvcMotionEstimationIntraINTEL = 5697, SubgroupAvcMotionEstimationChromaINTEL = 5698,
    VariableLengthArrayINTEL = 5817, FunctionFloatControlINTEL = 5821, FPGAMemoryAttributesINTEL = 5824,
    FPFastMathModeINTEL = 5837, ArbitraryPrecisionIntegersINTEL = 5844,
    ArbitraryPrecisionFloatingPointINTEL = 5845, UnstructuredLoopControlsINTEL = 5886,
    FPGALoopControlsINTEL = 5888, KernelAttributesINTEL = 5892, FPGAKernelAttributesINTEL = 5897,
    FPGAMemoryAccessesINTEL = 5898, FPGAClusterAttributesINTEL = 5904, LoopFuseINTEL = 5906,
    FPGADSPControlINTEL = 5908, MemoryAccessAliasingINTEL = 5910,
    FPGAInvocationPipeliningAttributesINTEL = 5916, FPGABufferLocationINTEL = 5920,
    ArbitraryPrecisionFixedPointINTEL = 5922, USMStorageClassesINTEL = 5935,
    RuntimeAlignedAttributeINTEL = 5939, IOPipesINTEL = 5943, BlockingPipesINTEL = 5945,
    FPGARegINTEL = 5948,
    DotProductInputAll = 6016, DotProductInput4x8Bit = 6017, DotProductInput4x8BitPacked = 6018,
    DotProduct = 6019, RayCullMaskKHR = 6020, BitInstructions = 6025, GroupNonUniformRotateKHR = 6026,
    AtomicFloat32AddEXT = 6033, AtomicFloat64AddEXT = 6034, LongConstantCompositeINTEL = 6089,
    OptNoneINTEL = 6094, AtomicFloat16AddEXT = 6095, DebugInfoModuleINTEL = 6114,
    BFloat16ConversionINTEL = 6115, SplitBarrierINTEL = 6141, FPGAKernelAttributesv2INTEL = 6161,
    FPGALatencyControlINTEL = 6171, FPGAArgumentInterfacesINTEL = 6174, GroupUniformArithmeticKHR = 6400,

    pub const StorageUniformBufferBlock16 = Capability.StorageBuffer16BitAccess;
    pub const StorageUniform16 = Capability.UniformAndStorageBuffer16BitAccess;
    pub const ShaderViewportIndexLayerNV = Capability.ShaderViewportIndexLayerEXT;
    pub const FragmentBarycentricNV = Capability.FragmentBarycentricKHR;
    pub const ShadingRateNV = Capability.FragmentDensityEXT;
    pub const ShaderNonUniformEXT = Capability.ShaderNonUniform;
    pub const RuntimeDescriptorArrayEXT = Capability.RuntimeDescriptorArray;
    pub const InputAttachmentArrayDynamicIndexingEXT = Capability.InputAttachmentArrayDynamicIndexing;
    pub const UniformTexelBufferArrayDynamicIndexingEXT = Capability.UniformTexelBufferArrayDynamicIndexing;
    pub const StorageTexelBufferArrayDynamicIndexingEXT = Capability.StorageTexelBufferArrayDynamicIndexing;
    pub const UniformBufferArrayNonUniformIndexingEXT = Capability.UniformBufferArrayNonUniformIndexing;
    pub const SampledImageArrayNonUniformIndexingEXT = Capability.SampledImageArrayNonUniformIndexing;
    pub const StorageBufferArrayNonUniformIndexingEXT = Capability.StorageBufferArrayNonUniformIndexing;
    pub const StorageImageArrayNonUniformIndexingEXT = Capability.StorageImageArrayNonUniformIndexing;
    pub const InputAttachmentArrayNonUniformIndexingEXT = Capability.InputAttachmentArrayNonUniformIndexing;
    pub const UniformTexelBufferArrayNonUniformIndexingEXT = Capability.UniformTexelBufferArrayNonUniformIndexing;
    pub const StorageTexelBufferArrayNonUniformIndexingEXT = Capability.StorageTexelBufferArrayNonUniformIndexing;
    pub const VulkanMemoryModelKHR = Capability.VulkanMemoryModel;
    pub const VulkanMemoryModelDeviceScopeKHR = Capability.VulkanMemoryModelDeviceScope;
    pub const PhysicalStorageBufferAddressesEXT = Capability.PhysicalStorageBufferAddresses;
    pub const DemoteToHelperInvocationEXT = Capability.DemoteToHelperInvocation;
    pub const DotProductInputAllKHR = Capability.DotProductInputAll;
    pub const DotProductInput4x8BitKHR = Capability.DotProductInput4x8Bit;
    pub const DotProductInput4x8BitPackedKHR = Capability.DotProductInput4x8BitPacked;
    pub const DotProductKHR = Capability.DotProduct;
};

pub const RayQueryIntersection = enum(u32) {
    RayQueryCandidateIntersectionKHR = 0,
    RayQueryCommittedIntersectionKHR = 1,
};

pub const RayQueryCommittedIntersectionType = enum(u32) {
    RayQueryCommittedIntersectionNoneKHR = 0,
    RayQueryCommittedIntersectionTriangleKHR = 1,
    RayQueryCommittedIntersectionGeneratedKHR = 2,
};

pub const RayQueryCandidateIntersectionType = enum(u32) {
    RayQueryCandidateIntersectionTriangleKHR = 0,
    RayQueryCandidateIntersectionAABBKHR = 1,
};

pub const PackedVectorFormat = enum(u32) {
    PackedVectorFormat4x8Bit = 0,

    pub const PackedVectorFormat4x8BitKHR = PackedVectorFormat.PackedVectorFormat4x8Bit;
};
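// Editor's note: a minimal sketch, not part of the generated spec file. It
// shows that the KHR/NV/EXT aliases above are plain `pub const` redeclarations,
// so they compare equal to their canonical members and carry the same integer
// value. It assumes these enums are declared at file scope so a test block in
// the same file can reference them directly.
test "spec aliases resolve to canonical members" {
    const std = @import("std");
    try std.testing.expectEqual(BuiltIn.SubgroupEqMask, BuiltIn.SubgroupEqMaskKHR);
    try std.testing.expectEqual(@as(u32, 4416), @intFromEnum(BuiltIn.SubgroupEqMaskKHR));
    try std.testing.expectEqual(Capability.VulkanMemoryModel, Capability.VulkanMemoryModelKHR);
}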
0
repos/mach-sysgpu/src/shader/codegen
repos/mach-sysgpu/src/shader/codegen/spirv/Section.zig
//! Borrowed from Zig compiler codebase with changes.
//! Licensed under LICENSE-ZIG
//!
//! Represents a section or subsection of instructions in a SPIR-V binary. Instructions can be appended
//! to separate sections, which can then later be merged into the final binary.

const std = @import("std");
const spec = @import("spec.zig");
const Opcode = spec.Opcode;
const Word = spec.Word;
const DoubleWord = std.meta.Int(.unsigned, @bitSizeOf(Word) * 2);
const Log2Word = std.math.Log2Int(Word);

const Section = @This();

allocator: std.mem.Allocator,
words: std.ArrayListUnmanaged(Word) = .{},

pub fn deinit(section: *Section) void {
    section.words.deinit(section.allocator);
}

pub fn toWords(section: Section) []Word {
    return section.words.items;
}

/// Append the words from another section into this section.
pub fn append(section: *Section, other_section: Section) !void {
    try section.words.appendSlice(section.allocator, other_section.words.items);
}

/// Ensure capacity of at least `capacity` more words in this section.
pub fn ensureUnusedCapacity(section: *Section, capacity: usize) !void {
    try section.words.ensureUnusedCapacity(section.allocator, capacity);
}

/// Write an instruction and size, operands are to be inserted manually.
pub fn emitRaw(
    section: *Section,
    opcode: Opcode,
    operand_words: usize, // opcode itself not included
) !void {
    const word_count = 1 + operand_words;
    try section.words.ensureUnusedCapacity(section.allocator, word_count);
    section.writeWord(@as(Word, @intCast(word_count << 16)) | @intFromEnum(opcode));
}

pub fn emit(
    section: *Section,
    comptime opcode: spec.Opcode,
    operands: opcode.Operands(),
) !void {
    const word_count = instructionSize(opcode, operands);
    try section.ensureUnusedCapacity(word_count);
    section.writeWord(@as(Word, @intCast(word_count << 16)) | @intFromEnum(opcode));
    section.writeOperands(opcode.Operands(), operands);
}

pub fn emitSpecConstantOp(
    section: *Section,
    comptime opcode: spec.Opcode,
    operands: opcode.Operands(),
) !void {
    const word_count = operandsSize(opcode.Operands(), operands);
    try section.emitRaw(.OpSpecConstantOp, 1 + word_count);
    section.writeOperand(spec.IdRef, operands.id_result_type);
    section.writeOperand(spec.IdRef, operands.id_result);
    section.writeOperand(Opcode, opcode);

    const fields = @typeInfo(opcode.Operands()).Struct.fields;
    // First 2 fields are always id_result_type and id_result.
    inline for (fields[2..]) |field| {
        section.writeOperand(field.type, @field(operands, field.name));
    }
}

pub fn writeWord(section: *Section, word: Word) void {
    section.words.appendAssumeCapacity(word);
}

pub fn writeWords(section: *Section, words: []const Word) void {
    section.words.appendSliceAssumeCapacity(words);
}

pub fn writeDoubleWord(section: *Section, dword: DoubleWord) void {
    section.writeWords(&[_]Word{
        @truncate(dword),
        @truncate(dword >> @bitSizeOf(Word)),
    });
}

fn writeOperands(section: *Section, comptime Operands: type, operands: Operands) void {
    const fields = switch (@typeInfo(Operands)) {
        .Struct => |info| info.fields,
        .Void => return,
        else => unreachable,
    };

    inline for (fields) |field| {
        section.writeOperand(field.type, @field(operands, field.name));
    }
}

pub fn writeOperand(section: *Section, comptime Operand: type, operand: Operand) void {
    switch (Operand) {
        spec.IdResult => section.writeWord(operand.id),
        spec.LiteralInteger => section.writeWord(operand),
        spec.LiteralString => section.writeString(operand),
        spec.LiteralContextDependentNumber => section.writeContextDependentNumber(operand),
        spec.LiteralExtInstInteger => section.writeWord(operand.inst),
        // TODO: Where this type is used (OpSpecConstantOp) is currently not correct in the spec json,
        // so it most likely needs to be altered into something that can actually describe the entire
        // instruction in which it is used.
        spec.LiteralSpecConstantOpInteger => section.writeWord(@intFromEnum(operand.opcode)),
        spec.PairLiteralIntegerIdRef => section.writeWords(&.{ operand.value, operand.label.id }),
        spec.PairIdRefLiteralInteger => section.writeWords(&.{ operand.target.id, operand.member }),
        spec.PairIdRefIdRef => section.writeWords(&.{ operand[0].id, operand[1].id }),
        else => switch (@typeInfo(Operand)) {
            .Enum => section.writeWord(@intFromEnum(operand)),
            .Optional => |info| if (operand) |child| {
                section.writeOperand(info.child, child);
            },
            .Pointer => |info| {
                std.debug.assert(info.size == .Slice); // Should be no other pointer types in the spec.
                for (operand) |item| {
                    section.writeOperand(info.child, item);
                }
            },
            .Struct => |info| {
                if (info.layout == .Packed) {
                    section.writeWord(@bitCast(operand));
                } else {
                    section.writeExtendedMask(Operand, operand);
                }
            },
            .Union => section.writeExtendedUnion(Operand, operand),
            else => unreachable,
        },
    }
}

fn writeString(section: *Section, str: []const u8) void {
    // TODO: Not actually sure whether this is correct for big-endian.
    // See https://www.khronos.org/registry/spir-v/specs/unified1/SPIRV.html#Literal
    const zero_terminated_len = str.len + 1;
    var i: usize = 0;
    while (i < zero_terminated_len) : (i += @sizeOf(Word)) {
        var word: Word = 0;

        var j: usize = 0;
        while (j < @sizeOf(Word) and i + j < str.len) : (j += 1) {
            word |= @as(Word, str[i + j]) << @as(Log2Word, @intCast(j * @bitSizeOf(u8)));
        }

        section.words.appendAssumeCapacity(word);
    }
}

fn writeContextDependentNumber(section: *Section, operand: spec.LiteralContextDependentNumber) void {
    switch (operand) {
        .int32 => |int| section.writeWord(@bitCast(int)),
        .uint32 => |int| section.writeWord(@bitCast(int)),
        .int64 => |int| section.writeDoubleWord(@bitCast(int)),
        .uint64 => |int| section.writeDoubleWord(@bitCast(int)),
        .float32 => |float| section.writeWord(@bitCast(float)),
        .float64 => |float| section.writeDoubleWord(@bitCast(float)),
    }
}

fn writeExtendedMask(section: *Section, comptime Operand: type, operand: Operand) void {
    var mask: Word = 0;
    inline for (@typeInfo(Operand).Struct.fields, 0..) |field, bit| {
        switch (@typeInfo(field.type)) {
            .Optional => if (@field(operand, field.name) != null) {
                mask |= 1 << @intCast(bit);
            },
            .Bool => if (@field(operand, field.name)) {
                mask |= 1 << @intCast(bit);
            },
            else => unreachable,
        }
    }

    section.writeWord(mask);

    inline for (@typeInfo(Operand).Struct.fields) |field| {
        switch (@typeInfo(field.type)) {
            .Optional => |info| if (@field(operand, field.name)) |child| {
                section.writeOperands(info.child, child);
            },
            .Bool => {},
            else => unreachable,
        }
    }
}

fn writeExtendedUnion(section: *Section, comptime Operand: type, operand: Operand) void {
    const tag = std.meta.activeTag(operand);
    section.writeWord(@intFromEnum(tag));

    inline for (@typeInfo(Operand).Union.fields) |field| {
        if (@field(Operand, field.name) == tag) {
            section.writeOperands(field.type, @field(operand, field.name));
            return;
        }
    }
    unreachable;
}

fn instructionSize(comptime opcode: spec.Opcode, operands: opcode.Operands()) usize {
    return 1 + operandsSize(opcode.Operands(), operands);
}

fn operandsSize(comptime Operands: type, operands: Operands) usize {
    const fields = switch (@typeInfo(Operands)) {
        .Struct => |info| info.fields,
        .Void => return 0,
        else => unreachable,
    };

    var total: usize = 0;
    inline for (fields) |field| {
        total += operandSize(field.type, @field(operands, field.name));
    }

    return total;
}

fn operandSize(comptime Operand: type, operand: Operand) usize {
    return switch (Operand) {
        spec.IdResult,
        spec.LiteralInteger,
        spec.LiteralExtInstInteger,
        => 1,
        // Add one for zero-terminator
        spec.LiteralString => std.math.divCeil(usize, operand.len + 1, @sizeOf(Word)) catch unreachable,
        spec.LiteralContextDependentNumber => switch (operand) {
            .int32, .uint32, .float32 => @as(usize, 1),
            .int64, .uint64, .float64 => @as(usize, 2),
        },
        // TODO: Where this type is used (OpSpecConstantOp) is currently not correct in the spec
        //       json, so it most likely needs to be altered into something that can actually
        //       describe the entire instruction in which it is used.
        spec.LiteralSpecConstantOpInteger => 1,
        spec.PairLiteralIntegerIdRef,
        spec.PairIdRefLiteralInteger,
        spec.PairIdRefIdRef,
        => 2,
        else => switch (@typeInfo(Operand)) {
            .Enum => 1,
            .Optional => |info| if (operand) |child| operandSize(info.child, child) else 0,
            .Pointer => |info| blk: {
                std.debug.assert(info.size == .Slice); // Should be no other pointer types in the spec.
                var total: usize = 0;
                for (operand) |item| {
                    total += operandSize(info.child, item);
                }
                break :blk total;
            },
            .Struct => |info| if (info.layout == .Packed) 1 else extendedMaskSize(Operand, operand),
            .Union => extendedUnionSize(Operand, operand),
            else => unreachable,
        },
    };
}

fn extendedMaskSize(comptime Operand: type, operand: Operand) usize {
    var total: usize = 0;
    var any_set = false;
    inline for (@typeInfo(Operand).Struct.fields) |field| {
        switch (@typeInfo(field.type)) {
            .Optional => |info| if (@field(operand, field.name)) |child| {
                total += operandsSize(info.child, child);
                any_set = true;
            },
            .Bool => if (@field(operand, field.name)) {
                any_set = true;
            },
            else => unreachable,
        }
    }
    if (!any_set) {
        return 0;
    }
    return total + 1; // Add one for the mask itself.
}

fn extendedUnionSize(comptime Operand: type, operand: Operand) usize {
    const tag = std.meta.activeTag(operand);
    inline for (@typeInfo(Operand).Union.fields) |field| {
        if (@field(Operand, field.name) == tag) {
            // Add one for the tag itself.
            return 1 + operandsSize(field.type, @field(operand, field.name));
        }
    }
    unreachable;
}
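// Editor's note: a minimal usage sketch, not part of the original file. It
// assumes `spec.Opcode` has an `OpNop` member whose `Operands()` type is void
// (zero operands), which holds for the generated SPIR-V spec.
test "emit a zero-operand instruction" {
    var section = Section{ .allocator = std.testing.allocator };
    defer section.deinit();
    try section.emit(.OpNop, {});
    // A single word: word count (1) in the high 16 bits, opcode in the low 16 bits.
    try std.testing.expectEqual(@as(usize, 1), section.toWords().len);
}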
0
repos/mach-sysgpu/src/shader
repos/mach-sysgpu/src/shader/test/pixel-post-process-pixel-frag.wgsl
@group(0) @binding(0) var draw_texture: texture_2d<f32>;
@group(0) @binding(1) var draw_texture_sampler: sampler;
@group(0) @binding(2) var depth_texture: texture_depth_2d;
@group(0) @binding(3) var depth_texture_sampler: sampler;
@group(0) @binding(4) var normal_texture: texture_2d<f32>;
@group(0) @binding(5) var normal_texture_sampler: sampler;

struct View {
    @location(0) width: u32,
    @location(1) height: u32,
    @location(2) pixel_size: u32,
}
@group(0) @binding(6) var<uniform> view: View;

fn sample_depth(uv: vec2<f32>, x: f32, y: f32) -> f32 {
    return textureSample(
        depth_texture,
        depth_texture_sampler,
        uv + vec2<f32>(x * f32(view.pixel_size) / f32(view.width), y * f32(view.pixel_size) / f32(view.height))
    );
}

fn sample_normal(uv: vec2<f32>, x: f32, y: f32) -> vec3<f32> {
    return textureSample(
        normal_texture,
        normal_texture_sampler,
        uv + vec2<f32>(x * f32(view.pixel_size) / f32(view.width), y * f32(view.pixel_size) / f32(view.height))
    ).xyz;
}

fn normal_indicator(uv: vec2<f32>, x: f32, y: f32) -> f32 {
    // TODO - integer promotion to float argument
    var depth_diff = sample_depth(uv, 0.0, 0.0) - sample_depth(uv, x, y);
    var dx = sample_normal(uv, 0.0, 0.0);
    var dy = sample_normal(uv, x, y);
    if (depth_diff > 0) {
        // only sample normals from closest pixel
        return 0;
    }
    return distance(dx, dy);
}

@fragment fn main(
    // TODO - vertex/fragment linkage
    @location(0) uv: vec2<f32>,
    @builtin(position) position: vec4<f32>
) -> @location(0) vec4<f32> {
    // TODO - integer promotion to float argument
    var depth = sample_depth(uv, 0.0, 0.0);
    var depth_diff: f32 = 0;
    depth_diff += abs(depth - sample_depth(uv, -1.0, 0.0));
    depth_diff += abs(depth - sample_depth(uv, 1.0, 0.0));
    depth_diff += abs(depth - sample_depth(uv, 0.0, -1.0));
    depth_diff += abs(depth - sample_depth(uv, 0.0, 1.0));

    var normal_diff: f32 = 0;
    normal_diff += normal_indicator(uv, -1.0, 0.0);
    normal_diff += normal_indicator(uv, 1.0, 0.0);
    normal_diff += normal_indicator(uv, 0.0, -1.0);
    normal_diff += normal_indicator(uv, 0.0, 1.0);

    var color = textureSample(draw_texture, draw_texture_sampler, uv);
    if (depth_diff > 0.007) { // magic number from testing
        return color * 0.7;
    }
    // add instead of multiply so really dark pixels get brighter
    return color + (vec4<f32>(1) * step(0.1, normal_diff) * 0.7);
}
0
repos/mach-sysgpu/src/shader
repos/mach-sysgpu/src/shader/test/pbr-basic.wgsl
@group(0) @binding(0) var<uniform> ubo : UBO;
@group(0) @binding(1) var<uniform> uboParams : UBOShared;
@group(0) @binding(2) var<uniform> material : MaterialParams;
@group(0) @binding(3) var<uniform> object : ObjectParams;

struct VertexOut {
    @builtin(position) position_clip : vec4<f32>,
    @location(0) fragPosition : vec3<f32>,
    @location(1) fragNormal : vec3<f32>,
}

struct MaterialParams {
    roughness : f32,
    metallic : f32,
    r : f32,
    g : f32,
    b : f32
}

struct UBOShared {
    lights : array<vec4<f32>, 4>,
}

struct UBO {
    projection : mat4x4<f32>,
    model : mat4x4<f32>,
    view : mat4x4<f32>,
    camPos : vec3<f32>,
}

struct ObjectParams {
    position : vec3<f32>
}

@vertex fn vertex_main(
    @location(0) position : vec3<f32>,
    @location(1) normal : vec3<f32>
) -> VertexOut {
    var output : VertexOut;
    var locPos = vec4<f32>(ubo.model * vec4<f32>(position, 1.0));
    output.fragPosition = locPos.xyz + object.position;
    output.fragNormal = mat3x3<f32>(ubo.model[0].xyz, ubo.model[1].xyz, ubo.model[2].xyz) * normal;
    output.position_clip = ubo.projection * ubo.view * vec4<f32>(output.fragPosition, 1.0);
    return output;
}

const PI : f32 = 3.14159265359;

fn material_color() -> vec3<f32> {
    return vec3<f32>(material.r, material.g, material.b);
}

// Normal Distribution function --------------------------------------
fn D_GGX(dotNH : f32, roughness : f32) -> f32 {
    var alpha : f32 = roughness * roughness;
    var alpha2 : f32 = alpha * alpha;
    var denom : f32 = dotNH * dotNH * (alpha2 - 1.0) + 1.0;
    return alpha2 / (PI * denom * denom);
}

// Geometric Shadowing function --------------------------------------
fn G_SchlicksmithGGX(dotNL : f32, dotNV : f32, roughness : f32) -> f32 {
    var r : f32 = roughness + 1.0;
    var k : f32 = (r * r) / 8.0;
    var GL : f32 = dotNL / (dotNL * (1.0 - k) + k);
    var GV : f32 = dotNV / (dotNV * (1.0 - k) + k);
    return GL * GV;
}

// Fresnel function ----------------------------------------------------
fn F_Schlick(cosTheta : f32, metallic : f32) -> vec3<f32> {
    var F0 : vec3<f32> = mix(vec3<f32>(0.04), material_color(), metallic);
    var F : vec3<f32> = F0 + (1.0 - F0) * pow(1.0 - cosTheta, 5.0);
    return F;
}

// Specular BRDF composition --------------------------------------------
fn BRDF(L : vec3<f32>, V : vec3<f32>, N : vec3<f32>, metallic : f32, roughness : f32) -> vec3<f32> {
    var H : vec3<f32> = normalize(V + L);
    var dotNV : f32 = clamp(dot(N, V), 0.0, 1.0);
    var dotNL : f32 = clamp(dot(N, L), 0.0, 1.0);
    var dotLH : f32 = clamp(dot(L, H), 0.0, 1.0);
    var dotNH : f32 = clamp(dot(N, H), 0.0, 1.0);

    var lightColor = vec3<f32>(1.0);
    var color = vec3<f32>(0.0);

    if (dotNL > 0.0) {
        var rroughness : f32 = max(0.05, roughness);
        // D = Normal distribution (Distribution of the microfacets)
        var D : f32 = D_GGX(dotNH, roughness);
        // G = Geometric shadowing term (Microfacets shadowing)
        var G : f32 = G_SchlicksmithGGX(dotNL, dotNV, roughness);
        // F = Fresnel factor (Reflectance depending on angle of incidence)
        var F : vec3<f32> = F_Schlick(dotNV, metallic);

        var spec : vec3<f32> = (D * F * G) / (4.0 * dotNL * dotNV);
        color += spec * dotNL * lightColor;
    }

    return color;
}

// TODO - global variable declaration order
@fragment fn frag_main(
    @location(0) position : vec3<f32>,
    @location(1) normal: vec3<f32>
) -> @location(0) vec4<f32> {
    var N : vec3<f32> = normalize(normal);
    var V : vec3<f32> = normalize(ubo.camPos - position);

    var Lo = vec3<f32>(0.0);
    // Specular contribution
    for (var i: i32 = 0; i < 4; i++) {
        var L : vec3<f32> = normalize(uboParams.lights[i].xyz - position);
        Lo += BRDF(L, V, N, material.metallic, material.roughness);
    }

    // Combine with ambient
    var color : vec3<f32> = material_color() * 0.02;
    color += Lo;
    // Gamma correct
    color = pow(color, vec3<f32>(0.4545));

    return vec4<f32>(color, 1.0);
}
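// Editor's note (illustrative only, not part of the original test shader): at
// normal incidence cosTheta == 1.0, so pow(1.0 - cosTheta, 5.0) == 0.0 and
// F_Schlick returns F0 exactly, i.e. vec3(0.04) for a dielectric
// (metallic == 0.0) or material_color() for a pure metal (metallic == 1.0).
// At grazing angles (cosTheta -> 0.0), F approaches vec3(1.0).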
0
repos/mach-sysgpu/src/shader
repos/mach-sysgpu/src/shader/test/sprite2d.wgsl
struct Uniforms {
    modelViewProjectionMatrix : mat4x4<f32>,
};

@binding(0) @group(0) var<uniform> uniforms : Uniforms;

struct VertexOutput {
    @builtin(position) Position : vec4<f32>,
    @location(0) fragUV : vec2<f32>,
    @location(1) spriteIndex : f32,
};

struct Sprite {
    pos: vec2<f32>,
    size: vec2<f32>,
    world_pos: vec2<f32>,
    sheet_size: vec2<f32>,
};

@binding(3) @group(0) var<storage, read> sprites: array<Sprite>;

@vertex fn vertex_main(
    @builtin(vertex_index) VertexIndex : u32
) -> VertexOutput {
    var sprite = sprites[VertexIndex / 6];

    // Calculate the vertex position
    var positions = array<vec2<f32>, 6>(
        vec2<f32>(0.0, 0.0), // bottom-left
        vec2<f32>(0.0, 1.0), // top-left
        vec2<f32>(1.0, 0.0), // bottom-right
        vec2<f32>(1.0, 0.0), // bottom-right
        vec2<f32>(0.0, 1.0), // top-left
        vec2<f32>(1.0, 1.0), // top-right
    );
    var pos = positions[VertexIndex % 6];
    pos.x *= sprite.size.x;
    pos.y *= sprite.size.y;
    pos.x += sprite.world_pos.x;
    pos.y += sprite.world_pos.y;

    // Calculate the UV coordinate
    var uvs = array<vec2<f32>, 6>(
        vec2<f32>(0.0, 1.0), // bottom-left
        vec2<f32>(0.0, 0.0), // top-left
        vec2<f32>(1.0, 1.0), // bottom-right
        vec2<f32>(1.0, 1.0), // bottom-right
        vec2<f32>(0.0, 0.0), // top-left
        vec2<f32>(1.0, 0.0), // top-right
    );
    var uv = uvs[VertexIndex % 6];
    uv.x *= sprite.size.x / sprite.sheet_size.x;
    uv.y *= sprite.size.y / sprite.sheet_size.y;
    uv.x += sprite.pos.x / sprite.sheet_size.x;
    uv.y += sprite.pos.y / sprite.sheet_size.y;

    var output : VertexOutput;
    output.Position = vec4<f32>(pos.x, 0.0, pos.y, 1.0) * uniforms.modelViewProjectionMatrix;
    output.fragUV = uv;
    output.spriteIndex = f32(VertexIndex / 6);
    return output;
}

@group(0) @binding(1) var spriteSampler: sampler;
@group(0) @binding(2) var spriteTexture: texture_2d<f32>;

@fragment fn frag_main(
    @location(0) fragUV: vec2<f32>,
    @location(1) spriteIndex: f32
) -> @location(0) vec4<f32> {
    var color = textureSample(spriteTexture, spriteSampler, fragUV);
    if (spriteIndex == 0.0) {
        if (color[3] > 0.0) {
            color[0] = 0.3;
            color[1] = 0.2;
            color[2] = 0.5;
            color[3] = 1.0;
        }
    }
    return color;
}
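// Editor's note (illustrative only, not part of the original test shader):
// each sprite expands to 6 vertices, so for VertexIndex == 8 the integer
// division 8 / 6 == 1 selects sprites[1] and 8 % 6 == 2 selects the
// bottom-right corner of the two-triangle quad.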
0
repos/mach-sysgpu/src/shader
repos/mach-sysgpu/src/shader/test/pixel-post-process-pixel-vert.wgsl
struct VertexOut {
    @builtin(position) position_clip: vec4<f32>,
    @location(0) uv: vec2<f32>
}

@vertex fn main(
    @location(0) position: vec3<f32>,
    @location(1) uv: vec2<f32>
) -> VertexOut {
    var output : VertexOut;
    output.position_clip = vec4<f32>(position.xy, 0.0, 1.0);
    output.uv = uv;
    return output;
}
0
repos/mach-sysgpu/src/shader
repos/mach-sysgpu/src/shader/test/pixel-post-process-normal-frag.wgsl
@fragment fn main(
    @location(0) normal: vec3<f32>,
    @location(1) uv: vec2<f32>,
) -> @location(0) vec4<f32> {
    return vec4<f32>(normal / 2 + 0.5, 1.0);
}
0
repos/mach-sysgpu/src/shader
repos/mach-sysgpu/src/shader/test/procedural-primitives.wgsl
struct Uniforms {
    mvp_matrix : mat4x4<f32>,
};

@binding(0) @group(0) var<uniform> ubo : Uniforms;

struct VertexOutput {
    @builtin(position) position: vec4<f32>,
    @location(0) normal: vec3<f32>,
};

@vertex fn vertex_main(
    // TODO - struct input
    @location(0) position: vec3<f32>,
    @location(1) normal: vec3<f32>,
) -> VertexOutput {
    var out: VertexOutput;
    out.position = vec4<f32>(position, 1.0) * ubo.mvp_matrix;
    out.normal = normal;
    return out;
}

struct FragmentOutput {
    @location(0) pixel_color: vec4<f32>
};

@fragment fn frag_main(in: VertexOutput) -> FragmentOutput {
    var out : FragmentOutput;
    out.pixel_color = vec4<f32>((in.normal + 1) / 2, 1.0);
    return out;
}
0
repos/mach-sysgpu/src/shader
repos/mach-sysgpu/src/shader/test/instanced-cube.wgsl
@binding(0) @group(0) var<uniform> ubos : array<mat4x4<f32>, 16>;

struct VertexOutput {
    @builtin(position) position_clip : vec4<f32>,
    @location(0) fragUV : vec2<f32>,
    @location(1) fragPosition: vec4<f32>,
};

@vertex fn vertex_main(
    @builtin(instance_index) instanceIdx : u32,
    @location(0) position : vec4<f32>,
    @location(1) uv : vec2<f32>
) -> VertexOutput {
    var output : VertexOutput;
    output.position_clip = ubos[instanceIdx] * position;
    output.fragUV = uv;
    output.fragPosition = 0.5 * (position + vec4<f32>(1.0, 1.0, 1.0, 1.0));
    return output;
}

@fragment fn frag_main(
    @location(0) fragUV: vec2<f32>,
    @location(1) fragPosition: vec4<f32>
) -> @location(0) vec4<f32> {
    return fragPosition;
}
0
repos/mach-sysgpu/src/shader
repos/mach-sysgpu/src/shader/test/builtins.wgsl
@group(0) @binding(0) var<storage> _runtime_array : array<u32>;

@fragment fn main() {
    // TODO: Add all builtins
    let _array_length = arrayLength(&_runtime_array);
    let _sin = sin(1.0);
    let _cos = cos(1.0);
    let _normalize = normalize(vec3(1.0));
    let _length = length(1.0);
    let _floor = floor(1.0);
    let _abs = abs(1.0);
    let _all = all(vec3(true));
    let _dpdx = dpdx(1.0);
    let _dpdy = dpdy(1.0);
    let _fwidth = fwidth(1.0);
    let _min = min(1.0, 1.0);
    let _max = max(1.0, 1.0);
    let _atan2 = atan2(1.0, 1.0);
    let _distance = distance(1.0, 1.0);
    let _dot = dot(vec3(1.0), vec3(1.0));
    let _pow = pow(1.0, 1.0);
    let _step = step(1.0, 1.0);
    let _mix = mix(1.0, 1.0, 1.0);
    let _clamp = clamp(1.0, 1.0, 1.0);
    let _smoothstep = smoothstep(1.0, 1.0, 1.0);
}
0
repos/mach-sysgpu/src/shader
repos/mach-sysgpu/src/shader/test/lightUpdate.wgsl
struct LightData {
    position : vec4<f32>,
    color : vec3<f32>,
    radius : f32,
}

struct LightsBuffer {
    lights: array<LightData>,
}

@group(0) @binding(0) var<storage, read_write> lightsBuffer: LightsBuffer;

struct Config {
    numLights : u32,
}

@group(0) @binding(1) var<uniform> config: Config;

struct LightExtent {
    min : vec4<f32>,
    max : vec4<f32>,
}

@group(0) @binding(2) var<uniform> lightExtent: LightExtent;

@compute @workgroup_size(64, 1, 1)
fn main(@builtin(global_invocation_id) GlobalInvocationID : vec3<u32>) {
    var index = GlobalInvocationID.x;
    if (index >= config.numLights) {
        return;
    }

    lightsBuffer.lights[index].position.y = lightsBuffer.lights[index].position.y - 0.5 - 0.003 * (f32(index) - 64.0 * floor(f32(index) / 64.0));

    if (lightsBuffer.lights[index].position.y < lightExtent.min.y) {
        lightsBuffer.lights[index].position.y = lightExtent.max.y;
    }
}
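// Editor's note (illustrative only, not part of the original test shader):
// f32(index) - 64.0 * floor(f32(index) / 64.0) computes index mod 64, so e.g.
// index == 130 yields 2.0 and that light falls by 0.5 + 0.003 * 2.0 = 0.506
// per dispatch; per-light fall speeds therefore stay in [0.5, 0.689].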
0
repos/mach-sysgpu/src/shader
repos/mach-sysgpu/src/shader/test/vertexWriteGBuffers.wgsl
struct Uniforms {
    modelMatrix : mat4x4<f32>,
    normalModelMatrix : mat4x4<f32>,
}

struct Camera {
    viewProjectionMatrix : mat4x4<f32>,
    invViewProjectionMatrix : mat4x4<f32>,
}

@group(0) @binding(0) var<uniform> uniforms : Uniforms;
@group(0) @binding(1) var<uniform> camera : Camera;

struct VertexOutput {
    @builtin(position) Position : vec4<f32>,
    @location(0) fragNormal: vec3<f32>, // normal in world space
    @location(1) fragUV: vec2<f32>,
}

@vertex fn main(
    @location(0) position : vec3<f32>,
    @location(1) normal : vec3<f32>,
    @location(2) uv : vec2<f32>
) -> VertexOutput {
    var output : VertexOutput;
    let worldPosition = (uniforms.modelMatrix * vec4(position, 1.0)).xyz;
    output.Position = camera.viewProjectionMatrix * vec4(worldPosition, 1.0);
    output.fragNormal = normalize((uniforms.normalModelMatrix * vec4(normal, 1.0)).xyz);
    output.fragUV = uv;
    return output;
}
0
repos/mach-sysgpu/src/shader
repos/mach-sysgpu/src/shader/test/image-blur.wgsl
struct Params {
    filterDim : i32,
    blockDim : u32,
}

@group(0) @binding(0) var samp : sampler;
@group(0) @binding(1) var<uniform> params : Params;
@group(1) @binding(1) var inputTex : texture_2d<f32>;
@group(1) @binding(2) var outputTex : texture_storage_2d<rgba8unorm, write>;

struct Flip {
    value : u32,
}
@group(1) @binding(3) var<uniform> flip : Flip;

// This shader blurs the input texture in one direction, depending on whether
// |flip.value| is 0 or 1.
// It does so by running (128 / 4) threads per workgroup to load 128
// texels into 4 rows of shared memory. Each thread loads a
// 4 x 4 block of texels to take advantage of the texture sampling
// hardware.
// Then, each thread computes the blur result by averaging the adjacent texel values
// in shared memory.
// Because we're operating on a subset of the texture, we cannot compute all of the
// results since not all of the neighbors are available in shared memory.
// Specifically, with 128 x 128 tiles, we can only compute and write out
// square blocks of size 128 - (filterSize - 1). We compute the number of blocks
// needed in JavaScript and dispatch that amount.

var<workgroup> tile : array<array<vec3<f32>, 128>, 4>;

@compute @workgroup_size(32, 1, 1)
fn main(
    @builtin(workgroup_id) WorkGroupID : vec3<u32>,
    @builtin(local_invocation_id) LocalInvocationID : vec3<u32>,
    @builtin(local_invocation_index) LocalInvocationIndex : u32
) {
    for (var idx = LocalInvocationIndex; idx < 512u; idx += 32) {
        tile[idx / 128u][idx % 128u] = vec3(0.0);
    }

    workgroupBarrier();

    // TODO - mixed vector arithmetic (vec2<u32> and vec2<i32>)
    let filterOffset = (params.filterDim - 1) / 2;
    let dims = textureDimensions(inputTex, 0);
    let baseIndex = vec2<u32>(WorkGroupID.xy * vec2(params.blockDim, 4) + LocalInvocationID.xy * vec2<u32>(4, 1)) - vec2<u32>(u32(filterOffset), 0);

    for (var r: u32 = 0; r < 4; r++) {
        for (var c: u32 = 0; c < 4; c++) {
            var loadIndex = baseIndex + vec2<u32>(c, r);
            if (flip.value != 0u) {
                loadIndex = loadIndex.yx;
            }

            tile[r][4 * LocalInvocationID.x + c] = textureSampleLevel(
                inputTex,
                samp,
                (vec2<f32>(loadIndex) + vec2<f32>(0.25, 0.25)) / vec2<f32>(dims),
                0.0
            ).rgb;
        }
    }

    workgroupBarrier();

    for (var r: u32 = 0; r < 4; r++) {
        for (var c: u32 = 0; c < 4; c++) {
            var writeIndex = baseIndex + vec2<u32>(c, r);
            if (flip.value != 0) {
                writeIndex = writeIndex.yx;
            }

            let center = u32(4 * LocalInvocationID.x) + c;
            if (center >= u32(filterOffset) && center < 128 - u32(filterOffset) && all(writeIndex < dims)) {
                var acc = vec3(0.0, 0.0, 0.0);
                for (var f = 0; f < params.filterDim; f++) {
                    var i = i32(center) + f - filterOffset;
                    acc = acc + (1.0 / f32(params.filterDim)) * tile[r][i];
                }
                textureStore(outputTex, writeIndex, vec4(acc, 1.0));
            }
        }
    }
}
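// Editor's note (illustrative only, not part of the original test shader):
// with filterDim == 15, filterOffset == (15 - 1) / 2 == 7, so each 128-texel
// row yields 128 - (15 - 1) == 114 valid results; the host side is expected
// to set params.blockDim to that 114 and dispatch enough workgroups to tile
// the full texture, ceil(width / 114) across and ceil(height / 4) down.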
0
repos/mach-sysgpu/src/shader
repos/mach-sysgpu/src/shader/test/two-cubes.wgsl
@group(0) @binding(0) var<uniform> ubo : mat4x4<f32>;

struct VertexOut {
    @builtin(position) position_clip : vec4<f32>,
    @location(0) fragUV : vec2<f32>,
    @location(1) fragPosition: vec4<f32>,
}

@vertex fn vertex_main(
    @location(0) position : vec4<f32>,
    @location(1) uv: vec2<f32>
) -> VertexOut {
    var output : VertexOut;
    output.position_clip = position * ubo;
    output.fragUV = uv;
    output.fragPosition = 0.5 * (position + vec4<f32>(1.0, 1.0, 1.0, 1.0));
    return output;
}

@fragment fn frag_main(
    @location(0) fragUV: vec2<f32>,
    @location(1) fragPosition: vec4<f32>
) -> @location(0) vec4<f32> {
    return fragPosition;
}
0
repos/mach-sysgpu/src/shader
repos/mach-sysgpu/src/shader/test/textured-cube.wgsl
struct Uniforms {
    modelViewProjectionMatrix : mat4x4<f32>,
};

@binding(0) @group(0) var<uniform> uniforms : Uniforms;

struct VertexOutput {
    @builtin(position) Position : vec4<f32>,
    @location(0) fragUV : vec2<f32>,
    @location(1) fragPosition: vec4<f32>,
};

@vertex fn vertex_main(
    @location(0) position : vec4<f32>,
    @location(1) uv : vec2<f32>
) -> VertexOutput {
    var output : VertexOutput;
    output.Position = position * uniforms.modelViewProjectionMatrix;
    output.fragUV = uv;
    output.fragPosition = 0.5 * (position + vec4<f32>(1.0, 1.0, 1.0, 1.0));
    return output;
}

@group(0) @binding(1) var mySampler: sampler;
@group(0) @binding(2) var myTexture: texture_2d<f32>;

@fragment fn frag_main(
    @location(0) fragUV: vec2<f32>,
    @location(1) fragPosition: vec4<f32>
) -> @location(0) vec4<f32> {
    return textureSample(myTexture, mySampler, fragUV);
}
0
repos/mach-sysgpu/src/shader
repos/mach-sysgpu/src/shader/test/fragmentDeferredRendering.wgsl
@group(0) @binding(0) var gBufferNormal: texture_2d<f32>;
@group(0) @binding(1) var gBufferAlbedo: texture_2d<f32>;
@group(0) @binding(2) var gBufferDepth: texture_depth_2d;

struct LightData {
    position : vec4<f32>,
    color : vec3<f32>,
    radius : f32,
}

struct LightsBuffer {
    lights: array<LightData>,
}

@group(1) @binding(0) var<storage, read> lightsBuffer: LightsBuffer;

struct Config {
    numLights : u32,
}

struct Camera {
    viewProjectionMatrix : mat4x4<f32>,
    invViewProjectionMatrix : mat4x4<f32>,
}

@group(1) @binding(1) var<uniform> config: Config;
@group(1) @binding(2) var<uniform> camera: Camera;

fn world_from_screen_coord(coord : vec2<f32>, depth_sample: f32) -> vec3<f32> {
    // reconstruct world-space position from the screen coordinate.
    let posClip = vec4(coord.x * 2.0 - 1.0, (1.0 - coord.y) * 2.0 - 1.0, depth_sample, 1.0);
    let posWorldW = camera.invViewProjectionMatrix * posClip;
    let posWorld = posWorldW.xyz / posWorldW.www;
    return posWorld;
}

@fragment fn main(
    @builtin(position) coord : vec4<f32>
) -> @location(0) vec4<f32> {
    var result : vec3<f32>;

    let depth = textureLoad(
        gBufferDepth,
        vec2<i32>(floor(coord.xy)),
        0
    );

    // Don't light the sky.
    if (depth >= 1.0) {
        discard;
    }

    let bufferSize = textureDimensions(gBufferDepth);
    let coordUV = coord.xy / vec2<f32>(bufferSize);
    let position = world_from_screen_coord(coordUV, depth);

    let normal = textureLoad(
        gBufferNormal,
        vec2<i32>(floor(coord.xy)),
        0
    ).xyz;

    let albedo = textureLoad(
        gBufferAlbedo,
        vec2<i32>(floor(coord.xy)),
        0
    ).rgb;

    for (var i = 0u; i < config.numLights; i++) {
        let L = lightsBuffer.lights[i].position.xyz - position;
        let distance = length(L);
        if (distance > lightsBuffer.lights[i].radius) {
            continue;
        }
        let lambert = max(dot(normal, normalize(L)), 0.0);
        result += vec3<f32>(
            lambert * pow(1.0 - distance / lightsBuffer.lights[i].radius, 2.0) * lightsBuffer.lights[i].color * albedo
        );
    }

    // some manual ambient
    result += vec3(0.2);

    return vec4(result, 1.0);
}
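// Editor's note (illustrative only, not part of the original test shader):
// world_from_screen_coord maps UV (0.0, 0.0) at the top-left to clip coords
// (-1.0, 1.0); e.g. coord (0.5, 0.5) with depth 0.5 becomes posClip
// (0.0, 0.0, 0.5, 1.0) before the inverse view-projection transform and the
// perspective divide by posWorldW.w.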
0
repos/mach-sysgpu/src/shader
repos/mach-sysgpu/src/shader/test/cube-map.wgsl
struct Uniforms {
    modelViewProjectionMatrix : mat4x4<f32>,
}

@binding(0) @group(0) var<uniform> uniforms : Uniforms;

struct VertexOutput {
    @builtin(position) Position : vec4<f32>,
    @location(0) fragUV : vec2<f32>,
    @location(1) fragPosition: vec4<f32>,
}

@vertex fn vertex_main(
    @location(0) position : vec4<f32>,
    @location(1) uv : vec2<f32>
) -> VertexOutput {
    var output : VertexOutput;
    output.Position = uniforms.modelViewProjectionMatrix * position;
    output.fragUV = uv;
    output.fragPosition = 0.5 * (position + vec4<f32>(1.0, 1.0, 1.0, 1.0));
    return output;
}

@group(0) @binding(1) var mySampler: sampler;
@group(0) @binding(2) var myTexture: texture_cube<f32>;

@fragment fn frag_main(
    @location(0) fragUV: vec2<f32>,
    @location(1) fragPosition: vec4<f32>
) -> @location(0) vec4<f32> {
    var cubemapVec = fragPosition.xyz - vec3<f32>(0.5, 0.5, 0.5);
    return textureSample(myTexture, mySampler, cubemapVec);
}
0
repos/mach-sysgpu/src/shader
repos/mach-sysgpu/src/shader/test/gen-texture-light.wgsl
struct CameraUniform {
    view_pos: vec4<f32>,
    view_proj: mat4x4<f32>,
};

struct VertexInput {
    @location(0) position: vec3<f32>,
    @location(1) normal: vec3<f32>,
    @location(2) tex_coords: vec2<f32>,
};

struct VertexOutput {
    @builtin(position) clip_position: vec4<f32>,
};

struct Light {
    position: vec4<f32>,
    color: vec4<f32>,
};

@group(0) @binding(0) var<uniform> camera: CameraUniform;
@group(1) @binding(0) var<uniform> light: Light;

@vertex fn vs_main(model: VertexInput) -> VertexOutput {
    var out: VertexOutput;
    let world_pos = vec4<f32>(model.position + light.position.xyz, 1.0);
    out.clip_position = camera.view_proj * world_pos;
    return out;
}

@fragment fn fs_main(in: VertexOutput) -> @location(0) vec4<f32> {
    return vec4<f32>(1.0, 1.0, 1.0, 0.5);
}
0
repos/mach-sysgpu/src/shader
repos/mach-sysgpu/src/shader/test/boids-sprite-update.wgsl
struct Particle {
    pos : vec2<f32>,
    vel : vec2<f32>,
};

struct SimParams {
    deltaT : f32,
    rule1Distance : f32,
    rule2Distance : f32,
    rule3Distance : f32,
    rule1Scale : f32,
    rule2Scale : f32,
    rule3Scale : f32,
};

struct Particles {
    particles : array<Particle>,
};

@binding(0) @group(0) var<uniform> params : SimParams;
@binding(1) @group(0) var<storage, read> particlesA : Particles;
@binding(2) @group(0) var<storage, read_write> particlesB : Particles;

// https://github.com/austinEng/Project6-Vulkan-Flocking/blob/master/data/shaders/computeparticles/particle.comp
@compute @workgroup_size(64)
fn main(@builtin(global_invocation_id) GlobalInvocationID : vec3<u32>) {
    var index : u32 = GlobalInvocationID.x;
    if (index >= arrayLength(&particlesA.particles)) {
        return;
    }

    var vPos = particlesA.particles[index].pos;
    var vVel = particlesA.particles[index].vel;
    var cMass = vec2<f32>(0.0, 0.0);
    var cVel = vec2<f32>(0.0, 0.0);
    var colVel = vec2<f32>(0.0, 0.0);
    var cMassCount : u32 = 0u;
    var cVelCount : u32 = 0u;
    var pos : vec2<f32>;
    var vel : vec2<f32>;

    for (var i : u32 = 0u; i < arrayLength(&particlesA.particles); i = i + 1u) {
        if (i == index) {
            continue;
        }

        pos = particlesA.particles[i].pos.xy;
        vel = particlesA.particles[i].vel.xy;
        if (distance(pos, vPos) < params.rule1Distance) {
            cMass = cMass + pos;
            cMassCount = cMassCount + 1u;
        }
        if (distance(pos, vPos) < params.rule2Distance) {
            colVel = colVel - (pos - vPos);
        }
        if (distance(pos, vPos) < params.rule3Distance) {
            cVel = cVel + vel;
            cVelCount = cVelCount + 1u;
        }
    }

    if (cMassCount > 0u) {
        var temp = f32(cMassCount);
        cMass = (cMass / vec2<f32>(temp, temp)) - vPos;
    }
    if (cVelCount > 0u) {
        var temp = f32(cVelCount);
        cVel = cVel / vec2<f32>(temp, temp);
    }

    vVel = vVel + (cMass * params.rule1Scale) + (colVel * params.rule2Scale) + (cVel * params.rule3Scale);

    // clamp velocity for a more pleasing simulation
    vVel = normalize(vVel) * clamp(length(vVel), 0.0, 0.1);
    // kinematic update
    vPos = vPos + (vVel * params.deltaT);

    // Wrap around boundary
    if (vPos.x < -1.0) {
        vPos.x = 1.0;
    }
    if (vPos.x > 1.0) {
        vPos.x = -1.0;
    }
    if (vPos.y < -1.0) {
        vPos.y = 1.0;
    }
    if (vPos.y > 1.0) {
        vPos.y = -1.0;
    }

    // Write back
    particlesB.particles[index].pos = vPos;
    particlesB.particles[index].vel = vVel;
}
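// Editor's note (illustrative only, not part of the original test shader):
// the cohesion rule steers toward the neighborhood centroid. If two neighbors
// sit at (0.2, 0.0) and (0.4, 0.0) and vPos is (0.0, 0.0), cMass accumulates
// to (0.6, 0.0), is averaged to (0.3, 0.0), and after subtracting vPos the
// (0.3, 0.0) offset is scaled by params.rule1Scale and added to the velocity.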
0
repos/mach-sysgpu/src/shader
repos/mach-sysgpu/src/shader/test/fractal-cube.wgsl
struct Uniforms {
    matrix : mat4x4<f32>,
};

@binding(0) @group(0) var<uniform> ubo : Uniforms;

struct VertexOut {
    @builtin(position) Position : vec4<f32>,
    @location(0) fragUV : vec2<f32>,
    @location(1) fragPosition: vec4<f32>,
}

@vertex fn vertex_main(
    @location(0) position : vec4<f32>,
    @location(1) uv: vec2<f32>
) -> VertexOut {
    var output : VertexOut;
    output.Position = position * ubo.matrix;
    output.fragUV = uv;
    output.fragPosition = 0.5 * (position + vec4<f32>(1.0, 1.0, 1.0, 1.0));
    return output;
}

@binding(1) @group(0) var mySampler: sampler;
@binding(2) @group(0) var myTexture: texture_2d<f32>;

@fragment fn frag_main(
    @location(0) fragUV: vec2<f32>,
    @location(1) fragPosition: vec4<f32>
) -> @location(0) vec4<f32> {
    let texColor = textureSample(myTexture, mySampler, fragUV * 0.8 + vec2<f32>(0.1, 0.1));
    let f = f32(length(texColor.rgb - vec3<f32>(0.5, 0.5, 0.5)) < 0.01);
    return (1.0 - f) * texColor + f * fragPosition;
    // return vec4<f32>(texColor.rgb,1.0);
}
0
repos/mach-sysgpu/src/shader
repos/mach-sysgpu/src/shader/test/gen-texture-light-cube.wgsl
struct CameraUniform {
    pos: vec4<f32>,
    view_proj: mat4x4<f32>,
};

struct InstanceInput {
    @location(3) model_matrix_0: vec4<f32>,
    @location(4) model_matrix_1: vec4<f32>,
    @location(5) model_matrix_2: vec4<f32>,
    @location(6) model_matrix_3: vec4<f32>,
};

struct VertexInput {
    @location(0) position: vec3<f32>,
    @location(1) normal: vec3<f32>,
    @location(2) tex_coords: vec2<f32>,
};

struct VertexOutput {
    @builtin(position) clip_position: vec4<f32>,
    @location(0) tex_coords: vec2<f32>,
    @location(1) normal: vec3<f32>,
    @location(2) position: vec3<f32>,
};

struct Light {
    position: vec4<f32>,
    color: vec4<f32>,
};

@group(0) @binding(0) var<uniform> camera: CameraUniform;
@group(1) @binding(0) var t_diffuse: texture_2d<f32>;
@group(1) @binding(1) var s_diffuse: sampler;
@group(2) @binding(0) var<uniform> light: Light;

@vertex fn vs_main(model: VertexInput, instance: InstanceInput) -> VertexOutput {
    let model_matrix = mat4x4<f32>(
        instance.model_matrix_0,
        instance.model_matrix_1,
        instance.model_matrix_2,
        instance.model_matrix_3,
    );

    var out: VertexOutput;
    let world_pos = model_matrix * vec4<f32>(model.position, 1.0);
    out.position = world_pos.xyz;
    out.normal = (model_matrix * vec4<f32>(model.normal, 0.0)).xyz;
    out.clip_position = camera.view_proj * world_pos;
    out.tex_coords = model.tex_coords;
    return out;
}

@fragment fn fs_main(in: VertexOutput) -> @location(0) vec4<f32> {
    let object_color = textureSample(t_diffuse, s_diffuse, in.tex_coords);

    let ambient = 0.1;
    let ambient_color = light.color.rgb * ambient;

    let light_dir = normalize(light.position.xyz - in.position);
    let diffuse = max(dot(in.normal, light_dir), 0.0);
    let diffuse_color = light.color.rgb * diffuse;

    let view_dir = normalize(camera.pos.xyz - in.position);
    let half_dir = normalize(view_dir + light_dir);
    let specular = pow(max(dot(in.normal, half_dir), 0.0), 32.0);
    let specular_color = light.color.rgb * specular;

    let all = ambient_color + diffuse_color + specular_color;
    let result = all * object_color.rgb;
    return vec4<f32>(result, object_color.a);
}
0
repos/mach-sysgpu/src/shader
repos/mach-sysgpu/src/shader/test/pixel-post-process.wgsl
@group(0) @binding(0) var<uniform> ubo: mat4x4<f32>;

struct VertexOut {
    @builtin(position) position_clip: vec4<f32>,
    @location(0) normal: vec3<f32>,
    @location(1) uv: vec2<f32>,
}

@vertex fn vertex_main(
    @location(0) position: vec3<f32>,
    @location(1) normal: vec3<f32>,
    @location(2) uv: vec2<f32>
) -> VertexOut {
    var output: VertexOut;
    output.position_clip = vec4<f32>(position, 1) * ubo;
    output.normal = (vec4<f32>(normal, 0) * ubo).xyz;
    output.uv = uv;
    return output;
}

@fragment fn frag_main(
    @location(0) normal: vec3<f32>,
    @location(1) uv: vec2<f32>,
) -> @location(0) vec4<f32> {
    var color = floor((uv * 0.5 + 0.25) * 32) / 32;
    return vec4<f32>(color, 1, 1);
}
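// Editor's note (illustrative only, not part of the original test shader):
// frag_main quantizes UVs into 32 bands; e.g. uv == vec2(0.0) maps to
// floor((0.0 * 0.5 + 0.25) * 32.0) / 32.0 == floor(8.0) / 32.0 == 0.25,
// which produces the blocky, pixel-art style gradient.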
0
repos/mach-sysgpu/src/shader
repos/mach-sysgpu/src/shader/test/map-async.wgsl
@group(0) @binding(0) var<storage, read_write> output: array<f32>;

@compute @workgroup_size(64, 1, 1)
fn main(
    @builtin(global_invocation_id) global_id : vec3<u32>,
    @builtin(local_invocation_id) local_id : vec3<u32>,
) {
    if (global_id.x >= arrayLength(&output)) {
        return;
    }
    output[global_id.x] = f32(global_id.x) * 1000. + f32(local_id.x);
}
0
repos/mach-sysgpu/src/shader
repos/mach-sysgpu/src/shader/test/if-else.wgsl
@fragment fn fs_main() -> @location(0) vec4<f32> {
    var dummy = false;
    if dummy {
        let dummy_var_1 = 0.0;
        return vec4<f32>(dummy_var_1, 1, 1, 1);
    } else if !dummy {
        let dummy_var_2 = 0.0;
        return vec4<f32>(dummy_var_2, 1, 1, 1);
    }
    return vec4<f32>(0.0, 1, 1, 1);
}
0
repos/mach-sysgpu/src/shader
repos/mach-sysgpu/src/shader/test/fragmentGBuffersDebugView.wgsl
@group(0) @binding(0) var gBufferNormal: texture_2d<f32>;
@group(0) @binding(1) var gBufferAlbedo: texture_2d<f32>;
@group(0) @binding(2) var gBufferDepth: texture_depth_2d;
@group(1) @binding(0) var<uniform> canvas : CanvasConstants;

struct CanvasConstants {
    size: vec2<f32>,
}

@fragment fn main(
    @builtin(position) coord : vec4<f32>
) -> @location(0) vec4<f32> {
    var result : vec4<f32>;
    let c = coord.xy / vec2<f32>(canvas.size.x, canvas.size.y);

    if (c.x < 0.33333) {
        let rawDepth = textureLoad(
            gBufferDepth,
            vec2<i32>(floor(coord.xy)),
            0
        );
        // remap depth into something a bit more visible
        let depth = (1.0 - rawDepth) * 50.0;
        result = vec4(depth);
    } else if (c.x < 0.66667) {
        result = textureLoad(
            gBufferNormal,
            vec2<i32>(floor(coord.xy)),
            0
        );
        result.x = (result.x + 1.0) * 0.5;
        result.y = (result.y + 1.0) * 0.5;
        result.z = (result.z + 1.0) * 0.5;
    } else {
        result = textureLoad(
            gBufferAlbedo,
            vec2<i32>(floor(coord.xy)),
            0
        );
    }
    return result;
}
0
repos/mach-sysgpu/src/shader
repos/mach-sysgpu/src/shader/test/vertexTextureQuad.wgsl
@vertex fn main(
    @builtin(vertex_index) VertexIndex : u32
) -> @builtin(position) vec4<f32> {
    const pos = array(
        vec2(-1.0, -1.0),
        vec2(1.0, -1.0),
        vec2(-1.0, 1.0),
        vec2(-1.0, 1.0),
        vec2(1.0, -1.0),
        vec2(1.0, 1.0),
    );

    return vec4<f32>(pos[VertexIndex], 0.0, 1.0);
}
0
repos/mach-sysgpu/src/shader
repos/mach-sysgpu/src/shader/test/triangle.wgsl
@vertex fn vertex_main(
    @builtin(vertex_index) VertexIndex : u32
) -> @builtin(position) vec4<f32> {
    var pos = array<vec2<f32>, 3>(
        vec2<f32>( 0.0,  0.5),
        vec2<f32>(-0.5, -0.5),
        vec2<f32>( 0.5, -0.5)
    );
    return vec4<f32>(pos[VertexIndex], 0.0, 1.0);
}

@fragment fn frag_main() -> @location(0) vec4<f32> {
    return vec4<f32>(1.0, 0.0, 0.0, 1.0);
}