repos/c2z/test_cases/include/c018_includes.h
#include <c001_c_structs.h>

vec2_t sub(vec2_t a, vec2_t b);
repos/c2z/test_cases/include/c022_cpp_string.h
#include <string>

int size_of_string();
const char *data(const std::string &input);
const char *cstr(const std::string &input);
int cap(const std::string &input);
std::string get_str();
bool write_numbers(std::string &output, size_t count);
repos/c2z/test_cases/include/c005_inheritance.cpp
#include "c005_inheritance.h" circle_t::circle_t() { this->radius = 0; } circle_t::circle_t(float radius) { this->radius = radius; } circle_t::~circle_t() { } float circle_t::area() const { return 3.14 * radius * radius; } float area(const shape_t &shape) { return shape.area(); }
repos/c2z/test_cases/include/c022_cpp_string.cpp
#include "c022_cpp_string.h" int size_of_string() { return sizeof(std::string); } const char *data(const std::string &input) { return input.data(); } const char *cstr(const std::string &input) { return input.c_str(); } int cap(const std::string &input) { return (int)input.capacity(); } std::string get_str() { return std::string("Hello, World!"); } bool write_numbers(std::string &output, size_t count) { if (output.length() > 0) { output.push_back(','); output.push_back(' '); } for (size_t i = 0; i < count; i++) { output.push_back('0' + i); if (i < (count - 1)) { output.push_back(','); output.push_back(' '); } } return true; }
repos/c2z/test_cases/include/c003_unions.h
union Color32 {
    unsigned int mU32;
    struct {
        unsigned char r;
        unsigned char g;
        unsigned char b;
        unsigned char a;
    };
};

struct ColorClass {
    static const ColorClass sBlack;
    static const ColorClass sWhite;
    union {
        unsigned int mU32;
        struct {
            unsigned char r;
            unsigned char g;
            unsigned char b;
            unsigned char a;
        };
    };
};
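A hand-written Zig equivalent of the first union would look roughly like the sketch below. This is hypothetical: c2z itself generates placeholder names such as `__Struct0` / `__struct_field0` for anonymous records (see visitCXXRecordDecl in src/Transpiler.zig later in this dump), so the `rgba` name here is invented purely for illustration.

pub const Color32 = extern union {
    mU32: c_uint,
    // the anonymous C struct must get some name on the Zig side; `rgba` is made up for this sketch
    rgba: extern struct { r: u8, g: u8, b: u8, a: u8 },
};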
repos/c2z/test_cases/include/c016_for.h
void foo(int);

void for1() {
    for (int i = 0; i < 5; i++) foo(i);
}

void for2() {
    for (int i = 0; i < 5; i++) { foo(i); }
}

void for3() {
    for (int i = 0; i < 5; i++) { foo(i); }
    for (int i = 5; i < 10; i++) { foo(i); }
}

void for4() {
    for (int i = 0, j = 8; i < 5; i++, j--) { foo(i); }
}

void for5() {
    int i = 0, j = 0;
    for (; i < 5; i++) {
        for (; j < 5; j += 2) foo(i * j);
    }
    for (i = 0; i < 5; i++) foo(i);
    i += j += 2;
}
repos/c2z/test_cases/include/c005_inheritance.h
struct vec2_t {
    float x;
    float y;
};

struct shape_t {
public:
    vec2_t aabb;
    virtual float area() const = 0;
};

struct circle_t : public shape_t {
public:
    circle_t();
    circle_t(float radius);
    ~circle_t();
    virtual float area() const;

private:
    float radius;
};

float area(const shape_t &shape);
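For comparison, the Zig layout this header is expected to map to looks roughly like the sketch below. It is illustrative rather than verbatim c2z output, and follows the vtable convention spelled out in c007_multiple_inheritance.h (the next file): a polymorphic class gains a leading vtable pointer, and a derived class reuses the vtable of its polymorphic base instead of adding a second one.

// shape_t is polymorphic, so the binding starts with a vtable pointer
pub const shape_t = extern struct {
    vtable: *const anyopaque,
    aabb: vec2_t,
};

// circle_t inherits the vtable through its base field
pub const circle_t = extern struct {
    base: shape_t,
    radius: f32,
};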
repos/c2z/test_cases/include/c007_multiple_inheritance.h
// pub const shape_t = extern struct {
//     vtable: *const anyopaque,
//     id: c_int,
// };
struct shape_t {
public:
    int id;
};

// pub const renderable_t = extern struct {
//     vtable: *const anyopaque,
//     mat: c_int,
// };
struct renderable_t {
public:
    int mat;
    virtual float render() const = 0;
};

// pub const circle_t = extern struct {
//     base1: renderable_t, // polymorphic classes always start first, despite the declaration order
//     base0: shape_t,
//     radius: f32,
// }
struct circle_t : public shape_t, public renderable_t {
public:
    float radius;
    virtual float render() const;
    virtual float area() const;
};
repos/c2z/test_cases/include/c015_call.h
void* malloc(unsigned long long);
void free(void*);

void run() {
    void* a = malloc(1);
    free(a);
}

class Foo {
    int* ptr = 0;

    bool init(bool val) {
        if (ptr == 0) { ptr = (int*)malloc(sizeof(int)); }
        return !val;
    }

public:
    void inc() {
        init(true);
        *ptr += 1;
    }
};
repos/c2z/test_cases/include/c009_enum_flags.h
// fpng
enum {
    // Enables computing custom Huffman tables for each file, instead of using the custom global tables.
    // Results in roughly 6% smaller files on average, but compression is around 40% slower.
    FPNG_ENCODE_SLOWER = 1,
    // Only use raw Deflate blocks (no compression at all). Intended for testing.
    FPNG_FORCE_UNCOMPRESSED = 2,
};

// raylib
typedef enum {
    FLAG_VSYNC_HINT = 0x00000040,                // Set to try enabling V-Sync on GPU
    FLAG_FULLSCREEN_MODE = 0x00000002,           // Set to run program in fullscreen
    FLAG_WINDOW_RESIZABLE = 0x00000004,          // Set to allow resizable window
    FLAG_WINDOW_UNDECORATED = 0x00000008,        // Set to disable window decoration (frame and buttons)
    FLAG_WINDOW_HIDDEN = 0x00000080,             // Set to hide window
    FLAG_WINDOW_MINIMIZED = 0x00000200,          // Set to minimize window (iconify)
    FLAG_WINDOW_MAXIMIZED = 0x00000400,          // Set to maximize window (expanded to monitor)
    FLAG_WINDOW_UNFOCUSED = 0x00000800,          // Set to window non focused
    FLAG_WINDOW_TOPMOST = 0x00001000,            // Set to window always on top
    FLAG_WINDOW_ALWAYS_RUN = 0x00000100,         // Set to allow windows running while minimized
    FLAG_WINDOW_TRANSPARENT = 0x00000010,        // Set to allow transparent framebuffer
    FLAG_WINDOW_HIGHDPI = 0x00002000,            // Set to support HighDPI
    FLAG_WINDOW_MOUSE_PASSTHROUGH = 0x00004000,  // Set to support mouse passthrough, only supported when FLAG_WINDOW_UNDECORATED
    FLAG_MSAA_4X_HINT = 0x00000020,              // Set to try enabling MSAA 4X
    FLAG_INTERLACED_HINT = 0x00010000            // Set to try enabling interlaced video format (for V3D)
} ConfigFlags;

// ImGui
enum ImGuiWindowFlags_ {
    ImGuiWindowFlags_None = 0,
    ImGuiWindowFlags_NoTitleBar = 1 << 0,             // Disable title-bar
    ImGuiWindowFlags_NoResize = 1 << 1,               // Disable user resizing with the lower-right grip
    ImGuiWindowFlags_NoMove = 1 << 2,                 // Disable user moving the window
    ImGuiWindowFlags_NoScrollbar = 1 << 3,            // Disable scrollbars (window can still scroll with mouse or programmatically)
    ImGuiWindowFlags_NoScrollWithMouse = 1 << 4,      // Disable user vertically scrolling with mouse wheel. On child window, mouse wheel will be forwarded to the parent unless NoScrollbar is also set.
    ImGuiWindowFlags_NoCollapse = 1 << 5,             // Disable user collapsing window by double-clicking on it. Also referred to as Window Menu Button (e.g. within a docking node).
    ImGuiWindowFlags_AlwaysAutoResize = 1 << 6,       // Resize every window to its content every frame
    ImGuiWindowFlags_NoBackground = 1 << 7,           // Disable drawing background color (WindowBg, etc.) and outside border. Similar as using SetNextWindowBgAlpha(0.0f).
    ImGuiWindowFlags_NoSavedSettings = 1 << 8,        // Never load/save settings in .ini file
    ImGuiWindowFlags_NoMouseInputs = 1 << 9,          // Disable catching mouse, hovering test with pass through.
    ImGuiWindowFlags_MenuBar = 1 << 10,               // Has a menu-bar
    ImGuiWindowFlags_HorizontalScrollbar = 1 << 11,   // Allow horizontal scrollbar to appear (off by default). You may use SetNextWindowContentSize(ImVec2(width,0.0f)); prior to calling Begin() to specify width. Read code in imgui_demo in the "Horizontal Scrolling" section.
    ImGuiWindowFlags_NoFocusOnAppearing = 1 << 12,    // Disable taking focus when transitioning from hidden to visible state
    ImGuiWindowFlags_NoBringToFrontOnFocus = 1 << 13, // Disable bringing window to front when taking focus (e.g. clicking on it or programmatically giving it focus)
    ImGuiWindowFlags_AlwaysVerticalScrollbar = 1 << 14,   // Always show vertical scrollbar (even if ContentSize.y < Size.y)
    ImGuiWindowFlags_AlwaysHorizontalScrollbar = 1 << 15, // Always show horizontal scrollbar (even if ContentSize.x < Size.x)
    ImGuiWindowFlags_AlwaysUseWindowPadding = 1 << 16,    // Ensure child windows without border uses style.WindowPadding (ignored by default for non-bordered child windows, because more convenient)
    ImGuiWindowFlags_NoNavInputs = 1 << 18,           // No gamepad/keyboard navigation within the window
    ImGuiWindowFlags_NoNavFocus = 1 << 19,            // No focusing toward this window with gamepad/keyboard navigation (e.g. skipped by CTRL+TAB)
    ImGuiWindowFlags_UnsavedDocument = 1 << 20,       // Display a dot next to the title. When used in a tab/docking context, tab is selected when clicking the X + closure is not assumed (will wait for user to stop submitting the tab). Otherwise closure is assumed when pressing the X, so if you keep submitting the tab may reappear at end of tab bar.
    ImGuiWindowFlags_NoDocking = 1 << 21,             // Disable docking of this window
    ImGuiWindowFlags_NoNav = ImGuiWindowFlags_NoNavInputs | ImGuiWindowFlags_NoNavFocus,
    ImGuiWindowFlags_NoDecoration = ImGuiWindowFlags_NoTitleBar | ImGuiWindowFlags_NoResize | ImGuiWindowFlags_NoScrollbar | ImGuiWindowFlags_NoCollapse,
    ImGuiWindowFlags_NoInputs = ImGuiWindowFlags_NoMouseInputs | ImGuiWindowFlags_NoNavInputs | ImGuiWindowFlags_NoNavFocus,

    // [Internal]
    ImGuiWindowFlags_NavFlattened = 1 << 23,          // [BETA] On child window: allow gamepad/keyboard navigation to cross over parent border to this child or between sibling child windows.
    ImGuiWindowFlags_ChildWindow = 1 << 24,           // Don't use! For internal use by BeginChild()
    ImGuiWindowFlags_Tooltip = 1 << 25,               // Don't use! For internal use by BeginTooltip()
    ImGuiWindowFlags_Popup = 1 << 26,                 // Don't use! For internal use by BeginPopup()
    ImGuiWindowFlags_Modal = 1 << 27,                 // Don't use! For internal use by BeginPopupModal()
    ImGuiWindowFlags_ChildMenu = 1 << 28,             // Don't use! For internal use by BeginMenu()
    ImGuiWindowFlags_DockNodeHost = 1 << 29           // Don't use! For internal use by Begin()/NewFrame()
};
repos/c2z/test_cases/include/c023_cpp_nested_structs.cpp
#include "c023_cpp_nested_structs.h" int test_sizeof_RootStruct() { return (int)sizeof(RootStruct); } int test_sizeof_RootUnion() { return (int)sizeof(RootUnion); }
repos/c2z/test_cases/include/c013_cpp_vector.h
#include <stdint.h>
#include <stddef.h>
#include <vector>

std::vector<uint8_t> create();
size_t sizeof_vector_uint8_t();
const uint8_t *vector_data(const std::vector<uint8_t> &vec);
size_t vector_size(const std::vector<uint8_t> &vec);
size_t vector_capacity(const std::vector<uint8_t> &vec);
bool enumerate(std::vector<uint8_t>& out_buf, size_t count);
repos/c2z/test_cases/include/c019_size_t.h
#include <stdint.h>
#include <stddef.h>

struct A {
    size_t a;
    static const A sA;

    A(int a);
    A(size_t a);
    ~A() {}

    void foo(size_t b);
    size_t boo();
};
repos/c2z/test_cases/include/c024_cpp_bitfields.h
typedef struct Bitfields {
    unsigned long long bitfield1 : 10;
    unsigned long long bitfield2 : 10;
    unsigned long bitfield3 : 5;
    signed long bitfield4 : 5;
    bool bitfield5 : 2;
    char bitfield6 : 2;
    unsigned char bitfield7 : 2;
    int bitfield8 : 31;
    long long bitfield9 : 30;
    // TODO: Add test of 0-length bitfield here
    // long long : 0;
    long long bitfield11 : 30;
} Bitfields;

int size_of_Bitfields();
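src/Transpiler.zig (later in this dump) lowers each run of bitfields over a same-size underlying type into one `packed struct(uN)` group and pads the tail with a `_dummy_padding` field. Assuming an x86_64-linux-gnu target where `unsigned long long` and `long` are both 8 bytes, the first group of this struct would come out roughly as the sketch below (illustrative, not verbatim output):

pub const Bitfields = extern struct {
    bitfield_1: packed struct(u64) {
        bitfield1: u10,
        bitfield2: u10,
        bitfield3: u5,
        bitfield4: i5,
        /// Padding added by c2z
        _dummy_padding: u34,
    },
    // ...further groups would follow for the bool/char, int, and long long runs...
};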
repos/c2z/test_cases/include/c023_cpp_nested_structs.h
class RootStruct {
    int value_begin;
    struct {
        float m1;
    } nested_struct_1a, nested_struct_1b;
    class NestedStruct2a {
        float m2;
    };
    int value_mid;
    NestedStruct2a nested_struct_2b;
    struct NestedStruct3a {
        float m3;
    } nested_struct_3b, nested_struct_3c;
    // Fully anonymous 4
    struct {
        float m44;
    };
    int value_end;
};

int test_sizeof_RootStruct();

class RootUnion {
    int value_begin;
    union {
        int iii1;
        float fff1;
    } nested_union_1a, nested_union_1b;
    union NestedUnion2a {
        int iii2;
        float fff2;
    };
    int value_mid;
    NestedUnion2a nested_union_2b;
    union NestedUnion3a {
        int iii3;
        float fff3;
    } nested_union_3b, nested_union_3c;
    // Fully anonymous 4
    union {
        int iii4;
        float fff4;
    };
    int value_end;
};

int test_sizeof_RootUnion();

// TODO
// class RootEnum
// {
//     int value_begin;
//     enum
//     {
//         AAAA,
//         ZZZZ
//     } nested_enum_1a,
//       nested_enum_1b;
//     enum NestedEnum2a
//     {
//         AAAA,
//         ZZZZ
//     };
//     int value_mid;
//     NestedEnum2a nested_enum_2b;
//     enum NestedEnum3a
//     {
//         AAAA,
//         ZZZZ
//     } nested_enum_3b,
//       nested_enum_3c;
//     // Fully anonymous 4
//     enum
//     {
//         AAAA,
//         ZZZZ
//     };
//     int value_end;
// };
repos/c2z/test_cases/include/c021_operator_overload.h
struct ImVec2 {
    float x, y;
    ImVec2();
    ImVec2(float x, float y);
    float& operator[](int idx);
    float operator[](int idx) const;
    ImVec2 operator*(const float rhs) const;
    ImVec2& operator*=(const float rhs);
    ImVec2 operator/(const float rhs) const;
    ImVec2& operator/=(const float rhs);
    ImVec2 operator+(const ImVec2& rhs) const;
    ImVec2& operator+=(const ImVec2& rhs);
    ImVec2 operator-(const ImVec2& rhs) const;
    ImVec2& operator-=(const ImVec2& rhs);
};

struct ImVec4 {
    float x, y, z, w;
};

static ImVec4 operator+(const ImVec4& lhs, const ImVec4& rhs);
static ImVec4 operator-(const ImVec4& lhs, const ImVec4& rhs);
static ImVec4 operator*(const ImVec4& lhs, const ImVec4& rhs);
repos/c2z/cpp/vector.cpp
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <vector>

/// STL allocator that takes care that memory is aligned to N bytes
template <typename T, size_t N>
class STLAlignedAllocator {
private:
    size_t state = 0;

public:
    using value_type = T;

    /// Pointer to type
    using pointer = T *;
    using const_pointer = const T *;

    /// Reference to type.
    /// Can be removed in C++20.
    using reference = T &;
    using const_reference = const T &;

    using size_type = size_t;
    using difference_type = ptrdiff_t;

    /// Constructor
    inline STLAlignedAllocator() = default;

    /// Constructor from other allocator
    template <typename T2>
    inline explicit STLAlignedAllocator(const STLAlignedAllocator<T2, N> &) { }

    /// Allocate memory
    inline pointer allocate(size_type inN) { return (pointer)malloc(sizeof(T) * inN); }

    /// Free memory
    inline void deallocate(pointer inPointer, size_type) { free((void*)inPointer); }

    /// Allocators are stateless so assumed to be equal
    inline bool operator == (const STLAlignedAllocator<T, N> &) const { return true; }
    inline bool operator != (const STLAlignedAllocator<T, N> &) const { return false; }

    /// Converting to allocator for other type
    template <typename T2>
    struct rebind {
        using other = STLAlignedAllocator<T2, N>;
    };
};

struct vector_layout {
#if defined(_MSC_VER) && defined(_DEBUG)
    size_t __debug;
#endif
    void* head;
    void* tail;
    void* end;
};

template <class _Alloc>
struct vector_with_custom_alloc_layout {
#ifdef _MSC_VER
#ifdef _DEBUG
    size_t __debug;
#endif
    _Alloc alloc;
#endif
    void* head;
    void* tail;
    void* end;
#ifndef _MSC_VER
    _Alloc alloc;
#endif
};

int main() {
    printf("std::vector<char>\n");
    printf("size: %llu -> %llu\n", sizeof(std::vector<char>), sizeof(vector_layout));

    auto vec0 = std::vector<char>();
    vec0.push_back('a');
    vec0.push_back('b');
    vec0.push_back('c');

    auto layout0 = (vector_layout*)(&vec0);
#if defined(_MSC_VER) && defined(_DEBUG)
    printf("__debug: %llx\n", layout0->__debug);
#endif
    printf("ptr: %p -> %p\n", vec0.data(), layout0->head);
    printf("size: %lld -> %lld\n", (int64_t)vec0.size(), (int64_t)layout0->tail - (int64_t)layout0->head);
    printf("capacity: %lld -> %lld\n", (int64_t)vec0.capacity(), (int64_t)layout0->end - (int64_t)layout0->head);
    printf("=============================================\n");

    printf("std::vector<char, STLAlignedAllocator<char, 64>>\n");
    printf("size: %llu -> %llu\n", sizeof(std::vector<char, STLAlignedAllocator<char, 64>>), sizeof(vector_with_custom_alloc_layout<STLAlignedAllocator<char, 64>>));

    auto vec1 = std::vector<char, STLAlignedAllocator<char, 64>>();
    vec1.push_back('a');
    vec1.push_back('b');
    vec1.push_back('c');

    auto layout1 = (vector_with_custom_alloc_layout<STLAlignedAllocator<char, 64>>*)(&vec1);
    printf("ptr: %p -> %p\n", vec1.data(), layout1->head);
    printf("size: %lld -> %lld\n", (int64_t)vec1.size(), (int64_t)layout1->tail - (int64_t)layout1->head);
    printf("capacity: %lld -> %lld\n", (int64_t)vec1.capacity(), (int64_t)layout1->end - (int64_t)layout1->head);
    printf("=============================================\n");

#if defined(_MSC_VER) && defined(_DEBUG)
    auto vec2 = std::vector<char>();
    auto layout2 = (vector_layout*)(&vec2);
    printf("ptr: %p\n", vec2.data());
    printf("size: %llu\n", vec2.size());
    printf("capacity: %llu\n", vec2.capacity());
    layout2->__debug = 0; // 0xAAAAAAAAAAAAAAAA;
    vec2.push_back('a');
    vec2.push_back('a');
    vec2.push_back('a');
    vec2.push_back('a');
    vec2.push_back('a');
    vec2.push_back('a');
    vec2.push_back('a');
#endif
    return 0;
}
repos/c2z/cpp/string.cpp
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <string>

/// STL allocator that takes care that memory is aligned to N bytes
template <typename T, size_t N>
class STLAlignedAllocator {
private:
    size_t state = 0xFAFA0000FAFA0000;

public:
    using value_type = T;

    /// Pointer to type
    using pointer = T *;
    using const_pointer = const T *;

    /// Reference to type.
    /// Can be removed in C++20.
    using reference = T &;
    using const_reference = const T &;

    using size_type = size_t;
    using difference_type = ptrdiff_t;

    /// Constructor
    inline STLAlignedAllocator() = default;

    /// Constructor from other allocator
    template <typename T2>
    inline explicit STLAlignedAllocator(const STLAlignedAllocator<T2, N> &) { }

    /// Allocate memory
    inline pointer allocate(size_type inN) { return (pointer)malloc(sizeof(T) * inN); }

    /// Free memory
    inline void deallocate(pointer inPointer, size_type) { free((void*)inPointer); }

    /// Allocators are stateless so assumed to be equal
    inline bool operator == (const STLAlignedAllocator<T, N> &) const { return true; }
    inline bool operator != (const STLAlignedAllocator<T, N> &) const { return false; }

    /// Converting to allocator for other type
    template <typename T2>
    struct rebind {
        using other = STLAlignedAllocator<T2, N>;
    };
};

struct string_layout {
#ifdef _MSC_VER
    void* ptr;
    size_t __data;
    size_t len;
    size_t capacity;
#else
    size_t capacity;
    size_t len;
    void* ptr;
#endif
};

template <class _Alloc>
struct string_with_custom_alloc_layout {
#ifdef _MSC_VER
    _Alloc alloc;
    void* ptr;
    size_t __data;
    size_t len;
    size_t capacity;
#else
    size_t capacity;
    size_t len;
    void* ptr;
    _Alloc alloc;
#endif
};

int main() {
    printf("std::string<char>\n");
    printf("size: %llu -> %llu\n", sizeof(std::string), sizeof(string_layout));

    auto vec0 = std::string();
    for (size_t i = 0; i < 55; i++) { vec0.push_back('0'); }

    auto in_place0 = (char*)(&vec0);
    auto heap0 = (string_layout*)(&vec0);
    printf("len: %lld\n", (int64_t)vec0.length());
    printf("data: %p\n", vec0.data());
    printf("in_place: ");
    for (size_t i = 0; i < sizeof(std::string); i++) {
        if ((i != 0) && (i % 8 == 0)) printf(" ");
        printf("%02hhX", in_place0[i]);
    }
    printf("\n");
    printf("heap: %p %lld %lld\n", heap0->ptr, heap0->len, heap0->capacity);
    printf("=============================================\n");

    printf("std::basic_string<char, std::char_traits<char>, STLAlignedAllocator<char, 64>>\n");
    printf("size: %llu -> %llu\n", sizeof(std::basic_string<char, std::char_traits<char>, STLAlignedAllocator<char, 64>>), sizeof(string_with_custom_alloc_layout<STLAlignedAllocator<char, 64>>));

    auto vec1 = std::basic_string<char, std::char_traits<char>, STLAlignedAllocator<char, 64>>();
    for (size_t i = 0; i < 55; i++) { vec1.push_back('0'); }

    auto in_place1 = (char*)(&vec1);
    auto heap1 = (string_with_custom_alloc_layout<STLAlignedAllocator<char, 64>>*)(&vec1);
    printf("len: %lld\n", (int64_t)vec1.length());
    printf("data: %p\n", vec1.data());
    printf("in_place: ");
    for (size_t i = 0; i < sizeof(std::basic_string<char, std::char_traits<char>, STLAlignedAllocator<char, 64>>); i++) {
        if ((i != 0) && (i % 8 == 0)) printf(" ");
        printf("%02hhX", in_place1[i]);
    }
    printf("\n");
    printf("alloc: %p\n", heap1->alloc);
    printf("heap: %p %lld %lld\n", heap1->ptr, heap1->len, heap1->capacity);
    return 0;
}
repos/c2z/src/cpp.zig
//! zig c++ interop types and utilities
const std = @import("std");
const builtin = @import("builtin");

/// Switch by linux target triple, usage:
/// ```zig
/// const size_t = targetSwitch(type, .{
///     .{ "x86_64-windows-gnu", c_ulonglong },
///     .{ "x86_64-linux-gnu", c_ulong },
/// });
/// ```
pub fn targetSwitch(
    comptime T: type,
    comptime lookup: anytype,
) T {
    var buffer: [1024]u8 = undefined;
    var fba = std.heap.FixedBufferAllocator.init(&buffer);
    const tuple = builtin.target.linuxTriple(fba.allocator()) catch unreachable;
    for (lookup) |entry| {
        if (std.mem.eql(u8, entry[0], tuple)) {
            return entry[1];
        }
    }
    @compileError("target `" ++ tuple ++ "` not listed");
}

/// optional c enumeration extension functions
pub fn FlagsMixin(comptime FlagsType: type) type {
    return struct {
        pub const IntType = @typeInfo(FlagsType).Struct.fields[0].type;
        pub inline fn init(flags: IntType) FlagsType {
            return .{ .bits = flags };
        }
        pub inline fn merge(lhs: FlagsType, rhs: FlagsType) FlagsType {
            return init(lhs.bits | rhs.bits);
        }
        pub inline fn intersect(lhs: FlagsType, rhs: FlagsType) FlagsType {
            return init(lhs.bits & rhs.bits);
        }
        pub inline fn complement(self: FlagsType) FlagsType {
            return init(~self.bits);
        }
        pub inline fn subtract(lhs: FlagsType, rhs: FlagsType) FlagsType {
            return init(lhs.bits & rhs.complement().bits);
        }
        pub inline fn contains(lhs: FlagsType, rhs: FlagsType) bool {
            return intersect(lhs, rhs).bits == rhs.bits;
        }
    };
}

extern fn memset(ptr: *anyopaque, value: c_int, num: usize) *anyopaque;
extern fn memcpy(destination: *anyopaque, source: *const anyopaque, num: usize) *anyopaque;
extern fn memmove(destination: *anyopaque, source: *const anyopaque, num: usize) *anyopaque;
extern fn strcmp(str1: *const c_char, str2: *const c_char) c_int;
extern fn malloc(size: usize) ?*anyopaque;
extern fn free(ptr: ?*anyopaque) void;

/// default stateless allocator, uses `malloc` and `free` internally
pub fn Allocator(comptime T: type) type {
    return extern struct {
        const Self = @This();

        pub fn allocate(self: *Self, size: usize) !*T {
            _ = self;
            if (@as(?*T, @ptrCast(malloc(@sizeOf(T) * size)))) |ptr| {
                return ptr;
            } else {
                return std.mem.Allocator.Error.OutOfMemory;
            }
        }

        pub fn deallocate(self: *Self, ptr: *T, size: usize) void {
            _ = self;
            _ = size;
            free(ptr);
        }
    };
}

pub const native = switch (builtin.abi) {
    .msvc => msvc,
    else => gnu,
};

pub const msvc = struct {
    /// MSVC `_Container_base12`, it might be used in debug builds to invalidate iterators
    const Container = if (builtin.mode == .Debug) extern struct {
        const Self = @This();

        const Iter = extern struct {
            proxy: ?*const ContainerProxy = null,
            next: ?*Iter = null,
        };

        const ContainerProxy = extern struct {
            cont: ?*const Self = null,
            iter: ?*Iter = null,
        };

        proxy: ?*ContainerProxy,

        pub fn init() Self {
            const proxy = @as(?*ContainerProxy, @ptrCast(@alignCast(malloc(@sizeOf(ContainerProxy)))));
            proxy.?.* = .{};
            return .{ .proxy = proxy };
        }

        pub fn deinit(self: *Self) void {
            _ = self;
            // todo: to free it you must walk the linked list
            // todo: can't use `free(self.proxy);` on String
            // todo: test if will leak memory when passing containers between ffi bounds
        }
    } else extern struct {
        const Self = @This();

        pub fn init() Self {
            return .{};
        }

        pub fn deinit(_: *Self) void {}
    };

    /// basic `std::vector` compatible type, it doesn't free items
    pub fn Vector(comptime T: type) type {
        return msvc.VectorRaw(T, Allocator(T));
    }

    /// base type for any `std::vector` derived type with a custom allocator type and other configurations, it doesn't free items
    pub fn VectorRaw(comptime T: type, comptime Alloc: type) type {
        return extern struct {
            const Self = @This();

            __proxy: Container,
            allocator: Alloc,
            head: ?*T = null,
            tail: ?*T = null,
            limit: ?*T = null,

            pub fn init(allocator: Alloc) Self {
                return .{
                    .__proxy = Container.init(),
                    .allocator = allocator,
                };
            }

            pub inline fn size(self: *const Self) usize {
                return (@intFromPtr(self.tail) - @intFromPtr(self.head));
            }

            pub inline fn capacity(self: *const Self) usize {
                return (@intFromPtr(self.limit) - @intFromPtr(self.head));
            }

            pub inline fn values(self: Self) []T {
                return if (self.head) |head| @as([*]T, @ptrCast(head))[0..self.size()] else &[_]T{};
            }

            pub fn deinit(self: *Self) void {
                if (self.head) |head| {
                    self.allocator.deallocate(head, self.size());
                    self.head = null;
                    self.tail = null;
                    self.limit = null;
                }
                self.__proxy.deinit();
            }
        };
    }

    /// drop-in replacement for `std::string`
    pub const String = msvc.StringRaw(Allocator(u8));

    /// similar to `std::basic_string<char, std::char_traits<char>, Alloc>`
    pub fn StringRaw(comptime Alloc: type) type {
        const Heap = extern struct {
            ptr: [*]u8,
            __payload: usize,
        };
        const Data = extern union {
            in_place: [@sizeOf(Heap)]u8,
            heap: Heap,
        };
        return extern struct {
            const Self = @This();

            __proxy: Container,
            allocator: Alloc,
            data: Data,
            len: usize,
            cap: usize,

            pub fn init(allocator: Alloc) Self {
                return .{
                    .__proxy = Container.init(),
                    .allocator = allocator,
                    .data = undefined,
                    .len = 0,
                    .cap = @sizeOf(Heap) - 1,
                };
            }

            inline fn inHeap(self: *const Self) bool {
                return self.cap > (@sizeOf(Heap) - 1);
            }

            pub inline fn size(self: *const Self) usize {
                return self.len;
            }

            pub inline fn capacity(self: *const Self) usize {
                return self.cap;
            }

            pub inline fn values(self: *Self) []u8 {
                return if (self.inHeap()) self.data.heap.ptr[0..self.len] else self.data.in_place[0..self.len];
            }

            pub fn deinit(self: *Self) void {
                if (self.inHeap()) {
                    self.allocator.deallocate(@as(*u8, @ptrCast(self.data.heap.ptr)), self.cap);
                    self.data.in_place[0] = 0;
                }
                self.__proxy.deinit();
            }
        };
    }
};

pub const gnu = struct {
    /// basic `std::vector` compatible type, it doesn't free items
    pub fn Vector(comptime T: type) type {
        return gnu.VectorRaw(T, Allocator(T));
    }

    /// base type for any `std::vector` derived type with a custom allocator type and other configurations, it doesn't free items
    pub fn VectorRaw(comptime T: type, comptime Alloc: type) type {
        return extern struct {
            const Self = @This();

            head: ?*T = null,
            tail: ?*T = null,
            limit: ?*T = null,
            allocator: Alloc,

            pub fn init(allocator: Alloc) Self {
                return .{ .allocator = allocator };
            }

            pub inline fn size(self: *const Self) usize {
                return (@intFromPtr(self.tail) - @intFromPtr(self.head));
            }

            pub inline fn capacity(self: *const Self) usize {
                return (@intFromPtr(self.limit) - @intFromPtr(self.head));
            }

            pub inline fn values(self: Self) []T {
                return if (self.head) |head| @as([*]T, @ptrCast(head))[0..self.size()] else &[_]T{};
            }

            pub fn deinit(self: *Self) void {
                if (self.head) |head| {
                    self.allocator.deallocate(head, self.size());
                    self.head = null;
                    self.tail = null;
                    self.limit = null;
                }
            }
        };
    }

    /// drop-in replacement for `std::string`
    pub const String = gnu.StringRaw(Allocator(u8));

    /// similar to `std::basic_string<char, std::char_traits<char>, Alloc>`
    pub fn StringRaw(comptime Alloc: type) type {
        const Heap = extern struct {
            cap: usize,
            len: usize,
            ptr: [*]u8,
        };
        const Data = extern union {
            in_place: [@sizeOf(Heap)]u8,
            heap: Heap,
        };
        return extern struct {
            const Self = @This();

            data: Data,
            allocator: Alloc,

            pub fn init(allocator: Alloc) Self {
                return Self{
                    .data = Data{ .in_place = [_]u8{0} ** @sizeOf(Heap) },
                    .allocator = allocator,
                };
            }

            inline fn inHeap(self: *const Self) bool {
                return (self.data.in_place[0] & 1) != 0;
            }

            pub inline fn size(self: *const Self) usize {
                return if (self.inHeap()) self.data.heap.len else (self.data.in_place[0] >> 1);
            }

            pub inline fn capacity(self: *const Self) usize {
                return if (self.inHeap())
                    self.data.heap.cap
                else
                    // in_place[0] >> 1 == length and in_place[in_place.len - 1] == '\0'
                    @sizeOf(Heap) - 2;
            }

            pub inline fn values(self: *Self) []u8 {
                return if (self.inHeap()) self.data.heap.ptr[0..self.data.heap.len] else self.data.in_place[1 .. (self.data.in_place[0] >> 1) + 1];
            }

            pub fn deinit(self: *Self) void {
                if (self.inHeap()) {
                    self.allocator.deallocate(@as(*u8, @ptrCast(self.data.heap.ptr)), self.data.heap.cap);
                    self.data.in_place[0] = 0;
                }
            }
        };
    }
};

// todo: try std::array in msvc, I think it works differently in debug
/// just use `[N]T`
pub fn Array(
    comptime T: type,
    comptime N: comptime_int,
) type {
    return [N]T;
}

pub const Vector = native.Vector;
pub const VectorRaw = native.VectorRaw;
pub const String = native.String;
pub const StringRaw = native.StringRaw;

// todo: UniquePtr, SharedPtr
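As a usage sketch for `FlagsMixin` (assumptions: the file is imported from the same directory as `cpp.zig`, and the flag struct has the single integer field named `bits` that the mixin expects):

const std = @import("std");
const cpp = @import("cpp.zig");

// a minimal flags type wired up with the mixin; `WindowFlags` is a made-up example type
pub const WindowFlags = extern struct {
    bits: c_int = 0,
    pub usingnamespace cpp.FlagsMixin(WindowFlags);
};

test "FlagsMixin basics" {
    const a = WindowFlags.init(0x1);
    const b = WindowFlags.init(0x2);
    // merging two flags yields a value that contains both
    try std.testing.expect(a.merge(b).contains(a));
    // disjoint flags have an empty intersection
    try std.testing.expect(!a.intersect(b).contains(b));
}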
repos/c2z/src/main.zig
const std = @import("std");
const builtin = @import("builtin");
const debug = std.debug;
const io = std.io;
const log = std.log;
const json = std.json;
const mem = std.mem;
const fmt = std.fmt;

const Allocator = mem.Allocator;

const Transpiler = @import("Transpiler.zig");

pub fn main() !void {
    var gpa = std.heap.GeneralPurposeAllocator(.{}){};
    defer std.testing.expect(gpa.deinit() != .leak) catch @panic("memory leak");
    const allocator = gpa.allocator();

    // zig cc -x c++ -std=c++11 -Xclang -ast-dump=json {input_file}
    var clang = std.ArrayList([]const u8).init(allocator);
    defer clang.deinit();
    try clang.append("zig"); // 0
    try clang.append("cc"); // 1
    try clang.append("-x"); // 2
    try clang.append("c++"); // 3
    try clang.append("-lc++"); // 4
    try clang.append("-Xclang");
    try clang.append("-ast-dump=json");
    try clang.append("-fsyntax-only");
    try clang.append("-fparse-all-comments");

    const argv = try std.process.argsAlloc(allocator);
    defer std.process.argsFree(allocator, argv);

    var target_tuple: ?[]const u8 = null;

    var transpiler = Transpiler.init(allocator);
    defer transpiler.deinit();

    var output_ast = false;

    var i: usize = 1;
    while (i < argv.len) : (i += 1) {
        const arg = argv[i];
        if (mem.eql(u8, arg, "-h") or mem.eql(u8, arg, "-help")) {
            _ = try io.getStdErr().writer().write(
                \\-h, -help             Display this help and exit
                \\-target TARGET_TUPLE  Clang target tuple, e.g. x86_64-windows-gnu
                \\-R                    Recursive transpiling, use to also parse includes
                \\-no-glue              No C++ glue code, bindings will be target specific
                \\-no-comments          Don't write comments
                \\-c99                  Use C99 instead of C++
                \\[clang arguments]     Pass any clang arguments, e.g. -DNDEBUG -I.\include -target x86-linux-gnu
                \\[--] [FILES]          Input files
                \\
            );
            return;
        } else if (mem.eql(u8, arg, "-c99")) {
            clang.items[3] = "c";
            clang.items[4] = "-std=c99";
            transpiler.no_glue = true; // glue is not needed for c
            continue;
        } else if (mem.eql(u8, arg, "-R")) {
            transpiler.recursive = true;
            continue;
        } else if (mem.eql(u8, arg, "-no-glue")) {
            transpiler.no_glue = true;
            continue;
        } else if (mem.eql(u8, arg, "-no-comments")) {
            transpiler.no_comments = true;
            continue;
        } else if (mem.eql(u8, arg, "--")) {
            // positional arguments
            i += 1;
            break;
        } else if (mem.eql(u8, arg, "-target")) {
            // track the target tuple if specified
            try clang.append(arg);
            i += 1;
            target_tuple = argv[i];
            try clang.append(argv[i]);
            continue;
        } else if (mem.eql(u8, arg, "-output-ast")) {
            output_ast = true;
            continue;
        } else if (i == argv.len - 1 and arg[0] != '-') {
            // last arg is not a command, so it must be an input arg
            break;
        }
        try clang.append(arg);
    }

    const host_target = try builtin.target.linuxTriple(allocator);
    defer allocator.free(host_target);

    if (target_tuple == null) {
        // assign a default target tuple
        target_tuple = host_target;
    }

    var dclang = std.ArrayList(u8).init(allocator);
    defer dclang.deinit();
    for (clang.items) |arg| {
        try dclang.appendSlice(arg);
        try dclang.appendSlice(" ");
    }
    log.info("{s}", .{dclang.items});

    const cwd = try std.fs.cwd().realpathAlloc(allocator, ".");
    defer allocator.free(cwd);

    var output_path = std.ArrayList(u8).init(allocator);
    defer output_path.deinit();

    while (i < argv.len) : (i += 1) {
        const file_path = argv[i];
        log.info("binding `{s}`", .{file_path});

        try clang.append(file_path);
        defer _ = clang.pop();

        const astdump = try std.process.Child.run(.{
            .allocator = allocator,
            .argv = clang.items,
            .max_output_bytes = 4 * 512 * 1024 * 1024,
        });
        defer {
            allocator.free(astdump.stdout);
            allocator.free(astdump.stderr);
        }

        if (output_ast) {
            var astfile = try std.fs.cwd().createFile("c2z_ast.json", .{});
            try astfile.writeAll(astdump.stdout);
            astfile.close();
        }

        var parsed = try json.parseFromSlice(json.Value, allocator, astdump.stdout, .{});
        defer parsed.deinit();

        transpiler.header = std.fs.path.basename(file_path);
        try transpiler.run(&parsed.value);

        log.info("transpiled {d}/{d} ({d:.2} %)", .{
            transpiler.nodes_visited,
            transpiler.nodes_count,
            (100.0 * @as(f64, @floatFromInt(transpiler.nodes_visited)) / @as(f64, @floatFromInt(transpiler.nodes_count))),
        });

        const file_name = std.fs.path.stem(file_path);

        // zig output
        {
            output_path.clearRetainingCapacity();
            try output_path.writer().print("{s}.zig", .{file_name});

            var file = try std.fs.cwd().createFile(output_path.items, .{});
            try file.writeAll(transpiler.buffer.items);
            file.close();

            log.info("formatting `{s}`", .{output_path.items});
            var zfmt_args = std.ArrayList([]const u8).init(allocator);
            defer zfmt_args.deinit();
            zfmt_args.clearRetainingCapacity();
            try zfmt_args.append("zig");
            try zfmt_args.append("fmt");
            try zfmt_args.append(output_path.items);

            var zfmt = std.process.Child.init(zfmt_args.items, allocator);
            zfmt.stderr_behavior = .Ignore;
            zfmt.stdout_behavior = .Ignore;
            _ = try zfmt.spawnAndWait();
        }

        // glue output
        if (!transpiler.no_glue) {
            output_path.clearRetainingCapacity();
            try output_path.writer().print("{s}_glue.cpp", .{file_name});

            var file = try std.fs.cwd().createFile(output_path.items, .{});
            try file.writeAll(transpiler.c_buffer.items);
            file.close();
        }
    }
}
repos/c2z/src/Transpiler.zig
const std = @import("std"); const debug = std.debug; const io = std.io; const log = std.log; const json = std.json; const mem = std.mem; const fmt = std.fmt; const Allocator = mem.Allocator; const Self = @This(); const FnSig = struct { raw: []const u8, is_const: bool, is_varidatic: bool, ret: []const u8, }; // https://ziglang.org/documentation/master/#Keyword-Reference const KeywordsLUT = std.ComptimeStringMap(void, .{ .{ "addrspace", "__addrspace" }, .{ "align", "__align" }, .{ "allowzero", "__allowzero" }, .{ "and", "__and" }, .{ "anyframe", "__anyframe" }, .{ "anytype", "__anytype" }, .{ "asm", "__asm" }, .{ "async", "__async" }, .{ "await", "__await" }, .{ "catch", "__catch" }, .{ "comptime", "__comptime" }, .{ "defer", "__defer" }, .{ "errdefer", "__errdefer" }, .{ "error", "__error" }, .{ "export", "__export" }, .{ "fn", "__fn" }, .{ "linksection", "__linksection" }, .{ "noalias", "__noalias" }, .{ "noinline", "__noinline" }, .{ "nosuspend", "__nosuspend" }, .{ "or", "__or" }, .{ "orelse", "__orelse" }, .{ "packed", "__packed" }, .{ "pub", "__pub" }, .{ "resume", "__resume" }, .{ "suspend", "__suspend" }, .{ "test", "__test" }, .{ "threadlocal", "__threadlocal" }, .{ "try", "__try" }, .{ "type", "__type" }, .{ "undefined", "__undefined" }, .{ "unreachable", "__unreachable" }, .{ "usingnamespace", "__usingnamespace" }, .{ "var", "__var" }, .{ "volatile", "__volatile" }, }); const PrimitivesTypeLUT = std.StaticStringMap([]const u8).initComptime(.{ .{ "bool", "bool" }, .{ "char", "u8" }, .{ "signed char", "i8" }, .{ "unsigned char", "u8" }, .{ "short", "c_short" }, .{ "unsigned short", "c_ushort" }, .{ "int", "c_int" }, .{ "unsigned int", "c_uint" }, .{ "long", "c_long" }, .{ "unsigned long", "c_ulong" }, .{ "long long", "c_longlong" }, .{ "unsigned long long", "c_ulonglong" }, .{ "float", "f32" }, .{ "double", "f64" }, .{ "long double", "c_longdouble" }, .{ "int8_t", "i8" }, .{ "uint8_t", "u8" }, .{ "int16_t", "i16" }, .{ "uint16_t", "u16" }, .{ "int32_t", "i32" }, .{ "uint32_t", "u32" }, .{ "int64_t", "i64" }, .{ "uint64_t", "u64" }, // note: zig docs do say they are equivalents so it should be ok todo this .{ "__int128", "i128" }, .{ "unsigned __int128", "u128" }, .{ "intptr_t", "isize" }, .{ "uintptr_t", "usize" }, .{ "size_t", "usize" }, // assumed types .{ "va_list", "[*c]u8" }, .{ "__va_list_tag", "[*c]u8" }, .{ "ptrdiff_t", "isize" }, .{ "ssize_t", "isize" }, // custom types .{ "std::vector", "cpp.Vector" }, .{ "std::array", "cpp.Array" }, // todo: std::array<T, N> -> [N]T .{ "std::string", "cpp.String" }, }); const TypeToByteSizeLUT = std.StaticStringMap(u32).initComptime(.{ .{ "bool", @sizeOf(bool) }, .{ "c_int", @sizeOf(c_int) }, .{ "c_long", @sizeOf(c_long) }, .{ "c_longdouble", @sizeOf(c_longdouble) }, .{ "c_longlong", @sizeOf(c_longlong) }, .{ "c_short", @sizeOf(c_short) }, .{ "c_uint", @sizeOf(c_uint) }, .{ "c_ulong", @sizeOf(c_ulong) }, .{ "c_ulonglong", @sizeOf(c_ulonglong) }, .{ "c_ushort", @sizeOf(c_ushort) }, .{ "f32", @sizeOf(f32) }, .{ "f64", @sizeOf(f64) }, .{ "i128", @sizeOf(i128) }, .{ "i16", @sizeOf(i16) }, .{ "i32", @sizeOf(i32) }, .{ "i64", @sizeOf(i64) }, .{ "i8", @sizeOf(i8) }, .{ "isize", @sizeOf(isize) }, .{ "u128", @sizeOf(u128) }, .{ "u16", @sizeOf(u16) }, .{ "u32", @sizeOf(u32) }, .{ "u64", @sizeOf(u64) }, .{ "u8", @sizeOf(u8) }, .{ "usize", @sizeOf(usize) }, }); const TypeToSignedLUT = std.StaticStringMap(bool).initComptime(.{ .{ "bool", false }, .{ "c_int", true }, .{ "c_long", true }, .{ "c_longdouble", true }, .{ "c_longlong", true }, .{ "c_short", 
true }, .{ "c_uint", false }, .{ "c_ulong", false }, .{ "c_ulonglong", false }, .{ "c_ushort", false }, .{ "i128", true }, .{ "i16", true }, .{ "i32", true }, .{ "i64", true }, .{ "i8", true }, .{ "isize", true }, .{ "u128", false }, .{ "u16", false }, .{ "u32", false }, .{ "u64", false }, .{ "u8", false }, .{ "usize", false }, }); const ScopeTag = enum { root, class, local, }; const Scope = struct { tag: ScopeTag, name: ?[]const u8, /// Constructors indexing ctors: usize = 0, /// Generate unnamed nodes fields: usize = 0, is_polymorphic: bool = false, }; const NamespaceScope = struct { root: bool, full_path: std.ArrayList(u8), unnamed_nodes: std.AutoHashMap(u64, json.Value), // todo: didn't find a hashset, maybe by using the key as `void` no extra allocations will be made? opaques: std.StringArrayHashMap(void), overloads: std.StringArrayHashMap(std.StringArrayHashMap(usize)), fn init(allocator: Allocator) NamespaceScope { return .{ .root = false, .full_path = std.ArrayList(u8).init(allocator), .unnamed_nodes = std.AutoHashMap(u64, json.Value).init(allocator), .opaques = std.StringArrayHashMap(void).init(allocator), .overloads = std.StringArrayHashMap(std.StringArrayHashMap(usize)).init(allocator), }; } fn resolveOverloadIndex(self: *NamespaceScope, name: []const u8, signature: []const u8) !?usize { if (self.overloads.getPtr(name)) |lut| { if (lut.get(signature)) |index| { return index; } else { const index: usize = lut.count() + 2; try lut.put(signature, index); return index; } } else { try self.overloads.put(name, std.StringArrayHashMap(usize).init(self.overloads.allocator)); return null; } } fn deinit(self: *NamespaceScope) void { self.full_path.deinit(); self.unnamed_nodes.deinit(); self.opaques.deinit(); var it = self.overloads.iterator(); while (it.next()) |entry| { entry.value_ptr.deinit(); } self.overloads.deinit(); } }; // ClassInfo is now stored in class_info both with its name as the key, // and with it's line and column. 
const ClassInfo = struct { is_polymorphic: bool, name: []const u8, }; allocator: Allocator, arena: std.heap.ArenaAllocator, buffer: std.ArrayList(u8), out: std.ArrayList(u8).Writer, c_buffer: std.ArrayList(u8), c_out: std.ArrayList(u8).Writer, nodes_visited: usize, nodes_count: usize, namespace: NamespaceScope, scope: Scope, semicolon: bool = true, public: bool = true, class_info: std.StringArrayHashMap(ClassInfo), // options recursive: bool = false, no_glue: bool = false, no_comments: bool = false, header: []const u8 = "", pub fn init(allocator: Allocator) Self { return Self{ .allocator = allocator, .arena = std.heap.ArenaAllocator.init(allocator), .buffer = std.ArrayList(u8).init(allocator), .out = undefined, // can't be initialized because Self will be moved .c_buffer = std.ArrayList(u8).init(allocator), .c_out = undefined, // can't be initialized because Self will be moved .nodes_visited = 0, .nodes_count = 0, .namespace = NamespaceScope.init(allocator), .scope = .{ .tag = .root, .name = null }, .class_info = std.StringArrayHashMap(ClassInfo).init(allocator), }; } pub fn deinit(self: *Self) void { self.arena.deinit(); self.buffer.deinit(); self.c_buffer.deinit(); self.namespace.deinit(); self.class_info.deinit(); } pub fn run(self: *Self, value: *const json.Value) anyerror!void { self.buffer.clearRetainingCapacity(); self.out = self.buffer.writer(); self.c_buffer.clearRetainingCapacity(); self.c_out = self.c_buffer.writer(); self.nodes_count = nodeCount(value); _ = try self.out.write("// auto generated by c2z\n"); _ = try self.out.write("const std = @import(\"std\");\n"); _ = try self.out.write("//const cpp = @import(\"cpp\");\n\n"); if (!self.no_glue) { _ = try self.c_out.write("// auto generated by c2z\n"); try self.c_out.print("#include <new>\n", .{}); try self.c_out.print("#include \"{s}\"\n\n", .{self.header}); } // root namespace try self.namespace.full_path.appendSlice("::"); self.namespace.root = true; try self.visit(value); // odd but it works ... 
try self.endNamespace(NamespaceScope.init(self.allocator)); } fn beginNamespace(self: *Self) NamespaceScope { const parent = self.namespace; self.namespace = NamespaceScope.init(self.allocator); return parent; } fn endNamespace(self: *Self, parent: NamespaceScope) !void { if (self.namespace.opaques.keys().len > 0) { try self.out.print("\n\n// opaques\n\n", .{}); for (self.namespace.opaques.keys()) |name| { log.warn("defining `{s}` as an opaque type", .{name}); try self.out.print("const {s} = anyopaque;\n", .{name}); // todo: replace `[*c]{name}` for ` ?* {name}` // todo: replace `[*c]const {name}` to ` ?* const {name}` } } if (self.namespace.unnamed_nodes.count() > 0) { try self.out.print("\n\n// unnamed nodes\n\n", .{}); var unnamed: usize = 0; var nodes_it = self.namespace.unnamed_nodes.iterator(); while (nodes_it.next()) |entry| { // todo: sometimes these enums are inside a namespace or a struct, so they should be defined at the end of these but still inside const kind = entry.value_ptr.object.get("kind").?.string; if (mem.eql(u8, kind, "EnumDecl")) { const name = try fmt.allocPrint(self.allocator, "UnnamedEnum{d}", .{unnamed}); defer self.allocator.free(name); _ = try entry.value_ptr.object.put("name", json.Value{ .string = name }); try self.visitEnumDecl(entry.value_ptr); } else { log.warn("unused unnamed node `{s}`", .{kind}); continue; } unnamed += 1; } } self.namespace.deinit(); self.namespace = parent; } fn fmtCode(self: *Self, code: *std.ArrayList(u8)) !bool { // prepare input try code.append(0); const input = code.items[0 .. code.items.len - 1 :0]; var tree = try std.zig.Ast.parse(self.allocator, input, .zig); defer tree.deinit(self.allocator); if (tree.errors.len > 0) { return false; } // todo: ast checks https://github.com/ziglang/zig/blob/146b79af153bbd5dafda0ba12a040385c7fc58f8/src/main.zig#L4656 ??? // format code const formatted = try tree.render(self.allocator); defer self.allocator.free(formatted); try code.ensureTotalCapacity(formatted.len); code.clearRetainingCapacity(); code.appendSliceAssumeCapacity(formatted); return true; } fn commentCode(self: *Self, code: *std.ArrayList(u8)) !void { const commented = try mem.replaceOwned(u8, self.allocator, code.items, "\n", "\n// "); defer self.allocator.free(commented); try code.ensureTotalCapacity("// ".len + commented.len); code.clearRetainingCapacity(); code.appendSliceAssumeCapacity("// "); code.appendSliceAssumeCapacity(commented); } fn writeCommentedCode(self: *Self, code: []const u8) !void { var block = code; while (mem.indexOf(u8, block, "\n")) |i| { _ = try self.out.write("// "); _ = try self.out.write(block[0 .. 
i + 1]); block = block[i + 1 ..]; } } fn writeDocs(self: *Self, inner: ?*json.Value) !void { if (self.no_comments) return; if (inner != null) { for (inner.?.array.items) |*item| { const kind = item.object.get("kind").?.string; if (mem.eql(u8, kind, "FullComment")) { try self.visitFullComment(item); } } } } fn visit(self: *Self, value: *const json.Value) anyerror!void { // ignore empty nodes if (value.object.count() == 0) return; if (value.object.getPtr("isImplicit")) |implicit| { if (implicit.bool) { self.nodes_visited += nodeCount(value); return; } } if (self.shouldSkip(value)) { self.nodes_visited += nodeCount(value); return; } const kind = value.object.getPtr("kind").?.string; if (mem.eql(u8, kind, "TranslationUnitDecl")) { try self.visitTranslationUnitDecl(value); } else if (mem.eql(u8, kind, "LinkageSpecDecl")) { try self.visitLinkageSpecDecl(value); } else if (mem.eql(u8, kind, "CXXRecordDecl")) { try self.visitCXXRecordDecl(value); } else if (mem.eql(u8, kind, "EnumDecl")) { try self.visitEnumDecl(value); } else if (mem.eql(u8, kind, "TypedefDecl")) { try self.visitTypedefDecl(value); } else if (mem.eql(u8, kind, "NamespaceDecl")) { try self.visitNamespaceDecl(value); } else if (mem.eql(u8, kind, "FunctionDecl")) { try self.visitFunctionDecl(value); } else if (mem.eql(u8, kind, "ClassTemplateDecl")) { try self.visitClassTemplateDecl(value); } else if (mem.eql(u8, kind, "CompoundStmt")) { try self.visitCompoundStmt(value); } else if (mem.eql(u8, kind, "ReturnStmt")) { try self.visitReturnStmt(value); } else if (mem.eql(u8, kind, "BinaryOperator")) { try self.visitBinaryOperator(value); } else if (mem.eql(u8, kind, "ImplicitCastExpr")) { try self.visitImplicitCastExpr(value); } else if (mem.eql(u8, kind, "MemberExpr")) { try self.visitMemberExpr(value); } else if (mem.eql(u8, kind, "IntegerLiteral")) { try self.visitIntegerLiteral(value); } else if (mem.eql(u8, kind, "FloatingLiteral")) { try self.visitFloatingLiteral(value); } else if (mem.eql(u8, kind, "CStyleCastExpr")) { try self.visitCStyleCastExpr(value); } else if (mem.eql(u8, kind, "ArraySubscriptExpr")) { try self.visitArraySubscriptExpr(value); } else if (mem.eql(u8, kind, "UnaryExprOrTypeTraitExpr")) { try self.visitUnaryExprOrTypeTraitExpr(value); } else if (mem.eql(u8, kind, "DeclRefExpr")) { try self.visitDeclRefExpr(value); } else if (mem.eql(u8, kind, "ParenExpr")) { try self.visitParenExpr(value); } else if (mem.eql(u8, kind, "UnaryOperator")) { try self.visitUnaryOperator(value); } else if (mem.eql(u8, kind, "CXXThisExpr")) { try self.visitCXXThisExpr(value); } else if (mem.eql(u8, kind, "ConstantExpr")) { try self.visitConstantExpr(value); } else if (mem.eql(u8, kind, "VarDecl")) { try self.visitVarDecl(value); } else if (mem.eql(u8, kind, "IfStmt")) { try self.visitIfStmt(value); } else if (mem.eql(u8, kind, "ForStmt")) { try self.visitForStmt(value); } else if (mem.eql(u8, kind, "WhileStmt")) { try self.visitWhileStmt(value); } else if (mem.eql(u8, kind, "CXXBoolLiteralExpr")) { try self.visitCXXBoolLiteralExpr(value); } else if (mem.eql(u8, kind, "DeclStmt")) { try self.visitDeclStmt(value); } else if (mem.eql(u8, kind, "CallExpr")) { try self.visitCallExpr(value); } else if (mem.eql(u8, kind, "CXXMemberCallExpr")) { try self.visitCXXMemberCallExpr(value); } else if (mem.eql(u8, kind, "CXXNullPtrLiteralExpr")) { try self.visitCXXNullPtrLiteralExpr(value); } else if (mem.eql(u8, kind, "FunctionTemplateDecl")) { try self.visitFunctionTemplateDecl(value); } else if (mem.eql(u8, kind, "CXXPseudoDestructorExpr")) { 
try self.visitCXXPseudoDestructorExpr(value); } else if (mem.eql(u8, kind, "CompoundAssignOperator")) { try self.visitCompoundAssignOperator(value); } else if (mem.eql(u8, kind, "CXXOperatorCallExpr")) { try self.visitCXXOperatorCallExpr(value); } else if (mem.eql(u8, kind, "UnresolvedMemberExpr")) { try self.visitUnresolvedMemberExpr(value); } else if (mem.eql(u8, kind, "CXXDependentScopeMemberExpr")) { try self.visitCXXDependentScopeMemberExpr(value); } else if (mem.eql(u8, kind, "ConditionalOperator")) { try self.visitConditionalOperator(value); } else if (mem.eql(u8, kind, "BreakStmt")) { try self.visitBreakStmt(value); } else if (mem.eql(u8, kind, "StringLiteral")) { try self.visitStringLiteral(value); } else if (mem.eql(u8, kind, "CXXTemporaryObjectExpr")) { try self.visitCXXTemporaryObjectExpr(value); } else if (mem.eql(u8, kind, "ExprWithCleanups")) { try self.visitExprWithCleanups(value); } else if (mem.eql(u8, kind, "MaterializeTemporaryExpr")) { try self.visitMaterializeTemporaryExpr(value); } else if (mem.eql(u8, kind, "FullComment")) { // skip } else if (mem.eql(u8, kind, "ParagraphComment")) { try self.visitParagraphComment(value); } else if (mem.eql(u8, kind, "ParamCommandComment")) { try self.visitParamCommandComment(value); } else if (mem.eql(u8, kind, "TextComment")) { try self.visitTextComment(value); } else { log.err("unhandled `{s}` node kind", .{kind}); } } fn visitLinkageSpecDecl(self: *Self, value: *const json.Value) !void { self.nodes_visited += 1; if (value.object.get("language")) |v_lang| { if (mem.eql(u8, v_lang.string, "C")) { // c lang, basically tells the compiler no function overload so don't mangle } else { log.err("unknow language `{s}` in `LinkageSpecDecl`", .{v_lang.string}); return; } } else { log.err("unspecified language in `LinkageSpecDecl`", .{}); return; } if (value.object.getPtr("inner")) |inner| { for (inner.array.items) |*item| { try self.visit(item); } } } fn visitTranslationUnitDecl(self: *Self, value: *const json.Value) !void { self.nodes_visited += 1; if (value.object.getPtr("inner")) |inner| { for (inner.array.items) |*item| { try self.visit(item); } } } fn visitCXXRecordDecl(self: *Self, value: *const json.Value) !void { // c++ class or struct if (self.shouldSkip(value)) { self.nodes_visited += nodeCount(value); return; } self.nodes_visited += 1; const tag = value.object.get("tagUsed").?.string; const is_union = mem.eql(u8, tag, "union"); var is_generated_name = false; var name: []const u8 = undefined; if (value.object.get("name")) |v| { name = v.string; } else if (self.scope.tag == .class) { is_generated_name = true; name = try fmt.allocPrint(self.allocator, "__{s}{d}", .{ if (is_union) "Union" else "Struct", self.scope.fields, }); self.scope.fields += 1; } else { // referenced by someone else const id = try std.fmt.parseInt(u64, value.object.get("id").?.string, 0); _ = try self.namespace.unnamed_nodes.put(id, value.*); return; } defer if (is_generated_name) self.allocator.free(name); const inner = value.object.getPtr("inner"); if (inner == null) { // e.g. 
`struct ImDrawChannel;` try self.namespace.opaques.put(name, undefined); return; } if (!is_generated_name) { _ = self.namespace.opaques.swapRemove(name); } var is_polymorphic = false; if (value.object.get("definitionData")) |v_def_data| { if (v_def_data.object.get("isPolymorphic")) |v_is_polymorphic| { is_polymorphic = v_is_polymorphic.bool; } } const public = self.public; defer self.public = public; if (mem.eql(u8, tag, "class")) { self.public = false; } try self.writeDocs(inner); try self.out.print("pub const {s} = extern {s} {{\n", .{ name, if (is_union) "union" else "struct", }); const v_bases = value.object.get("bases"); if (is_polymorphic and v_bases != null) { if (v_bases.?.array.items.len == 1) { const parent_type_name = typeQualifier(&v_bases.?.array.items[0]).?; if (self.class_info.get(parent_type_name)) |def_data| { if (def_data.is_polymorphic) { // when the parent is polymorphic don't add the vtable pointer in the base class is_polymorphic = false; } } else { log.warn("base class of `{s}` might not be polymorphic", .{name}); } } } if (is_polymorphic) { try self.out.print(" vtable: *const anyopaque,\n\n", .{}); } _ = try self.class_info.put(name, .{ .is_polymorphic = is_polymorphic, .name = name, }); // Double-storing class info by the line and col so we can look up it's name using that. // Need to store this 'globally' so using an arena // For anonymous structs and similar constructs, the corresponding FieldDecl comes *after* // the CXXRecordDecl. if (location(value)) |loc| { const line_col_key = try fmt.allocPrint(self.arena.allocator(), "{d}:{d}", .{ loc.line, loc.col }); _ = try self.class_info.put(line_col_key, .{ .is_polymorphic = is_polymorphic, .name = try fmt.allocPrint(self.arena.allocator(), "{s}", .{name}), }); } if (v_bases != null) { if (v_bases.?.array.items.len > 1) { log.err("multiple inheritance not supported in `{s}`", .{name}); } // generate a non working code on purpose in case of many bases, // because the user must manually fix it for (v_bases.?.array.items) |v_base| { const parent_type = try self.transpileType(typeQualifier(&v_base).?); defer self.allocator.free(parent_type); try self.out.print(" base: {s},\n", .{parent_type}); } try self.out.print("\n", .{}); } var functions = std.ArrayList(u8).init(self.allocator); defer functions.deinit(); const parent_state = self.scope; self.scope = .{ .tag = .class, .name = name, .is_polymorphic = is_polymorphic }; defer self.scope = parent_state; const parent_namespace = self.beginNamespace(); try self.namespace.full_path.appendSlice(parent_namespace.full_path.items); if (!parent_namespace.root) { try self.namespace.full_path.appendSlice("::"); } try self.namespace.full_path.appendSlice(name); var bitfield_type_bytes_curr: ?u32 = null; var bitfield_signed_curr = false; var bitfield_group: u32 = 0; var bitfield_struct_bits_remaining: u32 = 0; for (inner.?.array.items) |*item| { const kind = item.object.getPtr("kind").?.string; // FieldDecls that are implicit shouldn't be skipped. This is things like // anonymous structs. 
const inner_is_field = mem.eql(u8, kind, "FieldDecl"); var is_implicit = false; if (item.object.getPtr("isImplicit")) |implicit| { if (implicit.bool) { is_implicit = true; if (!inner_is_field) { self.nodes_visited += nodeCount(item); continue; } } } if (mem.eql(u8, kind, "FullComment")) { // skip } else if (inner_is_field) { self.nodes_visited += 1; const field_name = if (is_implicit) blk: { const type_name = item.object.getPtr("type").?.object.get("qualType").?.string; const field_type = if (mem.indexOf(u8, type_name, "union at") != null) "union_field" else if (mem.indexOf(u8, type_name, "struct at") != null) "struct_field" else "field"; const field_name_tmp = try fmt.allocPrint(self.arena.allocator(), "__{s}{d}", .{ field_type, self.scope.fields }); self.scope.fields += 1; break :blk field_name_tmp; } else item.object.getPtr("name").?.string; if (item.object.getPtr("isInvalid")) |invalid| { if (invalid.bool) { log.err("invalid field `{s}::{s}`", .{ name, field_name }); continue; } } const item_inner = item.object.getPtr("inner"); const item_type = try self.transpileType(typeQualifier(item).?); defer self.allocator.free(item_type); const bitfield_signed = if (TypeToSignedLUT.has(item_type)) TypeToSignedLUT.get(item_type).? else false; var bitfield_field_bits: u32 = 0; if (item.object.getPtr("isBitfield")) |is_bitfield| { if (!is_bitfield.bool) { // Not sure when this would be true. std.debug.assert(false); } const inner_value_index = 0; // Not sure if this is always 0. const inner_value_elem = item_inner.?.array.items[inner_value_index]; const bitfield_field_bits_str = inner_value_elem.object.getPtr("value").?.string; bitfield_field_bits = try std.fmt.parseInt(u32, bitfield_field_bits_str, 10); const bitfield_type_bytes = if (TypeToByteSizeLUT.has(item_type)) TypeToByteSizeLUT.get(item_type).? else 4; const bitfield_type_bits = bitfield_type_bytes * 8; // TODO: Need to handle 0-length (unnamed) bitfields (used for re-aligning next field) const bitfield_type_size_prev = if (bitfield_type_bytes_curr == null) 0 else bitfield_type_bytes_curr.?; const bitfield_type_size_changed = bitfield_type_size_prev != bitfield_type_bytes; const bitfield_sign_changed = false; // Actually fine to mix I think? bitfield_signed_curr != bitfield_signed; if (bitfield_type_size_changed or bitfield_sign_changed) { // A new bitfield // - or - // Underlying type's size changed, need to start a new bitfield // NOTE: C's behavior of padding when the type and signedness has changed seems tricky and perhaps // not even consistent across platforms/compilers so leaving a warning when it's noticed. if (bitfield_type_bytes_curr != null) { if (bitfield_struct_bits_remaining > 0) { try self.out.print(" /// C2Z WARNING: This perhaps shouldn't be padded in this way! 
\n", .{}); } try self.finalizeBitfield(bitfield_struct_bits_remaining); } bitfield_type_bytes_curr = bitfield_type_bytes; bitfield_signed_curr = bitfield_signed; bitfield_group += 1; try self.startBitfield(bitfield_group, bitfield_type_bits); bitfield_struct_bits_remaining = bitfield_type_bits; } else if (bitfield_struct_bits_remaining < bitfield_field_bits) { // Existing bitfield but new field doesn't fit try self.finalizeBitfield(bitfield_struct_bits_remaining); bitfield_group += 1; try self.startBitfield(bitfield_group, bitfield_type_bits); bitfield_struct_bits_remaining = bitfield_type_bits; } } else if (bitfield_type_bytes_curr != null) { try self.finalizeBitfield(bitfield_struct_bits_remaining); bitfield_type_bytes_curr = null; } try self.writeDocs(item_inner); const field_type = switch (bitfield_type_bytes_curr != null) { true => blk: { bitfield_struct_bits_remaining -= bitfield_field_bits; break :blk try self.addBitfieldField(bitfield_signed, bitfield_field_bits); }, false => try self.transpileType(typeQualifier(item).?), }; defer self.allocator.free(field_type); try self.out.print(" {s}: {s}", .{ field_name, field_type }); // field default value if (item_inner != null and bitfield_type_bytes_curr == null) { var value_exp = std.ArrayList(u8).init(self.allocator); defer value_exp.deinit(); const out = self.out; self.out = value_exp.writer(); for (item_inner.?.array.items) |*item_inner_item| { try self.visit(item_inner_item); } self.out = out; if (value_exp.items.len > 0) { try self.out.print(" = {s}", .{value_exp.items}); } } if (bitfield_type_bytes_curr != null) { try self.out.print(", // {d} bits\n", .{bitfield_type_bytes_curr.? * 8 - bitfield_struct_bits_remaining}); } else { try self.out.print(",\n", .{}); } } else if (mem.eql(u8, kind, "CXXMethodDecl")) { if (!self.public) continue; const out = self.out; self.out = functions.writer(); try self.visitCXXMethodDecl(item, name); self.out = out; } else if (mem.eql(u8, kind, "EnumDecl")) { // nested enums try self.visitEnumDecl(item); } else if (mem.eql(u8, kind, "CXXRecordDecl")) { // nested stucts, classes and unions, mustn't be intermixed with fields. const out = self.out; self.out = functions.writer(); try self.visitCXXRecordDecl(item); self.out = out; } else if (mem.eql(u8, kind, "VarDecl")) { const out = self.out; self.out = functions.writer(); try self.visitVarDecl(item); self.out = out; } else if (mem.eql(u8, kind, "CXXConstructorDecl")) { if (!self.public) continue; const out = self.out; self.out = functions.writer(); try self.visitCXXConstructorDecl(item); self.out = out; } else if (mem.eql(u8, kind, "CXXDestructorDecl")) { if (!self.public) continue; const dtor = if (self.no_glue) item.object.get("mangledName").?.string else try self.mangle("deinit", null); defer { if (!self.no_glue) self.allocator.free(dtor); } var w = functions.writer(); try w.print(" extern fn @\"{s}\"(self: *{s}) void;\n", .{ dtor, name }); try w.print(" pub const deinit = @\"{s}\";\n\n", .{dtor}); if (!self.no_glue) { try self.c_out.print("extern \"C\" void {s}({s} *self) {{ self->~{s}(); }}\n", .{ dtor, self.namespace.full_path.items, self.scope.name.? 
}); } } else if (mem.eql(u8, kind, "AccessSpecDecl")) { const access = item.object.get("access").?.string; self.public = mem.eql(u8, access, "public"); } else if (mem.eql(u8, kind, "FriendDecl")) { const out = self.out; self.out = functions.writer(); try self.visitFriendDecl(item); self.out = out; } else { self.nodes_visited -= 1; log.err("unhandled `{s}` in {s} `{s}`", .{ kind, tag, name }); } } if (bitfield_type_bytes_curr != null) { bitfield_type_bytes_curr = null; try self.finalizeBitfield(bitfield_struct_bits_remaining); } // declarations must be after fields if (functions.items.len > 0) { try self.out.print("\n{s}", .{functions.items}); } try self.endNamespace(parent_namespace); try self.out.print("}};\n\n", .{}); } fn startBitfield(self: *Self, bitfield_group: u32, bitfield_type_bits: u32) !void { try self.out.print(" bitfield_{d}: packed struct(u{d}) {{\n", .{ bitfield_group, bitfield_type_bits }); try self.out.print(" // NOTE: Bitfield generation not guaranteed to work on all platforms, use with caution. \n\n", .{}); } fn addBitfieldField(self: *Self, is_signed: bool, bitfield_field_bits: u32) ![]u8 { const signed_str = if (is_signed) "i" else "u"; return try fmt.allocPrint(self.allocator, "{s}{d}", .{ signed_str, bitfield_field_bits }); } fn finalizeBitfield(self: *Self, bits_remaining: u32) !void { if (bits_remaining > 0) { try self.out.print(" /// Padding added by c2z\n", .{}); try self.out.print(" _dummy_padding: u{d},\n", .{bits_remaining}); } try self.out.print(" }},\n\n", .{}); } fn visitVarDecl(self: *Self, value: *const json.Value) !void { const name = value.object.getPtr("name").?.string; var constant = false; var raw_ty = typeQualifier(value).?; if (mem.startsWith(u8, raw_ty, "const ")) { constant = true; raw_ty = raw_ty["const ".len..]; } const ty = try self.transpileType(raw_ty); defer self.allocator.free(ty); const decl = if (constant) "const" else "var"; const ptr_deco = if (constant) "const" else ""; if (self.scope.tag == .local) { // variable _ = try self.out.write(decl); try self.out.print(" {s}: {s}", .{ name, ty }); if (value.object.getPtr("inner")) |j_inner| { // declaration statement like `int a;` try self.out.print(" = ", .{}); try self.visit(&j_inner.array.items[0]); } self.nodes_visited += 1; return; } self.nodes_visited += nodeCount(value); if (self.no_glue) { const mangled_name = value.object.getPtr("mangledName").?.string; try self.out.print("extern {s} @\"{s}\": {s};\n", .{ decl, mangled_name, ty }); try self.out.print("pub inline fn {s}() *{s} {s} {{ return &@\"{s}\"; }}\n\n", .{ name, ptr_deco, ty, mangled_name }); } else { const mangled_name = try self.mangle(name, null); defer self.allocator.free(mangled_name); try self.out.print("extern {s} {s}: *{s} {s};\n", .{ decl, mangled_name, ptr_deco, ty }); try self.out.print("pub const {s} = {s};\n\n", .{ name, mangled_name }); // alias try self.c_out.print("extern \"C\" const void* {s} = (void*)& ", .{mangled_name}); _ = try self.c_out.write(self.namespace.full_path.items); if (!self.namespace.root) { _ = try self.c_out.write("::"); } _ = try self.c_out.write(name); _ = try self.c_out.write(";\n"); } } fn visitCXXConstructorDecl(self: *Self, value: *const json.Value) !void { const parent = self.scope.name.?; if (value.object.get("isInvalid")) |invalid| { if (invalid.bool) { log.err("invalid ctor of `{s}`", .{parent}); return; } } const sig = parseFnSignature(value).?; if (self.no_glue) { if (value.object.get("constexpr")) |constexpr| { if (constexpr.bool) { log.err("unhandled constexpr ctor of `{s}`", 
.{parent}); return; } } } else { if (sig.is_varidatic) { log.warn("unsupported variadic constructor of `{s}`", .{self.namespace.full_path.items}); return; } } if (sig.is_const) { // todo: what? log.err("constant constructor `{s}`", .{self.namespace.full_path.items}); } self.nodes_visited += 1; if (self.scope.is_polymorphic) { // todo: inline function } else { // default behaviour } // note: if the function has a `= 0` at the end it will have "pure" = true attribute // todo: deal with inlined methods // var inlined = false; // if (value.object.get("inline")) |v_inline| { // inlined = v_inline.bool; // if (inlined) { // // // log.err("unhandled inlined method `{?s}::{s}`", .{ parent, method_name }); // return; // } // } // Docs first const inner_opt = value.object.getPtr("inner"); try self.writeDocs(inner_opt); const mangled_name = if (self.no_glue) value.object.get("mangledName").?.string else try self.mangle("init", if (self.scope.ctors == 0) null else self.scope.ctors + 1); defer { if (!self.no_glue) self.allocator.free(mangled_name); } var ctor_comma = true; try self.out.print("extern fn @\"{s}\"(self: *{s}", .{ mangled_name, parent }); try self.c_out.print("extern \"C\" void {s}({s}* self", .{ mangled_name, self.namespace.full_path.items }); var comma = false; var fn_args = std.ArrayList(u8).init(self.allocator); defer fn_args.deinit(); var z_call = std.ArrayList(u8).init(self.allocator); defer z_call.deinit(); var c_call = std.ArrayList(u8).init(self.allocator); defer c_call.deinit(); // method args if (inner_opt) |inner| { for (inner.array.items, 0..) |*item, i| { self.nodes_visited += 1; const arg_kind = item.object.get("kind").?.string; if (mem.eql(u8, arg_kind, "ParmVarDecl")) { var c_type = typeQualifier(item).?; const z_type = try self.transpileType(c_type); defer self.allocator.free(z_type); var free_arg = false; var arg: []const u8 = undefined; if (item.object.get("name")) |v_item_name| { arg = v_item_name.string; } else { free_arg = true; arg = try fmt.allocPrint(self.allocator, "__arg{d}", .{i}); } defer if (free_arg) self.allocator.free(arg); if (!self.no_glue) { if (comma) try c_call.appendSlice(", "); if (ctor_comma) try self.c_out.print(", ", .{}); try c_call.appendSlice(arg); if (mem.indexOf(u8, c_type, "(*)")) |o| { try self.c_out.print("{s}{s}{s}", .{ c_type[0 .. o + 2], arg, c_type[o + 2 ..] }); } else { try self.c_out.print("{s} {s}", .{ c_type, arg }); } } if (comma) { try fn_args.appendSlice(", "); try z_call.appendSlice(", "); } comma = true; try fn_args.writer().print("{s}: {s}", .{ arg, z_type }); try z_call.writer().print("{s}", .{arg}); if (ctor_comma) try self.out.print(", ", .{}); ctor_comma = true; try self.out.print("{s}: {s}", .{ arg, z_type }); } else if (mem.eql(u8, arg_kind, "FormatAttr")) { // variadic function with the same properties as printf } else if (mem.eql(u8, arg_kind, "CXXCtorInitializer")) { // constructor initializer for a single variable, not interesting here // try self.visitCXXCtorInitializer(item); } else if (mem.eql(u8, arg_kind, "CompoundStmt")) { // try self.visitCompoundStmt(item); } else if (mem.eql(u8, arg_kind, "FullComment")) { // Already handled in writeDocs above } else { self.nodes_visited -= 1; log.err("unhandled `{s}` in ctor `{s}`", .{ arg_kind, parent }); } } } // sig if (sig.is_varidatic) { try self.out.print(", ...) callconv(.C) void;\npub fn init", .{}); } else { try self.out.print(") void;\npub inline fn init", .{}); } if (self.scope.ctors != 0) try self.out.print("{d}", .{self.scope.ctors + 1}); // avoid name conflict try self.out.print("({s}) {s} {{\n", .{ fn_args.items, parent }); // body try self.out.print(" var self: {s} = undefined;\n", .{parent}); try self.out.print(" @\"{s}\"(&self, {s});\n", .{ mangled_name, z_call.items }); try self.out.print(" return self;\n", .{}); try self.out.print("}}\n\n", .{}); if (!self.no_glue) { try self.c_out.print(") {{ new (self) {s}({s}); }}\n", .{ self.namespace.full_path.items, c_call.items }); } self.scope.ctors += 1; }
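// Illustrative sketch of the output (assumed names, not taken from an actual run): for a C++
// constructor `circle_t(float radius)` transpiled at the root namespace, the Zig side gets roughly
//   extern fn @"_1_circle_t_init_"(self: *circle_t, radius: f32) void;
//   pub inline fn init(radius: f32) circle_t { var self: circle_t = undefined; @"_1_circle_t_init_"(&self, radius); return self; }
// while the C++ glue placement-news the object into the caller's storage:
//   extern "C" void _1_circle_t_init_(circle_t* self, float radius) { new (self) circle_t(radius); }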
// fn visitCXXCtorInitializer(self: *Self, value: *const json.Value) !void { // // } fn visitCXXTemporaryObjectExpr(self: *Self, value: *const json.Value) !void { const ty = typeQualifier(value).?; // todo: resolve constructor override try self.out.print("{s}.init(", .{ty}); if (value.object.getPtr("inner")) |inner| { var comma = false; for (inner.array.items) |*entry| { if (comma) try self.out.print(", ", .{}); try self.visit(entry); comma = true; } } else { // todo: figure out when using `.{}` is a valid option } try self.out.print(")", .{}); } fn visitExprWithCleanups(self: *Self, value: *const json.Value) !void { if (value.object.getPtr("inner")) |inner| { const entry = &inner.array.items[0]; try self.visit(entry); } } fn visitMaterializeTemporaryExpr(self: *Self, value: *const json.Value) !void { if (value.object.getPtr("inner")) |inner| { // boundToLValueRef boolean const entry = &inner.array.items[0]; try self.visit(entry); } } fn visitCXXMethodDecl(self: *Self, value: *const json.Value, this_opt: ?[]const u8) !void { const sig = parseFnSignature(value).?; var name = value.object.get("name").?.string; var operator: ?[]const u8 = null; if (mem.startsWith(u8, name, "operator")) { const op = name["operator".len..]; if (op.len > 0 and std.ascii.isAlphanumeric(op[0])) { // just a function starting with operator } else { // todo: implicit casting operator = op; if (mem.eql(u8, op, "[]")) { if (!sig.is_const and mem.endsWith(u8, sig.ret, "&")) { // class[i] = value; name = "getPtr"; } else { // value = class[i]; name = "get"; } } else if (mem.eql(u8, op, "()")) { name = "call"; } else if (mem.eql(u8, op, "==")) { name = "eql"; } else if (mem.eql(u8, op, "!=")) { name = "notEql"; } else if (mem.eql(u8, op, "!")) { name = "not"; } else if (mem.eql(u8, op, "+")) { name = "add"; } else if (mem.eql(u8, op, "-")) { name = "sub"; } else if (mem.eql(u8, op, "*")) { name = "mul"; } else if (mem.eql(u8, op, "/")) { name = "div"; } else if (mem.eql(u8, op, "+=")) { name = "addInto"; } else if (mem.eql(u8, op, "-=")) { name = "subInto"; } else if (mem.eql(u8, op, "*=")) { name = "mulInto"; } else if (mem.eql(u8, op, "/=")) { name = "divInto"; } else if (mem.eql(u8, op, "=")) { // assign name = "copyFrom"; } else if (mem.eql(u8, op, "<")) { name = "lessThan"; } else if (mem.eql(u8, op, "<=")) { name = "lessEqThan"; } else if (mem.eql(u8, op, ">")) { name = "greaterThan"; } else if (mem.eql(u8, op, ">=")) { name = "greaterEqThan"; } else if (mem.eql(u8, op, "==")) { // note: duplicate of the "==" branch above, so this is never reached name = "equalTo"; } else if (mem.eql(u8, op, "<<")) { name = "shiftLeft"; } else if (mem.eql(u8, op, ">>")) { name = "shiftRight"; } else { log.err("unhandled operator `{s}` in `{?s}`", .{ op, this_opt }); return; } } } if (self.no_glue) { if (value.object.get("constexpr")) |constexpr| { if (constexpr.bool) { log.err("unhandled constexpr method `{?s}::{s}`", .{ this_opt, name }); return; } } }
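// Illustrative example of the operator mapping above: a C++ method
// `bool operator==(const vec2_t &rhs) const` is exposed as `eql`, so a C++
// call site `a == b` becomes `a.eql(b)` on the Zig side.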
if (value.object.get("isInvalid")) |invalid| { if (invalid.bool) { log.err("invalid method `{?s}::{s}`", .{ this_opt, name }); return; } } // note: if the function has a `= 0` at the end it will have "pure" = true attribute const method_tret = try self.transpileType(sig.ret); defer self.allocator.free(method_tret); var is_mangled: bool = undefined; var mangled_name: []const u8 = undefined; // template function doesn't have the `mangledName` field if (value.object.get("mangledName")) |v_mangled_name| { mangled_name = v_mangled_name.string; // functions decorated with `extern "C"` won't be mangled is_mangled = !mem.eql(u8, mangled_name, name); } else { mangled_name = name; is_mangled = false; } if (!self.no_glue and is_mangled and sig.is_varidatic) { log.warn("unsupported fn `{s}::{s}`", .{ self.namespace.full_path.items, name }); return; } self.nodes_visited += 1; const overload_opt = try self.namespace.resolveOverloadIndex(name, sig.raw); var has_glue = false; var glue: ?[]u8 = null; if (!self.no_glue and is_mangled) { has_glue = true; glue = try self.mangle(name, overload_opt); mangled_name = glue.?; } defer if (glue != null) self.allocator.free(glue.?); const inner = value.object.getPtr("inner"); var has_body = false; if (inner != null and inner.?.array.items.len > 0) { const item_kind = inner.?.array.items[inner.?.array.items.len - 1].object.get("kind").?.string; has_body = mem.eql(u8, item_kind, "CompoundStmt"); } const out = self.out; var code = std.ArrayList(u8).init(self.allocator); defer code.deinit(); // start fn signature if (has_body) { has_glue = false; self.out = code.writer(); try self.out.print("pub ", .{}); if (value.object.get("inline")) |v_inline| { if (v_inline.bool) try self.out.print("inline ", .{}); } try self.writeDocs(inner); if (overload_opt) |i| { try self.out.print("fn {s}__Overload{d}(", .{ name, i }); } else { try self.out.print("fn {s}(", .{name}); } } else { if (!is_mangled) { try self.writeDocs(inner); try self.out.print("pub ", .{}); } else { if (has_glue) try self.c_out.print("extern \"C\" {s} {s}(", .{ sig.ret, mangled_name }); } try self.out.print("extern fn @\"{s}\"(", .{mangled_name}); } var comma = false; // self param if (this_opt) |this| block: { if (value.object.getPtr("storageClass")) |storage| { if (mem.eql(u8, storage.string, "static")) { // static method doesnt have self param break :block; } } comma = true; if (sig.is_const) { try self.out.print("self: *const {s}", .{this}); if (has_glue) try self.c_out.print("const {s} *self", .{self.namespace.full_path.items}); } else { try self.out.print("self: *{s}", .{this}); if (has_glue) try self.c_out.print("{s}* self", .{self.namespace.full_path.items}); } } var unnamed_buffer: [64]u8 = undefined; var c_call = std.ArrayList(u8).init(self.allocator); defer c_call.deinit(); var body = std.ArrayList(u8).init(self.allocator); defer body.deinit(); var va_args = false; // optional arguments var z_args = std.ArrayList(u8).init(self.allocator); defer z_args.deinit(); var z_opt = std.ArrayList(u8).init(self.allocator); defer z_opt.deinit(); var z_call = std.ArrayList(u8).init(self.allocator); defer z_call.deinit(); // visit parameters then body (if any) if (inner != null) { for (inner.?.array.items, 0..) 
|*item, i| { const kind = item.object.get("kind").?.string; if (mem.eql(u8, kind, "FullComment")) { // skip } else if (mem.eql(u8, kind, "ParmVarDecl")) { self.nodes_visited += 1; if (comma) { try self.out.print(", ", .{}); try z_call.appendSlice(", "); if (has_glue) try self.c_out.print(", ", .{}); } comma = true; // `c_call` doesn't have the same amount of commas as the input function // when it receives a reference to self if (has_glue and c_call.items.len > 0) { try c_call.appendSlice(", "); } var c_type = typeQualifier(item).?; const z_type = try self.transpileType(c_type); defer self.allocator.free(z_type); // check if it requires a second helper function to call it va_args = va_args or mem.eql(u8, c_type, "va_list") or mem.eql(u8, c_type, "__va_list_tag"); const arg_name = if (item.object.get("name")) |n| n.string else try fmt.bufPrint(&unnamed_buffer, "__arg{d}", .{i}); try self.out.print("{s}: {s}", .{ arg_name, z_type }); // default arg const inner_opt = item.object.getPtr("inner"); if (inner_opt == null) { if (z_args.items.len > 0) try z_args.appendSlice(", "); try z_args.appendSlice(arg_name); try z_args.appendSlice(": "); try z_args.appendSlice(z_type); try z_call.appendSlice(arg_name); } else { // default const out2 = self.out; self.out = z_opt.writer(); try self.out.print("{s}: {s} = ", .{ arg_name, z_type }); try self.visit(&inner_opt.?.array.items[0]); try self.out.print(", ", .{}); self.out = out2; // forward args try z_call.appendSlice("__opt."); try z_call.appendSlice(arg_name); } if (has_glue) { if (mem.indexOf(u8, c_type, "(*)")) |o| { try self.c_out.print("{s}{s}{s}", .{ c_type[0 .. o + 2], arg_name, c_type[o + 2 ..] }); } else { try self.c_out.print("{s} {s}", .{ c_type, arg_name }); } try c_call.appendSlice(arg_name); } } else if (mem.eql(u8, kind, "FormatAttr")) { // variadic function with the same properties as printf self.nodes_visited += 1; } else if (mem.eql(u8, kind, "CompoundStmt")) { const out2 = self.out; self.out = body.writer(); try self.visitCompoundStmt(item); self.out = out2; } else { log.err("unhandled `{s}` in function `{?s}.{s}`", .{ kind, this_opt, name }); continue; } } } if (sig.is_varidatic) { if (comma) { try self.out.print(", ", .{}); } try self.out.print("...) callconv(.C) {s}", .{method_tret}); // note: glue doesn't support variadics std.debug.assert(!has_glue); } else { try self.out.print(") {s}", .{method_tret}); if (has_glue) try self.c_out.print(") ", .{}); } // body must be after fields if (has_body) { // todo: optional args try self.out.print(" {s}\n\n", .{body.items}); self.out = out; if (code.items.len > 0) { if (try self.fmtCode(&code)) { // write formatted code ... 
_ = try self.out.write(code.items); } else { // bad code log.err("syntax errors in `{?s}.{s}`", .{ this_opt, name }); _ = try self.out.write("// syntax errors:\n"); try self.writeCommentedCode(code.items); } } } else { try self.out.print(";\n", .{}); if (is_mangled) { try self.writeDocs(inner); if (z_opt.items.len > 0) { try self.out.print("pub fn ", .{}); } else { try self.out.print("pub const ", .{}); } if (overload_opt) |i| { try self.out.print("{s}__Overload{d}", .{ name, i }); } else { try self.out.print("{s}", .{name}); } if (z_opt.items.len > 0) { // optional arguments try self.out.print("(", .{}); if (this_opt) |this| { if (sig.is_const) { try self.out.print("self: *const {s}, ", .{this}); } else { try self.out.print("self: *{s}, ", .{this}); } } if (z_args.items.len > 0) { try self.out.print("{s}, ", .{z_args.items}); } try self.out.print("__opt: struct {{ {s} }},) {s} {{\n", .{ z_opt.items, method_tret }); if (this_opt != null) { try self.out.print(" return @\"{s}\"(self{s});\n", .{ mangled_name, z_call.items }); } else { try self.out.print(" return @\"{s}\"({s});\n", .{ mangled_name, z_call.items }); } try self.out.print("}}\n\n", .{}); } else { try self.out.print(" = @\"{s}\";\n\n", .{mangled_name}); } // glue body if (has_glue) { try self.c_out.print("{{ ", .{}); if (!mem.eql(u8, sig.ret, "void")) try self.c_out.print("return ", .{}); if (this_opt != null) { if (operator != null) { if (mem.eql(u8, operator.?, "[]") or mem.eql(u8, operator.?, "()")) { try self.c_out.print("(*self){c}{s}{c}; }}\n", .{ operator.?[0], c_call.items, operator.?[1] }); } else { try self.c_out.print("*self {s} {s}; }}\n", .{ operator.?, c_call.items }); } } else { try self.c_out.print("self->{s}({s}); }}\n", .{ name, c_call.items }); } } else { if (operator != null) { // cursed way of extending functionality by externally overloading operators var it = mem.split(u8, c_call.items, ", "); const a = it.next().?; const b = it.next().?; if (mem.eql(u8, operator.?, "[]") or mem.eql(u8, operator.?, "()")) { try self.c_out.print("{s}{c}{s}{c}; }}\n", .{ a, operator.?[0], b, operator.?[1] }); } else { try self.c_out.print("{s} {s} {s}; }}\n", .{ a, operator.?, b }); } } else { _ = try self.c_out.write(self.namespace.full_path.items); if (!self.namespace.root) { _ = try self.c_out.write("::"); } try self.c_out.print("{s}({s}); }}\n", .{ name, c_call.items }); } } if (va_args) { // todo: } } } } } fn visitFunctionTemplateDecl(self: *Self, node: *const json.Value) !void { self.nodes_visited += 1; // gather comptime parameters ... 
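// Illustrative example (assumed input): `template <typename T> T max2(T a, T b) { return a > b ? a : b; }`
// is emitted as `pub fn max2(comptime T: type, a: T, b: T) T { ... }`, with the type
// parameter becoming a leading `comptime` argument.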
var cp = std.ArrayList(u8).init(self.allocator); defer cp.deinit(); const inner = node.object.getPtr("inner").?; try self.writeDocs(inner); var comma = false; const name = node.object.getPtr("name").?.string; for (inner.array.items) |*item| { const kind = item.object.getPtr("kind").?.string; if (mem.eql(u8, kind, "TemplateTypeParmDecl")) { if (comma) try self.out.print(", ", .{}); comma = true; const out = self.out; self.out = cp.writer(); try self.visitTemplateTypeParmDecl(item, name); self.out = out; } else if (mem.eql(u8, kind, "FunctionDecl")) { self.nodes_visited += 1; const f_inner = item.object.get("inner"); if (f_inner == null) { log.err("`FunctionTemplateDecl` `{s}` with empty inner body", .{name}); return; } const f_items = f_inner.?.array.items; const found_compound_stmt = blk: { for (f_items) |f_item| { const f_item_kind = f_item.object.getPtr("kind").?.string; if (mem.eql(u8, f_item_kind, "CompoundStmt")) { break :blk true; } } break :blk false; }; if (!found_compound_stmt) { log.err("`FunctionTemplateDecl` `{s}` without `CompoundStmt`", .{name}); return; } const sig = parseFnSignature(item).?; const method_name = item.object.get("name").?.string; const ret = try self.transpileType(sig.ret); defer self.allocator.free(ret); try self.out.print("pub ", .{}); if (item.object.get("inline")) |v_inline| { if (v_inline.bool) try self.out.print("inline ", .{}); } try self.out.print("fn {s}({s}", .{ method_name, cp.items }); var body = std.ArrayList(u8).init(self.allocator); defer body.deinit(); var unused_arg_names = try std.ArrayList(u8).initCapacity(self.allocator, 16); defer unused_arg_names.deinit(); for (f_items, 0..) |*f_item, i_item| { const arg_kind = f_item.object.get("kind").?.string; if (mem.eql(u8, arg_kind, "ParmVarDecl")) { // todo: obsolete self.nodes_visited += 1; const ty = f_item.object.get("type").?; const qual = ty.object.get("qualType").?.string; if (comma) { try self.out.print(", ", .{}); } comma = true; const arg_name_opt = f_item.object.get("name"); const arg_name = blk: { if (arg_name_opt) |an| break :blk an.string else { unused_arg_names.resize(0) catch unreachable; try unused_arg_names.writer().print("arg_{}", .{i_item}); break :blk unused_arg_names.items; } }; const arg_type = try self.transpileType(qual); defer self.allocator.free(arg_type); try self.out.print("{s}: {s}", .{ arg_name, arg_type }); } else if (mem.eql(u8, arg_kind, "FormatAttr")) { // varidatic function with the same properties as printf self.nodes_visited += 1; } else if (mem.eql(u8, arg_kind, "CompoundStmt")) { const tmp = self.out; self.out = body.writer(); try self.visitCompoundStmt(f_item); self.out = tmp; break; } else { log.err("unhandled `FunctionDecl` item `{s}` in `FunctionTemplateDecl` `{s}`", .{ arg_kind, method_name }); } } if (sig.is_varidatic) { if (comma) { try self.out.print(", ", .{}); } try self.out.print("...) 
{s}", .{ret}); } else { try self.out.print(") {s}", .{ret}); } try self.out.print(" {s}\n\n", .{body.items}); return; } else { log.err("unhandled item `{s}` in `FunctionTemplateDecl` `{s}`", .{ kind, name }); } } log.err("`FunctionTemplateDecl` `{s}` without `FunctionDecl`", .{name}); } fn visitEnumDecl(self: *Self, node: *const json.Value) !void { if (self.shouldSkip(node)) { self.nodes_visited += nodeCount(node); return; } var name: []const u8 = undefined; if (node.object.getPtr("name")) |v| { name = v.string; } else { // todo: handle unamed enumerations that aren't inside a typedef like these `enum { FPNG_ENCODE_SLOWER = 1, FPNG_FORCE_UNCOMPRESSED = 2, };` // referenced by someone else const id = try std.fmt.parseInt(u64, node.object.getPtr("id").?.string, 0); _ = try self.namespace.unnamed_nodes.put(id, node.*); return; } const inner = node.object.getPtr("inner"); if (inner == null) { // e.g. `enum ImGuiKey : int;` try self.namespace.opaques.put(name, undefined); return; } self.nodes_visited += 1; // remove opaque if any _ = self.namespace.opaques.swapRemove(name); try self.writeDocs(inner); // todo: use "fixedUnderlyingType" or figure out the type by himself try self.out.print("pub const {s} = extern struct {{\n", .{name}); try self.out.print(" bits: c_int = 0,\n\n", .{}); var variant_prev: ?[]const u8 = null; var variant_counter: usize = 0; for (inner.?.array.items) |*item| { if (item.object.getPtr("isImplicit")) |is_implicit| { if (is_implicit.bool) { self.nodes_visited += nodeCount(item); continue; } } const kind = item.object.getPtr("kind").?.string; if (mem.eql(u8, kind, "FullComment")) { // skip } else if (mem.eql(u8, kind, "EnumConstantDecl")) { const decl_inner = item.object.getPtr("inner"); try self.writeDocs(decl_inner); const variant_name = resolveEnumVariantName(name, item.object.get("name").?.string); try self.out.print(" pub const {s}: {s}", .{ variant_name, name }); // transpile enum value try self.out.print(" = .{{ .bits = ", .{}); var value = std.ArrayList(u8).init(self.allocator); defer value.deinit(); // try to generate the variant value if (decl_inner != null) { const out = self.out; self.out = value.writer(); for (decl_inner.?.array.items) |*body_item| { try self.visit(body_item); } self.out = out; } if (value.items.len > 0) { _ = try self.out.write(value.items); variant_prev = variant_name; variant_counter = 1; } else { if (variant_prev) |n| { try self.out.print("{s}.{s}.bits + {d}", .{ name, n, variant_counter }); } else { try self.out.print("{d}", .{variant_counter}); } variant_counter += 1; } try self.out.print(" }};\n", .{}); } else { log.err("unhandled `{s}` in enum `{s}`", .{ kind, name }); continue; } self.nodes_visited += 1; } try self.out.print("\n // pub usingnamespace cpp.FlagsMixin({s});\n", .{name}); try self.out.print("}};\n\n", .{}); } fn visitTypedefDecl(self: *Self, value: *const json.Value) !void { if (self.shouldSkip(value)) { self.nodes_visited += nodeCount(value); return; } self.nodes_visited += 1; const name = value.object.get("name").?.string; const v_inner_opt = value.object.getPtr("inner"); try self.writeDocs(v_inner_opt); if (v_inner_opt) |v_inner| { const v_item = &v_inner.array.items[0]; const tag = v_item.object.get("kind").?.string; if (mem.eql(u8, tag, "BuiltinType") or mem.eql(u8, tag, "TypedefType")) { // type alias self.nodes_visited += nodeCount(v_item); } else if (mem.eql(u8, tag, "ElaboratedType")) { // c style simplified struct definition if (v_item.object.get("ownedTagDecl")) |v_owned| { const id_name = 
v_owned.object.get("id").?.string; const id = try std.fmt.parseInt(u64, id_name, 0); if (self.namespace.unnamed_nodes.getPtr(id)) |node| { const n_tag = node.object.getPtr("kind").?.string; if (mem.eql(u8, n_tag, "CXXRecordDecl")) { self.nodes_visited += 1; var object = try node.object.clone(); defer object.deinit(); // rename the object _ = try object.put("name", json.Value{ .string = name }); // todo: impl the union or struct or whatever using `name` try self.visitCXXRecordDecl(&json.Value{ .object = object }); } else if (mem.eql(u8, n_tag, "EnumDecl")) { self.nodes_visited += 1; var object = try node.object.clone(); defer object.deinit(); // rename the object _ = try object.put("name", json.Value{ .string = name }); // todo: impl the union or struct or whatever using `name` try self.visitEnumDecl(&json.Value{ .object = object }); } else { log.err("unhandled `ElaboratedType` `{s}` in typedef `{s}`", .{ n_tag, name }); } // remove used node _ = self.namespace.unnamed_nodes.remove(id); } else { // currenly been triggered by: `typedef struct Point { float x, y; } Point;` self.nodes_visited += nodeCount(v_item); log.warn("missing node `{s}` of `ElaboratedType` in typedef `{s}`", .{ id_name, name }); } return; } else { // other kind of type alias // todo: use the inner "RecordType" self.nodes_visited += nodeCount(v_item); } } else if (mem.eql(u8, tag, "PointerType")) { // note: sadly the clang will remove type names from function pointers, but is one thing less to deal with self.nodes_visited += nodeCount(v_item); } else if (mem.eql(u8, tag, "TemplateTypeParmType")) { self.nodes_visited += 1; try self.out.print("pub const {s} = {s};\n\n", .{ name, typeQualifier(v_item).? }); return; } else if (mem.eql(u8, tag, "ParenType")) { self.nodes_visited += 1; const parentype_inner_opt = v_inner.array.items[0].object.getPtr("inner"); if (parentype_inner_opt) |parentype_inner| { const parentype_inner_kind = parentype_inner.array.items[0].object.get("kind").?.string; if (mem.eql(u8, parentype_inner_kind, "FunctionProtoType")) { try self.visitFunctionProtoType(name, &parentype_inner.array.items[0]); } } return; } else { log.err("unhandled `{s}` in typedef `{s}`", .{ tag, name }); return; } // default type alias behaviour const type_alised = try self.transpileType(typeQualifier(v_item).?); defer self.allocator.free(type_alised); try self.out.print("pub const {s} = {s};\n\n", .{ name, type_alised }); } } fn visitFunctionProtoType(self: *Self, name: []const u8, value: *const json.Value) !void { self.nodes_visited += 1; try self.out.print("pub const {s} = fn(", .{name}); var return_type_opt: ?*const json.Value = null; const inner_opt = value.object.getPtr("inner"); if (inner_opt) |inner| { for (inner.array.items, 0..) 
|*item, i| { if (return_type_opt == null) { return_type_opt = item; continue; } const zig_type = try self.transpileType(typeQualifier(item).?); defer self.allocator.free(zig_type); try self.out.print("{s}", .{zig_type}); if (i + 1 < inner.array.items.len) { try self.out.print(", ", .{}); } } } if (return_type_opt) |return_type| { const zig_type = try self.transpileType(typeQualifier(return_type).?); defer self.allocator.free(zig_type); try self.out.print(") callconv(.C) {s};\n\n", .{zig_type}); } else { try self.out.print(") callconv(.C) {s};\n\n", .{"void"}); } } fn visitNamespaceDecl(self: *Self, value: *const json.Value) !void { if (self.shouldSkip(value)) { self.nodes_visited += nodeCount(value); return; } self.nodes_visited += 1; const v_name = value.object.get("name"); const inner = value.object.get("inner"); if (inner == null) { if (v_name) |name| { log.warn("empty namespace `{s}`", .{name.string}); } else { // super cursed edge case log.warn("empty and unamed namespace", .{}); } return; } // todo: namespace merging const parent_namespace = self.beginNamespace(); if (v_name) |name| { try self.namespace.full_path.appendSlice(parent_namespace.full_path.items); if (!parent_namespace.root) { try self.namespace.full_path.appendSlice("::"); } try self.namespace.full_path.appendSlice(name.string); try self.out.print("pub const {s} = struct {{\n", .{name.string}); } for (inner.?.array.items) |*item| { try self.visit(item); } try self.endNamespace(parent_namespace); if (v_name) |_| { try self.out.print("}};\n\n", .{}); } } inline fn visitFunctionDecl(self: *Self, value: *const json.Value) !void { // a function is a method without a parent struct return self.visitCXXMethodDecl(value, null); } fn visitTemplateTypeParmDecl(self: *Self, node: *const json.Value, this: []const u8) !void { self.nodes_visited += 1; const name = node.object.get("name"); if (name == null) { log.err("unnamed `TemplateTypeParmDecl` in `{s}`", .{this}); return; } if (node.object.get("tagUsed")) |tag| { if (mem.eql(u8, tag.string, "typename")) { try self.out.print("comptime {s}: type", .{name.?.string}); return; } } try self.out.print("comptime {s}: anytype", .{name.?.string}); } fn visitNonTypeTemplateParmDecl(self: *Self, node: *const json.Value, this: []const u8) !void { self.nodes_visited += 1; const name = node.object.get("name"); if (name == null) { log.err("unnamed `NonTypeTemplateParmDecl` in `{s}`", .{this}); return; } if (typeQualifier(node)) |c_type| { const ty = try self.transpileType(c_type); defer self.allocator.free(ty); try self.out.print("comptime {s}: {s}", .{ name.?.string, ty }); return; } try self.out.print("comptime {s}: anytype", .{name.?.string}); } fn visitClassTemplateDecl(self: *Self, value: *const json.Value) !void { if (self.shouldSkip(value)) { self.nodes_visited += nodeCount(value); return; } self.nodes_visited += 1; var name: []const u8 = undefined; if (value.object.get("name")) |v| { name = v.string; } else { self.nodes_visited -= 1; log.err("unnamed `ClassTemplateDecl`", .{}); return; } const inner = value.object.get("inner"); if (inner == null) { log.warn("generic opaque `{s}`", .{name}); return; } // pub fn Generic(comptime T: type, ...) 
{ // return struct { // }; // }; try self.out.print("pub fn {s}(", .{name}); var functions = std.ArrayList(u8).init(self.allocator); defer functions.deinit(); const parent_state = self.scope; self.scope = .{ .tag = .class, .name = "Self" }; defer self.scope = parent_state; // template param var comma = false; for (inner.?.array.items) |*item| { const item_kind = item.object.get("kind").?.string; if (mem.eql(u8, item_kind, "TemplateTypeParmDecl")) { if (comma) try self.out.print(", ", .{}); comma = true; try self.visitTemplateTypeParmDecl(item, name); } else if (mem.eql(u8, item_kind, "NonTypeTemplateParmDecl")) { if (comma) try self.out.print(", ", .{}); comma = true; try self.visitNonTypeTemplateParmDecl(item, name); } else if (mem.eql(u8, item_kind, "CXXRecordDecl")) { self.nodes_visited += 1; // template definition try self.out.print(") type {{\n    return extern struct {{\n", .{}); try self.out.print(" const Self = @This();\n\n", .{}); const inner_inner = item.object.get("inner"); if (inner_inner == null) { log.warn("blank `{s}` template", .{name}); return; } const parent_namespace = self.beginNamespace(); try self.namespace.full_path.appendSlice(parent_namespace.full_path.items); if (!parent_namespace.root) { try self.namespace.full_path.appendSlice("::"); } try self.namespace.full_path.appendSlice(name); for (inner_inner.?.array.items) |*inner_item| { self.nodes_visited += 1; const inner_item_kind = inner_item.object.get("kind").?.string; if (mem.eql(u8, inner_item_kind, "CXXRecordDecl")) { // class or struct } else if (mem.eql(u8, inner_item_kind, "FieldDecl")) { const field_name = inner_item.object.get("name").?.string; const field_type = try self.transpileType(typeQualifier(inner_item).?); defer self.allocator.free(field_type); try self.out.print(" {s}: {s},\n", .{ field_name, field_type }); } else if (mem.eql(u8, inner_item_kind, "CXXMethodDecl")) { const tmp = self.out; self.out = functions.writer(); try self.visitCXXMethodDecl(inner_item, "Self"); self.out = tmp; } else if (mem.eql(u8, inner_item_kind, "TypedefDecl")) { const out = self.out; self.out = functions.writer(); try self.visitTypedefDecl(inner_item); self.out = out; } else { self.nodes_visited -= 1; log.err("unhandled `{s}` in template `{s}`", .{ inner_item_kind, name }); } } // declarations must be after fields if (functions.items.len > 0) { try self.out.print("\n{s}", .{functions.items}); } try self.endNamespace(parent_namespace); try self.out.print(" }};\n}}\n\n", .{}); return; } else { log.err("unhandled `{s}` in template `{s}`", .{ item_kind, name }); } } } fn visitCompoundStmt(self: *Self, value: *const json.Value) !void { self.nodes_visited += 1; try self.out.print("{{", .{}); const inner_opt = value.object.get("inner"); if (inner_opt) |inner| { try self.out.print("\n", .{}); const scope = self.scope; defer self.scope = scope; self.scope = .{ .tag = .local, .name = null, }; self.semicolon = true; for (inner.array.items) |*item| { try self.visit(item); if (self.semicolon) { _ = try self.out.write(";\n"); } else { // reset self.semicolon = true; } } } try self.out.print("}}", .{}); } fn visitIfStmt(self: *Self, value: *const json.Value) !void { const j_inner = value.object.getPtr("inner").?; try self.out.print(" if (", .{}); try self.visit(&j_inner.array.items[0]); try self.out.print(") ", .{}); var body = &j_inner.array.items[1]; try self.visit(body); if (if (value.object.getPtr("hasElse")) |j_else| j_else.bool else false) { try self.out.print(" else ", .{}); body = &j_inner.array.items[2]; try self.visit(body); } 
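// Illustrative example: `if (x) foo(); else bar();` is emitted roughly as
// ` if (x) foo() else bar()`; the trailing semicolon is appended by the
// enclosing CompoundStmt visitor when `self.semicolon` is left set.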
// don't print a semicolon when the if/else is guarded with braces `if { ... }` const body_kind = body.object.getPtr("kind").?.string; self.semicolon = !(mem.eql(u8, body_kind, "CompoundStmt") or mem.eql(u8, body_kind, "ForStmt") or mem.eql(u8, body_kind, "IfStmt")); self.nodes_visited += 1; } fn visitForStmt(self: *Self, value: *const json.Value) !void { const j_inner = value.object.getPtr("inner").?; // var declarations var braces = false; var vars = &j_inner.array.items[0]; if (vars.object.count() != 0) { // note: extra braces required for nested loops and to avoid variable shadowing braces = true; try self.out.print("{{ ", .{}); try self.visit(vars); try self.out.print("; ", .{}); } // todo: handle node at index == `1` try self.out.print("while (", .{}); try self.visit(&j_inner.array.items[2]); try self.out.print(")", .{}); var exp = &j_inner.array.items[3]; if (exp.object.count() != 0) { const exp_kind = exp.object.getPtr("kind").?.string; if (mem.eql(u8, exp_kind, "UnaryOperator") or mem.eql(u8, exp_kind, "CompoundAssignOperator") or (mem.eql(u8, exp_kind, "BinaryOperator") and !mem.eql(u8, exp.object.getPtr("opcode").?.string, ","))) { // not a ',' operator, so it doesn't require a scope try self.out.print(" : (", .{}); try self.visit(exp); try self.out.print(")", .{}); } else { // default behaviour try self.out.print(" : ({{ ", .{}); try self.visit(exp); try self.out.print("; }})", .{}); } } var body = &j_inner.array.items[4]; try self.visit(body); // don't print a semicolon when the body is guarded with braces `for (...) { ... }` const body_kind = body.object.getPtr("kind").?.string; self.semicolon = !(mem.eql(u8, body_kind, "CompoundStmt") or mem.eql(u8, body_kind, "ForStmt") or mem.eql(u8, body_kind, "IfStmt")); if (braces) { if (self.semicolon) try self.out.print("; ", .{}); self.semicolon = false; try self.out.print("}} ", .{}); } self.nodes_visited += 1; }
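// Illustrative example: `for (int i = 0; i < 5; i++) foo(i);` becomes
// `{ var i: c_int = 0; while (i < 5) : (i += 1) foo(i); }` (the extra braces
// keep the induction variable from shadowing an outer declaration).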
fn visitWhileStmt(self: *Self, node: *const json.Value) !void { const j_inner = node.object.getPtr("inner").?; try self.out.print("while (", .{}); const exp = &j_inner.array.items[0]; try self.visit(exp); try self.out.print(")", .{}); var body = &j_inner.array.items[1]; try self.visit(body); // don't print a semicolon when the body is guarded with braces `while (...) { ... }` self.semicolon = !mem.eql(u8, body.object.getPtr("kind").?.string, "CompoundStmt"); self.nodes_visited += 1; } fn visitReturnStmt(self: *Self, value: *const json.Value) !void { self.nodes_visited += 1; const v_inner = value.object.get("inner"); if (v_inner == null) { try self.out.print("return", .{}); return; } _ = try self.out.write("return "); // todo: must check if it is returning an aliased pointer; if so, it must add the reference operator '&' try self.visit(&v_inner.?.array.items[0]); } fn visitBinaryOperator(self: *Self, node: *const json.Value) !void { const inner = node.object.getPtr("inner").?; const opcode = node.object.getPtr("opcode").?.string; // todo: transpile `a = b = c;` into `b = c; a = b;` but can't ignore all casts try self.visit(&inner.array.items[0]); if (mem.eql(u8, opcode, "||")) { try self.out.print(" or ", .{}); } else if (mem.eql(u8, opcode, "&&")) { try self.out.print(" and ", .{}); } else if (mem.eql(u8, opcode, ",")) { // yep this is a thing used inside some loops `for(...;...; i++, j++)` // ^^^^^^^^ binary operator "," try self.out.print("; ", .{}); } else { try self.out.print(" {s} ", .{opcode}); } try self.visit(&inner.array.items[1]); self.nodes_visited += 1; } fn visitCompoundAssignOperator(self: *Self, node: *const json.Value) !void { const inner = node.object.getPtr("inner").?; const opcode = node.object.getPtr("opcode").?.string; // todo: transpile `a = b = c;` into `b = c; a = b;` but can't ignore all casts try self.visit(&inner.array.items[0]); try self.out.print(" {s} ", .{opcode}); try self.visit(&inner.array.items[1]); self.nodes_visited += 1; } fn visitImplicitCastExpr(self: *Self, value: *const json.Value) !void { self.nodes_visited += 1; const kind = value.object.getPtr("castKind").?.string; if (mem.eql(u8, kind, "IntegralToBoolean")) { try self.out.print("((", .{}); try self.visit(&value.object.getPtr("inner").?.array.items[0]); try self.out.print(") != 0)", .{}); return; } else if (mem.eql(u8, kind, "LValueToRValue") or mem.eql(u8, kind, "NoOp")) { // these casts carry no meaning in the generated Zig; emit the inner expression as-is
try self.visit(&value.object.getPtr("inner").?.array.items[0]); return; } else if (mem.eql(u8, kind, "FunctionToPointerDecay")) { // todo: figure out when a conversion is really needed try self.visit(&value.object.getPtr("inner").?.array.items[0]); return; } else if (mem.eql(u8, kind, "ToVoid")) { // todo: casting to void is a C idiom for evaluating an expression solely for its side effects; // https://godbolt.org/z/45xYqaz37 shows that the expression must still be evaluated even in release builds try self.out.print("_ = (", .{}); try self.visit(&value.object.getPtr("inner").?.array.items[0]); try self.out.print(");\n", .{}); return; } const dst = try self.transpileType(typeQualifier(value).?); defer self.allocator.free(dst); if (mem.eql(u8, kind, "BitCast")) { if (mem.startsWith(u8, dst, "*") or mem.startsWith(u8, dst, "[*c]")) { try self.out.print("@as({s}, @ptrCast(", .{dst}); } else { try self.out.print("@as({s}, @bitCast(", .{dst}); } try self.visit(&value.object.getPtr("inner").?.array.items[0]); try self.out.print(")", .{}); } else if (mem.eql(u8, kind, "IntegralCast")) { try self.out.print("@as({s}, @intCast(", .{dst}); try self.visit(&value.object.getPtr("inner").?.array.items[0]); try self.out.print(")", .{}); } else if (mem.eql(u8, kind, "NullToPointer")) { self.nodes_visited += 1; try self.out.print("null", .{}); return; } else if (mem.eql(u8, kind, "IntegralToFloating")) { try self.out.print("@floatFromInt(", .{}); try self.visit(&value.object.getPtr("inner").?.array.items[0]); } else if (mem.eql(u8, kind, "ArrayToPointerDecay")) { try self.out.print("&", .{}); try self.visit(&value.object.getPtr("inner").?.array.items[0]); return; } else if (mem.eql(u8, kind, "PointerToBoolean")) { try self.visit(&value.object.getPtr("inner").?.array.items[0]); try self.out.print(" != null", .{}); return; } else { log.warn("unknown `{s}` cast", .{kind}); try self.out.print("@as({s}, ", .{dst}); try self.visit(&value.object.getPtr("inner").?.array.items[0]); } try self.out.print(")", .{}); } fn visitMemberExpr(self: *Self, node: *const json.Value) !void { self.nodes_visited += 1; const target = &node.object.getPtr("inner").?.array.items[0]; try self.visit(target); const name = node.object.getPtr("name").?.string; try self.out.print(".{s}", .{name}); } fn visitCXXDependentScopeMemberExpr(self: *Self, node: *const json.Value) !void { self.nodes_visited += 1; const target = &node.object.getPtr("inner").?.array.items[0]; try self.visit(target); const member = node.object.getPtr("member").?.string; try self.out.print(".{s}", .{member}); } fn visitIntegerLiteral(self: *Self, value: *const json.Value) !void { const literal = value.object.getPtr("value").?.string; _ = try self.out.write(literal); self.nodes_visited += 1; } fn visitFloatingLiteral(self: *Self, value: *const json.Value) !void { const literal = value.object.getPtr("value").?.string; _ = try self.out.write(literal); self.nodes_visited += 1; } inline fn visitCStyleCastExpr(self: *Self, value: *const json.Value) !void { return self.visitImplicitCastExpr(value); } fn visitArraySubscriptExpr(self: *Self, value: *const json.Value) !void { self.nodes_visited += 1; var v_inner = value.object.get("inner"); try self.visit(&v_inner.?.array.items[0]); try self.out.print("[", .{}); try self.visit(&v_inner.?.array.items[1]); try self.out.print("]", .{}); }
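// Illustrative example for the visitor below: `sizeof(vec2_t)` is emitted as `@sizeOf(vec2_t)`.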
(value.object.getPtr("argType")) |j_ty| { // simple type name const size_of = try self.transpileType(j_ty.object.get("qualType").?.string); defer self.allocator.free(size_of); try self.out.print("@sizeOf({s})", .{size_of}); } else { // complex expression like a template type parameter _ = try self.out.write("@sizeOf"); try self.visit(&value.object.getPtr("inner").?.array.items[0]); } } else { log.err("unknonw `UnaryExprOrTypeTraitExpr` `{s}`", .{name}); return; } self.nodes_visited += 1; } fn visitDeclRefExpr(self: *Self, value: *const json.Value) !void { const j_ref = value.object.getPtr("referencedDecl").?; const kind = j_ref.object.getPtr("kind").?.string; if (mem.eql(u8, kind, "ParmVarDecl") or mem.eql(u8, kind, "FunctionDecl") or mem.eql(u8, kind, "VarDecl") or mem.eql(u8, kind, "NonTypeTemplateParmDecl")) { const name = j_ref.object.get("name").?.string; _ = try self.out.write(name); } else if (mem.eql(u8, kind, "EnumConstantDecl")) { const base = typeQualifier(j_ref).?; const variant = resolveEnumVariantName(base, j_ref.object.get("name").?.string); try self.out.print("{s}.{s}.bits", .{ base, variant }); } else { log.err("unhandled `{s}` in `DeclRefExpr`", .{kind}); return; } self.nodes_visited += 1; } fn visitParenExpr(self: *Self, value: *const json.Value) !void { const rvalue = typeQualifier(value).?; if (mem.eql(u8, rvalue, "void")) { // inner expression results in nothing try self.visit(&value.object.get("inner").?.array.items[0]); } else { try self.out.print("(", .{}); try self.visit(&value.object.get("inner").?.array.items[0]); try self.out.print(")", .{}); } self.nodes_visited += 1; } fn visitUnaryOperator(self: *Self, value: *const json.Value) !void { const opcode = value.object.getPtr("opcode").?.string; if (mem.eql(u8, opcode, "*")) { // deref const exp = &value.object.get("inner").?.array.items[0]; const exp_kind = exp.object.getPtr("kind").?.string; // handles the case of `*data++`, is still worng but is easer to see why const parentheses = mem.eql(u8, exp_kind, "UnaryOperator"); if (parentheses) try self.out.print("(", .{}); try self.visit(exp); if (parentheses) try self.out.print(")", .{}); try self.out.print(".*", .{}); } else if (mem.eql(u8, opcode, "++")) { try self.visit(&value.object.get("inner").?.array.items[0]); try self.out.print(" += 1", .{}); } else if (mem.eql(u8, opcode, "--")) { try self.visit(&value.object.get("inner").?.array.items[0]); try self.out.print(" -= 1", .{}); } else if (mem.eql(u8, opcode, "+")) { // don't print "+360" try self.visit(&value.object.get("inner").?.array.items[0]); } else { // note: any special cases should be handled with ifelse branches try self.out.print("{s}", .{opcode}); try self.visit(&value.object.get("inner").?.array.items[0]); } self.nodes_visited += 1; } fn visitCallExpr(self: *Self, value: *const json.Value) !void { self.nodes_visited += 1; const inner = value.object.getPtr("inner").?; const callee = &inner.array.items[0]; const kind = callee.object.getPtr("kind").?.string; if (mem.eql(u8, kind, "UnresolvedLookupExpr")) { self.nodes_visited += 1; const loopups = callee.object.getPtr("lookups").?.array.items; if (loopups.len > 1) { // todo: resolve the callee from the lookup table log.warn("unresolved `CallExpr` callee `{s}`", .{callee.object.getPtr("name").?.string}); } // just take the frist one const entry = &loopups[0]; const entry_kind = entry.object.getPtr("kind").?.string; if (mem.eql(u8, entry_kind, "FunctionDecl")) { _ = try self.out.write(entry.object.getPtr("name").?.string); } else if (mem.eql(u8, 
entry_kind, "FunctionTemplateDecl")) { // todo: outpu generic parameters _ = try self.out.write(entry.object.getPtr("name").?.string); } else { log.err("unhandled loopup entry `{s}` in `CallExpr` `{s}`", .{ entry_kind, callee.object.getPtr("name").?.string }); return; } } else { try self.visit(callee); } _ = try self.out.write("("); // args const count = inner.array.items.len; for (1..count) |i| { try self.visit(&inner.array.items[i]); if (i != count - 1) { _ = try self.out.write(", "); } } _ = try self.out.write(")"); } fn visitCXXMemberCallExpr(self: *Self, value: *const json.Value) !void { self.nodes_visited += 1; const j_inner = value.object.getPtr("inner").?; const j_member = &j_inner.array.items[0]; const kind = j_member.object.getPtr("kind").?.string; if (mem.eql(u8, kind, "MemberExpr")) { const name = j_member.object.getPtr("name").?.string; try self.out.print("self.{s}", .{name}); self.nodes_visited += nodeCount(j_member); } else { log.err("unknown `{s}` in `CXXMemberCallExpr`", .{kind}); return; } _ = try self.out.write("("); // args const count = j_inner.array.items.len; for (1..count) |i| { try self.visit(&j_inner.array.items[i]); if (i != count - 1) { _ = try self.out.write(", "); } } _ = try self.out.write(")"); } fn visitCXXPseudoDestructorExpr(self: *Self, node: *const json.Value) !void { self.nodes_visited += 1; const items = node.object.getPtr("inner").?.array.items; try self.visit(&items[0]); _ = try self.out.write(".deinit"); } fn visitCXXThisExpr(self: *Self, _: *const json.Value) !void { self.nodes_visited += 1; try self.out.print("self", .{}); } fn visitConstantExpr(self: *Self, value: *const json.Value) !void { self.nodes_visited += 1; for (value.object.get("inner").?.array.items) |j_stmt| { // note: any special cases should be handled with ifelse branches try self.visit(&j_stmt); } } fn visitCXXBoolLiteralExpr(self: *Self, value: *const json.Value) !void { self.nodes_visited += 1; try self.out.print("{}", .{value.object.getPtr("value").?.bool}); } fn visitDeclStmt(self: *Self, node: *const json.Value) !void { self.nodes_visited += 1; if (node.object.getPtr("inner")) |decls| { // declaration statement like `int a, b, c;` const last = decls.array.items.len - 1; for (decls.array.items, 0..) 
fn visitDeclStmt(self: *Self, node: *const json.Value) !void { self.nodes_visited += 1; if (node.object.getPtr("inner")) |decls| { // declaration statement like `int a, b, c;` const last = decls.array.items.len - 1; for (decls.array.items, 0..) |*decl, i| { try self.visit(decl); if (last != i) { try self.out.print(";\n", .{}); } } } } fn visitCXXNullPtrLiteralExpr(self: *Self, _: *const json.Value) !void { self.nodes_visited += 1; _ = try self.out.write("null"); } fn visitCXXOperatorCallExpr(self: *Self, node: *const json.Value) !void { // A call to an overloaded operator self.nodes_visited += 1; //var name = node.object.get("name").?.string; var inner = node.object.getPtr("inner").?.array.items; // ignore implicit casts var op = &inner[0]; var op_kind = op.object.getPtr("kind").?.string; while (mem.eql(u8, op_kind, "ImplicitCastExpr")) { self.nodes_visited += 1; op = &op.object.getPtr("inner").?.array.items[0]; op_kind = op.object.getPtr("kind").?.string; } // figure out what operator function to call var op_name: []const u8 = "???"; var deref = false; if (mem.eql(u8, op_kind, "DeclRefExpr")) { self.nodes_visited += 1; const ref = op.object.getPtr("referencedDecl").?; const ref_kind = ref.object.getPtr("kind").?.string; if (mem.eql(u8, ref_kind, "CXXMethodDecl")) { const name = ref.object.getPtr("name").?.string; if (mem.eql(u8, name, "operator[]")) { const sig = parseFnSignature(ref).?; if (!sig.is_const and mem.endsWith(u8, sig.ret, "&")) { // class[i] = value; op_name = "getPtr"; deref = true; } else { // value = class[i]; op_name = "get"; } } else if (mem.eql(u8, name, "operator()")) { op_name = "call"; } else if (mem.eql(u8, name, "operator=")) { op_name = "copyFrom"; } else if (mem.eql(u8, name, "operator==")) { op_name = "eql"; } else if (mem.eql(u8, name, "operator!=")) { op_name = "notEql"; } else if (mem.eql(u8, name, "operator!")) { op_name = "not"; } else if (mem.eql(u8, name, "operator+")) { op_name = "add"; } else if (mem.eql(u8, name, "operator-")) { op_name = "sub"; } else if (mem.eql(u8, name, "operator*")) { op_name = "mul"; } else if (mem.eql(u8, name, "operator/")) { op_name = "div"; } else if (mem.eql(u8, name, "operator+=")) { op_name = "addInto"; } else if (mem.eql(u8, name, "operator-=")) { op_name = "subInto"; } else if (mem.eql(u8, name, "operator*=")) { op_name = "mulInto"; } else if (mem.eql(u8, name, "operator/=")) { op_name = "divInto"; } else if (mem.eql(u8, name, "operator<")) { op_name = "lessThan"; } else if (mem.eql(u8, name, "operator<=")) { op_name = "lessEqThan"; } else if (mem.eql(u8, name, "operator>")) { op_name = "greaterThan"; } else if (mem.eql(u8, name, "operator>=")) { op_name = "greaterEqThan"; } else if (mem.eql(u8, name, "operator==")) { // note: duplicate of the "operator==" branch above, so this is never reached op_name = "equalTo"; } else if (mem.eql(u8, name, "operator<<")) { op_name = "shiftLeft"; } else if (mem.eql(u8, name, "operator>>")) { op_name = "shiftRight"; } else { log.err("unhandled operator `{s}` in `CXXOperatorCallExpr`", .{name}); return; } } else { log.err("unhandled reference decl `{s}` of `DeclRefExpr` in `CXXOperatorCallExpr`", .{ref_kind}); return; } } else { log.err("unhandled `{s}` in `CXXOperatorCallExpr`", .{op_kind}); return; } try self.visit(&inner[1]); try self.out.print(".{s}(", .{op_name}); // args var comma = false; const count = inner.len; for (2..count) |i| { if (comma) _ = try self.out.write(", "); // write the separator before each argument after the first try self.visit(&inner[i]); comma = true; } _ = try self.out.write(")"); if (deref) _ = try self.out.write(".*"); }
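// Illustrative example: with a non-const `operator[]` that returns a reference, `v[i] = x;`
// is rewritten as `v.getPtr(i).* = x;` (the `deref` flag above appends the `.*`).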
log.warn("impossible to solve `UnresolvedMemberExpr`", .{}); _ = try self.out.write("0=0=UnresolvedMemberExpr"); } fn visitConditionalOperator(self: *Self, node: *const json.Value) !void { // The ?: ternary operator. self.nodes_visited += 1; const inner = node.object.getPtr("inner").?.array.items; _ = try self.out.write(" if ("); try self.visit(&inner[0]); _ = try self.out.write(") "); try self.visit(&inner[1]); _ = try self.out.write(" else "); try self.visit(&inner[2]); _ = try self.out.write(" "); self.semicolon = true; } fn visitBreakStmt(self: *Self, _: *const json.Value) !void { self.nodes_visited += 1; _ = try self.out.write("break"); } fn visitStringLiteral(self: *Self, node: *const json.Value) !void { self.nodes_visited += 1; _ = try self.out.write("\""); try self.out.print("{}", .{std.zig.fmtEscapes(node.object.getPtr("value").?.string)}); _ = try self.out.write("\""); } fn visitFriendDecl(self: *Self, node: *const json.Value) !void { self.nodes_visited += 1; if (node.object.getPtr("inner")) |inner| { for (inner.array.items) |*item| { const kind = item.object.getPtr("kind").?.string; if (mem.eql(u8, kind, "FunctionDecl")) { try self.visitFunctionDecl(item); } else { log.err("unhandled `{s}` in `FriendDecl`", .{kind}); } } } else { log.err("no `inner` in `FriendDecl`", .{}); } } fn visitFullComment(self: *Self, node: *const json.Value) !void { self.nodes_visited += 1; // This is a bit convoluted but seems to match the style of the input var has_paragraph_comment = false; for (node.object.getPtr("inner").?.array.items) |*item| { const kind = item.object.getPtr("kind").?.string; if (mem.eql(u8, kind, "ParagraphComment")) { if (has_paragraph_comment) { _ = try self.out.write("\n///\n"); } has_paragraph_comment = true; _ = try self.out.write("///"); try self.visitParagraphComment(item); } else if (mem.eql(u8, kind, "VerbatimLineComment")) { try self.visitVerbatimLineComment(item); } else if (mem.eql(u8, kind, "ParamCommandComment")) { try self.visitParamCommandComment(item); } else if (mem.eql(u8, kind, "BlockCommandComment")) { try self.visitBlockCommandComment(item); } else { log.err("unhandled `{s}` in `FullComment`", .{kind}); } } // Write newline after _ = try self.out.write("\n"); } fn visitParagraphComment(self: *Self, node: *const json.Value) !void { self.nodes_visited += 1; const inner = node.object.getPtr("inner").?; for (inner.array.items, 0..) 
|*item, i| { const kind = item.object.getPtr("kind").?.string; if (i > 0) { _ = try self.out.write("///"); } if (mem.eql(u8, kind, "TextComment")) { try self.visitTextComment(item); } else if (mem.eql(u8, kind, "InlineCommandComment")) { self.nodes_visited += 1; const name = item.object.getPtr("name").?.string; _ = try self.out.write("@"); _ = try self.out.write(name); _ = try self.out.write(" "); } else { self.nodes_visited -= 1; log.err("unhandled `{s}` in `ParagraphComment`", .{kind}); } if (i + 1 < inner.array.items.len) { // No newline after last element _ = try self.out.write("\n"); } } } fn visitTextComment(self: *Self, node: *const json.Value) !void { const text = node.object.getPtr("text").?.string; _ = try self.out.write(text); } fn visitParamCommandComment(self: *Self, node: *const json.Value) !void { self.nodes_visited += 1; const direction = node.object.getPtr("direction").?.string; const param = node.object.getPtr("param").?.string; _ = try self.out.write("@param["); _ = try self.out.write(direction); _ = try self.out.write("] "); _ = try self.out.write(param); _ = try self.out.write(" "); for (node.object.getPtr("inner").?.array.items) |*item| { try self.visit(item); } } fn visitBlockCommandComment(self: *Self, node: *const json.Value) !void { self.nodes_visited += 1; _ = try self.out.write("@see"); for (node.object.getPtr("inner").?.array.items) |*item| { try self.visit(item); } } fn visitVerbatimLineComment(self: *Self, node: *const json.Value) !void { self.nodes_visited += 1; // TODO: To do this access to the original source is required // const loc = node.object.get("loc").?; // const loc_offset: usize = @intCast(loc.object.get("offset").?.integer); // const tok_len: usize = @intCast(loc.object.get("tokLen").?.integer); // const start = loc_offset; // Add col here? // const end = loc_offset + tok_len; // const text = SOURCE CODE GOES HERE // const range = text[start..end]; // _ = try self.out.write(range); _ = try self.out.write("@UntranspiledVerbatimLineCommentCommand"); const text = node.object.getPtr("text").?.string; _ = try self.out.write(text); } /////////////////////////////////////////////////////////////////////////////// inline fn typeQualifier(value: *const json.Value) ?[]const u8 { if (value.object.getPtr("type")) |ty| { if (ty.object.getPtr("qualType")) |qual| { return qual.string; } } return null; } inline fn location(value: *const json.Value) ?struct { line: i64, col: i64 } { if (value.object.getPtr("loc")) |loc| { if (loc.object.getPtr("spellingLoc")) |spelling_loc| { const line = spelling_loc.object.get("line").?.integer; const col = spelling_loc.object.get("col").?.integer; return .{ .line = line, .col = col }; } else if (loc.object.getPtr("expansionLoc")) |expansion_loc| { const line = expansion_loc.object.get("line").?.integer; const col = expansion_loc.object.get("col").?.integer; return .{ .line = line, .col = col }; } const line = loc.object.get("line").?.integer; const col = loc.object.get("col").?.integer; return .{ .line = line, .col = col }; } return null; } inline fn resolveEnumVariantName(base: []const u8, variant: []const u8) []const u8 { return if (mem.startsWith(u8, variant, base)) variant[base.len..] else variant; } fn parseFnSignature(value: *const json.Value) ?FnSig { if (typeQualifier(value)) |raw| { const rp = mem.lastIndexOf(u8, raw, ")").?; const lp = mem.indexOf(u8, raw, "(").?; return .{ .raw = raw, .is_const = mem.endsWith(u8, raw[rp..], ") const"), .is_varidatic = mem.endsWith(u8, raw[0 .. 
rp + 1], "...)"), .ret = mem.trim(u8, raw[0..lp], " "), }; } return null; } inline fn shouldSkip(self: *Self, value: *const json.Value) bool { // todo: incorporate this? // if (value.object.get("isImplicit")) |implicit| { // if (implicit.bool) { // return true; // } // } if (self.recursive) return false; if (value.object.getPtr("loc")) |loc| { // c include if (loc.object.getPtr("includedFrom") != null) return true; // c++ ... if (loc.object.getPtr("expansionLoc")) |expansionLoc| { if (expansionLoc.object.getPtr("includedFrom") != null) return true; } } return false; } fn nodeCount(value: *const json.Value) usize { var count: usize = 1; if (value.object.getPtr("inner")) |j_inner| { for (j_inner.array.items) |*j_item| { count += nodeCount(j_item); } } return count; } inline fn keywordFix(name: []const u8) []const u8 { return if (KeywordsLUT.get(name)) |fix| fix else name; } fn mangle(self: *Self, name: []const u8, overload: ?usize) ![]u8 { var tmp = try std.ArrayList(u8).initCapacity(self.allocator, self.namespace.full_path.items.len + 32); defer tmp.deinit(); var o = tmp.writer(); _ = try o.print("_{d}_", .{if (overload) |i| i else 1}); var it = mem.split(u8, self.namespace.full_path.items, "::"); while (it.next()) |value| { if (value.len != 0) _ = try o.print("{s}_", .{value}); } _ = try o.print("{s}_", .{name}); return try self.allocator.dupe(u8, tmp.items); } fn transpileType(self: *Self, tname: []const u8) ![]u8 { var ttname = mem.trim(u8, tname, " "); // remove struct from C style definition if (mem.startsWith(u8, ttname, "struct ")) { ttname = ttname["struct ".len..]; } // remove class from C style definition if (mem.startsWith(u8, ttname, "class ")) { ttname = ttname["class ".len..]; } // remove union from C style definition if (mem.startsWith(u8, ttname, "union ")) { ttname = ttname["union ".len..]; } const ch = ttname[ttname.len - 1]; if (ch == '*' or ch == '&') { // note: avoid c-style pointers `[*c]` when dealing with references types // note: references pointer types can't be null const ptr = if (ch == '&') "*" else "[*c]"; var constness: []const u8 = "const "; var raw_name: []const u8 = undefined; var buf: [7]u8 = undefined; const template = try fmt.bufPrint(&buf, "const {c}", .{ch}); if (mem.endsWith(u8, ttname, template)) { // const pointer of pointers raw_name = ttname[0..(ttname.len - template.len)]; } else if (mem.startsWith(u8, ttname, "const ")) { // const pointer raw_name = ttname[("const ".len)..(ttname.len - 1)]; } else { // mutable pointer case raw_name = ttname[0..(ttname.len - 1)]; constness = ""; } // special case if (mem.eql(u8, mem.trim(u8, raw_name, " "), "void")) { return try fmt.allocPrint(self.allocator, "?*{s}anyopaque", .{constness}); } const inner = try self.transpileType(raw_name); defer self.allocator.free(inner); return try fmt.allocPrint(self.allocator, "{s}{s}{s}", .{ ptr, constness, inner }); } else if (mem.indexOf(u8, ttname, "struct at ") != null or mem.indexOf(u8, ttname, "union at ") != null) // or // mem.indexOf(u8, ttname, "enum at ") != null) { // "qualType": "RootStruct::(anonymous union at bitfieldtest.h:4:5)" // "qualType": "RootStruct::(anonymous struct at bitfieldtest.h:25:5)" // "qualType": "struct (unnamed struct at header.h:4:5)" var separator_index = mem.lastIndexOf(u8, ttname, ":").?; const tmpname = ttname[0 .. separator_index - 1]; separator_index = mem.lastIndexOf(u8, tmpname, ":").?; ttname = ttname[separator_index + 1 ..]; ttname = ttname[0 .. 
ttname.len - 1]; const class = self.class_info.get(ttname).?; ttname = class.name; } else if (mem.endsWith(u8, ttname, " *const")) { // NOTE: This can probably be improved to handle more cases, or maybe combined with the // above case. const raw_name = ttname[0..(ttname.len - (" *const".len))]; const inner = try self.transpileType(raw_name); defer self.allocator.free(inner); return try fmt.allocPrint(self.allocator, "*const {s}", .{inner}); } else if (ch == ']') { // fixed-size array const len = mem.lastIndexOf(u8, ttname, "[").?; const inner_name = try self.transpileType(ttname[0..len]); defer self.allocator.free(inner_name); return try fmt.allocPrint(self.allocator, "{s}{s}", .{ ttname[len..], inner_name }); } else if (ch == ')') { // todo: handle named function pointers `typedef int (*)(ImGuiInputTextCallbackData* data);` // function pointer or invalid type name if (mem.indexOf(u8, ttname, "(*)")) |ptr| { var index: usize = 0; var args = std.ArrayList(u8).init(self.allocator); defer args.deinit(); try self.transpileArgs(mem.trim(u8, ttname[(ptr + "(*)".len) + 1 .. ttname.len - 1], " "), &args, &index); const tret = try self.transpileType(ttname[0..ptr]); defer self.allocator.free(tret); return try fmt.allocPrint(self.allocator, "?*const fn({s}) callconv(.C) {s} ", .{ args.items, tret }); } else { log.err("unknown type `{s}`, falling back to `*anyopaque`", .{ttname}); ttname = "*anyopaque"; } } else if (ch == '>') { // templated type var index: usize = 0; var args = std.ArrayList(u8).init(self.allocator); defer args.deinit(); const less_than = mem.indexOf(u8, ttname, "<").?; const root = try self.transpileType(ttname[0..less_than]); defer self.allocator.free(root); try self.transpileArgs(ttname[less_than..], &args, &index); return try fmt.allocPrint(self.allocator, "{s}{s}", .{ root, args.items }); } else if (mem.startsWith(u8, ttname, "const ")) { // const doesn't mean anything to zig if it's not a pointer // todo: it could be an aliased pointer type ... 
return try self.transpileType(ttname["const ".len..]); } else if (mem.startsWith(u8, ttname, "enum ")) { return try self.transpileType(ttname["enum ".len..]); } else { // common primitives if (PrimitivesTypeLUT.get(ttname)) |pname| { ttname = pname; } } const buf = try self.allocator.alloc(u8, ttname.len); mem.copyForwards(u8, buf, ttname); return buf; } // generics `Vector<TypeArgs>` // function arguments without parameter names in fn pointers `const void (x)(TypeArgs)` fn transpileArgs(self: *Self, args: []const u8, buffer: *std.ArrayList(u8), index: *usize) anyerror!void { var start = index.*; while (index.* < args.len) { const ch = args[index.*]; if (ch == '<') { const arg = args[start..index.*]; try buffer.appendSlice(arg); try buffer.append('('); index.* += 1; try self.transpileArgs(args, buffer, index); start = index.*; continue; } else if (ch == '>') { const arg = args[start..index.*]; const name = try self.transpileType(arg); defer self.allocator.free(name); try buffer.appendSlice(name); try buffer.append(')'); index.* += 1; return; } else if (ch == ',') { if (index.* > start) { const arg = args[start..index.*]; const name = try self.transpileType(arg); defer self.allocator.free(name); try buffer.appendSlice(name); try buffer.append(','); start = index.* + 1; } else { start = index.*; } } index.* += 1; } const rem = mem.trim(u8, args[start..], " "); if (rem.len > 0) { const name = try self.transpileType(rem); defer self.allocator.free(name); try buffer.appendSlice(name); } if (buffer.items.len > 0 and buffer.items[buffer.items.len - 1] == ',') { _ = buffer.pop(); } }
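// Illustrative transpileType mappings (an editorial sketch derived from the branches
// above; entries marked "assumed" depend on PrimitivesTypeLUT, which is defined
// elsewhere in this file and is not shown here):
//   "void *"        -> "?*anyopaque"
//   "const void *"  -> "?*const anyopaque"
//   "const Foo &"   -> "*const Foo"   (references become non-null pointers)
//   "Foo *"         -> "[*c]Foo"
//   "float[4]"      -> "[4]f32"       (assumed "float" -> "f32" in the LUT)
//   "int (*)(int)"  -> "?*const fn(c_int) callconv(.C) c_int"   (assumed "int" -> "c_int")
//   "Vector<int>"   -> "Vector(c_int)"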
0
repos/c2z
repos/c2z/.vscode/tasks.json
{ // See https://go.microsoft.com/fwlink/?LinkId=733558 // for the documentation about the tasks.json format "version": "2.0.0", "tasks": [ { "label": "buildDebug", "type": "shell", "command": "zig build --summary failures -freference-trace", "group": "build", "problemMatcher": [ "$gcc" ], "presentation": { "clear": true, // "revealProblems": "onProblem" } }, ] }
0
repos/c2z
repos/c2z/.vscode/settings.json
{ "files.associations": { "cmath": "cpp", "vector": "cpp", "xmemory": "cpp", "algorithm": "cpp", "array": "cpp", "atomic": "cpp", "bit": "cpp", "cctype": "cpp", "charconv": "cpp", "chrono": "cpp", "cinttypes": "cpp", "clocale": "cpp", "compare": "cpp", "concepts": "cpp", "condition_variable": "cpp", "cstddef": "cpp", "cstdint": "cpp", "cstdio": "cpp", "cstdlib": "cpp", "cstring": "cpp", "ctime": "cpp", "cwchar": "cpp", "deque": "cpp", "exception": "cpp", "format": "cpp", "forward_list": "cpp", "fstream": "cpp", "functional": "cpp", "initializer_list": "cpp", "iomanip": "cpp", "ios": "cpp", "iosfwd": "cpp", "istream": "cpp", "iterator": "cpp", "limits": "cpp", "list": "cpp", "locale": "cpp", "map": "cpp", "memory": "cpp", "mutex": "cpp", "new": "cpp", "optional": "cpp", "ostream": "cpp", "queue": "cpp", "random": "cpp", "ranges": "cpp", "ratio": "cpp", "shared_mutex": "cpp", "span": "cpp", "sstream": "cpp", "stdexcept": "cpp", "stop_token": "cpp", "streambuf": "cpp", "string": "cpp", "system_error": "cpp", "thread": "cpp", "tuple": "cpp", "type_traits": "cpp", "typeinfo": "cpp", "unordered_map": "cpp", "unordered_set": "cpp", "utility": "cpp", "xfacet": "cpp", "xhash": "cpp", "xiosbase": "cpp", "xlocale": "cpp", "xlocbuf": "cpp", "xlocinfo": "cpp", "xlocmes": "cpp", "xlocmon": "cpp", "xlocnum": "cpp", "xloctime": "cpp", "xstddef": "cpp", "xstring": "cpp", "xtr1common": "cpp", "xtree": "cpp", "xutility": "cpp", "iostream": "cpp" } }
0
repos/c2z/use_cases
repos/c2z/use_cases/fpng/fpng.zig
// auto-generated, then modified to be as idiomatic as possible const std = @import("std"); const cpp = @import("cpp"); extern fn _ZN4fpng9fpng_initEv() void; pub const fpng_init = _ZN4fpng9fpng_initEv; extern fn _ZN4fpng23fpng_cpu_supports_sse41Ev() bool; pub const fpng_cpu_supports_sse41 = _ZN4fpng23fpng_cpu_supports_sse41Ev; extern fn _ZN4fpng10fpng_crc32EPKvyj(pData: ?*const anyopaque, size: usize, prev_crc32: u32) u32; pub inline fn fpng_crc32( pData: ?*const anyopaque, size: usize, opt: struct { prev_crc32: u32 = 0 }, ) u32 { return _ZN4fpng10fpng_crc32EPKvyj(pData, size, opt.prev_crc32); } extern fn _ZN4fpng12fpng_adler32EPKvyj(pData: ?*const anyopaque, size: usize, adler: u32) u32; pub inline fn fpng_adler32( pData: ?*const anyopaque, size: usize, opt: struct { adler: u32 = 1 }, ) u32 { return _ZN4fpng12fpng_adler32EPKvyj(pData, size, opt.adler); } pub const EncodeFlag = enum(u32) { ENCODE_SLOWER = 1, FORCE_UNCOMPRESSED = 2, }; extern fn _ZN4fpng27fpng_encode_image_to_memoryEPKvjjjRNSt3__16vectorIhNS2_9allocatorIhEEEEj(pImage: ?*const anyopaque, w: u32, h: u32, num_chans: u32, out_buf: *cpp.Vector(u8), flags: EncodeFlag) bool; pub const fpng_encode_image_to_memory = _ZN4fpng27fpng_encode_image_to_memoryEPKvjjjRNSt3__16vectorIhNS2_9allocatorIhEEEEj; extern fn _ZN4fpng25fpng_encode_image_to_fileEPKcPKvjjjj(pFilename: [*c]const u8, pImage: ?*const anyopaque, w: u32, h: u32, num_chans: u32, flags: EncodeFlag) bool; pub const fpng_encode_image_to_file = _ZN4fpng25fpng_encode_image_to_fileEPKcPKvjjjj; pub const DecodeResult = enum(c_int) { SUCCESS = 0, NOT_FPNG = 1, INVALID_ARG = 2, FAILED_NOT_PNG = 3, FAILED_HEADER_CRC32 = 4, FAILED_INVALID_DIMENSIONS = 5, FAILED_DIMENSIONS_TOO_LARGE = 6, FAILED_CHUNK_PARSING = 7, FAILED_INVALID_IDAT = 8, FILE_OPEN_FAILED = 9, FILE_TOO_LARGE = 10, FILE_READ_FAILED = 11, FILE_SEEK_FAILED = 12, }; extern fn _ZN4fpng13fpng_get_infoEPKvjRjS2_S2_(pImage: ?*const anyopaque, image_size: u32, width: *u32, height: *u32, channels_in_file: *u32) DecodeResult; pub const fpng_get_info = _ZN4fpng13fpng_get_infoEPKvjRjS2_S2_; extern fn _ZN4fpng18fpng_decode_memoryEPKvjRNSt3__16vectorIhNS2_9allocatorIhEEEERjS8_S8_j(pImage: ?*const anyopaque, image_size: u32, out: *cpp.Vector(u8), width: *u32, height: *u32, channels_in_file: *u32, desired_channels: u32) DecodeResult; pub const fpng_decode_memory = _ZN4fpng18fpng_decode_memoryEPKvjRNSt3__16vectorIhNS2_9allocatorIhEEEERjS8_S8_j; extern fn _ZN4fpng16fpng_decode_fileEPKcRNSt3__16vectorIhNS2_9allocatorIhEEEERjS8_S8_j(pFilename: [*c]const u8, out: *cpp.Vector(u8), width: *u32, height: *u32, channels_in_file: *u32, desired_channels: u32) DecodeResult; pub const fpng_decode_file = _ZN4fpng16fpng_decode_fileEPKcRNSt3__16vectorIhNS2_9allocatorIhEEEERjS8_S8_j;
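// Illustrative usage (an editorial sketch, not part of the generated binding; assumes
// the fpng C++ sources are compiled and linked in, and that `chunk_a`/`chunk_b` are
// []const u8 slices):
//
//   fpng_init();
//   var crc = fpng_crc32(chunk_a.ptr, chunk_a.len, .{}); // prev_crc32 defaults to 0
//   crc = fpng_crc32(chunk_b.ptr, chunk_b.len, .{ .prev_crc32 = crc });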
0
repos/c2z/use_cases/fpng
repos/c2z/use_cases/fpng/include/fpng.h
// fpng.h - unlicense (see end of fpng.cpp) #pragma once #include <stdlib.h> #include <stdint.h> #include <vector> #ifndef FPNG_TRAIN_HUFFMAN_TABLES // Set to 1 when using the -t (training) option in fpng_test to generate new opaque/alpha Huffman tables for the single pass encoder. #define FPNG_TRAIN_HUFFMAN_TABLES (0) #endif namespace fpng { // ---- Library initialization - call once to identify if the processor supports SSE. // Otherwise you'll only get scalar fallbacks. void fpng_init(); // ---- Useful Utilities // Returns true if the CPU supports SSE 4.1, and SSE support wasn't disabled by setting FPNG_NO_SSE=1. // fpng_init() must have been called first, or it'll assert and return false. bool fpng_cpu_supports_sse41(); // Fast CRC-32 SSE4.1+pclmul or a scalar fallback (slice by 4) const uint32_t FPNG_CRC32_INIT = 0; uint32_t fpng_crc32(const void* pData, size_t size, uint32_t prev_crc32 = FPNG_CRC32_INIT); // Fast SSE 4.1 Adler-32 with a scalar fallback. const uint32_t FPNG_ADLER32_INIT = 1; uint32_t fpng_adler32(const void* pData, size_t size, uint32_t adler = FPNG_ADLER32_INIT); // ---- Compression enum { // Enables computing custom Huffman tables for each file, instead of using the custom global tables. // Results in roughly 6% smaller files on average, but compression is around 40% slower. FPNG_ENCODE_SLOWER = 1, // Only use raw Deflate blocks (no compression at all). Intended for testing. FPNG_FORCE_UNCOMPRESSED = 2, }; // Fast PNG encoding. The resulting file can be decoded either using a standard PNG decoder or the fpng_decode_memory() function below. // pImage: pointer to RGB or RGBA image pixels, R first in memory, B/A last. // w/h - image dimensions. Image's row pitch in bytes must be w*num_chans. // num_chans must be 3 or 4. bool fpng_encode_image_to_memory(const void* pImage, uint32_t w, uint32_t h, uint32_t num_chans, std::vector<uint8_t>& out_buf, uint32_t flags = 0); #ifndef FPNG_NO_STDIO // Fast PNG encoding to the specified file. bool fpng_encode_image_to_file(const char* pFilename, const void* pImage, uint32_t w, uint32_t h, uint32_t num_chans, uint32_t flags = 0); #endif // ---- Decompression enum { FPNG_DECODE_SUCCESS = 0, // file is a valid PNG file and written by FPNG and the decode succeeded FPNG_DECODE_NOT_FPNG, // file is a valid PNG file, but it wasn't written by FPNG so you should try decoding it with a general purpose PNG decoder FPNG_DECODE_INVALID_ARG, // invalid function parameter FPNG_DECODE_FAILED_NOT_PNG, // file cannot be a PNG file FPNG_DECODE_FAILED_HEADER_CRC32, // a chunk CRC32 check failed, file is likely corrupted or not PNG FPNG_DECODE_FAILED_INVALID_DIMENSIONS, // invalid image dimensions in IHDR chunk (0 or too large) FPNG_DECODE_FAILED_DIMENSIONS_TOO_LARGE, // decoding the file fully into memory would likely require too much memory (only on 32bpp builds) FPNG_DECODE_FAILED_CHUNK_PARSING, // failed while parsing the chunk headers, or file is corrupted FPNG_DECODE_FAILED_INVALID_IDAT, // IDAT data length is too small and cannot be valid, file is either corrupted or it's a bug // fpng_decode_file() specific errors FPNG_DECODE_FILE_OPEN_FAILED, FPNG_DECODE_FILE_TOO_LARGE, FPNG_DECODE_FILE_READ_FAILED, FPNG_DECODE_FILE_SEEK_FAILED }; // Fast PNG decoding of files ONLY created by fpng_encode_image_to_memory() or fpng_encode_image_to_file(). // If fpng_get_info() or fpng_decode_memory() returns FPNG_DECODE_NOT_FPNG, you should decode the PNG by falling back to a general purpose decoder. 
// // fpng_get_info() parses the PNG header and iterates through all chunks to determine if it's a file written by FPNG, but does not decompress the actual image data so it's relatively fast. // // pImage, image_size: Pointer to PNG image data and its size // width, height: output image's dimensions // channels_in_file: will be 3 or 4 // // Returns FPNG_DECODE_SUCCESS on success, otherwise one of the failure codes above. // If FPNG_DECODE_NOT_FPNG is returned, you must decompress the file with a general purpose PNG decoder. // If another error occurs, the file is likely corrupted or invalid, but you can still try to decompress the file with another decoder (which will likely fail). int fpng_get_info(const void* pImage, uint32_t image_size, uint32_t& width, uint32_t& height, uint32_t& channels_in_file); // fpng_decode_memory() decompresses 24/32bpp PNG files ONLY encoded by this module. // If the image was written by FPNG, it will decompress the image data, otherwise it will return FPNG_DECODE_NOT_FPNG in which case you should fall back to a general purpose PNG decoder (lodepng, stb_image, libpng, etc.) // // pImage, image_size: Pointer to PNG image data and its size // out: Output 24/32bpp image buffer // width, height: output image's dimensions // channels_in_file: will be 3 or 4 // desired_channels: must be 3 or 4 // // If the image is 24bpp and 32bpp is requested, the alpha values will be set to 0xFF. // If the image is 32bpp and 24bpp is requested, the alpha values will be discarded. // // Returns FPNG_DECODE_SUCCESS on success, otherwise one of the failure codes above. // If FPNG_DECODE_NOT_FPNG is returned, you must decompress the file with a general purpose PNG decoder. // If another error occurs, the file is likely corrupted or invalid, but you can still try to decompress the file with another decoder (which will likely fail). int fpng_decode_memory(const void* pImage, uint32_t image_size, std::vector<uint8_t>& out, uint32_t& width, uint32_t& height, uint32_t& channels_in_file, uint32_t desired_channels); #ifndef FPNG_NO_STDIO int fpng_decode_file(const char* pFilename, std::vector<uint8_t>& out, uint32_t& width, uint32_t& height, uint32_t& channels_in_file, uint32_t desired_channels); #endif // ---- Internal API used for Huffman table training purposes #if FPNG_TRAIN_HUFFMAN_TABLES const uint32_t HUFF_COUNTS_SIZE = 288; extern uint64_t g_huff_counts[HUFF_COUNTS_SIZE]; bool create_dynamic_block_prefix(uint64_t* pFreq, uint32_t num_chans, std::vector<uint8_t>& prefix, uint64_t& bit_buf, int& bit_buf_size, uint32_t *pCodes, uint8_t *pCodesizes); #endif } // namespace fpng
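// Illustrative decode-with-fallback (an editorial sketch, not part of the library;
// `decode_with_generic_png_decoder` stands in for a hypothetical fallback such as
// lodepng or stb_image):
//
//   std::vector<uint8_t> pixels;
//   uint32_t w = 0, h = 0, chans = 0;
//   int res = fpng::fpng_decode_file("image.png", pixels, w, h, chans, 4);
//   if (res == fpng::FPNG_DECODE_NOT_FPNG)
//       res = decode_with_generic_png_decoder("image.png", pixels, w, h); // hypothetical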
0
repos/c2z/use_cases/meshoptimizer
repos/c2z/use_cases/meshoptimizer/include/meshoptimizer.h
/** * meshoptimizer - version 0.19 * * Copyright (C) 2016-2023, by Arseny Kapoulkine ([email protected]) * Report bugs and download new versions at https://github.com/zeux/meshoptimizer * * This library is distributed under the MIT License. See notice at the end of this file. */ #pragma once #include <assert.h> #include <stddef.h> /* Version macro; major * 1000 + minor * 10 + patch */ #define MESHOPTIMIZER_VERSION 190 /* 0.19 */ /* If no API is defined, assume default */ #ifndef MESHOPTIMIZER_API #define MESHOPTIMIZER_API #endif /* Set the calling-convention for alloc/dealloc function pointers */ #ifndef MESHOPTIMIZER_ALLOC_CALLCONV #ifdef _MSC_VER #define MESHOPTIMIZER_ALLOC_CALLCONV __cdecl #else #define MESHOPTIMIZER_ALLOC_CALLCONV #endif #endif /* Experimental APIs have an unstable interface and might have an implementation that's not fully tested or optimized */ #define MESHOPTIMIZER_EXPERIMENTAL MESHOPTIMIZER_API /* C interface */ #ifdef __cplusplus extern "C" { #endif /** * Vertex attribute stream * Each element takes size bytes, beginning at data, with stride controlling the spacing between successive elements (stride >= size). */ struct meshopt_Stream { const void* data; size_t size; size_t stride; }; /** * Generates a vertex remap table from the vertex buffer and an optional index buffer and returns the number of unique vertices * As a result, all vertices that are binary equivalent map to the same (new) location, with no gaps in the resulting sequence. * Resulting remap table maps old vertices to new vertices and can be used in meshopt_remapVertexBuffer/meshopt_remapIndexBuffer. * Note that binary equivalence considers all vertex_size bytes, including padding which should be zero-initialized. * * destination must contain enough space for the resulting remap table (vertex_count elements) * indices can be NULL if the input is unindexed */ MESHOPTIMIZER_API size_t meshopt_generateVertexRemap(unsigned int* destination, const unsigned int* indices, size_t index_count, const void* vertices, size_t vertex_count, size_t vertex_size); /** * Generates a vertex remap table from multiple vertex streams and an optional index buffer and returns the number of unique vertices * As a result, all vertices that are binary equivalent map to the same (new) location, with no gaps in the resulting sequence. * Resulting remap table maps old vertices to new vertices and can be used in meshopt_remapVertexBuffer/meshopt_remapIndexBuffer. * To remap vertex buffers, you will need to call meshopt_remapVertexBuffer for each vertex stream. * Note that binary equivalence considers all size bytes in each stream, including padding which should be zero-initialized. 
* * destination must contain enough space for the resulting remap table (vertex_count elements) * indices can be NULL if the input is unindexed */ MESHOPTIMIZER_API size_t meshopt_generateVertexRemapMulti(unsigned int* destination, const unsigned int* indices, size_t index_count, size_t vertex_count, const struct meshopt_Stream* streams, size_t stream_count); /** * Generates vertex buffer from the source vertex buffer and remap table generated by meshopt_generateVertexRemap * * destination must contain enough space for the resulting vertex buffer (unique_vertex_count elements, returned by meshopt_generateVertexRemap) * vertex_count should be the initial vertex count and not the value returned by meshopt_generateVertexRemap */ MESHOPTIMIZER_API void meshopt_remapVertexBuffer(void* destination, const void* vertices, size_t vertex_count, size_t vertex_size, const unsigned int* remap); /** * Generate index buffer from the source index buffer and remap table generated by meshopt_generateVertexRemap * * destination must contain enough space for the resulting index buffer (index_count elements) * indices can be NULL if the input is unindexed */ MESHOPTIMIZER_API void meshopt_remapIndexBuffer(unsigned int* destination, const unsigned int* indices, size_t index_count, const unsigned int* remap); /** * Generate index buffer that can be used for more efficient rendering when only a subset of the vertex attributes is necessary * All vertices that are binary equivalent (wrt first vertex_size bytes) map to the first vertex in the original vertex buffer. * This makes it possible to use the index buffer for Z pre-pass or shadowmap rendering, while using the original index buffer for regular rendering. * Note that binary equivalence considers all vertex_size bytes, including padding which should be zero-initialized. * * destination must contain enough space for the resulting index buffer (index_count elements) */ MESHOPTIMIZER_API void meshopt_generateShadowIndexBuffer(unsigned int* destination, const unsigned int* indices, size_t index_count, const void* vertices, size_t vertex_count, size_t vertex_size, size_t vertex_stride); /** * Generate index buffer that can be used for more efficient rendering when only a subset of the vertex attributes is necessary * All vertices that are binary equivalent (wrt specified streams) map to the first vertex in the original vertex buffer. * This makes it possible to use the index buffer for Z pre-pass or shadowmap rendering, while using the original index buffer for regular rendering. * Note that binary equivalence considers all size bytes in each stream, including padding which should be zero-initialized. * * destination must contain enough space for the resulting index buffer (index_count elements) */ MESHOPTIMIZER_API void meshopt_generateShadowIndexBufferMulti(unsigned int* destination, const unsigned int* indices, size_t index_count, size_t vertex_count, const struct meshopt_Stream* streams, size_t stream_count); /** * Generate index buffer that can be used as a geometry shader input with triangle adjacency topology * Each triangle is converted into a 6-vertex patch with the following layout: * - 0, 2, 4: original triangle vertices * - 1, 3, 5: vertices adjacent to edges 02, 24 and 40 * The resulting patch can be rendered with geometry shaders using e.g. VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY. * This can be used to implement algorithms like silhouette detection/expansion and other forms of GS-driven rendering. 
* * destination must contain enough space for the resulting index buffer (index_count*2 elements) * vertex_positions should have float3 position in the first 12 bytes of each vertex */ MESHOPTIMIZER_API void meshopt_generateAdjacencyIndexBuffer(unsigned int* destination, const unsigned int* indices, size_t index_count, const float* vertex_positions, size_t vertex_count, size_t vertex_positions_stride); /** * Generate index buffer that can be used for PN-AEN tessellation with crack-free displacement * Each triangle is converted into a 12-vertex patch with the following layout: * - 0, 1, 2: original triangle vertices * - 3, 4: opposing edge for edge 0, 1 * - 5, 6: opposing edge for edge 1, 2 * - 7, 8: opposing edge for edge 2, 0 * - 9, 10, 11: dominant vertices for corners 0, 1, 2 * The resulting patch can be rendered with hardware tessellation using PN-AEN and displacement mapping. * See "Tessellation on Any Budget" (John McDonald, GDC 2011) for implementation details. * * destination must contain enough space for the resulting index buffer (index_count*4 elements) * vertex_positions should have float3 position in the first 12 bytes of each vertex */ MESHOPTIMIZER_API void meshopt_generateTessellationIndexBuffer(unsigned int* destination, const unsigned int* indices, size_t index_count, const float* vertex_positions, size_t vertex_count, size_t vertex_positions_stride); /** * Vertex transform cache optimizer * Reorders indices to reduce the number of GPU vertex shader invocations * If index buffer contains multiple ranges for multiple draw calls, this function needs to be called on each range individually. * * destination must contain enough space for the resulting index buffer (index_count elements) */ MESHOPTIMIZER_API void meshopt_optimizeVertexCache(unsigned int* destination, const unsigned int* indices, size_t index_count, size_t vertex_count); /** * Vertex transform cache optimizer for strip-like caches * Produces inferior results to meshopt_optimizeVertexCache from the GPU vertex cache perspective * However, the resulting index order is more optimal if the goal is to reduce the triangle strip length or improve compression efficiency * * destination must contain enough space for the resulting index buffer (index_count elements) */ MESHOPTIMIZER_API void meshopt_optimizeVertexCacheStrip(unsigned int* destination, const unsigned int* indices, size_t index_count, size_t vertex_count); /** * Vertex transform cache optimizer for FIFO caches * Reorders indices to reduce the number of GPU vertex shader invocations * Generally takes ~3x less time to optimize meshes but produces inferior results compared to meshopt_optimizeVertexCache * If index buffer contains multiple ranges for multiple draw calls, this function needs to be called on each range individually. * * destination must contain enough space for the resulting index buffer (index_count elements) * cache_size should be less than the actual GPU cache size to avoid cache thrashing */ MESHOPTIMIZER_API void meshopt_optimizeVertexCacheFifo(unsigned int* destination, const unsigned int* indices, size_t index_count, size_t vertex_count, unsigned int cache_size); /** * Overdraw optimizer * Reorders indices to reduce the number of GPU vertex shader invocations and the pixel overdraw * If index buffer contains multiple ranges for multiple draw calls, this function needs to be called on each range individually. 
* * destination must contain enough space for the resulting index buffer (index_count elements) * indices must contain index data that is the result of meshopt_optimizeVertexCache (*not* the original mesh indices!) * vertex_positions should have float3 position in the first 12 bytes of each vertex * threshold indicates how much the overdraw optimizer can degrade vertex cache efficiency (1.05 = up to 5%) to reduce overdraw more efficiently */ MESHOPTIMIZER_API void meshopt_optimizeOverdraw(unsigned int* destination, const unsigned int* indices, size_t index_count, const float* vertex_positions, size_t vertex_count, size_t vertex_positions_stride, float threshold); /** * Vertex fetch cache optimizer * Reorders vertices and changes indices to reduce the amount of GPU memory fetches during vertex processing * Returns the number of unique vertices, which is the same as input vertex count unless some vertices are unused * This function works for a single vertex stream; for multiple vertex streams, use meshopt_optimizeVertexFetchRemap + meshopt_remapVertexBuffer for each stream. * * destination must contain enough space for the resulting vertex buffer (vertex_count elements) * indices is used both as an input and as an output index buffer */ MESHOPTIMIZER_API size_t meshopt_optimizeVertexFetch(void* destination, unsigned int* indices, size_t index_count, const void* vertices, size_t vertex_count, size_t vertex_size); /** * Vertex fetch cache optimizer * Generates vertex remap to reduce the amount of GPU memory fetches during vertex processing * Returns the number of unique vertices, which is the same as input vertex count unless some vertices are unused * The resulting remap table should be used to reorder vertex/index buffers using meshopt_remapVertexBuffer/meshopt_remapIndexBuffer * * destination must contain enough space for the resulting remap table (vertex_count elements) */ MESHOPTIMIZER_API size_t meshopt_optimizeVertexFetchRemap(unsigned int* destination, const unsigned int* indices, size_t index_count, size_t vertex_count); /** * Index buffer encoder * Encodes index data into an array of bytes that is generally much smaller (<1.5 bytes/triangle) and compresses better (<1 byte/triangle) compared to original. * Input index buffer must represent a triangle list. * Returns encoded data size on success, 0 on error; the only error condition is if buffer doesn't have enough space * For maximum efficiency the index buffer being encoded has to be optimized for vertex cache and vertex fetch first. * * buffer must contain enough space for the encoded index buffer (use meshopt_encodeIndexBufferBound to compute worst case size) */ MESHOPTIMIZER_API size_t meshopt_encodeIndexBuffer(unsigned char* buffer, size_t buffer_size, const unsigned int* indices, size_t index_count); MESHOPTIMIZER_API size_t meshopt_encodeIndexBufferBound(size_t index_count, size_t vertex_count); /** * Set index encoder format version * version must specify the data format version to encode; valid values are 0 (decodable by all library versions) and 1 (decodable by 0.14+) */ MESHOPTIMIZER_API void meshopt_encodeIndexVersion(int version); /** * Index buffer decoder * Decodes index data from an array of bytes generated by meshopt_encodeIndexBuffer * Returns 0 if decoding was successful, and an error code otherwise * The decoder is safe to use for untrusted input, but it may produce garbage data (e.g. out of range indices). 
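 *
 * Illustrative encode/decode round trip (an editorial sketch, not part of the original
 * docs; `indices`, `index_count`, `vertex_count` and the `decoded` output buffer are
 * assumed inputs):
 *
 *   std::vector<unsigned char> buf(meshopt_encodeIndexBufferBound(index_count, vertex_count));
 *   buf.resize(meshopt_encodeIndexBuffer(buf.data(), buf.size(), indices, index_count));
 *   int rc = meshopt_decodeIndexBuffer(decoded, index_count, sizeof(unsigned int), buf.data(), buf.size());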
* * destination must contain enough space for the resulting index buffer (index_count elements) */ MESHOPTIMIZER_API int meshopt_decodeIndexBuffer(void* destination, size_t index_count, size_t index_size, const unsigned char* buffer, size_t buffer_size); /** * Index sequence encoder * Encodes index sequence into an array of bytes that is generally smaller and compresses better compared to original. * Input index sequence can represent arbitrary topology; for triangle lists meshopt_encodeIndexBuffer is likely to be better. * Returns encoded data size on success, 0 on error; the only error condition is if buffer doesn't have enough space * * buffer must contain enough space for the encoded index sequence (use meshopt_encodeIndexSequenceBound to compute worst case size) */ MESHOPTIMIZER_API size_t meshopt_encodeIndexSequence(unsigned char* buffer, size_t buffer_size, const unsigned int* indices, size_t index_count); MESHOPTIMIZER_API size_t meshopt_encodeIndexSequenceBound(size_t index_count, size_t vertex_count); /** * Index sequence decoder * Decodes index data from an array of bytes generated by meshopt_encodeIndexSequence * Returns 0 if decoding was successful, and an error code otherwise * The decoder is safe to use for untrusted input, but it may produce garbage data (e.g. out of range indices). * * destination must contain enough space for the resulting index sequence (index_count elements) */ MESHOPTIMIZER_API int meshopt_decodeIndexSequence(void* destination, size_t index_count, size_t index_size, const unsigned char* buffer, size_t buffer_size); /** * Vertex buffer encoder * Encodes vertex data into an array of bytes that is generally smaller and compresses better compared to original. * Returns encoded data size on success, 0 on error; the only error condition is if buffer doesn't have enough space * This function works for a single vertex stream; for multiple vertex streams, call meshopt_encodeVertexBuffer for each stream. * Note that all vertex_size bytes of each vertex are encoded verbatim, including padding which should be zero-initialized. * * buffer must contain enough space for the encoded vertex buffer (use meshopt_encodeVertexBufferBound to compute worst case size) */ MESHOPTIMIZER_API size_t meshopt_encodeVertexBuffer(unsigned char* buffer, size_t buffer_size, const void* vertices, size_t vertex_count, size_t vertex_size); MESHOPTIMIZER_API size_t meshopt_encodeVertexBufferBound(size_t vertex_count, size_t vertex_size); /** * Set vertex encoder format version * version must specify the data format version to encode; valid values are 0 (decodable by all library versions) */ MESHOPTIMIZER_API void meshopt_encodeVertexVersion(int version); /** * Vertex buffer decoder * Decodes vertex data from an array of bytes generated by meshopt_encodeVertexBuffer * Returns 0 if decoding was successful, and an error code otherwise * The decoder is safe to use for untrusted input, but it may produce garbage data. * * destination must contain enough space for the resulting vertex buffer (vertex_count * vertex_size bytes) */ MESHOPTIMIZER_API int meshopt_decodeVertexBuffer(void* destination, size_t vertex_count, size_t vertex_size, const unsigned char* buffer, size_t buffer_size); /** * Vertex buffer filters * These functions can be used to filter output of meshopt_decodeVertexBuffer in-place. * * meshopt_decodeFilterOct decodes octahedral encoding of a unit vector with K-bit (K <= 16) signed X/Y as an input; Z must store 1.0f. 
* Each component is stored as an 8-bit or 16-bit normalized integer; stride must be equal to 4 or 8. W is preserved as is. * * meshopt_decodeFilterQuat decodes 3-component quaternion encoding with K-bit (4 <= K <= 16) component encoding and a 2-bit component index indicating which component to reconstruct. * Each component is stored as a 16-bit integer; stride must be equal to 8. * * meshopt_decodeFilterExp decodes exponential encoding of floating-point data with 8-bit exponent and 24-bit integer mantissa as 2^E*M. * Each 32-bit component is decoded in isolation; stride must be divisible by 4. */ MESHOPTIMIZER_EXPERIMENTAL void meshopt_decodeFilterOct(void* buffer, size_t count, size_t stride); MESHOPTIMIZER_EXPERIMENTAL void meshopt_decodeFilterQuat(void* buffer, size_t count, size_t stride); MESHOPTIMIZER_EXPERIMENTAL void meshopt_decodeFilterExp(void* buffer, size_t count, size_t stride); /** * Vertex buffer filter encoders * These functions can be used to encode data in a format that meshopt_decodeFilter can decode * * meshopt_encodeFilterOct encodes unit vectors with K-bit (K <= 16) signed X/Y as an output. * Each component is stored as an 8-bit or 16-bit normalized integer; stride must be equal to 4 or 8. W is preserved as is. * Input data must contain 4 floats for every vector (count*4 total). * * meshopt_encodeFilterQuat encodes unit quaternions with K-bit (4 <= K <= 16) component encoding. * Each component is stored as a 16-bit integer; stride must be equal to 8. * Input data must contain 4 floats for every quaternion (count*4 total). * * meshopt_encodeFilterExp encodes arbitrary (finite) floating-point data with 8-bit exponent and K-bit integer mantissa (1 <= K <= 24). * Exponent can be shared between all components of a given vector as defined by stride or all values of a given component; stride must be divisible by 4. * Input data must contain stride/4 floats for every vector (count*stride/4 total). */ enum meshopt_EncodeExpMode { /* When encoding exponents, use separate values for each component (maximum quality) */ meshopt_EncodeExpSeparate, /* When encoding exponents, use shared value for all components of each vector (better compression) */ meshopt_EncodeExpSharedVector, /* When encoding exponents, use shared value for each component of all vectors (best compression) */ meshopt_EncodeExpSharedComponent, }; MESHOPTIMIZER_EXPERIMENTAL void meshopt_encodeFilterOct(void* destination, size_t count, size_t stride, int bits, const float* data); MESHOPTIMIZER_EXPERIMENTAL void meshopt_encodeFilterQuat(void* destination, size_t count, size_t stride, int bits, const float* data); MESHOPTIMIZER_EXPERIMENTAL void meshopt_encodeFilterExp(void* destination, size_t count, size_t stride, int bits, const float* data, enum meshopt_EncodeExpMode mode); /** * Simplification options */ enum { /* Do not move vertices that are located on the topological border (vertices on triangle edges that don't have a paired triangle). Useful for simplifying portions of the larger mesh. */ meshopt_SimplifyLockBorder = 1 << 0, }; /** * Mesh simplifier * Reduces the number of triangles in the mesh, attempting to preserve mesh appearance as much as possible * The algorithm tries to preserve mesh topology and can stop short of the target goal based on topology constraints or target error. * If not all attributes from the input mesh are required, it's recommended to reindex the mesh using meshopt_generateShadowIndexBuffer prior to simplification. 
* Returns the number of indices after simplification, with destination containing new index data * The resulting index buffer references vertices from the original vertex buffer. * If the original vertex data isn't required, creating a compact vertex buffer using meshopt_optimizeVertexFetch is recommended. * * destination must contain enough space for the target index buffer, worst case is index_count elements (*not* target_index_count)! * vertex_positions should have float3 position in the first 12 bytes of each vertex * target_error represents the error relative to mesh extents that can be tolerated, e.g. 0.01 = 1% deformation; value range [0..1] * options must be a bitmask composed of meshopt_SimplifyX options; 0 is a safe default * result_error can be NULL; when it's not NULL, it will contain the resulting (relative) error after simplification */ MESHOPTIMIZER_API size_t meshopt_simplify(unsigned int* destination, const unsigned int* indices, size_t index_count, const float* vertex_positions, size_t vertex_count, size_t vertex_positions_stride, size_t target_index_count, float target_error, unsigned int options, float* result_error); /** * Experimental: Mesh simplifier (sloppy) * Reduces the number of triangles in the mesh, sacrificing mesh appearance for simplification performance * The algorithm doesn't preserve mesh topology but can stop short of the target goal based on target error. * Returns the number of indices after simplification, with destination containing new index data * The resulting index buffer references vertices from the original vertex buffer. * If the original vertex data isn't required, creating a compact vertex buffer using meshopt_optimizeVertexFetch is recommended. * * destination must contain enough space for the target index buffer, worst case is index_count elements (*not* target_index_count)! * vertex_positions should have float3 position in the first 12 bytes of each vertex * target_error represents the error relative to mesh extents that can be tolerated, e.g. 0.01 = 1% deformation; value range [0..1] * result_error can be NULL; when it's not NULL, it will contain the resulting (relative) error after simplification */ MESHOPTIMIZER_EXPERIMENTAL size_t meshopt_simplifySloppy(unsigned int* destination, const unsigned int* indices, size_t index_count, const float* vertex_positions, size_t vertex_count, size_t vertex_positions_stride, size_t target_index_count, float target_error, float* result_error); /** * Experimental: Point cloud simplifier * Reduces the number of points in the cloud to reach the given target * Returns the number of points after simplification, with destination containing new index data * The resulting index buffer references vertices from the original vertex buffer. * If the original vertex data isn't required, creating a compact vertex buffer using meshopt_optimizeVertexFetch is recommended. 
* * destination must contain enough space for the target index buffer (target_vertex_count elements) * vertex_positions should have float3 position in the first 12 bytes of each vertex */ MESHOPTIMIZER_EXPERIMENTAL size_t meshopt_simplifyPoints(unsigned int* destination, const float* vertex_positions, size_t vertex_count, size_t vertex_positions_stride, size_t target_vertex_count); /** * Returns the error scaling factor used by the simplifier to convert between absolute and relative extents * * Absolute error must be *divided* by the scaling factor before passing it to meshopt_simplify as target_error * Relative error returned by meshopt_simplify via result_error must be *multiplied* by the scaling factor to get absolute error. */ MESHOPTIMIZER_API float meshopt_simplifyScale(const float* vertex_positions, size_t vertex_count, size_t vertex_positions_stride); /** * Mesh stripifier * Converts a previously vertex cache optimized triangle list to triangle strip, stitching strips using restart index or degenerate triangles * Returns the number of indices in the resulting strip, with destination containing new index data * For maximum efficiency the index buffer being converted has to be optimized for vertex cache first. * Using restart indices can result in ~10% smaller index buffers, but on some GPUs restart indices may result in decreased performance. * * destination must contain enough space for the target index buffer, worst case can be computed with meshopt_stripifyBound * restart_index should be 0xffff or 0xffffffff depending on index size, or 0 to use degenerate triangles */ MESHOPTIMIZER_API size_t meshopt_stripify(unsigned int* destination, const unsigned int* indices, size_t index_count, size_t vertex_count, unsigned int restart_index); MESHOPTIMIZER_API size_t meshopt_stripifyBound(size_t index_count); /** * Mesh unstripifier * Converts a triangle strip to a triangle list * Returns the number of indices in the resulting list, with destination containing new index data * * destination must contain enough space for the target index buffer, worst case can be computed with meshopt_unstripifyBound */ MESHOPTIMIZER_API size_t meshopt_unstripify(unsigned int* destination, const unsigned int* indices, size_t index_count, unsigned int restart_index); MESHOPTIMIZER_API size_t meshopt_unstripifyBound(size_t index_count); struct meshopt_VertexCacheStatistics { unsigned int vertices_transformed; unsigned int warps_executed; float acmr; /* transformed vertices / triangle count; best case 0.5, worst case 3.0, optimum depends on topology */ float atvr; /* transformed vertices / vertex count; best case 1.0, worst case 6.0, optimum is 1.0 (each vertex is transformed once) */ }; /** * Vertex transform cache analyzer * Returns cache hit statistics using a simplified FIFO model * Results may not match actual GPU performance */ MESHOPTIMIZER_API struct meshopt_VertexCacheStatistics meshopt_analyzeVertexCache(const unsigned int* indices, size_t index_count, size_t vertex_count, unsigned int cache_size, unsigned int warp_size, unsigned int primgroup_size); struct meshopt_OverdrawStatistics { unsigned int pixels_covered; unsigned int pixels_shaded; float overdraw; /* shaded pixels / covered pixels; best case 1.0 */ }; /** * Overdraw analyzer * Returns overdraw statistics using a software rasterizer * Results may not match actual GPU performance * * vertex_positions should have float3 position in the first 12 bytes of each vertex */ MESHOPTIMIZER_API struct meshopt_OverdrawStatistics 
meshopt_analyzeOverdraw(const unsigned int* indices, size_t index_count, const float* vertex_positions, size_t vertex_count, size_t vertex_positions_stride); struct meshopt_VertexFetchStatistics { unsigned int bytes_fetched; float overfetch; /* fetched bytes / vertex buffer size; best case 1.0 (each byte is fetched once) */ }; /** * Vertex fetch cache analyzer * Returns cache hit statistics using a simplified direct mapped model * Results may not match actual GPU performance */ MESHOPTIMIZER_API struct meshopt_VertexFetchStatistics meshopt_analyzeVertexFetch(const unsigned int* indices, size_t index_count, size_t vertex_count, size_t vertex_size); struct meshopt_Meshlet { /* offsets within meshlet_vertices and meshlet_triangles arrays with meshlet data */ unsigned int vertex_offset; unsigned int triangle_offset; /* number of vertices and triangles used in the meshlet; data is stored in consecutive range defined by offset and count */ unsigned int vertex_count; unsigned int triangle_count; }; /** * Meshlet builder * Splits the mesh into a set of meshlets where each meshlet has a micro index buffer indexing into meshlet vertices that refer to the original vertex buffer * The resulting data can be used to render meshes using NVidia programmable mesh shading pipeline, or in other cluster-based renderers. * When using buildMeshlets, vertex positions need to be provided to minimize the size of the resulting clusters. * When using buildMeshletsScan, for maximum efficiency the index buffer being converted has to be optimized for vertex cache first. * * meshlets must contain enough space for all meshlets, worst case size can be computed with meshopt_buildMeshletsBound * meshlet_vertices must contain enough space for all meshlets, worst case size is equal to max_meshlets * max_vertices * meshlet_triangles must contain enough space for all meshlets, worst case size is equal to max_meshlets * max_triangles * 3 * vertex_positions should have float3 position in the first 12 bytes of each vertex * max_vertices and max_triangles must not exceed implementation limits (max_vertices <= 255 - not 256!, max_triangles <= 512) * cone_weight should be set to 0 when cone culling is not used, and a value between 0 and 1 otherwise to balance between cluster size and cone culling efficiency */ MESHOPTIMIZER_API size_t meshopt_buildMeshlets(struct meshopt_Meshlet* meshlets, unsigned int* meshlet_vertices, unsigned char* meshlet_triangles, const unsigned int* indices, size_t index_count, const float* vertex_positions, size_t vertex_count, size_t vertex_positions_stride, size_t max_vertices, size_t max_triangles, float cone_weight); MESHOPTIMIZER_API size_t meshopt_buildMeshletsScan(struct meshopt_Meshlet* meshlets, unsigned int* meshlet_vertices, unsigned char* meshlet_triangles, const unsigned int* indices, size_t index_count, size_t vertex_count, size_t max_vertices, size_t max_triangles); MESHOPTIMIZER_API size_t meshopt_buildMeshletsBound(size_t index_count, size_t max_vertices, size_t max_triangles); struct meshopt_Bounds { /* bounding sphere, useful for frustum and occlusion culling */ float center[3]; float radius; /* normal cone, useful for backface culling */ float cone_apex[3]; float cone_axis[3]; float cone_cutoff; /* = cos(angle/2) */ /* normal cone axis and cutoff, stored in 8-bit SNORM format; decode using x/127.0 */ signed char cone_axis_s8[3]; signed char cone_cutoff_s8; }; /** * Cluster bounds generator * Creates bounding volumes that can be used for frustum, backface and occlusion culling. 
* * For backface culling with orthographic projection, use the following formula to reject backfacing clusters: * dot(view, cone_axis) >= cone_cutoff * * For perspective projection, you can use the formula that needs cone apex in addition to axis & cutoff: * dot(normalize(cone_apex - camera_position), cone_axis) >= cone_cutoff * * Alternatively, you can use the formula that doesn't need cone apex and uses bounding sphere instead: * dot(normalize(center - camera_position), cone_axis) >= cone_cutoff + radius / length(center - camera_position) * or an equivalent formula that doesn't have a singularity at center = camera_position: * dot(center - camera_position, cone_axis) >= cone_cutoff * length(center - camera_position) + radius * * The formula that uses the apex is slightly more accurate but needs the apex; if you are already using bounding sphere * to do frustum/occlusion culling, the formula that doesn't use the apex may be preferable (for derivation see * Real-Time Rendering 4th Edition, section 19.3). * * vertex_positions should have float3 position in the first 12 bytes of each vertex * index_count/3 should be less than or equal to 512 (the function assumes clusters of limited size) */ MESHOPTIMIZER_API struct meshopt_Bounds meshopt_computeClusterBounds(const unsigned int* indices, size_t index_count, const float* vertex_positions, size_t vertex_count, size_t vertex_positions_stride); MESHOPTIMIZER_API struct meshopt_Bounds meshopt_computeMeshletBounds(const unsigned int* meshlet_vertices, const unsigned char* meshlet_triangles, size_t triangle_count, const float* vertex_positions, size_t vertex_count, size_t vertex_positions_stride); /** * Experimental: Spatial sorter * Generates a remap table that can be used to reorder points for spatial locality. * Resulting remap table maps old vertices to new vertices and can be used in meshopt_remapVertexBuffer. * * destination must contain enough space for the resulting remap table (vertex_count elements) */ MESHOPTIMIZER_EXPERIMENTAL void meshopt_spatialSortRemap(unsigned int* destination, const float* vertex_positions, size_t vertex_count, size_t vertex_positions_stride); /** * Experimental: Spatial sorter * Reorders triangles for spatial locality, and generates a new index buffer. The resulting index buffer can be used with other functions like optimizeVertexCache. * * destination must contain enough space for the resulting index buffer (index_count elements) * vertex_positions should have float3 position in the first 12 bytes of each vertex */ MESHOPTIMIZER_EXPERIMENTAL void meshopt_spatialSortTriangles(unsigned int* destination, const unsigned int* indices, size_t index_count, const float* vertex_positions, size_t vertex_count, size_t vertex_positions_stride); /** * Set allocation callbacks * These callbacks will be used instead of the default operator new/operator delete for all temporary allocations in the library. * Note that all algorithms only allocate memory for temporary use. * allocate/deallocate are always called in a stack-like order - last pointer to be allocated is deallocated first. 
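 *
 * Illustrative override (an editorial sketch; the standard C allocator happens to
 * match the required signatures):
 *
 *   meshopt_setAllocator(malloc, free);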
*/ MESHOPTIMIZER_API void meshopt_setAllocator(void* (MESHOPTIMIZER_ALLOC_CALLCONV *allocate)(size_t), void (MESHOPTIMIZER_ALLOC_CALLCONV *deallocate)(void*)); #ifdef __cplusplus } /* extern "C" */ #endif /* Quantization into commonly supported data formats */ #ifdef __cplusplus /** * Quantize a float in [0..1] range into an N-bit fixed point unorm value * Assumes reconstruction function (q / (2^N-1)), which is the case for fixed-function normalized fixed point conversion * Maximum reconstruction error: 1/2^(N+1) */ inline int meshopt_quantizeUnorm(float v, int N); /** * Quantize a float in [-1..1] range into an N-bit fixed point snorm value * Assumes reconstruction function (q / (2^(N-1)-1)), which is the case for fixed-function normalized fixed point conversion (except early OpenGL versions) * Maximum reconstruction error: 1/2^N */ inline int meshopt_quantizeSnorm(float v, int N); /** * Quantize a float into half-precision floating point value * Generates +-inf for overflow, preserves NaN, flushes denormals to zero, rounds to nearest * Representable magnitude range: [6e-5; 65504] * Maximum relative reconstruction error: 5e-4 */ inline unsigned short meshopt_quantizeHalf(float v); /** * Quantize a float into a floating point value with a limited number of significant mantissa bits * Generates +-inf for overflow, preserves NaN, flushes denormals to zero, rounds to nearest * Assumes N is in a valid mantissa precision range, which is 1..23 */ inline float meshopt_quantizeFloat(float v, int N); #endif /** * C++ template interface * * These functions mirror the C interface the library provides, providing template-based overloads so that * the caller can use an arbitrary type for the index data, both for input and output. * When the supplied type is the same size as that of unsigned int, the wrappers are zero-cost; when it's not, * the wrappers end up allocating memory and copying index data to convert from one type to another. 
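 *
 * Illustrative use with 16-bit indices (an editorial sketch; `load_indices` and
 * `vertex_count` are hypothetical inputs). Because sizeof(unsigned short) !=
 * sizeof(unsigned int), the wrapper transparently copies through a temporary
 * unsigned int buffer:
 *
 *   std::vector<unsigned short> idx16 = load_indices();  // hypothetical loader
 *   std::vector<unsigned short> out16(idx16.size());
 *   meshopt_optimizeVertexCache(out16.data(), idx16.data(), idx16.size(), vertex_count);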
*/ #if defined(__cplusplus) && !defined(MESHOPTIMIZER_NO_WRAPPERS) template <typename T> inline size_t meshopt_generateVertexRemap(unsigned int* destination, const T* indices, size_t index_count, const void* vertices, size_t vertex_count, size_t vertex_size); template <typename T> inline size_t meshopt_generateVertexRemapMulti(unsigned int* destination, const T* indices, size_t index_count, size_t vertex_count, const meshopt_Stream* streams, size_t stream_count); template <typename T> inline void meshopt_remapIndexBuffer(T* destination, const T* indices, size_t index_count, const unsigned int* remap); template <typename T> inline void meshopt_generateShadowIndexBuffer(T* destination, const T* indices, size_t index_count, const void* vertices, size_t vertex_count, size_t vertex_size, size_t vertex_stride); template <typename T> inline void meshopt_generateShadowIndexBufferMulti(T* destination, const T* indices, size_t index_count, size_t vertex_count, const meshopt_Stream* streams, size_t stream_count); template <typename T> inline void meshopt_generateAdjacencyIndexBuffer(T* destination, const T* indices, size_t index_count, const float* vertex_positions, size_t vertex_count, size_t vertex_positions_stride); template <typename T> inline void meshopt_generateTessellationIndexBuffer(T* destination, const T* indices, size_t index_count, const float* vertex_positions, size_t vertex_count, size_t vertex_positions_stride); template <typename T> inline void meshopt_optimizeVertexCache(T* destination, const T* indices, size_t index_count, size_t vertex_count); template <typename T> inline void meshopt_optimizeVertexCacheStrip(T* destination, const T* indices, size_t index_count, size_t vertex_count); template <typename T> inline void meshopt_optimizeVertexCacheFifo(T* destination, const T* indices, size_t index_count, size_t vertex_count, unsigned int cache_size); template <typename T> inline void meshopt_optimizeOverdraw(T* destination, const T* indices, size_t index_count, const float* vertex_positions, size_t vertex_count, size_t vertex_positions_stride, float threshold); template <typename T> inline size_t meshopt_optimizeVertexFetchRemap(unsigned int* destination, const T* indices, size_t index_count, size_t vertex_count); template <typename T> inline size_t meshopt_optimizeVertexFetch(void* destination, T* indices, size_t index_count, const void* vertices, size_t vertex_count, size_t vertex_size); template <typename T> inline size_t meshopt_encodeIndexBuffer(unsigned char* buffer, size_t buffer_size, const T* indices, size_t index_count); template <typename T> inline int meshopt_decodeIndexBuffer(T* destination, size_t index_count, const unsigned char* buffer, size_t buffer_size); template <typename T> inline size_t meshopt_encodeIndexSequence(unsigned char* buffer, size_t buffer_size, const T* indices, size_t index_count); template <typename T> inline int meshopt_decodeIndexSequence(T* destination, size_t index_count, const unsigned char* buffer, size_t buffer_size); template <typename T> inline size_t meshopt_simplify(T* destination, const T* indices, size_t index_count, const float* vertex_positions, size_t vertex_count, size_t vertex_positions_stride, size_t target_index_count, float target_error, unsigned int options = 0, float* result_error = 0); template <typename T> inline size_t meshopt_simplifySloppy(T* destination, const T* indices, size_t index_count, const float* vertex_positions, size_t vertex_count, size_t vertex_positions_stride, size_t target_index_count, float target_error, 
float* result_error = 0); template <typename T> inline size_t meshopt_stripify(T* destination, const T* indices, size_t index_count, size_t vertex_count, T restart_index); template <typename T> inline size_t meshopt_unstripify(T* destination, const T* indices, size_t index_count, T restart_index); template <typename T> inline meshopt_VertexCacheStatistics meshopt_analyzeVertexCache(const T* indices, size_t index_count, size_t vertex_count, unsigned int cache_size, unsigned int warp_size, unsigned int buffer_size); template <typename T> inline meshopt_OverdrawStatistics meshopt_analyzeOverdraw(const T* indices, size_t index_count, const float* vertex_positions, size_t vertex_count, size_t vertex_positions_stride); template <typename T> inline meshopt_VertexFetchStatistics meshopt_analyzeVertexFetch(const T* indices, size_t index_count, size_t vertex_count, size_t vertex_size); template <typename T> inline size_t meshopt_buildMeshlets(meshopt_Meshlet* meshlets, unsigned int* meshlet_vertices, unsigned char* meshlet_triangles, const T* indices, size_t index_count, const float* vertex_positions, size_t vertex_count, size_t vertex_positions_stride, size_t max_vertices, size_t max_triangles, float cone_weight); template <typename T> inline size_t meshopt_buildMeshletsScan(meshopt_Meshlet* meshlets, unsigned int* meshlet_vertices, unsigned char* meshlet_triangles, const T* indices, size_t index_count, size_t vertex_count, size_t max_vertices, size_t max_triangles); template <typename T> inline meshopt_Bounds meshopt_computeClusterBounds(const T* indices, size_t index_count, const float* vertex_positions, size_t vertex_count, size_t vertex_positions_stride); template <typename T> inline void meshopt_spatialSortTriangles(T* destination, const T* indices, size_t index_count, const float* vertex_positions, size_t vertex_count, size_t vertex_positions_stride); #endif /* Inline implementation */ #ifdef __cplusplus inline int meshopt_quantizeUnorm(float v, int N) { const float scale = float((1 << N) - 1); v = (v >= 0) ? v : 0; v = (v <= 1) ? v : 1; return int(v * scale + 0.5f); } inline int meshopt_quantizeSnorm(float v, int N) { const float scale = float((1 << (N - 1)) - 1); float round = (v >= 0 ? 0.5f : -0.5f); v = (v >= -1) ? v : -1; v = (v <= +1) ? v : +1; return int(v * scale + round); } inline unsigned short meshopt_quantizeHalf(float v) { union { float f; unsigned int ui; } u = {v}; unsigned int ui = u.ui; int s = (ui >> 16) & 0x8000; int em = ui & 0x7fffffff; /* bias exponent and round to nearest; 112 is relative exponent bias (127-15) */ int h = (em - (112 << 23) + (1 << 12)) >> 13; /* underflow: flush to zero; 113 encodes exponent -14 */ h = (em < (113 << 23)) ? 0 : h; /* overflow: infinity; 143 encodes exponent 16 */ h = (em >= (143 << 23)) ? 0x7c00 : h; /* NaN; note that we convert all types of NaN to qNaN */ h = (em > (255 << 23)) ? 0x7e00 : h; return (unsigned short)(s | h); } inline float meshopt_quantizeFloat(float v, int N) { union { float f; unsigned int ui; } u = {v}; unsigned int ui = u.ui; const int mask = (1 << (23 - N)) - 1; const int round = (1 << (23 - N)) >> 1; int e = ui & 0x7f800000; unsigned int rui = (ui + round) & ~mask; /* round all numbers except inf/nan; this is important to make sure nan doesn't overflow into -0 */ ui = e == 0x7f800000 ? ui : rui; /* flush denormals to zero */ ui = e == 0 ? 
0 : ui; u.ui = ui; return u.f; } #endif /* Internal implementation helpers */ #ifdef __cplusplus class meshopt_Allocator { public: template <typename T> struct StorageT { static void* (MESHOPTIMIZER_ALLOC_CALLCONV *allocate)(size_t); static void (MESHOPTIMIZER_ALLOC_CALLCONV *deallocate)(void*); }; typedef StorageT<void> Storage; meshopt_Allocator() : blocks() , count(0) { } ~meshopt_Allocator() { for (size_t i = count; i > 0; --i) Storage::deallocate(blocks[i - 1]); } template <typename T> T* allocate(size_t size) { assert(count < sizeof(blocks) / sizeof(blocks[0])); T* result = static_cast<T*>(Storage::allocate(size > size_t(-1) / sizeof(T) ? size_t(-1) : size * sizeof(T))); blocks[count++] = result; return result; } private: void* blocks[24]; size_t count; }; // This makes sure that allocate/deallocate are lazily generated in translation units that need them and are deduplicated by the linker template <typename T> void* (MESHOPTIMIZER_ALLOC_CALLCONV *meshopt_Allocator::StorageT<T>::allocate)(size_t) = operator new; template <typename T> void (MESHOPTIMIZER_ALLOC_CALLCONV *meshopt_Allocator::StorageT<T>::deallocate)(void*) = operator delete; #endif /* Inline implementation for C++ templated wrappers */ #if defined(__cplusplus) && !defined(MESHOPTIMIZER_NO_WRAPPERS) template <typename T, bool ZeroCopy = sizeof(T) == sizeof(unsigned int)> struct meshopt_IndexAdapter; template <typename T> struct meshopt_IndexAdapter<T, false> { T* result; unsigned int* data; size_t count; meshopt_IndexAdapter(T* result_, const T* input, size_t count_) : result(result_) , data(0) , count(count_) { size_t size = count > size_t(-1) / sizeof(unsigned int) ? size_t(-1) : count * sizeof(unsigned int); data = static_cast<unsigned int*>(meshopt_Allocator::Storage::allocate(size)); if (input) { for (size_t i = 0; i < count; ++i) data[i] = input[i]; } } ~meshopt_IndexAdapter() { if (result) { for (size_t i = 0; i < count; ++i) result[i] = T(data[i]); } meshopt_Allocator::Storage::deallocate(data); } }; template <typename T> struct meshopt_IndexAdapter<T, true> { unsigned int* data; meshopt_IndexAdapter(T* result, const T* input, size_t) : data(reinterpret_cast<unsigned int*>(result ? result : const_cast<T*>(input))) { } }; template <typename T> inline size_t meshopt_generateVertexRemap(unsigned int* destination, const T* indices, size_t index_count, const void* vertices, size_t vertex_count, size_t vertex_size) { meshopt_IndexAdapter<T> in(0, indices, indices ? index_count : 0); return meshopt_generateVertexRemap(destination, indices ? in.data : 0, index_count, vertices, vertex_count, vertex_size); } template <typename T> inline size_t meshopt_generateVertexRemapMulti(unsigned int* destination, const T* indices, size_t index_count, size_t vertex_count, const meshopt_Stream* streams, size_t stream_count) { meshopt_IndexAdapter<T> in(0, indices, indices ? index_count : 0); return meshopt_generateVertexRemapMulti(destination, indices ? in.data : 0, index_count, vertex_count, streams, stream_count); } template <typename T> inline void meshopt_remapIndexBuffer(T* destination, const T* indices, size_t index_count, const unsigned int* remap) { meshopt_IndexAdapter<T> in(0, indices, indices ? index_count : 0); meshopt_IndexAdapter<T> out(destination, 0, index_count); meshopt_remapIndexBuffer(out.data, indices ? 
in.data : 0, index_count, remap); } template <typename T> inline void meshopt_generateShadowIndexBuffer(T* destination, const T* indices, size_t index_count, const void* vertices, size_t vertex_count, size_t vertex_size, size_t vertex_stride) { meshopt_IndexAdapter<T> in(0, indices, index_count); meshopt_IndexAdapter<T> out(destination, 0, index_count); meshopt_generateShadowIndexBuffer(out.data, in.data, index_count, vertices, vertex_count, vertex_size, vertex_stride); } template <typename T> inline void meshopt_generateShadowIndexBufferMulti(T* destination, const T* indices, size_t index_count, size_t vertex_count, const meshopt_Stream* streams, size_t stream_count) { meshopt_IndexAdapter<T> in(0, indices, index_count); meshopt_IndexAdapter<T> out(destination, 0, index_count); meshopt_generateShadowIndexBufferMulti(out.data, in.data, index_count, vertex_count, streams, stream_count); } template <typename T> inline void meshopt_generateAdjacencyIndexBuffer(T* destination, const T* indices, size_t index_count, const float* vertex_positions, size_t vertex_count, size_t vertex_positions_stride) { meshopt_IndexAdapter<T> in(0, indices, index_count); meshopt_IndexAdapter<T> out(destination, 0, index_count * 2); meshopt_generateAdjacencyIndexBuffer(out.data, in.data, index_count, vertex_positions, vertex_count, vertex_positions_stride); } template <typename T> inline void meshopt_generateTessellationIndexBuffer(T* destination, const T* indices, size_t index_count, const float* vertex_positions, size_t vertex_count, size_t vertex_positions_stride) { meshopt_IndexAdapter<T> in(0, indices, index_count); meshopt_IndexAdapter<T> out(destination, 0, index_count * 4); meshopt_generateTessellationIndexBuffer(out.data, in.data, index_count, vertex_positions, vertex_count, vertex_positions_stride); } template <typename T> inline void meshopt_optimizeVertexCache(T* destination, const T* indices, size_t index_count, size_t vertex_count) { meshopt_IndexAdapter<T> in(0, indices, index_count); meshopt_IndexAdapter<T> out(destination, 0, index_count); meshopt_optimizeVertexCache(out.data, in.data, index_count, vertex_count); } template <typename T> inline void meshopt_optimizeVertexCacheStrip(T* destination, const T* indices, size_t index_count, size_t vertex_count) { meshopt_IndexAdapter<T> in(0, indices, index_count); meshopt_IndexAdapter<T> out(destination, 0, index_count); meshopt_optimizeVertexCacheStrip(out.data, in.data, index_count, vertex_count); } template <typename T> inline void meshopt_optimizeVertexCacheFifo(T* destination, const T* indices, size_t index_count, size_t vertex_count, unsigned int cache_size) { meshopt_IndexAdapter<T> in(0, indices, index_count); meshopt_IndexAdapter<T> out(destination, 0, index_count); meshopt_optimizeVertexCacheFifo(out.data, in.data, index_count, vertex_count, cache_size); } template <typename T> inline void meshopt_optimizeOverdraw(T* destination, const T* indices, size_t index_count, const float* vertex_positions, size_t vertex_count, size_t vertex_positions_stride, float threshold) { meshopt_IndexAdapter<T> in(0, indices, index_count); meshopt_IndexAdapter<T> out(destination, 0, index_count); meshopt_optimizeOverdraw(out.data, in.data, index_count, vertex_positions, vertex_count, vertex_positions_stride, threshold); } template <typename T> inline size_t meshopt_optimizeVertexFetchRemap(unsigned int* destination, const T* indices, size_t index_count, size_t vertex_count) { meshopt_IndexAdapter<T> in(0, indices, index_count); return 
meshopt_optimizeVertexFetchRemap(destination, in.data, index_count, vertex_count); } template <typename T> inline size_t meshopt_optimizeVertexFetch(void* destination, T* indices, size_t index_count, const void* vertices, size_t vertex_count, size_t vertex_size) { meshopt_IndexAdapter<T> inout(indices, indices, index_count); return meshopt_optimizeVertexFetch(destination, inout.data, index_count, vertices, vertex_count, vertex_size); } template <typename T> inline size_t meshopt_encodeIndexBuffer(unsigned char* buffer, size_t buffer_size, const T* indices, size_t index_count) { meshopt_IndexAdapter<T> in(0, indices, index_count); return meshopt_encodeIndexBuffer(buffer, buffer_size, in.data, index_count); } template <typename T> inline int meshopt_decodeIndexBuffer(T* destination, size_t index_count, const unsigned char* buffer, size_t buffer_size) { char index_size_valid[sizeof(T) == 2 || sizeof(T) == 4 ? 1 : -1]; (void)index_size_valid; return meshopt_decodeIndexBuffer(destination, index_count, sizeof(T), buffer, buffer_size); } template <typename T> inline size_t meshopt_encodeIndexSequence(unsigned char* buffer, size_t buffer_size, const T* indices, size_t index_count) { meshopt_IndexAdapter<T> in(0, indices, index_count); return meshopt_encodeIndexSequence(buffer, buffer_size, in.data, index_count); } template <typename T> inline int meshopt_decodeIndexSequence(T* destination, size_t index_count, const unsigned char* buffer, size_t buffer_size) { char index_size_valid[sizeof(T) == 2 || sizeof(T) == 4 ? 1 : -1]; (void)index_size_valid; return meshopt_decodeIndexSequence(destination, index_count, sizeof(T), buffer, buffer_size); } template <typename T> inline size_t meshopt_simplify(T* destination, const T* indices, size_t index_count, const float* vertex_positions, size_t vertex_count, size_t vertex_positions_stride, size_t target_index_count, float target_error, unsigned int options, float* result_error) { meshopt_IndexAdapter<T> in(0, indices, index_count); meshopt_IndexAdapter<T> out(destination, 0, index_count); return meshopt_simplify(out.data, in.data, index_count, vertex_positions, vertex_count, vertex_positions_stride, target_index_count, target_error, options, result_error); } template <typename T> inline size_t meshopt_simplifySloppy(T* destination, const T* indices, size_t index_count, const float* vertex_positions, size_t vertex_count, size_t vertex_positions_stride, size_t target_index_count, float target_error, float* result_error) { meshopt_IndexAdapter<T> in(0, indices, index_count); meshopt_IndexAdapter<T> out(destination, 0, index_count); return meshopt_simplifySloppy(out.data, in.data, index_count, vertex_positions, vertex_count, vertex_positions_stride, target_index_count, target_error, result_error); } template <typename T> inline size_t meshopt_stripify(T* destination, const T* indices, size_t index_count, size_t vertex_count, T restart_index) { meshopt_IndexAdapter<T> in(0, indices, index_count); meshopt_IndexAdapter<T> out(destination, 0, (index_count / 3) * 5); return meshopt_stripify(out.data, in.data, index_count, vertex_count, unsigned(restart_index)); } template <typename T> inline size_t meshopt_unstripify(T* destination, const T* indices, size_t index_count, T restart_index) { meshopt_IndexAdapter<T> in(0, indices, index_count); meshopt_IndexAdapter<T> out(destination, 0, (index_count - 2) * 3); return meshopt_unstripify(out.data, in.data, index_count, unsigned(restart_index)); } template <typename T> inline meshopt_VertexCacheStatistics 
meshopt_analyzeVertexCache(const T* indices, size_t index_count, size_t vertex_count, unsigned int cache_size, unsigned int warp_size, unsigned int buffer_size) { meshopt_IndexAdapter<T> in(0, indices, index_count); return meshopt_analyzeVertexCache(in.data, index_count, vertex_count, cache_size, warp_size, buffer_size); } template <typename T> inline meshopt_OverdrawStatistics meshopt_analyzeOverdraw(const T* indices, size_t index_count, const float* vertex_positions, size_t vertex_count, size_t vertex_positions_stride) { meshopt_IndexAdapter<T> in(0, indices, index_count); return meshopt_analyzeOverdraw(in.data, index_count, vertex_positions, vertex_count, vertex_positions_stride); } template <typename T> inline meshopt_VertexFetchStatistics meshopt_analyzeVertexFetch(const T* indices, size_t index_count, size_t vertex_count, size_t vertex_size) { meshopt_IndexAdapter<T> in(0, indices, index_count); return meshopt_analyzeVertexFetch(in.data, index_count, vertex_count, vertex_size); } template <typename T> inline size_t meshopt_buildMeshlets(meshopt_Meshlet* meshlets, unsigned int* meshlet_vertices, unsigned char* meshlet_triangles, const T* indices, size_t index_count, const float* vertex_positions, size_t vertex_count, size_t vertex_positions_stride, size_t max_vertices, size_t max_triangles, float cone_weight) { meshopt_IndexAdapter<T> in(0, indices, index_count); return meshopt_buildMeshlets(meshlets, meshlet_vertices, meshlet_triangles, in.data, index_count, vertex_positions, vertex_count, vertex_positions_stride, max_vertices, max_triangles, cone_weight); } template <typename T> inline size_t meshopt_buildMeshletsScan(meshopt_Meshlet* meshlets, unsigned int* meshlet_vertices, unsigned char* meshlet_triangles, const T* indices, size_t index_count, size_t vertex_count, size_t max_vertices, size_t max_triangles) { meshopt_IndexAdapter<T> in(0, indices, index_count); return meshopt_buildMeshletsScan(meshlets, meshlet_vertices, meshlet_triangles, in.data, index_count, vertex_count, max_vertices, max_triangles); } template <typename T> inline meshopt_Bounds meshopt_computeClusterBounds(const T* indices, size_t index_count, const float* vertex_positions, size_t vertex_count, size_t vertex_positions_stride) { meshopt_IndexAdapter<T> in(0, indices, index_count); return meshopt_computeClusterBounds(in.data, index_count, vertex_positions, vertex_count, vertex_positions_stride); } template <typename T> inline void meshopt_spatialSortTriangles(T* destination, const T* indices, size_t index_count, const float* vertex_positions, size_t vertex_count, size_t vertex_positions_stride) { meshopt_IndexAdapter<T> in(0, indices, index_count); meshopt_IndexAdapter<T> out(destination, 0, index_count); meshopt_spatialSortTriangles(out.data, in.data, index_count, vertex_positions, vertex_count, vertex_positions_stride); } #endif /** * Copyright (c) 2016-2023 Arseny Kapoulkine * * Permission is hereby granted, free of charge, to any person * obtaining a copy of this software and associated documentation * files (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, * copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following * conditions: * * The above copyright notice and this permission notice shall be * included in all copies or substantial portions of the Software. 
* * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. */
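A minimal usage sketch (not part of the header) may help here: it shows the templated wrappers above letting a 16-bit index buffer pass through the 32-bit C API via meshopt_IndexAdapter, plus one of the inline quantization helpers. The Vertex struct, the buffer names and remap_example are hypothetical.

#include <vector>

struct Vertex { float px, py, pz, nx, ny, nz; };

void remap_example(const std::vector<Vertex> &vertices, const std::vector<unsigned short> &indices)
{
	std::vector<unsigned int> remap(vertices.size());

	// T = unsigned short: the wrapper widens the indices to unsigned int internally
	size_t unique = meshopt_generateVertexRemap(remap.data(), indices.data(), indices.size(), vertices.data(), vertices.size(), sizeof(Vertex));

	std::vector<Vertex> new_vertices(unique);
	std::vector<unsigned short> new_indices(indices.size());
	meshopt_remapVertexBuffer(new_vertices.data(), vertices.data(), vertices.size(), sizeof(Vertex), remap.data());
	meshopt_remapIndexBuffer(new_indices.data(), indices.data(), indices.size(), remap.data());

	// The quantization helpers are plain inline functions, e.g. a half-float x coordinate:
	unsigned short hx = meshopt_quantizeHalf(new_vertices[0].px);
	(void)hx;
}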
0
repos/c2z/use_cases/JoltPhysics
repos/c2z/use_cases/JoltPhysics/include/JoltViewer.h
// Jolt Physics Library (https://github.com/jrouwe/JoltPhysics)
// SPDX-FileCopyrightText: 2021 Jorrit Rouwe
// SPDX-License-Identifier: MIT

#pragma once

#include <Application/Application.h>
#ifdef JPH_DEBUG_RENDERER
	#include <Jolt/Renderer/DebugRendererPlayback.h>
#else
	// Hack to still compile DebugRenderer inside the test framework when Jolt is compiled without
	#define JPH_DEBUG_RENDERER
	#include <Jolt/Renderer/DebugRendererPlayback.h>
	#undef JPH_DEBUG_RENDERER
#endif

using namespace std;

// Application that views recordings produced by DebugRendererRecorder
class JoltViewer : public Application
{
public:
	// Constructor / destructor
	JoltViewer();

	// Render the frame
	virtual bool			RenderFrame(float inDeltaTime) override;

private:
	enum class EPlaybackMode
	{
		Rewind,
		StepBack,
		Stop,
		StepForward,
		Play
	};

	DebugRendererPlayback	mRendererPlayback { *mDebugRenderer };

	EPlaybackMode			mPlaybackMode = EPlaybackMode::Play;	// Current playback state. Indicates if we're playing or scrubbing back/forward.
	uint					mCurrentFrame = 0;
};
0
repos/c2z/use_cases/JoltPhysics
repos/c2z/use_cases/JoltPhysics/include/Jolt.h
// Jolt Physics Library (https://github.com/jrouwe/JoltPhysics)
// SPDX-FileCopyrightText: 2021 Jorrit Rouwe
// SPDX-License-Identifier: MIT

#pragma once

// Project includes
#include <Jolt/Core/Core.h>
#include <Jolt/Core/ARMNeon.h>
#include <Jolt/Core/Memory.h>
#include <Jolt/Core/STLAllocator.h>
#include <Jolt/Core/IssueReporting.h>
#include <Jolt/Math/Math.h>
#include <Jolt/Math/Vec4.h>
#include <Jolt/Math/Mat44.h>
#include <Jolt/Math/Real.h>
0
repos/c2z/use_cases/JoltPhysics
repos/c2z/use_cases/JoltPhysics/include/ConfigurationString.h
// Jolt Physics Library (https://github.com/jrouwe/JoltPhysics)
// SPDX-FileCopyrightText: 2023 Jorrit Rouwe
// SPDX-License-Identifier: MIT

#pragma once

JPH_NAMESPACE_BEGIN

/// Construct a string that lists the most important configuration settings
inline const char *GetConfigurationString()
{
	return JPH_IF_SINGLE_PRECISION_ELSE("Single", "Double") " precision "
#if defined(JPH_CPU_X86)
		"x86 "
#elif defined(JPH_CPU_ARM)
		"ARM "
#elif defined(JPH_PLATFORM_WASM)
		"WASM "
#endif
#if JPH_CPU_ADDRESS_BITS == 64
		"64-bit "
#elif JPH_CPU_ADDRESS_BITS == 32
		"32-bit "
#endif
		"with instructions: "
#ifdef JPH_USE_NEON
		"NEON "
#endif
#ifdef JPH_USE_SSE
		"SSE2 "
#endif
#ifdef JPH_USE_SSE4_1
		"SSE4.1 "
#endif
#ifdef JPH_USE_SSE4_2
		"SSE4.2 "
#endif
#ifdef JPH_USE_AVX
		"AVX "
#endif
#ifdef JPH_USE_AVX2
		"AVX2 "
#endif
#ifdef JPH_USE_AVX512
		"AVX512 "
#endif
#ifdef JPH_USE_F16C
		"F16C "
#endif
#ifdef JPH_USE_LZCNT
		"LZCNT "
#endif
#ifdef JPH_USE_TZCNT
		"TZCNT "
#endif
#ifdef JPH_USE_FMADD
		"FMADD "
#endif
#ifdef JPH_CROSS_PLATFORM_DETERMINISTIC
		"(Cross Platform Deterministic) "
#endif
#ifdef JPH_FLOATING_POINT_EXCEPTIONS_ENABLED
		"(FP Exceptions) "
#endif
#ifdef _DEBUG
		"(Debug) "
#endif
	;
}

JPH_NAMESPACE_END
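A short usage sketch (print_configuration is a hypothetical wrapper; any output mechanism works equally well):

#include <cstdio>

void print_configuration()
{
	// Prints e.g. "Single precision x86 64-bit with instructions: SSE2 SSE4.1 ..."
	std::printf("%s\n", JPH::GetConfigurationString());
}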
0
repos/c2z/use_cases/JoltPhysics
repos/c2z/use_cases/JoltPhysics/include/RegisterTypes.h
// Jolt Physics Library (https://github.com/jrouwe/JoltPhysics)
// SPDX-FileCopyrightText: 2021 Jorrit Rouwe
// SPDX-License-Identifier: MIT

#pragma once

JPH_NAMESPACE_BEGIN

/// Register all physics types with the factory
extern void RegisterTypes();

/// Unregisters all types with the factory and cleans up the default material
extern void UnregisterTypes();

JPH_NAMESPACE_END
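These two functions are normally paired with allocator and Factory setup; a rough sketch following the pattern from Jolt's samples (init_jolt and shutdown_jolt are hypothetical names):

#include <Jolt/Jolt.h>
#include <Jolt/Core/Factory.h>
#include <Jolt/RegisterTypes.h>

void init_jolt()
{
	JPH::RegisterDefaultAllocator();              // install the default allocation hooks
	JPH::Factory::sInstance = new JPH::Factory(); // factory that RegisterTypes fills in
	JPH::RegisterTypes();                         // register all physics types
}

void shutdown_jolt()
{
	JPH::UnregisterTypes();                       // also cleans up the default material
	delete JPH::Factory::sInstance;
	JPH::Factory::sInstance = nullptr;
}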
0
repos/c2z/use_cases/JoltPhysics/include
repos/c2z/use_cases/JoltPhysics/include/ObjectStream/SerializableAttribute.h
// Jolt Physics Library (https://github.com/jrouwe/JoltPhysics) // SPDX-FileCopyrightText: 2021 Jorrit Rouwe // SPDX-License-Identifier: MIT #pragma once JPH_NAMESPACE_BEGIN class RTTI; class IObjectStreamIn; class IObjectStreamOut; /// Data type enum class EOSDataType { /// Control codes Declare, ///< Used to declare the attributes of a new object type Object, ///< Start of a new object Instance, ///< Used in attribute declaration, indicates that an object is an instanced attribute (no pointer) Pointer, ///< Used in attribute declaration, indicates that an object is a pointer attribute Array, ///< Used in attribute declaration, indicates that this is an array of objects // Basic types (primitives) #define JPH_DECLARE_PRIMITIVE(name) T_##name, // This file uses the JPH_DECLARE_PRIMITIVE macro to define all types #include <Jolt/ObjectStream/ObjectStreamTypes.h> // Error values for read functions Invalid, ///< Next token on the stream was not a valid data type }; /// Attributes are members of classes that need to be serialized. class SerializableAttribute { public: ///@ Serialization functions using pGetMemberPrimitiveType = const RTTI * (*)(); using pIsType = bool (*)(int inArrayDepth, EOSDataType inDataType, const char *inClassName); using pReadData = bool (*)(IObjectStreamIn &ioStream, void *inObject); using pWriteData = void (*)(IObjectStreamOut &ioStream, const void *inObject); using pWriteDataType = void (*)(IObjectStreamOut &ioStream); /// Constructor SerializableAttribute(const char *inName, uint inMemberOffset, pGetMemberPrimitiveType inGetMemberPrimitiveType, pIsType inIsType, pReadData inReadData, pWriteData inWriteData, pWriteDataType inWriteDataType) : mName(inName), mMemberOffset(inMemberOffset), mGetMemberPrimitiveType(inGetMemberPrimitiveType), mIsType(inIsType), mReadData(inReadData), mWriteData(inWriteData), mWriteDataType(inWriteDataType) { } /// Construct from other attribute with base class offset SerializableAttribute(const SerializableAttribute &inOther, int inBaseOffset) : mName(inOther.mName), mMemberOffset(inOther.mMemberOffset + inBaseOffset), mGetMemberPrimitiveType(inOther.mGetMemberPrimitiveType), mIsType(inOther.mIsType), mReadData(inOther.mReadData), mWriteData(inOther.mWriteData), mWriteDataType(inOther.mWriteDataType) { } /// Name of the attribute void SetName(const char *inName) { mName = inName; } const char * GetName() const { return mName; } /// Access to the memory location that contains the member template <class T> inline T * GetMemberPointer(void *inObject) const { return reinterpret_cast<T *>(reinterpret_cast<uint8 *>(inObject) + mMemberOffset); } template <class T> inline const T * GetMemberPointer(const void *inObject) const { return reinterpret_cast<const T *>(reinterpret_cast<const uint8 *>(inObject) + mMemberOffset); } /// In case this attribute contains an RTTI type, return it (note that a Array<sometype> will return the rtti of sometype) const RTTI * GetMemberPrimitiveType() const { return mGetMemberPrimitiveType(); } /// Check if this attribute is of a specific type bool IsType(int inArrayDepth, EOSDataType inDataType, const char *inClassName) const { return mIsType(inArrayDepth, inDataType, inClassName); } /// Read the data for this attribute into attribute containing class inObject bool ReadData(IObjectStreamIn &ioStream, void *inObject) const { return mReadData(ioStream, GetMemberPointer<void>(inObject)); } /// Write the data for this attribute from attribute containing class inObject void WriteData(IObjectStreamOut &ioStream, const void 
*inObject) const { mWriteData(ioStream, GetMemberPointer<void>(inObject)); } /// Write the data type of this attribute to a stream void WriteDataType(IObjectStreamOut &ioStream) const { mWriteDataType(ioStream); } private: // Name of the attribute const char * mName; // Offset of the member relative to the class uint mMemberOffset; // In case this attribute contains an RTTI type, return it (note that an Array<sometype> will return the rtti of sometype) pGetMemberPrimitiveType mGetMemberPrimitiveType; // Serialization operations pIsType mIsType; pReadData mReadData; pWriteData mWriteData; pWriteDataType mWriteDataType; }; JPH_NAMESPACE_END
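An illustrative sketch of what GetMemberPointer computes: the attribute stores a byte offset, so reading a member is just base pointer plus offset (MyObject and read_value are hypothetical):

struct MyObject { int mIgnored; float mValue; };

float read_value(const JPH::SerializableAttribute &inAttr, const MyObject &inObj)
{
	// For an attribute constructed with offsetof(MyObject, mValue), this
	// resolves to &inObj.mValue via the stored mMemberOffset.
	return *inAttr.GetMemberPointer<float>(&inObj);
}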
0
repos/c2z/use_cases/JoltPhysics/include
repos/c2z/use_cases/JoltPhysics/include/ObjectStream/SerializableObject.h
// Jolt Physics Library (https://github.com/jrouwe/JoltPhysics) // SPDX-FileCopyrightText: 2021 Jorrit Rouwe // SPDX-License-Identifier: MIT #pragma once #include <Jolt/ObjectStream/ObjectStream.h> JPH_NAMESPACE_BEGIN ////////////////////////////////////////////////////////////////////////////////////////// // Helper macros ////////////////////////////////////////////////////////////////////////////////////////// // JPH_DECLARE_SERIALIZATION_FUNCTIONS #define JPH_DECLARE_SERIALIZATION_FUNCTIONS(prefix, class_name) \ prefix bool OSReadData(IObjectStreamIn &ioStream, class_name &inInstance); \ prefix bool OSReadData(IObjectStreamIn &ioStream, class_name *&inPointer); \ prefix bool OSIsType(class_name *, int inArrayDepth, EOSDataType inDataType, const char *inClassName); \ prefix bool OSIsType(class_name **, int inArrayDepth, EOSDataType inDataType, const char *inClassName); \ prefix void OSWriteData(IObjectStreamOut &ioStream, const class_name &inInstance); \ prefix void OSWriteData(IObjectStreamOut &ioStream, class_name *const &inPointer); \ prefix void OSWriteDataType(IObjectStreamOut &ioStream, class_name *); \ prefix void OSWriteDataType(IObjectStreamOut &ioStream, class_name **); // JPH_IMPLEMENT_SERIALIZATION_FUNCTIONS #define JPH_IMPLEMENT_SERIALIZATION_FUNCTIONS(class_name) \ bool OSReadData(IObjectStreamIn &ioStream, class_name &inInstance) \ { \ return ioStream.ReadClassData(#class_name, (void *)&inInstance); \ } \ bool OSReadData(IObjectStreamIn &ioStream, class_name *&inPointer) \ { \ return ioStream.ReadPointerData(JPH_RTTI(class_name), (void **)&inPointer); \ } \ bool OSIsType(class_name *, int inArrayDepth, EOSDataType inDataType, const char *inClassName) \ { \ return inArrayDepth == 0 && inDataType == EOSDataType::Instance && strcmp(inClassName, #class_name) == 0; \ } \ bool OSIsType(class_name **, int inArrayDepth, EOSDataType inDataType, const char *inClassName) \ { \ return inArrayDepth == 0 && inDataType == EOSDataType::Pointer && strcmp(inClassName, #class_name) == 0; \ } \ void OSWriteData(IObjectStreamOut &ioStream, const class_name &inInstance) \ { \ ioStream.WriteClassData(JPH_RTTI(class_name), (void *)&inInstance); \ } \ void OSWriteData(IObjectStreamOut &ioStream, class_name *const &inPointer) \ { \ if (inPointer) \ ioStream.WritePointerData(GetRTTI(inPointer), (void *)inPointer); \ else \ ioStream.WritePointerData(nullptr, nullptr); \ } \ void OSWriteDataType(IObjectStreamOut &ioStream, class_name *) \ { \ ioStream.WriteDataType(EOSDataType::Instance); \ ioStream.WriteName(#class_name); \ } \ void OSWriteDataType(IObjectStreamOut &ioStream, class_name **) \ { \ ioStream.WriteDataType(EOSDataType::Pointer); \ ioStream.WriteName(#class_name); \ } ////////////////////////////////////////////////////////////////////////////////////////// // Use these macros on non-virtual objects to make them serializable ////////////////////////////////////////////////////////////////////////////////////////// // JPH_DECLARE_SERIALIZABLE_NON_VIRTUAL #define JPH_DECLARE_SERIALIZABLE_NON_VIRTUAL(class_name) \ public: \ JPH_DECLARE_RTTI_NON_VIRTUAL(class_name) \ JPH_DECLARE_SERIALIZATION_FUNCTIONS(friend, class_name) \ // JPH_IMPLEMENT_SERIALIZABLE_NON_VIRTUAL #define JPH_IMPLEMENT_SERIALIZABLE_NON_VIRTUAL(class_name) \ JPH_IMPLEMENT_SERIALIZATION_FUNCTIONS(class_name) \ JPH_IMPLEMENT_RTTI_NON_VIRTUAL(class_name) \ ////////////////////////////////////////////////////////////////////////////////////////// // Same as above, but when you cannot insert the declaration in the class itself 
////////////////////////////////////////////////////////////////////////////////////////// // JPH_DECLARE_SERIALIZABLE_OUTSIDE_CLASS #define JPH_DECLARE_SERIALIZABLE_OUTSIDE_CLASS(class_name) \ JPH_DECLARE_RTTI_OUTSIDE_CLASS(class_name) \ JPH_DECLARE_SERIALIZATION_FUNCTIONS(extern, class_name) \ // JPH_IMPLEMENT_SERIALIZABLE_OUTSIDE_CLASS #define JPH_IMPLEMENT_SERIALIZABLE_OUTSIDE_CLASS(class_name) \ JPH_IMPLEMENT_SERIALIZATION_FUNCTIONS(class_name) \ JPH_IMPLEMENT_RTTI_OUTSIDE_CLASS(class_name) \ ////////////////////////////////////////////////////////////////////////////////////////// // Same as above, but for classes that have virtual functions ////////////////////////////////////////////////////////////////////////////////////////// // JPH_DECLARE_SERIALIZABLE_VIRTUAL - Use for concrete, non-base classes #define JPH_DECLARE_SERIALIZABLE_VIRTUAL(class_name) \ public: \ JPH_DECLARE_RTTI_VIRTUAL(class_name) \ JPH_DECLARE_SERIALIZATION_FUNCTIONS(friend, class_name) \ // JPH_IMPLEMENT_SERIALIZABLE_VIRTUAL #define JPH_IMPLEMENT_SERIALIZABLE_VIRTUAL(class_name) \ JPH_IMPLEMENT_SERIALIZATION_FUNCTIONS(class_name) \ JPH_IMPLEMENT_RTTI_VIRTUAL(class_name) \ // JPH_DECLARE_SERIALIZABLE_ABSTRACT - Use for abstract, non-base classes #define JPH_DECLARE_SERIALIZABLE_ABSTRACT(class_name) \ public: \ JPH_DECLARE_RTTI_ABSTRACT(class_name) \ JPH_DECLARE_SERIALIZATION_FUNCTIONS(friend, class_name) \ // JPH_IMPLEMENT_SERIALIZABLE_ABSTRACT #define JPH_IMPLEMENT_SERIALIZABLE_ABSTRACT(class_name) \ JPH_IMPLEMENT_SERIALIZATION_FUNCTIONS(class_name) \ JPH_IMPLEMENT_RTTI_ABSTRACT(class_name) \ // JPH_DECLARE_SERIALIZABLE_VIRTUAL_BASE - Use for concrete base classes #define JPH_DECLARE_SERIALIZABLE_VIRTUAL_BASE(class_name) \ public: \ JPH_DECLARE_RTTI_VIRTUAL_BASE(class_name) \ JPH_DECLARE_SERIALIZATION_FUNCTIONS(friend, class_name) \ // JPH_IMPLEMENT_SERIALIZABLE_VIRTUAL_BASE #define JPH_IMPLEMENT_SERIALIZABLE_VIRTUAL_BASE(class_name) \ JPH_IMPLEMENT_SERIALIZATION_FUNCTIONS(class_name) \ JPH_IMPLEMENT_RTTI_VIRTUAL_BASE(class_name) \ // JPH_DECLARE_SERIALIZABLE_ABSTRACT_BASE - Use for abstract base class #define JPH_DECLARE_SERIALIZABLE_ABSTRACT_BASE(class_name) \ public: \ JPH_DECLARE_RTTI_ABSTRACT_BASE(class_name) \ JPH_DECLARE_SERIALIZATION_FUNCTIONS(friend, class_name) \ // JPH_IMPLEMENT_SERIALIZABLE_ABSTRACT_BASE #define JPH_IMPLEMENT_SERIALIZABLE_ABSTRACT_BASE(class_name) \ JPH_IMPLEMENT_SERIALIZATION_FUNCTIONS(class_name) \ JPH_IMPLEMENT_RTTI_ABSTRACT_BASE(class_name) /// Classes must be derived from SerializableObject if you want to be able to save pointers or /// reference counting pointers to objects of this or derived classes. The type will automatically /// be determined during serialization and upon deserialization it will be restored correctly. class SerializableObject { JPH_DECLARE_SERIALIZABLE_ABSTRACT_BASE(SerializableObject) public: /// Constructor virtual ~SerializableObject() = default; }; JPH_NAMESPACE_END
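A sketch of how these macros are typically paired, modeled on Jolt's own settings classes (MySettings and mRadius are hypothetical). In the header:

class MySettings : public JPH::SerializableObject
{
	JPH_DECLARE_SERIALIZABLE_VIRTUAL(MySettings)

public:
	float			mRadius = 1.0f;
};

And in the matching .cpp, RTTI and the serialized members are registered:

JPH_IMPLEMENT_SERIALIZABLE_VIRTUAL(MySettings)
{
	JPH_ADD_BASE_CLASS(MySettings, SerializableObject)
	JPH_ADD_ATTRIBUTE(MySettings, mRadius)
}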
0
repos/c2z/use_cases/JoltPhysics/include
repos/c2z/use_cases/JoltPhysics/include/ObjectStream/ObjectStreamIn.h
// Jolt Physics Library (https://github.com/jrouwe/JoltPhysics) // SPDX-FileCopyrightText: 2021 Jorrit Rouwe // SPDX-License-Identifier: MIT #pragma once #include <Jolt/ObjectStream/ObjectStream.h> #include <Jolt/Core/Reference.h> #include <Jolt/Core/RTTI.h> #include <Jolt/Core/UnorderedMap.h> JPH_SUPPRESS_WARNINGS_STD_BEGIN #include <fstream> JPH_SUPPRESS_WARNINGS_STD_END JPH_NAMESPACE_BEGIN /// ObjectStreamIn contains all logic for reading an object from disk. It is the base /// class for the text and binary input streams (ObjectStreamTextIn and ObjectStreamBinaryIn). class ObjectStreamIn : public IObjectStreamIn { private: struct ClassDescription; public: /// Main function to read an object from a stream template <class T> static bool sReadObject(istream &inStream, T *&outObject) { // Create the input stream bool result = false; ObjectStreamIn *stream = ObjectStreamIn::Open(inStream); if (stream) { // Read the object outObject = (T *)stream->Read(JPH_RTTI(T)); result = (outObject != nullptr); delete stream; } return result; } /// Main function to read an object from a stream (reference counting pointer version) template <class T> static bool sReadObject(istream &inStream, Ref<T> &outObject) { T *object = nullptr; bool result = sReadObject(inStream, object); outObject = object; return result; } /// Main function to read an object from a file template <class T> static bool sReadObject(const char *inFileName, T *&outObject) { std::ifstream stream; stream.open(inFileName, std::ifstream::in | std::ifstream::binary); if (!stream.is_open()) return false; return sReadObject(stream, outObject); } /// Main function to read an object from a file (reference counting pointer version) template <class T> static bool sReadObject(const char *inFileName, Ref<T> &outObject) { T *object = nullptr; bool result = sReadObject(inFileName, object); outObject = object; return result; } ////////////////////////////////////////////////////// // EVERYTHING BELOW THIS SHOULD NOT DIRECTLY BE CALLED ////////////////////////////////////////////////////// ///@name Serialization operations void * Read(const RTTI *inRTTI); void * ReadObject(const RTTI *& outRTTI); bool ReadRTTI(); virtual bool ReadClassData(const char *inClassName, void *inInstance) override; bool ReadClassData(const ClassDescription &inClassDesc, void *inInstance); virtual bool ReadPointerData(const RTTI *inRTTI, void **inPointer, int inRefCountOffset = -1) override; bool SkipAttributeData(int inArrayDepth, EOSDataType inDataType, const char *inClassName); protected: /// Constructor explicit ObjectStreamIn(istream &inStream); /// Determine the type and version of an object stream static bool GetInfo(istream &inStream, EStreamType &outType, int &outVersion, int &outRevision); /// Static constructor static ObjectStreamIn * Open(istream &inStream); istream & mStream; private: /// Class descriptions struct AttributeDescription { int mArrayDepth = 0; EOSDataType mSourceType = EOSDataType::Invalid; EOSDataType mDestinationType = EOSDataType::Invalid; String mClassName; int mIndex = -1; }; struct ClassDescription { ClassDescription() = default; explicit ClassDescription(const RTTI *inRTTI) : mRTTI(inRTTI) { } const RTTI * mRTTI = nullptr; Array<AttributeDescription> mAttributes; }; struct ObjectInfo { ObjectInfo() = default; ObjectInfo(void *inInstance, const RTTI *inRTTI) : mInstance(inInstance), mRTTI(inRTTI) { } void * mInstance = nullptr; const RTTI * mRTTI = nullptr; }; struct Link { void ** mPointer; int mRefCountOffset; Identifier mIdentifier; const RTTI 
* mRTTI; }; using IdentifierMap = UnorderedMap<Identifier, ObjectInfo>; using ClassDescriptionMap = UnorderedMap<String, ClassDescription>; ClassDescriptionMap mClassDescriptionMap; IdentifierMap mIdentifierMap; ///< Links identifier to an object pointer Array<Link> mUnresolvedLinks; ///< All pointers (links) are resolved after reading the entire file, e.g. when all objects exist }; JPH_NAMESPACE_END
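A minimal read sketch using the public entry points above (MySettings is the hypothetical serializable class sketched after SerializableObject.h):

#include <Jolt/ObjectStream/ObjectStreamIn.h>

bool load_settings(const char *inFileName, MySettings *&outSettings)
{
	// Open auto-detects text vs. binary; pointer links are resolved once the
	// whole stream has been read (see mUnresolvedLinks above).
	return JPH::ObjectStreamIn::sReadObject(inFileName, outSettings);
}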
0
repos/c2z/use_cases/JoltPhysics/include
repos/c2z/use_cases/JoltPhysics/include/ObjectStream/ObjectStream.h
// Jolt Physics Library (https://github.com/jrouwe/JoltPhysics) // SPDX-FileCopyrightText: 2021 Jorrit Rouwe // SPDX-License-Identifier: MIT #pragma once #include <Jolt/Core/StaticArray.h> #include <Jolt/Core/Reference.h> #include <Jolt/ObjectStream/SerializableAttribute.h> #include <Jolt/Core/RTTI.h> JPH_NAMESPACE_BEGIN /// Base class for object stream input and output streams. class ObjectStream { public: /// Stream type enum class EStreamType { Text, Binary, }; protected: /// Constructor virtual ~ObjectStream() = default; /// Identifier for objects using Identifier = uint32; static constexpr int sVersion = 1; static constexpr int sRevision = 0; static constexpr Identifier sNullIdentifier = 0; }; /// Interface class for reading from an object stream class IObjectStreamIn : public ObjectStream { public: ///@name Input type specific operations virtual bool ReadDataType(EOSDataType &outType) = 0; virtual bool ReadName(String &outName) = 0; virtual bool ReadIdentifier(Identifier &outIdentifier) = 0; virtual bool ReadCount(uint32 &outCount) = 0; ///@name Read primitives virtual bool ReadPrimitiveData(uint8 &outPrimitive) = 0; virtual bool ReadPrimitiveData(uint16 &outPrimitive) = 0; virtual bool ReadPrimitiveData(int &outPrimitive) = 0; virtual bool ReadPrimitiveData(uint32 &outPrimitive) = 0; virtual bool ReadPrimitiveData(uint64 &outPrimitive) = 0; virtual bool ReadPrimitiveData(float &outPrimitive) = 0; virtual bool ReadPrimitiveData(double &outPrimitive) = 0; virtual bool ReadPrimitiveData(bool &outPrimitive) = 0; virtual bool ReadPrimitiveData(String &outPrimitive) = 0; virtual bool ReadPrimitiveData(Float3 &outPrimitive) = 0; virtual bool ReadPrimitiveData(Double3 &outPrimitive) = 0; virtual bool ReadPrimitiveData(Vec3 &outPrimitive) = 0; virtual bool ReadPrimitiveData(DVec3 &outPrimitive) = 0; virtual bool ReadPrimitiveData(Vec4 &outPrimitive) = 0; virtual bool ReadPrimitiveData(Quat &outPrimitive) = 0; virtual bool ReadPrimitiveData(Mat44 &outPrimitive) = 0; virtual bool ReadPrimitiveData(DMat44 &outPrimitive) = 0; ///@name Read compounds virtual bool ReadClassData(const char *inClassName, void *inInstance) = 0; virtual bool ReadPointerData(const RTTI *inRTTI, void **inPointer, int inRefCountOffset = -1) = 0; }; /// Interface class for writing to an object stream class IObjectStreamOut : public ObjectStream { public: ///@name Output type specific operations virtual void WriteDataType(EOSDataType inType) = 0; virtual void WriteName(const char *inName) = 0; virtual void WriteIdentifier(Identifier inIdentifier) = 0; virtual void WriteCount(uint32 inCount) = 0; ///@name Write primitives virtual void WritePrimitiveData(const uint8 &inPrimitive) = 0; virtual void WritePrimitiveData(const uint16 &inPrimitive) = 0; virtual void WritePrimitiveData(const int &inPrimitive) = 0; virtual void WritePrimitiveData(const uint32 &inPrimitive) = 0; virtual void WritePrimitiveData(const uint64 &inPrimitive) = 0; virtual void WritePrimitiveData(const float &inPrimitive) = 0; virtual void WritePrimitiveData(const double &inPrimitive) = 0; virtual void WritePrimitiveData(const bool &inPrimitive) = 0; virtual void WritePrimitiveData(const String &inPrimitive) = 0; virtual void WritePrimitiveData(const Float3 &inPrimitive) = 0; virtual void WritePrimitiveData(const Double3 &inPrimitive) = 0; virtual void WritePrimitiveData(const Vec3 &inPrimitive) = 0; virtual void WritePrimitiveData(const DVec3 &inPrimitive) = 0; virtual void WritePrimitiveData(const Vec4 &inPrimitive) = 0; virtual void 
WritePrimitiveData(const Quat &inPrimitive) = 0; virtual void WritePrimitiveData(const Mat44 &inPrimitive) = 0; virtual void WritePrimitiveData(const DMat44 &inPrimitive) = 0; ///@name Write compounds virtual void WritePointerData(const RTTI *inRTTI, const void *inPointer) = 0; virtual void WriteClassData(const RTTI *inRTTI, const void *inInstance) = 0; ///@name Layout hints (for text output) virtual void HintNextItem() { /* Default is do nothing */ } virtual void HintIndentUp() { /* Default is do nothing */ } virtual void HintIndentDown() { /* Default is do nothing */ } }; // Define macro to declare functions for a specific primitive type #define JPH_DECLARE_PRIMITIVE(name) \ bool OSIsType(name *, int inArrayDepth, EOSDataType inDataType, const char *inClassName); \ bool OSReadData(IObjectStreamIn &ioStream, name &outPrimitive); \ void OSWriteDataType(IObjectStreamOut &ioStream, name *); \ void OSWriteData(IObjectStreamOut &ioStream, const name &inPrimitive); // This file uses the JPH_DECLARE_PRIMITIVE macro to define all types #include <Jolt/ObjectStream/ObjectStreamTypes.h> // Define serialization templates template <class T> bool OSIsType(Array<T> *, int inArrayDepth, EOSDataType inDataType, const char *inClassName) { return (inArrayDepth > 0 && OSIsType((T *)nullptr, inArrayDepth - 1, inDataType, inClassName)); } template <class T, uint N> bool OSIsType(StaticArray<T, N> *, int inArrayDepth, EOSDataType inDataType, const char *inClassName) { return (inArrayDepth > 0 && OSIsType((T *)nullptr, inArrayDepth - 1, inDataType, inClassName)); } template <class T, uint N> bool OSIsType(T (*)[N], int inArrayDepth, EOSDataType inDataType, const char *inClassName) { return (inArrayDepth > 0 && OSIsType((T *)nullptr, inArrayDepth - 1, inDataType, inClassName)); } template <class T> bool OSIsType(Ref<T> *, int inArrayDepth, EOSDataType inDataType, const char *inClassName) { return OSIsType((T *)nullptr, inArrayDepth, inDataType, inClassName); } template <class T> bool OSIsType(RefConst<T> *, int inArrayDepth, EOSDataType inDataType, const char *inClassName) { return OSIsType((T *)nullptr, inArrayDepth, inDataType, inClassName); } /// Define serialization templates for dynamic arrays template <class T> bool OSReadData(IObjectStreamIn &ioStream, Array<T> &inArray) { bool continue_reading = true; // Read array length uint32 array_length; continue_reading = ioStream.ReadCount(array_length); // Read array items if (continue_reading) { inArray.resize(array_length); for (uint32 el = 0; el < array_length && continue_reading; ++el) continue_reading = OSReadData(ioStream, inArray[el]); } return continue_reading; } /// Define serialization templates for static arrays template <class T, uint N> bool OSReadData(IObjectStreamIn &ioStream, StaticArray<T, N> &inArray) { bool continue_reading = true; // Read array length uint32 array_length; continue_reading = ioStream.ReadCount(array_length); // Check if we can fit this many elements if (array_length > N) return false; // Read array items if (continue_reading) { inArray.resize(array_length); for (uint32 el = 0; el < array_length && continue_reading; ++el) continue_reading = OSReadData(ioStream, inArray[el]); } return continue_reading; } /// Define serialization templates for C style arrays template <class T, uint N> bool OSReadData(IObjectStreamIn &ioStream, T (&inArray)[N]) { bool continue_reading = true; // Read array length uint32 array_length; continue_reading = ioStream.ReadCount(array_length); if (array_length != N) return false; // Read array items for 
(uint32 el = 0; el < N && continue_reading; ++el) continue_reading = OSReadData(ioStream, inArray[el]); return continue_reading; } /// Define serialization templates for references template <class T> bool OSReadData(IObjectStreamIn &ioStream, Ref<T> &inRef) { return ioStream.ReadPointerData(JPH_RTTI(T), inRef.InternalGetPointer(), T::sInternalGetRefCountOffset()); } template <class T> bool OSReadData(IObjectStreamIn &ioStream, RefConst<T> &inRef) { return ioStream.ReadPointerData(JPH_RTTI(T), inRef.InternalGetPointer(), T::sInternalGetRefCountOffset()); } // Define serialization templates for dynamic arrays template <class T> void OSWriteDataType(IObjectStreamOut &ioStream, Array<T> *) { ioStream.WriteDataType(EOSDataType::Array); OSWriteDataType(ioStream, (T *)nullptr); } template <class T> void OSWriteData(IObjectStreamOut &ioStream, const Array<T> &inArray) { // Write size of array ioStream.HintNextItem(); ioStream.WriteCount((uint32)inArray.size()); // Write data in array ioStream.HintIndentUp(); for (const T &v : inArray) OSWriteData(ioStream, v); ioStream.HintIndentDown(); } /// Define serialization templates for static arrays template <class T, uint N> void OSWriteDataType(IObjectStreamOut &ioStream, StaticArray<T, N> *) { ioStream.WriteDataType(EOSDataType::Array); OSWriteDataType(ioStream, (T *)nullptr); } template <class T, uint N> void OSWriteData(IObjectStreamOut &ioStream, const StaticArray<T, N> &inArray) { // Write size of array ioStream.HintNextItem(); ioStream.WriteCount(inArray.size()); // Write data in array ioStream.HintIndentUp(); for (const typename StaticArray<T, N>::value_type &v : inArray) OSWriteData(ioStream, v); ioStream.HintIndentDown(); } /// Define serialization templates for C style arrays template <class T, uint N> void OSWriteDataType(IObjectStreamOut &ioStream, T (*)[N]) { ioStream.WriteDataType(EOSDataType::Array); OSWriteDataType(ioStream, (T *)nullptr); } template <class T, uint N> void OSWriteData(IObjectStreamOut &ioStream, const T (&inArray)[N]) { // Write size of array ioStream.HintNextItem(); ioStream.WriteCount((uint32)N); // Write data in array ioStream.HintIndentUp(); for (const T &v : inArray) OSWriteData(ioStream, v); ioStream.HintIndentDown(); } /// Define serialization templates for references template <class T> void OSWriteDataType(IObjectStreamOut &ioStream, Ref<T> *) { OSWriteDataType(ioStream, (T *)nullptr); } template <class T> void OSWriteData(IObjectStreamOut &ioStream, const Ref<T> &inRef) { if (inRef != nullptr) ioStream.WritePointerData(GetRTTI(inRef.GetPtr()), inRef.GetPtr()); else ioStream.WritePointerData(nullptr, nullptr); } template <class T> void OSWriteDataType(IObjectStreamOut &ioStream, RefConst<T> *) { OSWriteDataType(ioStream, (T *)nullptr); } template <class T> void OSWriteData(IObjectStreamOut &ioStream, const RefConst<T> &inRef) { if (inRef != nullptr) ioStream.WritePointerData(GetRTTI(inRef.GetPtr()), inRef.GetPtr()); else ioStream.WritePointerData(nullptr, nullptr); } JPH_NAMESPACE_END
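A small sketch of how the array templates compose: writing an Array<float> emits a count followed by each element through the primitive overloads (write_floats is hypothetical):

void write_floats(JPH::IObjectStreamOut &ioStream, const JPH::Array<float> &inValues)
{
	// Expands to WriteCount(size) followed by one OSWriteData call per element
	JPH::OSWriteData(ioStream, inValues);
}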
0
repos/c2z/use_cases/JoltPhysics/include
repos/c2z/use_cases/JoltPhysics/include/ObjectStream/SerializableAttributeEnum.h
// Jolt Physics Library (https://github.com/jrouwe/JoltPhysics)
// SPDX-FileCopyrightText: 2021 Jorrit Rouwe
// SPDX-License-Identifier: MIT

#pragma once

#include <Jolt/ObjectStream/SerializableAttribute.h>
#include <Jolt/ObjectStream/ObjectStream.h>

JPH_NAMESPACE_BEGIN

//////////////////////////////////////////////////////////////////////////////////////////
// Macros to add properties to be serialized
//////////////////////////////////////////////////////////////////////////////////////////

template <class MemberType>
inline void AddSerializableAttributeEnum(RTTI &inRTTI, uint inOffset, const char *inName)
{
	inRTTI.AddAttribute(SerializableAttribute(inName, inOffset,
		[]() -> const RTTI * { return nullptr; },
		[](int inArrayDepth, EOSDataType inDataType, [[maybe_unused]] const char *inClassName) { return inArrayDepth == 0 && inDataType == EOSDataType::T_uint32; },
		[](IObjectStreamIn &ioStream, void *inObject)
		{
			uint32 temporary;
			if (OSReadData(ioStream, temporary))
			{
				*reinterpret_cast<MemberType *>(inObject) = static_cast<MemberType>(temporary);
				return true;
			}
			return false;
		},
		[](IObjectStreamOut &ioStream, const void *inObject)
		{
			static_assert(sizeof(MemberType) <= sizeof(uint32));
			uint32 temporary = uint32(*reinterpret_cast<const MemberType *>(inObject));
			OSWriteData(ioStream, temporary);
		},
		[](IObjectStreamOut &ioStream) { ioStream.WriteDataType(EOSDataType::T_uint32); }));
}

// JPH_ADD_ENUM_ATTRIBUTE
#define JPH_ADD_ENUM_ATTRIBUTE(class_name, member_name) \
	AddSerializableAttributeEnum<decltype(class_name::member_name)>(inRTTI, offsetof(class_name, member_name), #member_name);

JPH_NAMESPACE_END
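A sketch of the enum path: regardless of the enum's underlying type, the value travels through the stream as a uint32 (MyObject and EMode are hypothetical):

enum class EMode : JPH::uint8 { Off, On };

class MyObject
{
public:
	EMode			mMode = EMode::Off;
};

// Inside the corresponding JPH_IMPLEMENT_SERIALIZABLE_* body:
//	JPH_ADD_ENUM_ATTRIBUTE(MyObject, mMode)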
0
repos/c2z/use_cases/JoltPhysics/include
repos/c2z/use_cases/JoltPhysics/include/ObjectStream/ObjectStreamBinaryIn.h
// Jolt Physics Library (https://github.com/jrouwe/JoltPhysics) // SPDX-FileCopyrightText: 2021 Jorrit Rouwe // SPDX-License-Identifier: MIT #pragma once #include <Jolt/ObjectStream/ObjectStreamIn.h> JPH_NAMESPACE_BEGIN /// Implementation of ObjectStream binary input stream. class ObjectStreamBinaryIn : public ObjectStreamIn { public: JPH_OVERRIDE_NEW_DELETE /// Constructor explicit ObjectStreamBinaryIn(istream &inStream); ///@name Input type specific operations virtual bool ReadDataType(EOSDataType &outType) override; virtual bool ReadName(String &outName) override; virtual bool ReadIdentifier(Identifier &outIdentifier) override; virtual bool ReadCount(uint32 &outCount) override; virtual bool ReadPrimitiveData(uint8 &outPrimitive) override; virtual bool ReadPrimitiveData(uint16 &outPrimitive) override; virtual bool ReadPrimitiveData(int &outPrimitive) override; virtual bool ReadPrimitiveData(uint32 &outPrimitive) override; virtual bool ReadPrimitiveData(uint64 &outPrimitive) override; virtual bool ReadPrimitiveData(float &outPrimitive) override; virtual bool ReadPrimitiveData(double &outPrimitive) override; virtual bool ReadPrimitiveData(bool &outPrimitive) override; virtual bool ReadPrimitiveData(String &outPrimitive) override; virtual bool ReadPrimitiveData(Float3 &outPrimitive) override; virtual bool ReadPrimitiveData(Double3 &outPrimitive) override; virtual bool ReadPrimitiveData(Vec3 &outPrimitive) override; virtual bool ReadPrimitiveData(DVec3 &outPrimitive) override; virtual bool ReadPrimitiveData(Vec4 &outPrimitive) override; virtual bool ReadPrimitiveData(Quat &outPrimitive) override; virtual bool ReadPrimitiveData(Mat44 &outPrimitive) override; virtual bool ReadPrimitiveData(DMat44 &outPrimitive) override; private: using StringTable = UnorderedMap<uint32, String>; StringTable mStringTable; uint32 mNextStringID = 0x80000000; }; JPH_NAMESPACE_END
0
repos/c2z/use_cases/JoltPhysics/include
repos/c2z/use_cases/JoltPhysics/include/ObjectStream/GetPrimitiveTypeOfType.h
// Jolt Physics Library (https://github.com/jrouwe/JoltPhysics)
// SPDX-FileCopyrightText: 2021 Jorrit Rouwe
// SPDX-License-Identifier: MIT

#pragma once

#include <Jolt/Core/RTTI.h>

JPH_NAMESPACE_BEGIN

/// Helper functions to get the underlying RTTI type of a type (so e.g. Array<sometype> will return sometype)
template <class T>
const RTTI *GetPrimitiveTypeOfType(T *)
{
	return GetRTTIOfType((T *)nullptr);
}

template <class T>
const RTTI *GetPrimitiveTypeOfType(T **)
{
	return GetRTTIOfType((T *)nullptr);
}

template <class T>
const RTTI *GetPrimitiveTypeOfType(Ref<T> *)
{
	return GetRTTIOfType((T *)nullptr);
}

template <class T>
const RTTI *GetPrimitiveTypeOfType(RefConst<T> *)
{
	return GetRTTIOfType((T *)nullptr);
}

template <class T>
const RTTI *GetPrimitiveTypeOfType(Array<T> *)
{
	return GetPrimitiveTypeOfType((T *)nullptr);
}

template <class T, uint N>
const RTTI *GetPrimitiveTypeOfType(StaticArray<T, N> *)
{
	return GetPrimitiveTypeOfType((T *)nullptr);
}

template <class T, uint N>
const RTTI *GetPrimitiveTypeOfType(T (*)[N])
{
	return GetPrimitiveTypeOfType((T *)nullptr);
}

JPH_NAMESPACE_END
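A hedged sketch of the recursive unwrapping these overloads perform, assuming MyType is a class with Jolt RTTI declared (primitive_of_nested is hypothetical):

const JPH::RTTI *primitive_of_nested()
{
	// Peels Array -> Ref -> MyType and returns MyType's RTTI
	return JPH::GetPrimitiveTypeOfType((JPH::Array<JPH::Ref<MyType>> *)nullptr);
}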
0
repos/c2z/use_cases/JoltPhysics/include
repos/c2z/use_cases/JoltPhysics/include/ObjectStream/SerializableAttributeTyped.h
// Jolt Physics Library (https://github.com/jrouwe/JoltPhysics)
// SPDX-FileCopyrightText: 2021 Jorrit Rouwe
// SPDX-License-Identifier: MIT

#pragma once

#include <Jolt/ObjectStream/SerializableAttribute.h>
#include <Jolt/ObjectStream/GetPrimitiveTypeOfType.h>
#include <Jolt/ObjectStream/ObjectStream.h>

JPH_NAMESPACE_BEGIN

//////////////////////////////////////////////////////////////////////////////////////////
// Macros to add properties to be serialized
//////////////////////////////////////////////////////////////////////////////////////////

template <class MemberType>
inline void AddSerializableAttributeTyped(RTTI &inRTTI, uint inOffset, const char *inName)
{
	inRTTI.AddAttribute(SerializableAttribute(inName, inOffset,
		[]() { return GetPrimitiveTypeOfType((MemberType *)nullptr); },
		[](int inArrayDepth, EOSDataType inDataType, const char *inClassName) { return OSIsType((MemberType *)nullptr, inArrayDepth, inDataType, inClassName); },
		[](IObjectStreamIn &ioStream, void *inObject) { return OSReadData(ioStream, *reinterpret_cast<MemberType *>(inObject)); },
		[](IObjectStreamOut &ioStream, const void *inObject) { OSWriteData(ioStream, *reinterpret_cast<const MemberType *>(inObject)); },
		[](IObjectStreamOut &ioStream) { OSWriteDataType(ioStream, (MemberType *)nullptr); }));
}

// JPH_ADD_ATTRIBUTE
#define JPH_ADD_ATTRIBUTE(class_name, member_name) \
	AddSerializableAttributeTyped<decltype(class_name::member_name)>(inRTTI, offsetof(class_name, member_name), #member_name);

JPH_NAMESPACE_END
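For illustration, the macro boils down to a single templated call; e.g. for the hypothetical float member used earlier, JPH_ADD_ATTRIBUTE(MySettings, mRadius) is equivalent to:

// decltype picks the member type, offsetof supplies the byte offset
AddSerializableAttributeTyped<float>(inRTTI, offsetof(MySettings, mRadius), "mRadius");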
0
repos/c2z/use_cases/JoltPhysics/include
repos/c2z/use_cases/JoltPhysics/include/ObjectStream/ObjectStreamTypes.h
// Jolt Physics Library (https://github.com/jrouwe/JoltPhysics)
// SPDX-FileCopyrightText: 2021 Jorrit Rouwe
// SPDX-License-Identifier: MIT

// Note: Order is important, an enum is created and its value is stored in a binary stream!
JPH_DECLARE_PRIMITIVE(uint8)
JPH_DECLARE_PRIMITIVE(uint16)
JPH_DECLARE_PRIMITIVE(int)
JPH_DECLARE_PRIMITIVE(uint32)
JPH_DECLARE_PRIMITIVE(uint64)
JPH_DECLARE_PRIMITIVE(float)
JPH_DECLARE_PRIMITIVE(bool)
JPH_DECLARE_PRIMITIVE(String)
JPH_DECLARE_PRIMITIVE(Float3)
JPH_DECLARE_PRIMITIVE(Vec3)
JPH_DECLARE_PRIMITIVE(Vec4)
JPH_DECLARE_PRIMITIVE(Quat)
JPH_DECLARE_PRIMITIVE(Mat44)
JPH_DECLARE_PRIMITIVE(double)
JPH_DECLARE_PRIMITIVE(DVec3)
JPH_DECLARE_PRIMITIVE(DMat44)
JPH_DECLARE_PRIMITIVE(Double3)

#undef JPH_DECLARE_PRIMITIVE
0
repos/c2z/use_cases/JoltPhysics/include
repos/c2z/use_cases/JoltPhysics/include/ObjectStream/ObjectStreamBinaryOut.h
// Jolt Physics Library (https://github.com/jrouwe/JoltPhysics) // SPDX-FileCopyrightText: 2021 Jorrit Rouwe // SPDX-License-Identifier: MIT #pragma once #include <Jolt/ObjectStream/ObjectStreamOut.h> JPH_NAMESPACE_BEGIN /// Implementation of ObjectStream binary output stream. class ObjectStreamBinaryOut : public ObjectStreamOut { public: JPH_OVERRIDE_NEW_DELETE /// Constructor and destructor explicit ObjectStreamBinaryOut(ostream &inStream); ///@name Output type specific operations virtual void WriteDataType(EOSDataType inType) override; virtual void WriteName(const char *inName) override; virtual void WriteIdentifier(Identifier inIdentifier) override; virtual void WriteCount(uint32 inCount) override; virtual void WritePrimitiveData(const uint8 &inPrimitive) override; virtual void WritePrimitiveData(const uint16 &inPrimitive) override; virtual void WritePrimitiveData(const int &inPrimitive) override; virtual void WritePrimitiveData(const uint32 &inPrimitive) override; virtual void WritePrimitiveData(const uint64 &inPrimitive) override; virtual void WritePrimitiveData(const float &inPrimitive) override; virtual void WritePrimitiveData(const double &inPrimitive) override; virtual void WritePrimitiveData(const bool &inPrimitive) override; virtual void WritePrimitiveData(const String &inPrimitive) override; virtual void WritePrimitiveData(const Float3 &inPrimitive) override; virtual void WritePrimitiveData(const Double3 &inPrimitive) override; virtual void WritePrimitiveData(const Vec3 &inPrimitive) override; virtual void WritePrimitiveData(const DVec3 &inPrimitive) override; virtual void WritePrimitiveData(const Vec4 &inPrimitive) override; virtual void WritePrimitiveData(const Quat &inPrimitive) override; virtual void WritePrimitiveData(const Mat44 &inPrimitive) override; virtual void WritePrimitiveData(const DMat44 &inPrimitive) override; private: using StringTable = UnorderedMap<String, uint32>; StringTable mStringTable; uint32 mNextStringID = 0x80000000; }; JPH_NAMESPACE_END
0
repos/c2z/use_cases/JoltPhysics/include
repos/c2z/use_cases/JoltPhysics/include/ObjectStream/ObjectStreamTextOut.h
// Jolt Physics Library (https://github.com/jrouwe/JoltPhysics) // SPDX-FileCopyrightText: 2021 Jorrit Rouwe // SPDX-License-Identifier: MIT #pragma once #include <Jolt/ObjectStream/ObjectStreamOut.h> JPH_NAMESPACE_BEGIN /// Implementation of ObjectStream text output stream. class ObjectStreamTextOut : public ObjectStreamOut { public: JPH_OVERRIDE_NEW_DELETE /// Constructor and destructor explicit ObjectStreamTextOut(ostream &inStream); ///@name Output type specific operations virtual void WriteDataType(EOSDataType inType) override; virtual void WriteName(const char *inName) override; virtual void WriteIdentifier(Identifier inIdentifier) override; virtual void WriteCount(uint32 inCount) override; virtual void WritePrimitiveData(const uint8 &inPrimitive) override; virtual void WritePrimitiveData(const uint16 &inPrimitive) override; virtual void WritePrimitiveData(const int &inPrimitive) override; virtual void WritePrimitiveData(const uint32 &inPrimitive) override; virtual void WritePrimitiveData(const uint64 &inPrimitive) override; virtual void WritePrimitiveData(const float &inPrimitive) override; virtual void WritePrimitiveData(const double &inPrimitive) override; virtual void WritePrimitiveData(const bool &inPrimitive) override; virtual void WritePrimitiveData(const String &inPrimitive) override; virtual void WritePrimitiveData(const Float3 &inPrimitive) override; virtual void WritePrimitiveData(const Double3 &inPrimitive) override; virtual void WritePrimitiveData(const Vec3 &inPrimitive) override; virtual void WritePrimitiveData(const DVec3 &inPrimitive) override; virtual void WritePrimitiveData(const Vec4 &inPrimitive) override; virtual void WritePrimitiveData(const Quat &inPrimitive) override; virtual void WritePrimitiveData(const Mat44 &inPrimitive) override; virtual void WritePrimitiveData(const DMat44 &inPrimitive) override; ///@name Layout hints (for text output) virtual void HintNextItem() override; virtual void HintIndentUp() override; virtual void HintIndentDown() override; private: void WriteChar(char inChar); void WriteWord(const string_view &inWord); int mIndentation = 0; }; JPH_NAMESPACE_END
0
repos/c2z/use_cases/JoltPhysics/include
repos/c2z/use_cases/JoltPhysics/include/ObjectStream/TypeDeclarations.h
// Jolt Physics Library (https://github.com/jrouwe/JoltPhysics) // SPDX-FileCopyrightText: 2021 Jorrit Rouwe // SPDX-License-Identifier: MIT #pragma once #include <Jolt/ObjectStream/SerializableObject.h> #include <Jolt/Core/Color.h> #include <Jolt/Geometry/AABox.h> #include <Jolt/Geometry/Triangle.h> #include <Jolt/Geometry/IndexedTriangle.h> JPH_NAMESPACE_BEGIN JPH_DECLARE_RTTI_OUTSIDE_CLASS(uint8); JPH_DECLARE_RTTI_OUTSIDE_CLASS(uint16); JPH_DECLARE_RTTI_OUTSIDE_CLASS(int); JPH_DECLARE_RTTI_OUTSIDE_CLASS(uint32); JPH_DECLARE_RTTI_OUTSIDE_CLASS(uint64); JPH_DECLARE_RTTI_OUTSIDE_CLASS(float); JPH_DECLARE_RTTI_OUTSIDE_CLASS(double); JPH_DECLARE_RTTI_OUTSIDE_CLASS(bool); JPH_DECLARE_RTTI_OUTSIDE_CLASS(String); JPH_DECLARE_RTTI_OUTSIDE_CLASS(Float3); JPH_DECLARE_RTTI_OUTSIDE_CLASS(Double3); JPH_DECLARE_RTTI_OUTSIDE_CLASS(Vec3); JPH_DECLARE_RTTI_OUTSIDE_CLASS(DVec3); JPH_DECLARE_RTTI_OUTSIDE_CLASS(Vec4); JPH_DECLARE_RTTI_OUTSIDE_CLASS(Quat); JPH_DECLARE_RTTI_OUTSIDE_CLASS(Mat44); JPH_DECLARE_RTTI_OUTSIDE_CLASS(DMat44); JPH_DECLARE_SERIALIZABLE_OUTSIDE_CLASS(Color); JPH_DECLARE_SERIALIZABLE_OUTSIDE_CLASS(AABox); JPH_DECLARE_SERIALIZABLE_OUTSIDE_CLASS(Triangle); JPH_DECLARE_SERIALIZABLE_OUTSIDE_CLASS(IndexedTriangle); JPH_NAMESPACE_END // These need to be added after all types have been registered or else clang under linux will not find GetRTTIOfType for the type #include <Jolt/ObjectStream/SerializableAttributeTyped.h> #include <Jolt/ObjectStream/SerializableAttributeEnum.h>
0
repos/c2z/use_cases/JoltPhysics/include
repos/c2z/use_cases/JoltPhysics/include/ObjectStream/ObjectStreamOut.h
// Jolt Physics Library (https://github.com/jrouwe/JoltPhysics) // SPDX-FileCopyrightText: 2021 Jorrit Rouwe // SPDX-License-Identifier: MIT #pragma once #include <Jolt/ObjectStream/ObjectStream.h> #include <Jolt/Core/RTTI.h> #include <Jolt/Core/UnorderedMap.h> #include <Jolt/Core/UnorderedSet.h> JPH_SUPPRESS_WARNINGS_STD_BEGIN #include <queue> #include <fstream> JPH_SUPPRESS_WARNINGS_STD_END JPH_NAMESPACE_BEGIN template <class T> using Queue = std::queue<T, std::deque<T, STLAllocator<T>>>; /// ObjectStreamOut contains all logic for writing an object to disk. It is the base /// class for the text and binary output streams (ObjectStreamTextOut and ObjectStreamBinaryOut). class ObjectStreamOut : public IObjectStreamOut { private: struct ObjectInfo; public: /// Main function to write an object to a stream template <class T> static bool sWriteObject(ostream &inStream, ObjectStream::EStreamType inType, const T &inObject) { // Create the output stream bool result = false; ObjectStreamOut *stream = ObjectStreamOut::Open(inType, inStream); if (stream) { // Write the object to the stream result = stream->Write((void *)&inObject, GetRTTI(&inObject)); delete stream; } return result; } /// Main function to write an object to a file template <class T> static bool sWriteObject(const char *inFileName, ObjectStream::EStreamType inType, const T &inObject) { std::ofstream stream; stream.open(inFileName, std::ofstream::out | std::ofstream::trunc | std::ofstream::binary); if (!stream.is_open()) return false; return sWriteObject(stream, inType, inObject); } ////////////////////////////////////////////////////// // EVERYTHING BELOW THIS SHOULD NOT DIRECTLY BE CALLED ////////////////////////////////////////////////////// ///@name Serialization operations bool Write(const void *inObject, const RTTI *inRTTI); void WriteObject(const void *inObject); void QueueRTTI(const RTTI *inRTTI); void WriteRTTI(const RTTI *inRTTI); virtual void WriteClassData(const RTTI *inRTTI, const void *inInstance) override; virtual void WritePointerData(const RTTI *inRTTI, const void *inPointer) override; protected: /// Static constructor static ObjectStreamOut * Open(EStreamType inType, ostream &inStream); /// Constructor explicit ObjectStreamOut(ostream &inStream); ostream & mStream; private: struct ObjectInfo { ObjectInfo() : mIdentifier(0), mRTTI(nullptr) { } ObjectInfo(Identifier inIdentifier, const RTTI *inRTTI) : mIdentifier(inIdentifier), mRTTI(inRTTI) { } Identifier mIdentifier; const RTTI * mRTTI; }; using IdentifierMap = UnorderedMap<const void *, ObjectInfo>; using ClassSet = UnorderedSet<const RTTI *>; using ObjectQueue = Queue<const void *>; using ClassQueue = Queue<const RTTI *>; Identifier mNextIdentifier = sNullIdentifier + 1; ///< Next free identifier for this stream IdentifierMap mIdentifierMap; ///< Links object pointer to an identifier ObjectQueue mObjectQueue; ///< Queue of objects to be written ClassSet mClassSet; ///< List of classes already written ClassQueue mClassQueue; ///< List of classes waiting to be written }; JPH_NAMESPACE_END
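A minimal write sketch mirroring the read side (MySettings again hypothetical; EStreamType::Binary works the same way, text output is human-readable):

#include <Jolt/ObjectStream/ObjectStreamOut.h>

bool save_settings(const char *inFileName, const MySettings &inSettings)
{
	return JPH::ObjectStreamOut::sWriteObject(inFileName, JPH::ObjectStream::EStreamType::Text, inSettings);
}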
0
repos/c2z/use_cases/JoltPhysics/include
repos/c2z/use_cases/JoltPhysics/include/ObjectStream/ObjectStreamTextIn.h
// Jolt Physics Library (https://github.com/jrouwe/JoltPhysics)
// SPDX-FileCopyrightText: 2021 Jorrit Rouwe
// SPDX-License-Identifier: MIT

#pragma once

#include <Jolt/ObjectStream/ObjectStreamIn.h>

JPH_NAMESPACE_BEGIN

/// Implementation of ObjectStream text input stream.
class ObjectStreamTextIn : public ObjectStreamIn
{
public:
    JPH_OVERRIDE_NEW_DELETE

    /// Constructor
    explicit ObjectStreamTextIn(istream &inStream);

    ///@name Input type specific operations
    virtual bool ReadDataType(EOSDataType &outType) override;
    virtual bool ReadName(String &outName) override;
    virtual bool ReadIdentifier(Identifier &outIdentifier) override;
    virtual bool ReadCount(uint32 &outCount) override;

    virtual bool ReadPrimitiveData(uint8 &outPrimitive) override;
    virtual bool ReadPrimitiveData(uint16 &outPrimitive) override;
    virtual bool ReadPrimitiveData(int &outPrimitive) override;
    virtual bool ReadPrimitiveData(uint32 &outPrimitive) override;
    virtual bool ReadPrimitiveData(uint64 &outPrimitive) override;
    virtual bool ReadPrimitiveData(float &outPrimitive) override;
    virtual bool ReadPrimitiveData(double &outPrimitive) override;
    virtual bool ReadPrimitiveData(bool &outPrimitive) override;
    virtual bool ReadPrimitiveData(String &outPrimitive) override;
    virtual bool ReadPrimitiveData(Float3 &outPrimitive) override;
    virtual bool ReadPrimitiveData(Double3 &outPrimitive) override;
    virtual bool ReadPrimitiveData(Vec3 &outPrimitive) override;
    virtual bool ReadPrimitiveData(DVec3 &outPrimitive) override;
    virtual bool ReadPrimitiveData(Vec4 &outPrimitive) override;
    virtual bool ReadPrimitiveData(Quat &outPrimitive) override;
    virtual bool ReadPrimitiveData(Mat44 &outPrimitive) override;
    virtual bool ReadPrimitiveData(DMat44 &outPrimitive) override;

private:
    bool ReadChar(char &outChar);
    bool ReadWord(String &outWord);
};

JPH_NAMESPACE_END
0
repos/c2z/use_cases/JoltPhysics/include
repos/c2z/use_cases/JoltPhysics/include/Math/DynMatrix.h
// Jolt Physics Library (https://github.com/jrouwe/JoltPhysics)
// SPDX-FileCopyrightText: 2022 Jorrit Rouwe
// SPDX-License-Identifier: MIT

#pragma once

JPH_NAMESPACE_BEGIN

/// Dynamic resizable matrix class
class [[nodiscard]] DynMatrix
{
public:
    /// Constructor
    DynMatrix(const DynMatrix &) = default;
    DynMatrix(uint inRows, uint inCols) : mRows(inRows), mCols(inCols) { mElements.resize(inRows * inCols); }

    /// Access an element
    float operator () (uint inRow, uint inCol) const { JPH_ASSERT(inRow < mRows && inCol < mCols); return mElements[inRow * mCols + inCol]; }
    float & operator () (uint inRow, uint inCol) { JPH_ASSERT(inRow < mRows && inCol < mCols); return mElements[inRow * mCols + inCol]; }

    /// Get dimensions
    uint GetCols() const { return mCols; }
    uint GetRows() const { return mRows; }

private:
    uint mRows;
    uint mCols;
    Array<float> mElements;
};

JPH_NAMESPACE_END
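A quick sketch of DynMatrix in use (not from the repository; assumes Jolt is linked). It only exercises the constructor and accessors declared above; elements start zeroed because Array::resize value-initializes.

#include <Jolt/Jolt.h>
#include <Jolt/Math/DynMatrix.h>

#include <iostream>

int main()
{
    JPH::RegisterDefaultAllocator(); // DynMatrix's Array allocates through Jolt's allocator

    JPH::DynMatrix m(2, 3); // 2 rows x 3 columns, row-major storage per operator() above
    for (JPH::uint r = 0; r < m.GetRows(); ++r)
        for (JPH::uint c = 0; c < m.GetCols(); ++c)
            m(r, c) = float(r * m.GetCols() + c);

    std::cout << m(1, 2) << std::endl; // prints 5
    return 0;
}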
0
repos/c2z/use_cases/JoltPhysics/include
repos/c2z/use_cases/JoltPhysics/include/Math/Float4.h
// Jolt Physics Library (https://github.com/jrouwe/JoltPhysics)
// SPDX-FileCopyrightText: 2021 Jorrit Rouwe
// SPDX-License-Identifier: MIT

#pragma once

JPH_NAMESPACE_BEGIN

/// Class that holds 4 float values. Convert to Vec4 to perform calculations.
class [[nodiscard]] Float4
{
public:
    JPH_OVERRIDE_NEW_DELETE

    Float4() = default; ///< Intentionally not initialized for performance reasons
    Float4(const Float4 &inRHS) = default;
    Float4(float inX, float inY, float inZ, float inW) : x(inX), y(inY), z(inZ), w(inW) { }

    float operator [] (int inCoordinate) const
    {
        JPH_ASSERT(inCoordinate < 4);
        return *(&x + inCoordinate);
    }

    float x;
    float y;
    float z;
    float w;
};

static_assert(is_trivial<Float4>(), "Is supposed to be a trivial type!");

JPH_NAMESPACE_END
0
repos/c2z/use_cases/JoltPhysics/include
repos/c2z/use_cases/JoltPhysics/include/Math/Float2.h
// Jolt Physics Library (https://github.com/jrouwe/JoltPhysics)
// SPDX-FileCopyrightText: 2021 Jorrit Rouwe
// SPDX-License-Identifier: MIT

#pragma once

JPH_NAMESPACE_BEGIN

/// Class that holds 2 floats, used as a storage class mainly.
class [[nodiscard]] Float2
{
public:
    JPH_OVERRIDE_NEW_DELETE

    Float2() = default; ///< Intentionally not initialized for performance reasons
    Float2(const Float2 &inRHS) = default;
    Float2(float inX, float inY) : x(inX), y(inY) { }

    bool operator == (const Float2 &inRHS) const { return x == inRHS.x && y == inRHS.y; }
    bool operator != (const Float2 &inRHS) const { return x != inRHS.x || y != inRHS.y; }

    /// To String
    friend ostream & operator << (ostream &inStream, const Float2 &inV)
    {
        inStream << inV.x << ", " << inV.y;
        return inStream;
    }

    float x;
    float y;
};

static_assert(is_trivial<Float2>(), "Is supposed to be a trivial type!");

JPH_NAMESPACE_END
0
repos/c2z/use_cases/JoltPhysics/include
repos/c2z/use_cases/JoltPhysics/include/Math/Quat.inl
// Jolt Physics Library (https://github.com/jrouwe/JoltPhysics)
// SPDX-FileCopyrightText: 2021 Jorrit Rouwe
// SPDX-License-Identifier: MIT

JPH_NAMESPACE_BEGIN

Quat Quat::operator * (QuatArg inRHS) const
{
#if defined(JPH_USE_SSE4_1)
    // Taken from: http://momchil-velikov.blogspot.nl/2013/10/fast-sse-quternion-multiplication.html
    __m128 abcd = mValue.mValue;
    __m128 xyzw = inRHS.mValue.mValue;

    __m128 t0 = _mm_shuffle_ps(abcd, abcd, _MM_SHUFFLE(3, 3, 3, 3));
    __m128 t1 = _mm_shuffle_ps(xyzw, xyzw, _MM_SHUFFLE(2, 3, 0, 1));

    __m128 t3 = _mm_shuffle_ps(abcd, abcd, _MM_SHUFFLE(0, 0, 0, 0));
    __m128 t4 = _mm_shuffle_ps(xyzw, xyzw, _MM_SHUFFLE(1, 0, 3, 2));

    __m128 t5 = _mm_shuffle_ps(abcd, abcd, _MM_SHUFFLE(1, 1, 1, 1));
    __m128 t6 = _mm_shuffle_ps(xyzw, xyzw, _MM_SHUFFLE(2, 0, 3, 1));

    // [d,d,d,d] * [z,w,x,y] = [dz,dw,dx,dy]
    __m128 m0 = _mm_mul_ps(t0, t1);

    // [a,a,a,a] * [y,x,w,z] = [ay,ax,aw,az]
    __m128 m1 = _mm_mul_ps(t3, t4);

    // [b,b,b,b] * [z,x,w,y] = [bz,bx,bw,by]
    __m128 m2 = _mm_mul_ps(t5, t6);

    // [c,c,c,c] * [w,z,x,y] = [cw,cz,cx,cy]
    __m128 t7 = _mm_shuffle_ps(abcd, abcd, _MM_SHUFFLE(2, 2, 2, 2));
    __m128 t8 = _mm_shuffle_ps(xyzw, xyzw, _MM_SHUFFLE(3, 2, 0, 1));
    __m128 m3 = _mm_mul_ps(t7, t8);

    // [dz,dw,dx,dy] + -[ay,ax,aw,az] = [dz+ay,dw-ax,dx+aw,dy-az]
    __m128 e = _mm_addsub_ps(m0, m1);

    // [dx+aw,dz+ay,dy-az,dw-ax]
    e = _mm_shuffle_ps(e, e, _MM_SHUFFLE(1, 3, 0, 2));

    // [dx+aw,dz+ay,dy-az,dw-ax] + -[bz,bx,bw,by] = [dx+aw+bz,dz+ay-bx,dy-az+bw,dw-ax-by]
    e = _mm_addsub_ps(e, m2);

    // [dz+ay-bx,dw-ax-by,dy-az+bw,dx+aw+bz]
    e = _mm_shuffle_ps(e, e, _MM_SHUFFLE(2, 0, 1, 3));

    // [dz+ay-bx,dw-ax-by,dy-az+bw,dx+aw+bz] + -[cw,cz,cx,cy] = [dz+ay-bx+cw,dw-ax-by-cz,dy-az+bw+cx,dx+aw+bz-cy]
    e = _mm_addsub_ps(e, m3);

    // [dw-ax-by-cz,dz+ay-bx+cw,dy-az+bw+cx,dx+aw+bz-cy]
    return Quat(Vec4(_mm_shuffle_ps(e, e, _MM_SHUFFLE(2, 3, 1, 0))));
#else
    float lx = mValue.GetX();
    float ly = mValue.GetY();
    float lz = mValue.GetZ();
    float lw = mValue.GetW();

    float rx = inRHS.mValue.GetX();
    float ry = inRHS.mValue.GetY();
    float rz = inRHS.mValue.GetZ();
    float rw = inRHS.mValue.GetW();

    float x = lw * rx + lx * rw + ly * rz - lz * ry;
    float y = lw * ry - lx * rz + ly * rw + lz * rx;
    float z = lw * rz + lx * ry - ly * rx + lz * rw;
    float w = lw * rw - lx * rx - ly * ry - lz * rz;

    return Quat(x, y, z, w);
#endif
}

Quat Quat::sRotation(Vec3Arg inAxis, float inAngle)
{
    // returns [inAxis * sin(0.5f * inAngle), cos(0.5f * inAngle)]
    JPH_ASSERT(inAxis.IsNormalized());
    Vec4 s, c;
    Vec4::sReplicate(0.5f * inAngle).SinCos(s, c);
    return Quat(Vec4::sSelect(Vec4(inAxis) * s, c, UVec4(0, 0, 0, 0xffffffffU)));
}

void Quat::GetAxisAngle(Vec3 &outAxis, float &outAngle) const
{
    JPH_ASSERT(IsNormalized());

    Quat w_pos = EnsureWPositive();
    float abs_w = w_pos.GetW();
    if (abs_w >= 1.0f)
    {
        outAxis = Vec3::sZero();
        outAngle = 0.0f;
    }
    else
    {
        outAngle = 2.0f * ACos(abs_w);
        outAxis = w_pos.GetXYZ().NormalizedOr(Vec3::sZero());
    }
}

Quat Quat::sFromTo(Vec3Arg inFrom, Vec3Arg inTo)
{
    /*
        Uses (inFrom = v1, inTo = v2):

        angle = arccos(v1 . v2 / |v1||v2|)
        axis = normalize(v1 x v2)

        Quaternion is then:

        s = sin(angle / 2)
        x = axis.x * s
        y = axis.y * s
        z = axis.z * s
        w = cos(angle / 2)

        Using identities:

        sin(2 * a) = 2 * sin(a) * cos(a)
        cos(2 * a) = cos(a)^2 - sin(a)^2
        sin(a)^2 + cos(a)^2 = 1

        This reduces to:

        x = (v1 x v2).x
        y = (v1 x v2).y
        z = (v1 x v2).z
        w = |v1||v2| + v1 . v2

        which then needs to be normalized because the whole equation was multiplied by 2 cos(angle / 2)
    */

    float len_v1_v2 = sqrt(inFrom.LengthSq() * inTo.LengthSq());
    float w = len_v1_v2 + inFrom.Dot(inTo);

    if (w == 0.0f)
    {
        if (len_v1_v2 == 0.0f)
        {
            // If either of the vectors has zero length, there is no rotation and we return identity
            return Quat::sIdentity();
        }
        else
        {
            // If vectors are perpendicular, take one of the many 180 degree rotations that exist
            return Quat(Vec4(inFrom.GetNormalizedPerpendicular(), 0));
        }
    }

    Vec3 v = inFrom.Cross(inTo);
    return Quat(Vec4(v, w)).Normalized();
}

template <class Random>
Quat Quat::sRandom(Random &inRandom)
{
    std::uniform_real_distribution<float> zero_to_one(0.0f, 1.0f);
    float x0 = zero_to_one(inRandom);
    float r1 = sqrt(1.0f - x0), r2 = sqrt(x0);

    std::uniform_real_distribution<float> zero_to_two_pi(0.0f, 2.0f * JPH_PI);
    Vec4 s, c;
    Vec4(zero_to_two_pi(inRandom), zero_to_two_pi(inRandom), 0, 0).SinCos(s, c);

    return Quat(s.GetX() * r1, c.GetX() * r1, s.GetY() * r2, c.GetY() * r2);
}

Quat Quat::sEulerAngles(Vec3Arg inAngles)
{
    Vec4 half(0.5f * inAngles);
    Vec4 s, c;
    half.SinCos(s, c);

    float cx = c.GetX();
    float sx = s.GetX();
    float cy = c.GetY();
    float sy = s.GetY();
    float cz = c.GetZ();
    float sz = s.GetZ();

    return Quat(
        cz * sx * cy - sz * cx * sy,
        cz * cx * sy + sz * sx * cy,
        sz * cx * cy - cz * sx * sy,
        cz * cx * cy + sz * sx * sy);
}

Vec3 Quat::GetEulerAngles() const
{
    float y_sq = GetY() * GetY();

    // X
    float t0 = 2.0f * (GetW() * GetX() + GetY() * GetZ());
    float t1 = 1.0f - 2.0f * (GetX() * GetX() + y_sq);

    // Y
    float t2 = 2.0f * (GetW() * GetY() - GetZ() * GetX());
    t2 = t2 > 1.0f? 1.0f : t2;
    t2 = t2 < -1.0f? -1.0f : t2;

    // Z
    float t3 = 2.0f * (GetW() * GetZ() + GetX() * GetY());
    float t4 = 1.0f - 2.0f * (y_sq + GetZ() * GetZ());

    return Vec3(ATan2(t0, t1), ASin(t2), ATan2(t3, t4));
}

Quat Quat::GetTwist(Vec3Arg inAxis) const
{
    Quat twist(Vec4(GetXYZ().Dot(inAxis) * inAxis, GetW()));
    float twist_len = twist.LengthSq();
    if (twist_len != 0.0f)
        return twist / sqrt(twist_len);
    else
        return Quat::sIdentity();
}

void Quat::GetSwingTwist(Quat &outSwing, Quat &outTwist) const
{
    float x = GetX(), y = GetY(), z = GetZ(), w = GetW();
    float s = sqrt(Square(w) + Square(x));
    if (s != 0.0f)
    {
        outTwist = Quat(x / s, 0, 0, w / s);
        outSwing = Quat(0, (w * y - x * z) / s, (w * z + x * y) / s, s);
    }
    else
    {
        // If both x and w are zero, this must be a 180 degree rotation around either y or z
        outTwist = Quat::sIdentity();
        outSwing = *this;
    }
}

Quat Quat::LERP(QuatArg inDestination, float inFraction) const
{
    float scale0 = 1.0f - inFraction;
    return Quat(Vec4::sReplicate(scale0) * mValue + Vec4::sReplicate(inFraction) * inDestination.mValue);
}

Quat Quat::SLERP(QuatArg inDestination, float inFraction) const
{
    // Difference at which to LERP instead of SLERP
    const float delta = 0.0001f;

    // Calc cosine
    float sign_scale1 = 1.0f;
    float cos_omega = Dot(inDestination);

    // Adjust signs (if necessary)
    if (cos_omega < 0.0f)
    {
        cos_omega = -cos_omega;
        sign_scale1 = -1.0f;
    }

    // Calculate coefficients
    float scale0, scale1;
    if (1.0f - cos_omega > delta)
    {
        // Standard case (slerp)
        float omega = ACos(cos_omega);
        float sin_omega = Sin(omega);
        scale0 = Sin((1.0f - inFraction) * omega) / sin_omega;
        scale1 = sign_scale1 * Sin(inFraction * omega) / sin_omega;
    }
    else
    {
        // Quaternions are very close so we can do a linear interpolation
        scale0 = 1.0f - inFraction;
        scale1 = sign_scale1 * inFraction;
    }

    // Interpolate between the two quaternions
    return Quat(Vec4::sReplicate(scale0) * mValue + Vec4::sReplicate(scale1) * inDestination.mValue).Normalized();
}

Vec3 Quat::operator * (Vec3Arg inValue) const
{
    // Rotating a vector by a quaternion is done by: p' = q * p * q^-1 (q^-1 = conjugated(q) for a unit quaternion)
    JPH_ASSERT(IsNormalized());
    return Vec3((*this * Quat(Vec4(inValue, 0)) * Conjugated()).mValue);
}

Vec3 Quat::InverseRotate(Vec3Arg inValue) const
{
    JPH_ASSERT(IsNormalized());
    return Vec3((Conjugated() * Quat(Vec4(inValue, 0)) * *this).mValue);
}

Vec3 Quat::RotateAxisX() const
{
    // This is *this * Vec3::sAxisX() written out:
    JPH_ASSERT(IsNormalized());
    float x = GetX(), y = GetY(), z = GetZ(), w = GetW();
    float tx = 2.0f * x, tw = 2.0f * w;
    return Vec3(tx * x + tw * w - 1.0f, tx * y + z * tw, tx * z - y * tw);
}

Vec3 Quat::RotateAxisY() const
{
    // This is *this * Vec3::sAxisY() written out:
    JPH_ASSERT(IsNormalized());
    float x = GetX(), y = GetY(), z = GetZ(), w = GetW();
    float ty = 2.0f * y, tw = 2.0f * w;
    return Vec3(x * ty - z * tw, tw * w + ty * y - 1.0f, x * tw + ty * z);
}

Vec3 Quat::RotateAxisZ() const
{
    // This is *this * Vec3::sAxisZ() written out:
    JPH_ASSERT(IsNormalized());
    float x = GetX(), y = GetY(), z = GetZ(), w = GetW();
    float tz = 2.0f * z, tw = 2.0f * w;
    return Vec3(x * tz + y * tw, y * tz - x * tw, tw * w + tz * z - 1.0f);
}

void Quat::StoreFloat3(Float3 *outV) const
{
    JPH_ASSERT(IsNormalized());
    EnsureWPositive().GetXYZ().StoreFloat3(outV);
}

Quat Quat::sLoadFloat3Unsafe(const Float3 &inV)
{
    Vec3 v = Vec3::sLoadFloat3Unsafe(inV);
    float w = sqrt(max(1.0f - v.LengthSq(), 0.0f)); // It is possible that the length of v is a fraction above 1, and we don't want to introduce NaN's in that case so we clamp to 0
    return Quat(Vec4(v, w));
}

JPH_NAMESPACE_END
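The scalar fallback of operator * and the p' = q * p * q^-1 rotation in this file can be checked without Jolt at all. A standalone, dependency-free sketch (the struct Q and helper mul are invented here, but the component formulas are exactly those of the scalar branch above): rotating the X axis 90 degrees around Z should give the Y axis.

#include <cmath>
#include <cstdio>

struct Q { float x, y, z, w; };

// Same component formulas as the scalar branch of Quat::operator*
static Q mul(const Q &l, const Q &r)
{
    return { l.w * r.x + l.x * r.w + l.y * r.z - l.z * r.y,
             l.w * r.y - l.x * r.z + l.y * r.w + l.z * r.x,
             l.w * r.z + l.x * r.y - l.y * r.x + l.z * r.w,
             l.w * r.w - l.x * r.x - l.y * r.y - l.z * r.z };
}

int main()
{
    const float half = 0.25f * 3.14159265f;                 // half of a 90 degree angle, see sRotation
    Q q  = { 0.0f, 0.0f, std::sin(half), std::cos(half) };  // rotation around Z
    Q qc = { -q.x, -q.y, -q.z, q.w };                       // conjugate == inverse for a unit quaternion
    Q p  = { 1.0f, 0.0f, 0.0f, 0.0f };                      // X axis embedded with w = 0
    Q r  = mul(mul(q, p), qc);
    std::printf("%.3f %.3f %.3f\n", r.x, r.y, r.z);         // prints ~0 1 0
    return 0;
}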
0
repos/c2z/use_cases/JoltPhysics/include
repos/c2z/use_cases/JoltPhysics/include/Math/MathTypes.h
// Jolt Physics Library (https://github.com/jrouwe/JoltPhysics)
// SPDX-FileCopyrightText: 2021 Jorrit Rouwe
// SPDX-License-Identifier: MIT

#pragma once

JPH_NAMESPACE_BEGIN

class Vec3;
class DVec3;
class Vec4;
class UVec4;
class Vec8;
class UVec8;
class Quat;
class Mat44;
class DMat44;

// Types to use for passing arguments to functions
using Vec3Arg = Vec3;
#ifdef JPH_USE_AVX
    using DVec3Arg = DVec3;
#else
    using DVec3Arg = const DVec3 &;
#endif
using Vec4Arg = Vec4;
using UVec4Arg = UVec4;
using Vec8Arg = Vec8;
using UVec8Arg = UVec8;
using QuatArg = Quat;
using Mat44Arg = const Mat44 &;
using DMat44Arg = const DMat44 &;

JPH_NAMESPACE_END
0
repos/c2z/use_cases/JoltPhysics/include
repos/c2z/use_cases/JoltPhysics/include/Math/Swizzle.h
// Jolt Physics Library (https://github.com/jrouwe/JoltPhysics)
// SPDX-FileCopyrightText: 2021 Jorrit Rouwe
// SPDX-License-Identifier: MIT

#pragma once

JPH_NAMESPACE_BEGIN

/// Enum indicating which component to use when swizzling
enum
{
    SWIZZLE_X = 0,      ///< Use the X component
    SWIZZLE_Y = 1,      ///< Use the Y component
    SWIZZLE_Z = 2,      ///< Use the Z component
    SWIZZLE_W = 3,      ///< Use the W component
    SWIZZLE_UNUSED = 2, ///< We always use the Z component when we don't specifically want to initialize a value, this is consistent with what is done in Vec3(x, y, z), Vec3(Float3 &) and Vec3::sLoadFloat3Unsafe
};

JPH_NAMESPACE_END
0
repos/c2z/use_cases/JoltPhysics/include
repos/c2z/use_cases/JoltPhysics/include/Math/Vec3.h
// Jolt Physics Library (https://github.com/jrouwe/JoltPhysics)
// SPDX-FileCopyrightText: 2021 Jorrit Rouwe
// SPDX-License-Identifier: MIT

#pragma once

#include <Jolt/Math/Float3.h>
#include <Jolt/Math/Swizzle.h>
#include <Jolt/Math/MathTypes.h>

JPH_NAMESPACE_BEGIN

/// 3 component vector (stored as 4 vectors).
/// Note that we keep the 4th component the same as the 3rd component to avoid divisions by zero when JPH_FLOATING_POINT_EXCEPTIONS_ENABLED defined
class [[nodiscard]] alignas(JPH_VECTOR_ALIGNMENT) Vec3
{
public:
    JPH_OVERRIDE_NEW_DELETE

    // Underlying vector type
#if defined(JPH_USE_SSE)
    using Type = __m128;
#elif defined(JPH_USE_NEON)
    using Type = float32x4_t;
#else
    using Type = Vec4::Type;
#endif

    // Argument type
    using ArgType = Vec3Arg;

    /// Constructor
    Vec3() = default; ///< Intentionally not initialized for performance reasons
    Vec3(const Vec3 &inRHS) = default;
    explicit JPH_INLINE Vec3(Vec4Arg inRHS);
    JPH_INLINE Vec3(Type inRHS) : mValue(inRHS) { CheckW(); }

    /// Load 3 floats from memory
    explicit JPH_INLINE Vec3(const Float3 &inV);

    /// Create a vector from 3 components
    JPH_INLINE Vec3(float inX, float inY, float inZ);

    /// Vector with all zeros
    static JPH_INLINE Vec3 sZero();

    /// Vector with all NaN's
    static JPH_INLINE Vec3 sNaN();

    /// Vectors with the principal axis
    static JPH_INLINE Vec3 sAxisX() { return Vec3(1, 0, 0); }
    static JPH_INLINE Vec3 sAxisY() { return Vec3(0, 1, 0); }
    static JPH_INLINE Vec3 sAxisZ() { return Vec3(0, 0, 1); }

    /// Replicate inV across all components
    static JPH_INLINE Vec3 sReplicate(float inV);

    /// Load 3 floats from memory (reads 32 bits extra which it doesn't use)
    static JPH_INLINE Vec3 sLoadFloat3Unsafe(const Float3 &inV);

    /// Return the minimum value of each of the components
    static JPH_INLINE Vec3 sMin(Vec3Arg inV1, Vec3Arg inV2);

    /// Return the maximum of each of the components
    static JPH_INLINE Vec3 sMax(Vec3Arg inV1, Vec3Arg inV2);

    /// Clamp a vector between min and max (component wise)
    static JPH_INLINE Vec3 sClamp(Vec3Arg inV, Vec3Arg inMin, Vec3Arg inMax);

    /// Equals (component wise)
    static JPH_INLINE UVec4 sEquals(Vec3Arg inV1, Vec3Arg inV2);

    /// Less than (component wise)
    static JPH_INLINE UVec4 sLess(Vec3Arg inV1, Vec3Arg inV2);

    /// Less than or equal (component wise)
    static JPH_INLINE UVec4 sLessOrEqual(Vec3Arg inV1, Vec3Arg inV2);

    /// Greater than (component wise)
    static JPH_INLINE UVec4 sGreater(Vec3Arg inV1, Vec3Arg inV2);

    /// Greater than or equal (component wise)
    static JPH_INLINE UVec4 sGreaterOrEqual(Vec3Arg inV1, Vec3Arg inV2);

    /// Calculates inMul1 * inMul2 + inAdd
    static JPH_INLINE Vec3 sFusedMultiplyAdd(Vec3Arg inMul1, Vec3Arg inMul2, Vec3Arg inAdd);

    /// Component wise select, returns inV1 when highest bit of inControl = 0 and inV2 when highest bit of inControl = 1
    static JPH_INLINE Vec3 sSelect(Vec3Arg inV1, Vec3Arg inV2, UVec4Arg inControl);

    /// Logical or (component wise)
    static JPH_INLINE Vec3 sOr(Vec3Arg inV1, Vec3Arg inV2);

    /// Logical xor (component wise)
    static JPH_INLINE Vec3 sXor(Vec3Arg inV1, Vec3Arg inV2);

    /// Logical and (component wise)
    static JPH_INLINE Vec3 sAnd(Vec3Arg inV1, Vec3Arg inV2);

    /// Get unit vector given spherical coordinates
    /// inTheta \f$\in [0, \pi]\f$ is angle between vector and z-axis
    /// inPhi \f$\in [0, 2 \pi]\f$ is the angle in the xy-plane starting from the x axis and rotating counter clockwise around the z-axis
    static JPH_INLINE Vec3 sUnitSpherical(float inTheta, float inPhi);

    /// A set of vectors uniformly spanning the surface of a unit sphere, usable for debug purposes
    static const std::vector<Vec3> sUnitSphere;

    /// Get random unit vector
    template <class Random>
    static inline Vec3 sRandom(Random &inRandom);

    /// Get individual components
#if defined(JPH_USE_SSE)
    JPH_INLINE float GetX() const { return _mm_cvtss_f32(mValue); }
    JPH_INLINE float GetY() const { return mF32[1]; }
    JPH_INLINE float GetZ() const { return mF32[2]; }
#elif defined(JPH_USE_NEON)
    JPH_INLINE float GetX() const { return vgetq_lane_f32(mValue, 0); }
    JPH_INLINE float GetY() const { return vgetq_lane_f32(mValue, 1); }
    JPH_INLINE float GetZ() const { return vgetq_lane_f32(mValue, 2); }
#else
    JPH_INLINE float GetX() const { return mF32[0]; }
    JPH_INLINE float GetY() const { return mF32[1]; }
    JPH_INLINE float GetZ() const { return mF32[2]; }
#endif

    /// Set individual components
    JPH_INLINE void SetX(float inX) { mF32[0] = inX; }
    JPH_INLINE void SetY(float inY) { mF32[1] = inY; }
    JPH_INLINE void SetZ(float inZ) { mF32[2] = mF32[3] = inZ; } // Assure Z and W are the same

    /// Get float component by index
    JPH_INLINE float operator [] (uint inCoordinate) const { JPH_ASSERT(inCoordinate < 3); return mF32[inCoordinate]; }

    /// Set float component by index
    JPH_INLINE void SetComponent(uint inCoordinate, float inValue) { JPH_ASSERT(inCoordinate < 3); mF32[inCoordinate] = inValue; mValue = sFixW(mValue); } // Assure Z and W are the same

    /// Comparison
    JPH_INLINE bool operator == (Vec3Arg inV2) const;
    JPH_INLINE bool operator != (Vec3Arg inV2) const { return !(*this == inV2); }

    /// Test if two vectors are close
    JPH_INLINE bool IsClose(Vec3Arg inV2, float inMaxDistSq = 1.0e-12f) const;

    /// Test if vector is near zero
    JPH_INLINE bool IsNearZero(float inMaxDistSq = 1.0e-12f) const;

    /// Test if vector is normalized
    JPH_INLINE bool IsNormalized(float inTolerance = 1.0e-6f) const;

    /// Test if vector contains NaN elements
    JPH_INLINE bool IsNaN() const;

    /// Multiply two float vectors (component wise)
    JPH_INLINE Vec3 operator * (Vec3Arg inV2) const;

    /// Multiply vector with float
    JPH_INLINE Vec3 operator * (float inV2) const;

    /// Multiply vector with float
    friend JPH_INLINE Vec3 operator * (float inV1, Vec3Arg inV2);

    /// Divide vector by float
    JPH_INLINE Vec3 operator / (float inV2) const;

    /// Multiply vector with float
    JPH_INLINE Vec3 & operator *= (float inV2);

    /// Multiply vector with vector
    JPH_INLINE Vec3 & operator *= (Vec3Arg inV2);

    /// Divide vector by float
    JPH_INLINE Vec3 & operator /= (float inV2);

    /// Add two float vectors (component wise)
    JPH_INLINE Vec3 operator + (Vec3Arg inV2) const;

    /// Add two float vectors (component wise)
    JPH_INLINE Vec3 & operator += (Vec3Arg inV2);

    /// Negate
    JPH_INLINE Vec3 operator - () const;

    /// Subtract two float vectors (component wise)
    JPH_INLINE Vec3 operator - (Vec3Arg inV2) const;

    /// Subtract two float vectors (component wise)
    JPH_INLINE Vec3 & operator -= (Vec3Arg inV2);

    /// Divide (component wise)
    JPH_INLINE Vec3 operator / (Vec3Arg inV2) const;

    /// Swizzle the elements in inV
    template<uint32 SwizzleX, uint32 SwizzleY, uint32 SwizzleZ>
    JPH_INLINE Vec3 Swizzle() const;

    /// Replicate the X component to all components
    JPH_INLINE Vec4 SplatX() const;

    /// Replicate the Y component to all components
    JPH_INLINE Vec4 SplatY() const;

    /// Replicate the Z component to all components
    JPH_INLINE Vec4 SplatZ() const;

    /// Get index of component with lowest value
    JPH_INLINE int GetLowestComponentIndex() const;

    /// Get index of component with highest value
    JPH_INLINE int GetHighestComponentIndex() const;

    /// Return the absolute value of each of the components
    JPH_INLINE Vec3 Abs() const;

    /// Reciprocal vector (1 / value) for each of the components
    JPH_INLINE Vec3 Reciprocal() const;

    /// Cross product
    JPH_INLINE Vec3 Cross(Vec3Arg inV2) const;

    /// Dot product, returns the dot product in X, Y and Z components
    JPH_INLINE Vec3 DotV(Vec3Arg inV2) const;

    /// Dot product, returns the dot product in X, Y, Z and W components
    JPH_INLINE Vec4 DotV4(Vec3Arg inV2) const;

    /// Dot product
    JPH_INLINE float Dot(Vec3Arg inV2) const;

    /// Squared length of vector
    JPH_INLINE float LengthSq() const;

    /// Length of vector
    JPH_INLINE float Length() const;

    /// Normalize vector
    JPH_INLINE Vec3 Normalized() const;

    /// Normalize vector or return inZeroValue if the length of the vector is zero
    JPH_INLINE Vec3 NormalizedOr(Vec3Arg inZeroValue) const;

    /// Store 3 floats to memory
    JPH_INLINE void StoreFloat3(Float3 *outV) const;

    /// Convert each component from a float to an int
    JPH_INLINE UVec4 ToInt() const;

    /// Reinterpret Vec3 as a UVec4 (doesn't change the bits)
    JPH_INLINE UVec4 ReinterpretAsInt() const;

    /// Get the minimum of X, Y and Z
    JPH_INLINE float ReduceMin() const;

    /// Get the maximum of X, Y and Z
    JPH_INLINE float ReduceMax() const;

    /// Component wise square root
    JPH_INLINE Vec3 Sqrt() const;

    /// Get normalized vector that is perpendicular to this vector
    JPH_INLINE Vec3 GetNormalizedPerpendicular() const;

    /// Get vector that contains the sign of each element (returns 1.0f if positive, -1.0f if negative)
    JPH_INLINE Vec3 GetSign() const;

    /// To String
    friend ostream & operator << (ostream &inStream, Vec3Arg inV)
    {
        inStream << inV.mF32[0] << ", " << inV.mF32[1] << ", " << inV.mF32[2];
        return inStream;
    }

    /// Internal helper function that checks that W is equal to Z, so e.g. dividing by it should not generate div by 0
    JPH_INLINE void CheckW() const;

    /// Internal helper function that ensures that the Z component is replicated to the W component to prevent divisions by zero
    static JPH_INLINE Type sFixW(Type inValue);

    union
    {
        Type mValue;
        float mF32[4];
    };
};

static_assert(is_trivial<Vec3>(), "Is supposed to be a trivial type!");

JPH_NAMESPACE_END

#include "Vec3.inl"
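A minimal usage sketch (not part of the repository): basic Vec3 arithmetic through the interface declared above. It assumes Jolt has been built and linked; every call used here is declared in this header.

#include <Jolt/Jolt.h>
#include <Jolt/Math/Vec3.h>

#include <iostream>

int main()
{
    JPH::Vec3 a(1.0f, 0.0f, 0.0f);
    JPH::Vec3 b(0.0f, 1.0f, 0.0f);

    // Cross of the X and Y axes is the Z axis; their dot product is zero.
    std::cout << a.Cross(b) << " dot=" << a.Dot(b)
              << " len=" << (a + b).Length() << std::endl; // prints "0, 0, 1 dot=0 len=1.41421"
    return 0;
}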
0
repos/c2z/use_cases/JoltPhysics/include
repos/c2z/use_cases/JoltPhysics/include/Math/DVec3.h
// Jolt Physics Library (https://github.com/jrouwe/JoltPhysics)
// SPDX-FileCopyrightText: 2021 Jorrit Rouwe
// SPDX-License-Identifier: MIT

#pragma once

#include <Jolt/Math/Double3.h>

JPH_NAMESPACE_BEGIN

/// 3 component vector of doubles (stored as 4 vectors).
/// Note that we keep the 4th component the same as the 3rd component to avoid divisions by zero when JPH_FLOATING_POINT_EXCEPTIONS_ENABLED defined
class [[nodiscard]] alignas(JPH_DVECTOR_ALIGNMENT) DVec3
{
public:
    JPH_OVERRIDE_NEW_DELETE

    // Underlying vector type
#if defined(JPH_USE_AVX)
    using Type = __m256d;
    using TypeArg = __m256d;
#elif defined(JPH_USE_SSE)
    using Type = struct { __m128d mLow, mHigh; };
    using TypeArg = const Type &;
#elif defined(JPH_USE_NEON)
    using Type = float64x2x2_t;
    using TypeArg = const Type &;
#else
    using Type = struct { double mData[4]; };
    using TypeArg = const Type &;
#endif

    // Argument type
    using ArgType = DVec3Arg;

    /// Constructor
    DVec3() = default; ///< Intentionally not initialized for performance reasons
    DVec3(const DVec3 &inRHS) = default;
    JPH_INLINE explicit DVec3(Vec3Arg inRHS);
    JPH_INLINE explicit DVec3(Vec4Arg inRHS);
    JPH_INLINE DVec3(TypeArg inRHS) : mValue(inRHS) { CheckW(); }

    /// Create a vector from 3 components
    JPH_INLINE DVec3(double inX, double inY, double inZ);

    /// Load 3 doubles from memory
    explicit JPH_INLINE DVec3(const Double3 &inV);

    /// Vector with all zeros
    static JPH_INLINE DVec3 sZero();

    /// Vectors with the principal axis
    static JPH_INLINE DVec3 sAxisX() { return DVec3(1, 0, 0); }
    static JPH_INLINE DVec3 sAxisY() { return DVec3(0, 1, 0); }
    static JPH_INLINE DVec3 sAxisZ() { return DVec3(0, 0, 1); }

    /// Replicate inV across all components
    static JPH_INLINE DVec3 sReplicate(double inV);

    /// Vector with all NaN's
    static JPH_INLINE DVec3 sNaN();

    /// Load 3 doubles from memory (reads 64 bits extra which it doesn't use)
    static JPH_INLINE DVec3 sLoadDouble3Unsafe(const Double3 &inV);

    /// Store 3 doubles to memory
    JPH_INLINE void StoreDouble3(Double3 *outV) const;

    /// Convert to float vector 3 rounding to nearest
    JPH_INLINE explicit operator Vec3() const;

    /// Prepare to convert to float vector 3 rounding towards zero (returns DVec3 that can be converted to a Vec3 to get the rounding)
    JPH_INLINE DVec3 PrepareRoundToZero() const;

    /// Prepare to convert to float vector 3 rounding towards positive/negative inf (returns DVec3 that can be converted to a Vec3 to get the rounding)
    JPH_INLINE DVec3 PrepareRoundToInf() const;

    /// Convert to float vector 3 rounding down
    JPH_INLINE Vec3 ToVec3RoundDown() const;

    /// Convert to float vector 3 rounding up
    JPH_INLINE Vec3 ToVec3RoundUp() const;

    /// Return the minimum value of each of the components
    static JPH_INLINE DVec3 sMin(DVec3Arg inV1, DVec3Arg inV2);

    /// Return the maximum of each of the components
    static JPH_INLINE DVec3 sMax(DVec3Arg inV1, DVec3Arg inV2);

    /// Clamp a vector between min and max (component wise)
    static JPH_INLINE DVec3 sClamp(DVec3Arg inV, DVec3Arg inMin, DVec3Arg inMax);

    /// Equals (component wise)
    static JPH_INLINE DVec3 sEquals(DVec3Arg inV1, DVec3Arg inV2);

    /// Less than (component wise)
    static JPH_INLINE DVec3 sLess(DVec3Arg inV1, DVec3Arg inV2);

    /// Less than or equal (component wise)
    static JPH_INLINE DVec3 sLessOrEqual(DVec3Arg inV1, DVec3Arg inV2);

    /// Greater than (component wise)
    static JPH_INLINE DVec3 sGreater(DVec3Arg inV1, DVec3Arg inV2);

    /// Greater than or equal (component wise)
    static JPH_INLINE DVec3 sGreaterOrEqual(DVec3Arg inV1, DVec3Arg inV2);

    /// Calculates inMul1 * inMul2 + inAdd
    static JPH_INLINE DVec3 sFusedMultiplyAdd(DVec3Arg inMul1, DVec3Arg inMul2, DVec3Arg inAdd);

    /// Component wise select, returns inV1 when highest bit of inControl = 0 and inV2 when highest bit of inControl = 1
    static JPH_INLINE DVec3 sSelect(DVec3Arg inV1, DVec3Arg inV2, DVec3Arg inControl);

    /// Logical or (component wise)
    static JPH_INLINE DVec3 sOr(DVec3Arg inV1, DVec3Arg inV2);

    /// Logical xor (component wise)
    static JPH_INLINE DVec3 sXor(DVec3Arg inV1, DVec3Arg inV2);

    /// Logical and (component wise)
    static JPH_INLINE DVec3 sAnd(DVec3Arg inV1, DVec3Arg inV2);

    /// Store if X is true in bit 0, Y in bit 1, Z in bit 2 and W in bit 3 (true is when highest bit of component is set)
    JPH_INLINE int GetTrues() const;

    /// Test if any of the components are true (true is when highest bit of component is set)
    JPH_INLINE bool TestAnyTrue() const;

    /// Test if all components are true (true is when highest bit of component is set)
    JPH_INLINE bool TestAllTrue() const;

    /// Get individual components
#if defined(JPH_USE_AVX)
    JPH_INLINE double GetX() const { return _mm_cvtsd_f64(_mm256_castpd256_pd128(mValue)); }
    JPH_INLINE double GetY() const { return mF64[1]; }
    JPH_INLINE double GetZ() const { return mF64[2]; }
#elif defined(JPH_USE_SSE)
    JPH_INLINE double GetX() const { return _mm_cvtsd_f64(mValue.mLow); }
    JPH_INLINE double GetY() const { return mF64[1]; }
    JPH_INLINE double GetZ() const { return _mm_cvtsd_f64(mValue.mHigh); }
#elif defined(JPH_USE_NEON)
    JPH_INLINE double GetX() const { return vgetq_lane_f64(mValue.val[0], 0); }
    JPH_INLINE double GetY() const { return vgetq_lane_f64(mValue.val[0], 1); }
    JPH_INLINE double GetZ() const { return vgetq_lane_f64(mValue.val[1], 0); }
#else
    JPH_INLINE double GetX() const { return mF64[0]; }
    JPH_INLINE double GetY() const { return mF64[1]; }
    JPH_INLINE double GetZ() const { return mF64[2]; }
#endif

    /// Set individual components
    JPH_INLINE void SetX(double inX) { mF64[0] = inX; }
    JPH_INLINE void SetY(double inY) { mF64[1] = inY; }
    JPH_INLINE void SetZ(double inZ) { mF64[2] = mF64[3] = inZ; } // Assure Z and W are the same

    /// Get double component by index
    JPH_INLINE double operator [] (uint inCoordinate) const { JPH_ASSERT(inCoordinate < 3); return mF64[inCoordinate]; }

    /// Set double component by index
    JPH_INLINE void SetComponent(uint inCoordinate, double inValue) { JPH_ASSERT(inCoordinate < 3); mF64[inCoordinate] = inValue; mValue = sFixW(mValue); } // Assure Z and W are the same

    /// Comparison
    JPH_INLINE bool operator == (DVec3Arg inV2) const;
    JPH_INLINE bool operator != (DVec3Arg inV2) const { return !(*this == inV2); }

    /// Test if two vectors are close
    JPH_INLINE bool IsClose(DVec3Arg inV2, double inMaxDistSq = 1.0e-24) const;

    /// Test if vector is near zero
    JPH_INLINE bool IsNearZero(double inMaxDistSq = 1.0e-24) const;

    /// Test if vector is normalized
    JPH_INLINE bool IsNormalized(double inTolerance = 1.0e-12) const;

    /// Test if vector contains NaN elements
    JPH_INLINE bool IsNaN() const;

    /// Multiply two double vectors (component wise)
    JPH_INLINE DVec3 operator * (DVec3Arg inV2) const;

    /// Multiply vector with double
    JPH_INLINE DVec3 operator * (double inV2) const;

    /// Multiply vector with double
    friend JPH_INLINE DVec3 operator * (double inV1, DVec3Arg inV2);

    /// Divide vector by double
    JPH_INLINE DVec3 operator / (double inV2) const;

    /// Multiply vector with double
    JPH_INLINE DVec3 & operator *= (double inV2);

    /// Multiply vector with vector
    JPH_INLINE DVec3 & operator *= (DVec3Arg inV2);

    /// Divide vector by double
    JPH_INLINE DVec3 & operator /= (double inV2);

    /// Add two vectors (component wise)
    JPH_INLINE DVec3 operator + (Vec3Arg inV2) const;

    /// Add two double vectors (component wise)
    JPH_INLINE DVec3 operator + (DVec3Arg inV2) const;

    /// Add two vectors (component wise)
    JPH_INLINE DVec3 & operator += (Vec3Arg inV2);

    /// Add two double vectors (component wise)
    JPH_INLINE DVec3 & operator += (DVec3Arg inV2);

    /// Negate
    JPH_INLINE DVec3 operator - () const;

    /// Subtract two vectors (component wise)
    JPH_INLINE DVec3 operator - (Vec3Arg inV2) const;

    /// Subtract two double vectors (component wise)
    JPH_INLINE DVec3 operator - (DVec3Arg inV2) const;

    /// Subtract two vectors (component wise)
    JPH_INLINE DVec3 & operator -= (Vec3Arg inV2);

    /// Subtract two double vectors (component wise)
    JPH_INLINE DVec3 & operator -= (DVec3Arg inV2);

    /// Divide (component wise)
    JPH_INLINE DVec3 operator / (DVec3Arg inV2) const;

    /// Return the absolute value of each of the components
    JPH_INLINE DVec3 Abs() const;

    /// Reciprocal vector (1 / value) for each of the components
    JPH_INLINE DVec3 Reciprocal() const;

    /// Cross product
    JPH_INLINE DVec3 Cross(DVec3Arg inV2) const;

    /// Dot product
    JPH_INLINE double Dot(DVec3Arg inV2) const;

    /// Squared length of vector
    JPH_INLINE double LengthSq() const;

    /// Length of vector
    JPH_INLINE double Length() const;

    /// Normalize vector
    JPH_INLINE DVec3 Normalized() const;

    /// Component wise square root
    JPH_INLINE DVec3 Sqrt() const;

    /// Get vector that contains the sign of each element (returns 1 if positive, -1 if negative)
    JPH_INLINE DVec3 GetSign() const;

    /// To String
    friend ostream & operator << (ostream &inStream, DVec3Arg inV)
    {
        inStream << inV.mF64[0] << ", " << inV.mF64[1] << ", " << inV.mF64[2];
        return inStream;
    }

    /// Internal helper function that checks that W is equal to Z, so e.g. dividing by it should not generate div by 0
    JPH_INLINE void CheckW() const;

    /// Internal helper function that ensures that the Z component is replicated to the W component to prevent divisions by zero
    static JPH_INLINE Type sFixW(TypeArg inValue);

    /// Representations of true and false for boolean operations
    inline static const double cTrue = BitCast<double>(~uint64(0));
    inline static const double cFalse = 0.0;

    union
    {
        Type mValue;
        double mF64[4];
    };
};

static_assert(is_trivial<DVec3>(), "Is supposed to be a trivial type!");

JPH_NAMESPACE_END

#include "DVec3.inl"
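The same sketch in double precision (again not from the repository, assuming Jolt is linked): DVec3 mirrors the Vec3 interface with doubles.

#include <Jolt/Jolt.h>
#include <Jolt/Math/DVec3.h>

#include <iostream>

int main()
{
    JPH::DVec3 a(1.0, 2.0, 2.0);

    // |a| = 3, and a normalized vector dotted with the original gives the length back.
    std::cout << a.Length() << " " << a.Normalized().Dot(a) << std::endl; // prints "3 3"
    return 0;
}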
0
repos/c2z/use_cases/JoltPhysics/include
repos/c2z/use_cases/JoltPhysics/include/Math/Double3.h
// Jolt Physics Library (https://github.com/jrouwe/JoltPhysics)
// SPDX-FileCopyrightText: 2021 Jorrit Rouwe
// SPDX-License-Identifier: MIT

#pragma once

#include <Jolt/Core/HashCombine.h>

JPH_NAMESPACE_BEGIN

/// Class that holds 3 doubles. Used as a storage class. Convert to DVec3 for calculations.
class [[nodiscard]] Double3
{
public:
    JPH_OVERRIDE_NEW_DELETE

    Double3() = default; ///< Intentionally not initialized for performance reasons
    Double3(const Double3 &inRHS) = default;
    Double3(double inX, double inY, double inZ) : x(inX), y(inY), z(inZ) { }

    double operator [] (int inCoordinate) const
    {
        JPH_ASSERT(inCoordinate < 3);
        return *(&x + inCoordinate);
    }

    bool operator == (const Double3 &inRHS) const { return x == inRHS.x && y == inRHS.y && z == inRHS.z; }
    bool operator != (const Double3 &inRHS) const { return x != inRHS.x || y != inRHS.y || z != inRHS.z; }

    double x;
    double y;
    double z;
};

static_assert(is_trivial<Double3>(), "Is supposed to be a trivial type!");

JPH_NAMESPACE_END

// Create a std::hash for Double3
JPH_MAKE_HASHABLE(JPH::Double3, t.x, t.y, t.z)
0
repos/c2z/use_cases/JoltPhysics/include
repos/c2z/use_cases/JoltPhysics/include/Math/Matrix.h
// Jolt Physics Library (https://github.com/jrouwe/JoltPhysics)
// SPDX-FileCopyrightText: 2021 Jorrit Rouwe
// SPDX-License-Identifier: MIT

#pragma once

#include <Jolt/Math/Vector.h>
#include <Jolt/Math/GaussianElimination.h>

JPH_NAMESPACE_BEGIN

/// Templatized matrix class
template <uint Rows, uint Cols>
class [[nodiscard]] Matrix
{
public:
    /// Constructor
    inline Matrix() = default;
    inline Matrix(const Matrix &inM2) { *this = inM2; }

    /// Dimensions
    inline uint GetRows() const { return Rows; }
    inline uint GetCols() const { return Cols; }

    /// Zero matrix
    inline void SetZero()
    {
        for (uint c = 0; c < Cols; ++c)
            mCol[c].SetZero();
    }

    inline static Matrix sZero() { Matrix m; m.SetZero(); return m; }

    /// Check if this matrix consists of all zeros
    inline bool IsZero() const
    {
        for (uint c = 0; c < Cols; ++c)
            if (!mCol[c].IsZero())
                return false;
        return true;
    }

    /// Identity matrix
    inline void SetIdentity()
    {
        // Clear matrix
        SetZero();

        // Set diagonal to 1
        for (uint rc = 0, min_rc = min(Rows, Cols); rc < min_rc; ++rc)
            mCol[rc].mF32[rc] = 1.0f;
    }

    inline static Matrix sIdentity() { Matrix m; m.SetIdentity(); return m; }

    /// Check if this matrix is identity
    bool IsIdentity() const { return *this == sIdentity(); }

    /// Diagonal matrix
    inline void SetDiagonal(const Vector<Rows < Cols? Rows : Cols> &inV)
    {
        // Clear matrix
        SetZero();

        // Set diagonal
        for (uint rc = 0, min_rc = min(Rows, Cols); rc < min_rc; ++rc)
            mCol[rc].mF32[rc] = inV[rc];
    }

    inline static Matrix sDiagonal(const Vector<Rows < Cols? Rows : Cols> &inV)
    {
        Matrix m;
        m.SetDiagonal(inV);
        return m;
    }

    /// Copy a (part) of another matrix into this matrix
    template <class OtherMatrix>
    void CopyPart(const OtherMatrix &inM, uint inSourceRow, uint inSourceCol, uint inNumRows, uint inNumCols, uint inDestRow, uint inDestCol)
    {
        for (uint c = 0; c < inNumCols; ++c)
            for (uint r = 0; r < inNumRows; ++r)
                mCol[inDestCol + c].mF32[inDestRow + r] = inM(inSourceRow + r, inSourceCol + c);
    }

    /// Get float component by element index
    inline float operator () (uint inRow, uint inColumn) const
    {
        JPH_ASSERT(inRow < Rows);
        JPH_ASSERT(inColumn < Cols);
        return mCol[inColumn].mF32[inRow];
    }

    inline float & operator () (uint inRow, uint inColumn)
    {
        JPH_ASSERT(inRow < Rows);
        JPH_ASSERT(inColumn < Cols);
        return mCol[inColumn].mF32[inRow];
    }

    /// Comparison
    inline bool operator == (const Matrix &inM2) const
    {
        for (uint c = 0; c < Cols; ++c)
            if (mCol[c] != inM2.mCol[c])
                return false;
        return true;
    }

    inline bool operator != (const Matrix &inM2) const
    {
        for (uint c = 0; c < Cols; ++c)
            if (mCol[c] != inM2.mCol[c])
                return true;
        return false;
    }

    /// Assignment
    inline Matrix & operator = (const Matrix &inM2)
    {
        for (uint c = 0; c < Cols; ++c)
            mCol[c] = inM2.mCol[c];
        return *this;
    }

    /// Multiply matrix by matrix
    template <uint OtherCols>
    inline Matrix<Rows, OtherCols> operator * (const Matrix<Cols, OtherCols> &inM) const
    {
        Matrix<Rows, OtherCols> m;
        for (uint c = 0; c < OtherCols; ++c)
            for (uint r = 0; r < Rows; ++r)
            {
                float dot = 0.0f;
                for (uint i = 0; i < Cols; ++i)
                    dot += mCol[i].mF32[r] * inM.mCol[c].mF32[i];
                m.mCol[c].mF32[r] = dot;
            }
        return m;
    }

    /// Multiply vector by matrix
    inline Vector<Rows> operator * (const Vector<Cols> &inV) const
    {
        Vector<Rows> v;
        for (uint r = 0; r < Rows; ++r)
        {
            float dot = 0.0f;
            for (uint c = 0; c < Cols; ++c)
                dot += mCol[c].mF32[r] * inV.mF32[c];
            v.mF32[r] = dot;
        }
        return v;
    }

    /// Multiply matrix with float
    inline Matrix operator * (float inV) const
    {
        Matrix m;
        for (uint c = 0; c < Cols; ++c)
            m.mCol[c] = mCol[c] * inV;
        return m;
    }

    inline friend Matrix operator * (float inV, const Matrix &inM)
    {
        return inM * inV;
    }

    /// Per element addition of matrix
    inline Matrix operator + (const Matrix &inM) const
    {
        Matrix m;
        for (uint c = 0; c < Cols; ++c)
            m.mCol[c] = mCol[c] + inM.mCol[c];
        return m;
    }

    /// Per element subtraction of matrix
    inline Matrix operator - (const Matrix &inM) const
    {
        Matrix m;
        for (uint c = 0; c < Cols; ++c)
            m.mCol[c] = mCol[c] - inM.mCol[c];
        return m;
    }

    /// Transpose matrix
    inline Matrix<Cols, Rows> Transposed() const
    {
        Matrix<Cols, Rows> m;
        for (uint r = 0; r < Rows; ++r)
            for (uint c = 0; c < Cols; ++c)
                m.mCol[r].mF32[c] = mCol[c].mF32[r];
        return m;
    }

    /// Inverse matrix
    bool SetInversed(const Matrix &inM)
    {
        if constexpr (Rows != Cols) JPH_ASSERT(false);
        Matrix copy(inM);
        SetIdentity();
        return GaussianElimination(copy, *this);
    }

    inline Matrix Inversed() const
    {
        Matrix m;
        m.SetInversed(*this);
        return m;
    }

    /// To String
    friend ostream & operator << (ostream &inStream, const Matrix &inM)
    {
        for (uint i = 0; i < Cols - 1; ++i)
            inStream << inM.mCol[i] << ", ";
        inStream << inM.mCol[Cols - 1];
        return inStream;
    }

    /// Column access
    const Vector<Rows> & GetColumn(int inIdx) const { return mCol[inIdx]; }
    Vector<Rows> & GetColumn(int inIdx) { return mCol[inIdx]; }

    Vector<Rows> mCol[Cols]; ///< Column
};

// The template specialization doesn't sit well with Doxygen
#ifndef JPH_PLATFORM_DOXYGEN

/// Specialization of SetInversed for 2x2 matrix
template <>
inline bool Matrix<2, 2>::SetInversed(const Matrix<2, 2> &inM)
{
    // Fetch elements
    float a = inM.mCol[0].mF32[0];
    float b = inM.mCol[1].mF32[0];
    float c = inM.mCol[0].mF32[1];
    float d = inM.mCol[1].mF32[1];

    // Calculate determinant
    float det = a * d - b * c;
    if (det == 0.0f)
        return false;

    // Construct inverse
    mCol[0].mF32[0] = d / det;
    mCol[1].mF32[0] = -b / det;
    mCol[0].mF32[1] = -c / det;
    mCol[1].mF32[1] = a / det;
    return true;
}

#endif // !JPH_PLATFORM_DOXYGEN

JPH_NAMESPACE_END
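A short usage sketch (not part of the repository; assumes Jolt is linked): inverting a 2x2 matrix goes through the specialization above rather than Gaussian elimination.

#include <Jolt/Jolt.h>
#include <Jolt/Math/Matrix.h>

#include <iostream>

int main()
{
    JPH::Matrix<2, 2> m;
    m(0, 0) = 4.0f; m(0, 1) = 7.0f;
    m(1, 0) = 2.0f; m(1, 1) = 6.0f; // det = 4 * 6 - 7 * 2 = 10

    JPH::Matrix<2, 2> inv = m.Inversed();
    std::cout << m * inv << std::endl; // identity, up to float rounding
    return 0;
}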
0
repos/c2z/use_cases/JoltPhysics/include
repos/c2z/use_cases/JoltPhysics/include/Math/Math.h
// Jolt Physics Library (https://github.com/jrouwe/JoltPhysics)
// SPDX-FileCopyrightText: 2021 Jorrit Rouwe
// SPDX-License-Identifier: MIT

#pragma once

JPH_NAMESPACE_BEGIN

/// The constant \f$\pi\f$
static constexpr float JPH_PI = 3.14159265358979323846f;

/// Convert a value from degrees to radians
constexpr float DegreesToRadians(float inV)
{
    return inV * (JPH_PI / 180.0f);
}

/// Convert a value from radians to degrees
constexpr float RadiansToDegrees(float inV)
{
    return inV * (180.0f / JPH_PI);
}

/// Convert angle in radians to the range \f$[-\pi, \pi]\f$
inline float CenterAngleAroundZero(float inV)
{
    if (inV < -JPH_PI)
    {
        do
            inV += 2.0f * JPH_PI;
        while (inV < -JPH_PI);
    }
    else if (inV > JPH_PI)
    {
        do
            inV -= 2.0f * JPH_PI;
        while (inV > JPH_PI);
    }
    JPH_ASSERT(inV >= -JPH_PI && inV <= JPH_PI);
    return inV;
}

/// Clamp a value between two values
template <typename T>
constexpr T Clamp(T inV, T inMin, T inMax)
{
    return min(max(inV, inMin), inMax);
}

/// Square a value
template <typename T>
constexpr T Square(T inV)
{
    return inV * inV;
}

/// Returns \f$inV^3\f$.
template <typename T>
constexpr T Cubed(T inV)
{
    return inV * inV * inV;
}

/// Get the sign of a value
template <typename T>
constexpr T Sign(T inV)
{
    return inV < 0? T(-1) : T(1);
}

/// Check if inV is a power of 2
template <typename T>
constexpr bool IsPowerOf2(T inV)
{
    return (inV & (inV - 1)) == 0;
}

/// Align inV up to the next inAlignment bytes
template <typename T>
inline T AlignUp(T inV, uint64 inAlignment)
{
    JPH_ASSERT(IsPowerOf2(inAlignment));
    return T((uint64(inV) + inAlignment - 1) & ~(inAlignment - 1));
}

/// Check if inV is inAlignment aligned
template <typename T>
inline bool IsAligned(T inV, uint64 inAlignment)
{
    JPH_ASSERT(IsPowerOf2(inAlignment));
    return (uint64(inV) & (inAlignment - 1)) == 0;
}

/// Compute number of trailing zero bits (how many low bits are zero)
inline uint CountTrailingZeros(uint32 inValue)
{
#if defined(JPH_CPU_X86) || defined(JPH_CPU_WASM)
    #if defined(JPH_USE_TZCNT)
        return _tzcnt_u32(inValue);
    #elif defined(JPH_COMPILER_MSVC)
        if (inValue == 0)
            return 32;
        unsigned long result;
        _BitScanForward(&result, inValue);
        return result;
    #else
        if (inValue == 0)
            return 32;
        return __builtin_ctz(inValue);
    #endif
#elif defined(JPH_CPU_ARM)
    #if defined(JPH_COMPILER_MSVC)
        if (inValue == 0)
            return 32;
        unsigned long result;
        _BitScanForward(&result, inValue);
        return result;
    #else
        return __builtin_clz(__builtin_bitreverse32(inValue));
    #endif
#else
    #error Undefined
#endif
}

/// Compute the number of leading zero bits (how many high bits are zero)
inline uint CountLeadingZeros(uint32 inValue)
{
#if defined(JPH_CPU_X86) || defined(JPH_CPU_WASM)
    #if defined(JPH_USE_LZCNT)
        return _lzcnt_u32(inValue);
    #elif defined(JPH_COMPILER_MSVC)
        if (inValue == 0)
            return 32;
        unsigned long result;
        _BitScanReverse(&result, inValue);
        return 31 - result;
    #else
        if (inValue == 0)
            return 32;
        return __builtin_clz(inValue);
    #endif
#elif defined(JPH_CPU_ARM)
    #if defined(JPH_COMPILER_MSVC)
        return _CountLeadingZeros(inValue);
    #else
        return __builtin_clz(inValue);
    #endif
#else
    #error Undefined
#endif
}

/// Count the number of 1 bits in a value
inline uint CountBits(uint32 inValue)
{
#if defined(JPH_COMPILER_CLANG) || defined(JPH_COMPILER_GCC)
    return __builtin_popcount(inValue);
#elif defined(JPH_COMPILER_MSVC)
    #if defined(JPH_USE_SSE4_2)
        return _mm_popcnt_u32(inValue);
    #elif defined(JPH_USE_NEON) && (_MSC_VER >= 1930) // _CountOneBits not available on MSVC2019
        return _CountOneBits(inValue);
    #else
        inValue = inValue - ((inValue >> 1) & 0x55555555);
        inValue = (inValue & 0x33333333) + ((inValue >> 2) & 0x33333333);
        inValue = (inValue + (inValue >> 4)) & 0x0F0F0F0F;
        return (inValue * 0x01010101) >> 24;
    #endif
#else
    #error Undefined
#endif
}

/// Get the next higher power of 2 of a value, or the value itself if the value is already a power of 2
inline uint32 GetNextPowerOf2(uint32 inValue)
{
    return inValue <= 1? uint32(1) : uint32(1) << (32 - CountLeadingZeros(inValue - 1));
}

// Simple implementation of C++20 std::bit_cast (unfortunately not constexpr)
template <class To, class From>
JPH_INLINE To BitCast(const From &inValue)
{
    static_assert(std::is_trivially_constructible_v<To>);
    static_assert(sizeof(From) == sizeof(To));

    union FromTo
    {
        To mTo;
        From mFrom;
    };

    FromTo convert;
    convert.mFrom = inValue;
    return convert.mTo;
}

JPH_NAMESPACE_END
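Two of the bit tricks above are easy to sanity-check in isolation. A standalone sketch (not from the repository; the helper names are invented, and __builtin_clz assumes GCC or Clang) of the SWAR popcount fallback and the GetNextPowerOf2 construction:

#include <cstdint>
#include <cstdio>

// Same steps as the portable fallback branch of CountBits
static uint32_t CountBitsPortable(uint32_t v)
{
    v = v - ((v >> 1) & 0x55555555);                // 2-bit partial sums
    v = (v & 0x33333333) + ((v >> 2) & 0x33333333); // 4-bit partial sums
    v = (v + (v >> 4)) & 0x0F0F0F0F;                // 8-bit partial sums
    return (v * 0x01010101) >> 24;                  // add the four bytes together
}

// Mirrors GetNextPowerOf2: round up to a power of 2 via count-leading-zeros
static uint32_t NextPow2(uint32_t v)
{
    return v <= 1 ? 1u : 1u << (32 - __builtin_clz(v - 1));
}

int main()
{
    std::printf("%u %u\n", CountBitsPortable(0xF0F0F0F0u), NextPow2(100)); // prints "16 128"
    return 0;
}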
0
repos/c2z/use_cases/JoltPhysics/include
repos/c2z/use_cases/JoltPhysics/include/Math/Vec8.h
// Jolt Physics Library (https://github.com/jrouwe/JoltPhysics)
// SPDX-FileCopyrightText: 2021 Jorrit Rouwe
// SPDX-License-Identifier: MIT

#pragma once

#include <Jolt/Math/MathTypes.h>

JPH_NAMESPACE_BEGIN

class [[nodiscard]] Vec8
{
public:
    JPH_OVERRIDE_NEW_DELETE

    /// Constructor
    Vec8() = default; ///< Intentionally not initialized for performance reasons
    Vec8(const Vec8 &inRHS) = default;
    JPH_INLINE Vec8(__m256 inRHS) : mValue(inRHS) { }

    /// Set 256 bit vector from 2 128 bit vectors
    JPH_INLINE Vec8(Vec4Arg inLo, Vec4Arg inHi);

    /// Vector with all zeros
    static JPH_INLINE Vec8 sZero();

    /// Replicate across all components
    static JPH_INLINE Vec8 sReplicate(float inV);

    /// Replicate the X component of inV to all components
    static JPH_INLINE Vec8 sSplatX(Vec4Arg inV);

    /// Replicate the Y component of inV to all components
    static JPH_INLINE Vec8 sSplatY(Vec4Arg inV);

    /// Replicate the Z component of inV to all components
    static JPH_INLINE Vec8 sSplatZ(Vec4Arg inV);

    /// Calculates inMul1 * inMul2 + inAdd
    static JPH_INLINE Vec8 sFusedMultiplyAdd(Vec8Arg inMul1, Vec8Arg inMul2, Vec8Arg inAdd);

    /// Component wise select, returns inV1 when highest bit of inControl = 0 and inV2 when highest bit of inControl = 1
    static JPH_INLINE Vec8 sSelect(Vec8Arg inV1, Vec8Arg inV2, UVec8Arg inControl);

    /// Component wise min
    static JPH_INLINE Vec8 sMin(Vec8Arg inV1, Vec8Arg inV2);

    /// Component wise max
    static JPH_INLINE Vec8 sMax(Vec8Arg inV1, Vec8Arg inV2);

    /// Less than
    static JPH_INLINE UVec8 sLess(Vec8Arg inV1, Vec8Arg inV2);

    /// Greater than
    static JPH_INLINE UVec8 sGreater(Vec8Arg inV1, Vec8Arg inV2);

    /// Load from memory
    static JPH_INLINE Vec8 sLoadFloat8(const float *inV);

    /// Load 8 floats from memory, 32 bytes aligned
    static JPH_INLINE Vec8 sLoadFloat8Aligned(const float *inV);

    /// Get float component by index
    JPH_INLINE float operator [] (uint inCoordinate) const { JPH_ASSERT(inCoordinate < 8); return mF32[inCoordinate]; }
    JPH_INLINE float & operator [] (uint inCoordinate) { JPH_ASSERT(inCoordinate < 8); return mF32[inCoordinate]; }

    /// Multiply two float vectors
    JPH_INLINE Vec8 operator * (Vec8Arg inV2) const;

    /// Multiply vector by float
    JPH_INLINE Vec8 operator * (float inV2) const;

    /// Add two float vectors
    JPH_INLINE Vec8 operator + (Vec8Arg inV2) const;

    /// Subtract two float vectors
    JPH_INLINE Vec8 operator - (Vec8Arg inV2) const;

    /// Divide
    JPH_INLINE Vec8 operator / (Vec8Arg inV2) const;

    /// Reciprocal vector
    JPH_INLINE Vec8 Reciprocal() const;

    /// 256 bit variant of Vec::Swizzle (no cross 128 bit lane swizzle)
    template<uint32 SwizzleX, uint32 SwizzleY, uint32 SwizzleZ, uint32 SwizzleW>
    JPH_INLINE Vec8 Swizzle() const;

    /// Get absolute value of all components
    JPH_INLINE Vec8 Abs() const;

    /// Fetch the lower 128 bit from a 256 bit variable
    JPH_INLINE Vec4 LowerVec4() const;

    /// Fetch the higher 128 bit from a 256 bit variable
    JPH_INLINE Vec4 UpperVec4() const;

    /// Get the minimum value of the 8 floats
    JPH_INLINE float ReduceMin() const;

    union
    {
        __m256 mValue;
        float mF32[8];
    };
};

static_assert(is_trivial<Vec8>(), "Is supposed to be a trivial type!");

JPH_NAMESPACE_END

#include "Vec8.inl"
0
repos/c2z/use_cases/JoltPhysics/include
repos/c2z/use_cases/JoltPhysics/include/Math/Vec3.inl
// Jolt Physics Library (https://github.com/jrouwe/JoltPhysics)
// SPDX-FileCopyrightText: 2021 Jorrit Rouwe
// SPDX-License-Identifier: MIT

#include <Jolt/Math/Vec4.h>
#include <Jolt/Math/UVec4.h>
#include <Jolt/Core/HashCombine.h>

JPH_SUPPRESS_WARNINGS_STD_BEGIN
#include <random>
JPH_SUPPRESS_WARNINGS_STD_END

// Create a std::hash for Vec3
JPH_MAKE_HASHABLE(JPH::Vec3, t.GetX(), t.GetY(), t.GetZ())

JPH_NAMESPACE_BEGIN

void Vec3::CheckW() const
{
#ifdef JPH_FLOATING_POINT_EXCEPTIONS_ENABLED
    // Avoid asserts when both components are NaN
    JPH_ASSERT(reinterpret_cast<const uint32 *>(mF32)[2] == reinterpret_cast<const uint32 *>(mF32)[3]);
#endif // JPH_FLOATING_POINT_EXCEPTIONS_ENABLED
}

JPH_INLINE Vec3::Type Vec3::sFixW(Type inValue)
{
#ifdef JPH_FLOATING_POINT_EXCEPTIONS_ENABLED
    #if defined(JPH_USE_SSE)
        return _mm_shuffle_ps(inValue, inValue, _MM_SHUFFLE(2, 2, 1, 0));
    #elif defined(JPH_USE_NEON)
        return JPH_NEON_SHUFFLE_F32x4(inValue, inValue, 0, 1, 2, 2);
    #else
        Type value;
        value.mData[0] = inValue.mData[0];
        value.mData[1] = inValue.mData[1];
        value.mData[2] = inValue.mData[2];
        value.mData[3] = inValue.mData[2];
        return value;
    #endif
#else
    return inValue;
#endif // JPH_FLOATING_POINT_EXCEPTIONS_ENABLED
}

Vec3::Vec3(Vec4Arg inRHS) :
    mValue(sFixW(inRHS.mValue))
{
}

Vec3::Vec3(const Float3 &inV)
{
#if defined(JPH_USE_SSE)
    Type x = _mm_load_ss(&inV.x);
    Type y = _mm_load_ss(&inV.y);
    Type z = _mm_load_ss(&inV.z);
    Type xy = _mm_unpacklo_ps(x, y);
    mValue = _mm_shuffle_ps(xy, z, _MM_SHUFFLE(0, 0, 1, 0)); // Assure Z and W are the same
#elif defined(JPH_USE_NEON)
    float32x2_t xy = vld1_f32(&inV.x);
    float32x2_t zz = vdup_n_f32(inV.z); // Assure Z and W are the same
    mValue = vcombine_f32(xy, zz);
#else
    mF32[0] = inV[0];
    mF32[1] = inV[1];
    mF32[2] = inV[2];
    #ifdef JPH_FLOATING_POINT_EXCEPTIONS_ENABLED
        mF32[3] = inV[2];
    #endif
#endif
}

Vec3::Vec3(float inX, float inY, float inZ)
{
#if defined(JPH_USE_SSE)
    mValue = _mm_set_ps(inZ, inZ, inY, inX);
#elif defined(JPH_USE_NEON)
    uint32x2_t xy = vcreate_f32(static_cast<uint64>(*reinterpret_cast<uint32 *>(&inX)) | (static_cast<uint64>(*reinterpret_cast<uint32 *>(&inY)) << 32));
    uint32x2_t zz = vcreate_f32(static_cast<uint64>(*reinterpret_cast<uint32 *>(&inZ)) | (static_cast<uint64>(*reinterpret_cast<uint32 *>(&inZ)) << 32));
    mValue = vcombine_f32(xy, zz);
#else
    mF32[0] = inX;
    mF32[1] = inY;
    mF32[2] = inZ;
    #ifdef JPH_FLOATING_POINT_EXCEPTIONS_ENABLED
        mF32[3] = inZ;
    #endif
#endif
}

template<uint32 SwizzleX, uint32 SwizzleY, uint32 SwizzleZ>
Vec3 Vec3::Swizzle() const
{
    static_assert(SwizzleX <= 3, "SwizzleX template parameter out of range");
    static_assert(SwizzleY <= 3, "SwizzleY template parameter out of range");
    static_assert(SwizzleZ <= 3, "SwizzleZ template parameter out of range");

#if defined(JPH_USE_SSE)
    return _mm_shuffle_ps(mValue, mValue, _MM_SHUFFLE(SwizzleZ, SwizzleZ, SwizzleY, SwizzleX)); // Assure Z and W are the same
#elif defined(JPH_USE_NEON)
    return JPH_NEON_SHUFFLE_F32x4(mValue, mValue, SwizzleX, SwizzleY, SwizzleZ, SwizzleZ);
#else
    return Vec3(mF32[SwizzleX], mF32[SwizzleY], mF32[SwizzleZ]);
#endif
}

Vec3 Vec3::sZero()
{
#if defined(JPH_USE_SSE)
    return _mm_setzero_ps();
#elif defined(JPH_USE_NEON)
    return vdupq_n_f32(0);
#else
    return Vec3(0, 0, 0);
#endif
}

Vec3 Vec3::sReplicate(float inV)
{
#if defined(JPH_USE_SSE)
    return _mm_set1_ps(inV);
#elif defined(JPH_USE_NEON)
    return vdupq_n_f32(inV);
#else
    return Vec3(inV, inV, inV);
#endif
}

Vec3 Vec3::sNaN()
{
    return sReplicate(numeric_limits<float>::quiet_NaN());
}

Vec3 Vec3::sLoadFloat3Unsafe(const Float3 &inV)
{
#if defined(JPH_USE_SSE)
    Type v = _mm_loadu_ps(&inV.x);
#elif defined(JPH_USE_NEON)
    Type v = vld1q_f32(&inV.x);
#else
    Type v = { inV.x, inV.y, inV.z };
#endif
    return sFixW(v);
}

Vec3 Vec3::sMin(Vec3Arg inV1, Vec3Arg inV2)
{
#if defined(JPH_USE_SSE)
    return _mm_min_ps(inV1.mValue, inV2.mValue);
#elif defined(JPH_USE_NEON)
    return vminq_f32(inV1.mValue, inV2.mValue);
#else
    return Vec3(min(inV1.mF32[0], inV2.mF32[0]),
                min(inV1.mF32[1], inV2.mF32[1]),
                min(inV1.mF32[2], inV2.mF32[2]));
#endif
}

Vec3 Vec3::sMax(Vec3Arg inV1, Vec3Arg inV2)
{
#if defined(JPH_USE_SSE)
    return _mm_max_ps(inV1.mValue, inV2.mValue);
#elif defined(JPH_USE_NEON)
    return vmaxq_f32(inV1.mValue, inV2.mValue);
#else
    return Vec3(max(inV1.mF32[0], inV2.mF32[0]),
                max(inV1.mF32[1], inV2.mF32[1]),
                max(inV1.mF32[2], inV2.mF32[2]));
#endif
}

Vec3 Vec3::sClamp(Vec3Arg inV, Vec3Arg inMin, Vec3Arg inMax)
{
    return sMax(sMin(inV, inMax), inMin);
}

UVec4 Vec3::sEquals(Vec3Arg inV1, Vec3Arg inV2)
{
#if defined(JPH_USE_SSE)
    return _mm_castps_si128(_mm_cmpeq_ps(inV1.mValue, inV2.mValue));
#elif defined(JPH_USE_NEON)
    return vceqq_f32(inV1.mValue, inV2.mValue);
#else
    uint32 z = inV1.mF32[2] == inV2.mF32[2]? 0xffffffffu : 0;
    return UVec4(inV1.mF32[0] == inV2.mF32[0]? 0xffffffffu : 0,
                 inV1.mF32[1] == inV2.mF32[1]? 0xffffffffu : 0,
                 z,
                 z);
#endif
}

UVec4 Vec3::sLess(Vec3Arg inV1, Vec3Arg inV2)
{
#if defined(JPH_USE_SSE)
    return _mm_castps_si128(_mm_cmplt_ps(inV1.mValue, inV2.mValue));
#elif defined(JPH_USE_NEON)
    return vcltq_f32(inV1.mValue, inV2.mValue);
#else
    uint32 z = inV1.mF32[2] < inV2.mF32[2]? 0xffffffffu : 0;
    return UVec4(inV1.mF32[0] < inV2.mF32[0]? 0xffffffffu : 0,
                 inV1.mF32[1] < inV2.mF32[1]? 0xffffffffu : 0,
                 z,
                 z);
#endif
}

UVec4 Vec3::sLessOrEqual(Vec3Arg inV1, Vec3Arg inV2)
{
#if defined(JPH_USE_SSE)
    return _mm_castps_si128(_mm_cmple_ps(inV1.mValue, inV2.mValue));
#elif defined(JPH_USE_NEON)
    return vcleq_f32(inV1.mValue, inV2.mValue);
#else
    uint32 z = inV1.mF32[2] <= inV2.mF32[2]? 0xffffffffu : 0;
    return UVec4(inV1.mF32[0] <= inV2.mF32[0]? 0xffffffffu : 0,
                 inV1.mF32[1] <= inV2.mF32[1]? 0xffffffffu : 0,
                 z,
                 z);
#endif
}

UVec4 Vec3::sGreater(Vec3Arg inV1, Vec3Arg inV2)
{
#if defined(JPH_USE_SSE)
    return _mm_castps_si128(_mm_cmpgt_ps(inV1.mValue, inV2.mValue));
#elif defined(JPH_USE_NEON)
    return vcgtq_f32(inV1.mValue, inV2.mValue);
#else
    uint32 z = inV1.mF32[2] > inV2.mF32[2]? 0xffffffffu : 0;
    return UVec4(inV1.mF32[0] > inV2.mF32[0]? 0xffffffffu : 0,
                 inV1.mF32[1] > inV2.mF32[1]? 0xffffffffu : 0,
                 z,
                 z);
#endif
}

UVec4 Vec3::sGreaterOrEqual(Vec3Arg inV1, Vec3Arg inV2)
{
#if defined(JPH_USE_SSE)
    return _mm_castps_si128(_mm_cmpge_ps(inV1.mValue, inV2.mValue));
#elif defined(JPH_USE_NEON)
    return vcgeq_f32(inV1.mValue, inV2.mValue);
#else
    uint32 z = inV1.mF32[2] >= inV2.mF32[2]? 0xffffffffu : 0;
    return UVec4(inV1.mF32[0] >= inV2.mF32[0]? 0xffffffffu : 0,
                 inV1.mF32[1] >= inV2.mF32[1]? 0xffffffffu : 0,
                 z,
                 z);
#endif
}

Vec3 Vec3::sFusedMultiplyAdd(Vec3Arg inMul1, Vec3Arg inMul2, Vec3Arg inAdd)
{
#if defined(JPH_USE_SSE)
    #ifdef JPH_USE_FMADD
        return _mm_fmadd_ps(inMul1.mValue, inMul2.mValue, inAdd.mValue);
    #else
        return _mm_add_ps(_mm_mul_ps(inMul1.mValue, inMul2.mValue), inAdd.mValue);
    #endif
#elif defined(JPH_USE_NEON)
    return vmlaq_f32(inAdd.mValue, inMul1.mValue, inMul2.mValue);
#else
    return Vec3(inMul1.mF32[0] * inMul2.mF32[0] + inAdd.mF32[0],
                inMul1.mF32[1] * inMul2.mF32[1] + inAdd.mF32[1],
                inMul1.mF32[2] * inMul2.mF32[2] + inAdd.mF32[2]);
#endif
}

Vec3 Vec3::sSelect(Vec3Arg inV1, Vec3Arg inV2, UVec4Arg inControl)
{
#if defined(JPH_USE_SSE4_1)
    Type v = _mm_blendv_ps(inV1.mValue, inV2.mValue, _mm_castsi128_ps(inControl.mValue));
    return sFixW(v);
#elif defined(JPH_USE_NEON)
    Type v = vbslq_f32(vshrq_n_s32(inControl.mValue, 31), inV2.mValue, inV1.mValue);
    return sFixW(v);
#else
    Vec3 result;
    for (int i = 0; i < 3; i++)
        result.mF32[i] = inControl.mU32[i] ? inV2.mF32[i] : inV1.mF32[i];
#ifdef JPH_FLOATING_POINT_EXCEPTIONS_ENABLED
    result.mF32[3] = result.mF32[2];
#endif // JPH_FLOATING_POINT_EXCEPTIONS_ENABLED
    return result;
#endif
}

Vec3 Vec3::sOr(Vec3Arg inV1, Vec3Arg inV2)
{
#if defined(JPH_USE_SSE)
    return _mm_or_ps(inV1.mValue, inV2.mValue);
#elif defined(JPH_USE_NEON)
    return vorrq_s32(inV1.mValue, inV2.mValue);
#else
    return Vec3(UVec4::sOr(inV1.ReinterpretAsInt(), inV2.ReinterpretAsInt()).ReinterpretAsFloat());
#endif
}

Vec3 Vec3::sXor(Vec3Arg inV1, Vec3Arg inV2)
{
#if defined(JPH_USE_SSE)
    return _mm_xor_ps(inV1.mValue, inV2.mValue);
#elif defined(JPH_USE_NEON)
    return veorq_s32(inV1.mValue, inV2.mValue);
#else
    return Vec3(UVec4::sXor(inV1.ReinterpretAsInt(), inV2.ReinterpretAsInt()).ReinterpretAsFloat());
#endif
}

Vec3 Vec3::sAnd(Vec3Arg inV1, Vec3Arg inV2)
{
#if defined(JPH_USE_SSE)
    return _mm_and_ps(inV1.mValue, inV2.mValue);
#elif defined(JPH_USE_NEON)
    return vandq_s32(inV1.mValue, inV2.mValue);
#else
    return Vec3(UVec4::sAnd(inV1.ReinterpretAsInt(), inV2.ReinterpretAsInt()).ReinterpretAsFloat());
#endif
}

Vec3 Vec3::sUnitSpherical(float inTheta, float inPhi)
{
    Vec4 s, c;
    Vec4(inTheta, inPhi, 0, 0).SinCos(s, c);
    return Vec3(s.GetX() * c.GetY(), s.GetX() * s.GetY(), c.GetX());
}

template <class Random>
Vec3 Vec3::sRandom(Random &inRandom)
{
    std::uniform_real_distribution<float> zero_to_one(0.0f, 1.0f);
    float theta = JPH_PI * zero_to_one(inRandom);
    float phi = 2.0f * JPH_PI * zero_to_one(inRandom);
    return sUnitSpherical(theta, phi);
}

bool Vec3::operator == (Vec3Arg inV2) const
{
    return sEquals(*this, inV2).TestAllXYZTrue();
}

bool Vec3::IsClose(Vec3Arg inV2, float inMaxDistSq) const
{
    return (inV2 - *this).LengthSq() <= inMaxDistSq;
}

bool Vec3::IsNearZero(float inMaxDistSq) const
{
    return LengthSq() <= inMaxDistSq;
}

Vec3 Vec3::operator * (Vec3Arg inV2) const
{
#if defined(JPH_USE_SSE)
    return _mm_mul_ps(mValue, inV2.mValue);
#elif defined(JPH_USE_NEON)
    return vmulq_f32(mValue, inV2.mValue);
#else
    return Vec3(mF32[0] * inV2.mF32[0], mF32[1] * inV2.mF32[1], mF32[2] * inV2.mF32[2]);
#endif
}

Vec3 Vec3::operator * (float inV2) const
{
#if defined(JPH_USE_SSE)
    return _mm_mul_ps(mValue, _mm_set1_ps(inV2));
#elif defined(JPH_USE_NEON)
    return vmulq_n_f32(mValue, inV2);
#else
    return Vec3(mF32[0] * inV2, mF32[1] * inV2, mF32[2] * inV2);
#endif
}

Vec3 operator * (float inV1, Vec3Arg inV2)
{
#if defined(JPH_USE_SSE)
    return _mm_mul_ps(_mm_set1_ps(inV1), inV2.mValue);
#elif defined(JPH_USE_NEON)
    return vmulq_n_f32(inV2.mValue, inV1);
#else
    return Vec3(inV1 * inV2.mF32[0], inV1 * inV2.mF32[1], inV1 * inV2.mF32[2]);
#endif
}

Vec3 Vec3::operator / (float inV2) const
{
#if defined(JPH_USE_SSE)
    return _mm_div_ps(mValue, _mm_set1_ps(inV2));
#elif defined(JPH_USE_NEON)
    return vdivq_f32(mValue, vdupq_n_f32(inV2));
#else
    return Vec3(mF32[0] / inV2, mF32[1] / inV2, mF32[2] / inV2);
#endif
}

Vec3 &Vec3::operator *= (float inV2)
{
#if defined(JPH_USE_SSE)
    mValue = _mm_mul_ps(mValue, _mm_set1_ps(inV2));
#elif defined(JPH_USE_NEON)
    mValue = vmulq_n_f32(mValue, inV2);
#else
    for (int i = 0; i < 3; ++i)
        mF32[i] *= inV2;
    #ifdef JPH_FLOATING_POINT_EXCEPTIONS_ENABLED
        mF32[3] = mF32[2];
    #endif
#endif
    return *this;
}

Vec3 &Vec3::operator *= (Vec3Arg inV2)
{
#if defined(JPH_USE_SSE)
    mValue = _mm_mul_ps(mValue, inV2.mValue);
#elif defined(JPH_USE_NEON)
    mValue = vmulq_f32(mValue, inV2.mValue);
#else
    for (int i = 0; i < 3; ++i)
        mF32[i] *= inV2.mF32[i];
    #ifdef JPH_FLOATING_POINT_EXCEPTIONS_ENABLED
        mF32[3] = mF32[2];
    #endif
#endif
    return *this;
}

Vec3 &Vec3::operator /= (float inV2)
{
#if defined(JPH_USE_SSE)
    mValue = _mm_div_ps(mValue, _mm_set1_ps(inV2));
#elif defined(JPH_USE_NEON)
    mValue = vdivq_f32(mValue, vdupq_n_f32(inV2));
#else
    for (int i = 0; i < 3; ++i)
        mF32[i] /= inV2;
    #ifdef JPH_FLOATING_POINT_EXCEPTIONS_ENABLED
        mF32[3] = mF32[2];
    #endif
#endif
    return *this;
}

Vec3 Vec3::operator + (Vec3Arg inV2) const
{
#if defined(JPH_USE_SSE)
    return _mm_add_ps(mValue, inV2.mValue);
#elif defined(JPH_USE_NEON)
    return vaddq_f32(mValue, inV2.mValue);
#else
    return Vec3(mF32[0] + inV2.mF32[0], mF32[1] + inV2.mF32[1], mF32[2] + inV2.mF32[2]);
#endif
}

Vec3 &Vec3::operator += (Vec3Arg inV2)
{
#if defined(JPH_USE_SSE)
    mValue = _mm_add_ps(mValue, inV2.mValue);
#elif defined(JPH_USE_NEON)
    mValue = vaddq_f32(mValue, inV2.mValue);
#else
    for (int i = 0; i < 3; ++i)
        mF32[i] += inV2.mF32[i];
    #ifdef JPH_FLOATING_POINT_EXCEPTIONS_ENABLED
        mF32[3] = mF32[2];
    #endif
#endif
    return *this;
}

Vec3 Vec3::operator - () const
{
#if defined(JPH_USE_SSE)
    return _mm_sub_ps(_mm_setzero_ps(), mValue);
#elif defined(JPH_USE_NEON)
    return vnegq_f32(mValue);
#else
    return Vec3(-mF32[0], -mF32[1], -mF32[2]);
#endif
}

Vec3 Vec3::operator - (Vec3Arg inV2) const
{
#if defined(JPH_USE_SSE)
    return _mm_sub_ps(mValue, inV2.mValue);
#elif defined(JPH_USE_NEON)
    return vsubq_f32(mValue, inV2.mValue);
#else
    return Vec3(mF32[0] - inV2.mF32[0], mF32[1] - inV2.mF32[1], mF32[2] - inV2.mF32[2]);
#endif
}

Vec3 &Vec3::operator -= (Vec3Arg inV2)
{
#if defined(JPH_USE_SSE)
    mValue = _mm_sub_ps(mValue, inV2.mValue);
#elif defined(JPH_USE_NEON)
    mValue = vsubq_f32(mValue, inV2.mValue);
#else
    for (int i = 0; i < 3; ++i)
        mF32[i] -= inV2.mF32[i];
    #ifdef JPH_FLOATING_POINT_EXCEPTIONS_ENABLED
        mF32[3] = mF32[2];
    #endif
#endif
    return *this;
}

Vec3 Vec3::operator / (Vec3Arg inV2) const
{
    inV2.CheckW(); // Check W equals Z to avoid div by zero
#if defined(JPH_USE_SSE)
    return _mm_div_ps(mValue, inV2.mValue);
#elif defined(JPH_USE_NEON)
    return vdivq_f32(mValue, inV2.mValue);
#else
    return Vec3(mF32[0] / inV2.mF32[0], mF32[1] / inV2.mF32[1], mF32[2] / inV2.mF32[2]);
#endif
}

Vec4 Vec3::SplatX() const
{
#if defined(JPH_USE_SSE)
    return _mm_shuffle_ps(mValue, mValue, _MM_SHUFFLE(0, 0, 0, 0));
#elif defined(JPH_USE_NEON)
    return vdupq_laneq_f32(mValue, 0);
#else
    return Vec4(mF32[0], mF32[0], mF32[0], mF32[0]);
#endif
}

Vec4 Vec3::SplatY() const
{
#if defined(JPH_USE_SSE)
    return _mm_shuffle_ps(mValue, mValue, _MM_SHUFFLE(1, 1, 1, 1));
#elif defined(JPH_USE_NEON)
    return vdupq_laneq_f32(mValue, 1);
#else
    return Vec4(mF32[1], mF32[1], mF32[1], mF32[1]);
#endif
}

Vec4 Vec3::SplatZ() const
{
#if defined(JPH_USE_SSE)
    return _mm_shuffle_ps(mValue, mValue, _MM_SHUFFLE(2, 2, 2, 2));
#elif defined(JPH_USE_NEON)
    return vdupq_laneq_f32(mValue, 2);
#else
    return Vec4(mF32[2], mF32[2], mF32[2], mF32[2]);
#endif
}

int Vec3::GetLowestComponentIndex() const
{
    return GetX() < GetY() ? (GetZ() < GetX() ? 2 : 0) : (GetZ() < GetY() ? 2 : 1);
}

int Vec3::GetHighestComponentIndex() const
{
    return GetX() > GetY() ? (GetZ() > GetX() ? 2 : 0) : (GetZ() > GetY() ? 2 : 1);
}

Vec3 Vec3::Abs() const
{
#if defined(JPH_USE_AVX512)
    return _mm_range_ps(mValue, mValue, 0b1000);
#elif defined(JPH_USE_SSE)
    return _mm_max_ps(_mm_sub_ps(_mm_setzero_ps(), mValue), mValue);
#elif defined(JPH_USE_NEON)
    return vabsq_f32(mValue);
#else
    return Vec3(abs(mF32[0]), abs(mF32[1]), abs(mF32[2]));
#endif
}

Vec3 Vec3::Reciprocal() const
{
    return sReplicate(1.0f) / mValue;
}

Vec3 Vec3::Cross(Vec3Arg inV2) const
{
#if defined(JPH_USE_SSE)
    Type t1 = _mm_shuffle_ps(inV2.mValue, inV2.mValue, _MM_SHUFFLE(0, 0, 2, 1)); // Assure Z and W are the same
    t1 = _mm_mul_ps(t1, mValue);
    Type t2 = _mm_shuffle_ps(mValue, mValue, _MM_SHUFFLE(0, 0, 2, 1)); // Assure Z and W are the same
    t2 = _mm_mul_ps(t2, inV2.mValue);
    Type t3 = _mm_sub_ps(t1, t2);
    return _mm_shuffle_ps(t3, t3, _MM_SHUFFLE(0, 0, 2, 1)); // Assure Z and W are the same
#elif defined(JPH_USE_NEON)
    Type t1 = JPH_NEON_SHUFFLE_F32x4(inV2.mValue, inV2.mValue, 1, 2, 0, 0); // Assure Z and W are the same
    t1 = vmulq_f32(t1, mValue);
    Type t2 = JPH_NEON_SHUFFLE_F32x4(mValue, mValue, 1, 2, 0, 0); // Assure Z and W are the same
    t2 = vmulq_f32(t2, inV2.mValue);
    Type t3 = vsubq_f32(t1, t2);
    return JPH_NEON_SHUFFLE_F32x4(t3, t3, 1, 2, 0, 0); // Assure Z and W are the same
#else
    return Vec3(mF32[1] * inV2.mF32[2] - mF32[2] * inV2.mF32[1],
                mF32[2] * inV2.mF32[0] - mF32[0] * inV2.mF32[2],
                mF32[0] * inV2.mF32[1] - mF32[1] * inV2.mF32[0]);
#endif
}

Vec3 Vec3::DotV(Vec3Arg inV2) const
{
#if defined(JPH_USE_SSE4_1)
    return _mm_dp_ps(mValue, inV2.mValue, 0x7f);
#elif defined(JPH_USE_NEON)
    float32x4_t mul = vmulq_f32(mValue, inV2.mValue);
    mul = vsetq_lane_f32(0, mul, 3);
    return vdupq_n_f32(vaddvq_f32(mul));
#else
    float dot = 0.0f;
    for (int i = 0; i < 3; i++)
        dot += mF32[i] * inV2.mF32[i];
    return Vec3::sReplicate(dot);
#endif
}

Vec4 Vec3::DotV4(Vec3Arg inV2) const
{
#if defined(JPH_USE_SSE4_1)
    return _mm_dp_ps(mValue, inV2.mValue, 0x7f);
#elif defined(JPH_USE_NEON)
    float32x4_t mul = vmulq_f32(mValue, inV2.mValue);
    mul = vsetq_lane_f32(0, mul, 3);
    return vdupq_n_f32(vaddvq_f32(mul));
#else
    float dot = 0.0f;
    for (int i = 0; i < 3; i++)
        dot += mF32[i] * inV2.mF32[i];
    return Vec4::sReplicate(dot);
#endif
}

float Vec3::Dot(Vec3Arg inV2) const
{
#if defined(JPH_USE_SSE4_1)
    return _mm_cvtss_f32(_mm_dp_ps(mValue, inV2.mValue, 0x7f));
#elif defined(JPH_USE_NEON)
    float32x4_t mul = vmulq_f32(mValue, inV2.mValue);
    mul = vsetq_lane_f32(0, mul, 3);
    return vaddvq_f32(mul);
#else
    float dot = 0.0f;
    for (int i = 0; i < 3; i++)
        dot += mF32[i] * inV2.mF32[i];
    return dot;
#endif
}

float Vec3::LengthSq() const
{
#if defined(JPH_USE_SSE4_1)
    return _mm_cvtss_f32(_mm_dp_ps(mValue, mValue, 0x7f));
#elif defined(JPH_USE_NEON)
    float32x4_t mul = vmulq_f32(mValue, mValue);
    mul = vsetq_lane_f32(0, mul, 3);
    return vaddvq_f32(mul);
#else
    float len_sq = 0.0f;
    for (int i = 0; i < 3; i++)
        len_sq += mF32[i] * mF32[i];
    return len_sq;
#endif
}

float Vec3::Length() const
{
#if defined(JPH_USE_SSE4_1)
    return _mm_cvtss_f32(_mm_sqrt_ss(_mm_dp_ps(mValue, mValue, 0x7f)));
#elif defined(JPH_USE_NEON)
    float32x4_t mul =
vmulq_f32(mValue, mValue); mul = vsetq_lane_f32(0, mul, 3); float32x2_t sum = vdup_n_f32(vaddvq_f32(mul)); return vget_lane_f32(vsqrt_f32(sum), 0); #else return sqrt(LengthSq()); #endif } Vec3 Vec3::Sqrt() const { #if defined(JPH_USE_SSE) return _mm_sqrt_ps(mValue); #elif defined(JPH_USE_NEON) return vsqrtq_f32(mValue); #else return Vec3(sqrt(mF32[0]), sqrt(mF32[1]), sqrt(mF32[2])); #endif } Vec3 Vec3::Normalized() const { #if defined(JPH_USE_SSE4_1) return _mm_div_ps(mValue, _mm_sqrt_ps(_mm_dp_ps(mValue, mValue, 0x7f))); #elif defined(JPH_USE_NEON) float32x4_t mul = vmulq_f32(mValue, mValue); mul = vsetq_lane_f32(0, mul, 3); float32x4_t sum = vdupq_n_f32(vaddvq_f32(mul)); return vdivq_f32(mValue, vsqrtq_f32(sum)); #else return *this / Length(); #endif } Vec3 Vec3::NormalizedOr(Vec3Arg inZeroValue) const { #if defined(JPH_USE_SSE4_1) Type len_sq = _mm_dp_ps(mValue, mValue, 0x7f); Type is_zero = _mm_cmpeq_ps(len_sq, _mm_setzero_ps()); #ifdef JPH_FLOATING_POINT_EXCEPTIONS_ENABLED if (_mm_movemask_ps(is_zero) == 0xf) return inZeroValue; else return _mm_div_ps(mValue, _mm_sqrt_ps(len_sq)); #else return _mm_blendv_ps(_mm_div_ps(mValue, _mm_sqrt_ps(len_sq)), inZeroValue.mValue, is_zero); #endif // JPH_FLOATING_POINT_EXCEPTIONS_ENABLED #elif defined(JPH_USE_NEON) float32x4_t mul = vmulq_f32(mValue, mValue); mul = vsetq_lane_f32(0, mul, 3); float32x4_t sum = vdupq_n_f32(vaddvq_f32(mul)); float32x4_t len = vsqrtq_f32(sum); float32x4_t is_zero = vceqq_f32(len, vdupq_n_f32(0)); return vbslq_f32(is_zero, inZeroValue.mValue, vdivq_f32(mValue, len)); #else float len_sq = LengthSq(); if (len_sq == 0.0f) return inZeroValue; else return *this / sqrt(len_sq); #endif } bool Vec3::IsNormalized(float inTolerance) const { return abs(LengthSq() - 1.0f) <= inTolerance; } bool Vec3::IsNaN() const { #if defined(JPH_USE_AVX512) return (_mm_fpclass_ps_mask(mValue, 0b10000001) & 0x7) != 0; #elif defined(JPH_USE_SSE) return (_mm_movemask_ps(_mm_cmpunord_ps(mValue, mValue)) & 0x7) != 0; #elif defined(JPH_USE_NEON) uint32x4_t mask = JPH_NEON_UINT32x4(1, 1, 1, 0); uint32x4_t is_equal = vceqq_f32(mValue, mValue); // If a number is not equal to itself it's a NaN return vaddvq_u32(vandq_u32(is_equal, mask)) != 3; #else return isnan(mF32[0]) || isnan(mF32[1]) || isnan(mF32[2]); #endif } void Vec3::StoreFloat3(Float3 *outV) const { #if defined(JPH_USE_SSE) _mm_store_ss(&outV->x, mValue); Vec3 t = Swizzle<SWIZZLE_Y, SWIZZLE_Z, SWIZZLE_UNUSED>(); _mm_store_ss(&outV->y, t.mValue); t = t.Swizzle<SWIZZLE_Y, SWIZZLE_UNUSED, SWIZZLE_UNUSED>(); _mm_store_ss(&outV->z, t.mValue); #elif defined(JPH_USE_NEON) float32x2_t xy = vget_low_f32(mValue); vst1_f32(&outV->x, xy); vst1q_lane_f32(&outV->z, mValue, 2); #else outV->x = mF32[0]; outV->y = mF32[1]; outV->z = mF32[2]; #endif } UVec4 Vec3::ToInt() const { #if defined(JPH_USE_SSE) return _mm_cvttps_epi32(mValue); #elif defined(JPH_USE_NEON) return vcvtq_u32_f32(mValue); #else return UVec4(uint32(mF32[0]), uint32(mF32[1]), uint32(mF32[2]), uint32(mF32[3])); #endif } UVec4 Vec3::ReinterpretAsInt() const { #if defined(JPH_USE_SSE) return UVec4(_mm_castps_si128(mValue)); #elif defined(JPH_USE_NEON) return vreinterpretq_u32_f32(mValue); #else return *reinterpret_cast<const UVec4 *>(this); #endif } float Vec3::ReduceMin() const { Vec3 v = sMin(mValue, Swizzle<SWIZZLE_Y, SWIZZLE_UNUSED, SWIZZLE_Z>()); v = sMin(v, v.Swizzle<SWIZZLE_Z, SWIZZLE_UNUSED, SWIZZLE_UNUSED>()); return v.GetX(); } float Vec3::ReduceMax() const { Vec3 v = sMax(mValue, Swizzle<SWIZZLE_Y, SWIZZLE_UNUSED, SWIZZLE_Z>()); v = 
sMax(v, v.Swizzle<SWIZZLE_Z, SWIZZLE_UNUSED, SWIZZLE_UNUSED>()); return v.GetX(); } Vec3 Vec3::GetNormalizedPerpendicular() const { if (abs(mF32[0]) > abs(mF32[1])) { float len = sqrt(mF32[0] * mF32[0] + mF32[2] * mF32[2]); return Vec3(mF32[2], 0.0f, -mF32[0]) / len; } else { float len = sqrt(mF32[1] * mF32[1] + mF32[2] * mF32[2]); return Vec3(0.0f, mF32[2], -mF32[1]) / len; } } Vec3 Vec3::GetSign() const { #if defined(JPH_USE_AVX512) return _mm_fixupimm_ps(mValue, mValue, _mm_set1_epi32(0xA9A90A00), 0); #elif defined(JPH_USE_SSE) Type minus_one = _mm_set1_ps(-1.0f); Type one = _mm_set1_ps(1.0f); return _mm_or_ps(_mm_and_ps(mValue, minus_one), one); #elif defined(JPH_USE_NEON) Type minus_one = vdupq_n_f32(-1.0f); Type one = vdupq_n_f32(1.0f); return vorrq_s32(vandq_s32(mValue, minus_one), one); #else return Vec3(signbit(mF32[0])? -1.0f : 1.0f, signbit(mF32[1])? -1.0f : 1.0f, signbit(mF32[2])? -1.0f : 1.0f); #endif } JPH_NAMESPACE_END
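// --- Editor's note: a minimal usage sketch for the Vec3 operations implemented
// above, not part of the library source. It assumes a standard Jolt build in
// which <Jolt/Jolt.h> must be included before any other Jolt header; main()
// and the sample values are illustrative only.
#include <Jolt/Jolt.h>
#include <Jolt/Math/Vec3.h>

#include <cstdio>

int main()
{
	JPH::Vec3 x(1.0f, 0.0f, 0.0f);
	JPH::Vec3 y(0.0f, 1.0f, 0.0f);

	JPH::Vec3 z = x.Cross(y);   // Cross of the X and Y axes yields the Z axis
	float d = x.Dot(y);         // Perpendicular vectors have a dot product of 0
	float l = (x + y).Length(); // sqrt(2)

	// NormalizedOr avoids the division by zero that Normalized() performs on a
	// zero vector by returning the supplied fallback instead
	JPH::Vec3 safe = JPH::Vec3(0.0f, 0.0f, 0.0f).NormalizedOr(z);

	std::printf("z = (%g, %g, %g), dot = %g, len = %g, safe.z = %g\n",
		z.GetX(), z.GetY(), z.GetZ(), d, l, safe.GetZ());
	return 0;
}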
0
repos/c2z/use_cases/JoltPhysics/include
repos/c2z/use_cases/JoltPhysics/include/Math/Vec8.inl
// Jolt Physics Library (https://github.com/jrouwe/JoltPhysics) // SPDX-FileCopyrightText: 2021 Jorrit Rouwe // SPDX-License-Identifier: MIT #include <Jolt/Math/UVec8.h> JPH_NAMESPACE_BEGIN Vec8::Vec8(Vec4Arg inLo, Vec4Arg inHi) : mValue(_mm256_insertf128_ps(_mm256_castps128_ps256(inLo.mValue), inHi.mValue, 1)) { } Vec8 Vec8::sZero() { return _mm256_setzero_ps(); } Vec8 Vec8::sReplicate(float inV) { return _mm256_set1_ps(inV); } Vec8 Vec8::sSplatX(Vec4Arg inV) { return _mm256_set1_ps(inV.GetX()); } Vec8 Vec8::sSplatY(Vec4Arg inV) { return _mm256_set1_ps(inV.GetY()); } Vec8 Vec8::sSplatZ(Vec4Arg inV) { return _mm256_set1_ps(inV.GetZ()); } Vec8 Vec8::sFusedMultiplyAdd(Vec8Arg inMul1, Vec8Arg inMul2, Vec8Arg inAdd) { #ifdef JPH_USE_FMADD return _mm256_fmadd_ps(inMul1.mValue, inMul2.mValue, inAdd.mValue); #else return _mm256_add_ps(_mm256_mul_ps(inMul1.mValue, inMul2.mValue), inAdd.mValue); #endif } Vec8 Vec8::sSelect(Vec8Arg inV1, Vec8Arg inV2, UVec8Arg inControl) { return _mm256_blendv_ps(inV1.mValue, inV2.mValue, _mm256_castsi256_ps(inControl.mValue)); } Vec8 Vec8::sMin(Vec8Arg inV1, Vec8Arg inV2) { return _mm256_min_ps(inV1.mValue, inV2.mValue); } Vec8 Vec8::sMax(Vec8Arg inV1, Vec8Arg inV2) { return _mm256_max_ps(inV1.mValue, inV2.mValue); } UVec8 Vec8::sLess(Vec8Arg inV1, Vec8Arg inV2) { return _mm256_castps_si256(_mm256_cmp_ps(inV1.mValue, inV2.mValue, _CMP_LT_OQ)); } UVec8 Vec8::sGreater(Vec8Arg inV1, Vec8Arg inV2) { return _mm256_castps_si256(_mm256_cmp_ps(inV1.mValue, inV2.mValue, _CMP_GT_OQ)); } Vec8 Vec8::sLoadFloat8(const float *inV) { return _mm256_loadu_ps(inV); } Vec8 Vec8::sLoadFloat8Aligned(const float *inV) { return _mm256_load_ps(inV); } Vec8 Vec8::operator * (Vec8Arg inV2) const { return _mm256_mul_ps(mValue, inV2.mValue); } Vec8 Vec8::operator * (float inV2) const { return _mm256_mul_ps(mValue, _mm256_set1_ps(inV2)); } Vec8 Vec8::operator + (Vec8Arg inV2) const { return _mm256_add_ps(mValue, inV2.mValue); } Vec8 Vec8::operator - (Vec8Arg inV2) const { return _mm256_sub_ps(mValue, inV2.mValue); } Vec8 Vec8::operator / (Vec8Arg inV2) const { return _mm256_div_ps(mValue, inV2.mValue); } Vec8 Vec8::Reciprocal() const { return Vec8::sReplicate(1.0f) / mValue; } template<uint32 SwizzleX, uint32 SwizzleY, uint32 SwizzleZ, uint32 SwizzleW> Vec8 Vec8::Swizzle() const { static_assert(SwizzleX <= 3, "SwizzleX template parameter out of range"); static_assert(SwizzleY <= 3, "SwizzleY template parameter out of range"); static_assert(SwizzleZ <= 3, "SwizzleZ template parameter out of range"); static_assert(SwizzleW <= 3, "SwizzleW template parameter out of range"); return _mm256_shuffle_ps(mValue, mValue, _MM_SHUFFLE(SwizzleW, SwizzleZ, SwizzleY, SwizzleX)); } Vec8 Vec8::Abs() const { #if defined(JPH_USE_AVX512) return _mm256_range_ps(mValue, mValue, 0b1000); #else return _mm256_max_ps(_mm256_sub_ps(_mm256_setzero_ps(), mValue), mValue); #endif } Vec4 Vec8::LowerVec4() const { return _mm256_castps256_ps128(mValue); } Vec4 Vec8::UpperVec4() const { return _mm256_extractf128_ps(mValue, 1); } float Vec8::ReduceMin() const { return Vec4::sMin(LowerVec4(), UpperVec4()).ReduceMin(); } JPH_NAMESPACE_END
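// --- Editor's note: Vec8 has no scalar fallback (every path above uses _mm256
// intrinsics), so it is only available on builds with AVX enabled. A minimal
// sketch under that assumption, not part of the library source; the data and
// main() are illustrative only.
#include <Jolt/Jolt.h>
#include <Jolt/Math/Vec8.h>

#include <cstdio>

int main()
{
	// sLoadFloat8Aligned requires 32-byte alignment (it maps to _mm256_load_ps)
	alignas(32) const float data[8] = { 3.0f, 1.0f, 4.0f, 1.0f, 5.0f, 9.0f, 2.0f, 6.0f };
	JPH::Vec8 v = JPH::Vec8::sLoadFloat8Aligned(data);

	// Compute 2 * v + 1 across all 8 lanes with a single fused multiply-add
	JPH::Vec8 r = JPH::Vec8::sFusedMultiplyAdd(v, JPH::Vec8::sReplicate(2.0f), JPH::Vec8::sReplicate(1.0f));

	// Horizontal minimum over the 8 lanes: 2 * 1 + 1 = 3
	std::printf("min = %g\n", r.ReduceMin());
	return 0;
}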
0
repos/c2z/use_cases/JoltPhysics/include
repos/c2z/use_cases/JoltPhysics/include/Math/UVec4.inl
// Jolt Physics Library (https://github.com/jrouwe/JoltPhysics) // SPDX-FileCopyrightText: 2021 Jorrit Rouwe // SPDX-License-Identifier: MIT JPH_NAMESPACE_BEGIN UVec4::UVec4(uint32 inX, uint32 inY, uint32 inZ, uint32 inW) { #if defined(JPH_USE_SSE) mValue = _mm_set_epi32(int(inW), int(inZ), int(inY), int(inX)); #elif defined(JPH_USE_NEON) uint32x2_t xy = vcreate_u32(static_cast<uint64>(inX) | (static_cast<uint64>(inY) << 32)); uint32x2_t zw = vcreate_u32(static_cast<uint64>(inZ) | (static_cast<uint64>(inW) << 32)); mValue = vcombine_u32(xy, zw); #else mU32[0] = inX; mU32[1] = inY; mU32[2] = inZ; mU32[3] = inW; #endif } bool UVec4::operator == (UVec4Arg inV2) const { return sEquals(*this, inV2).TestAllTrue(); } template<uint32 SwizzleX, uint32 SwizzleY, uint32 SwizzleZ, uint32 SwizzleW> UVec4 UVec4::Swizzle() const { static_assert(SwizzleX <= 3, "SwizzleX template parameter out of range"); static_assert(SwizzleY <= 3, "SwizzleY template parameter out of range"); static_assert(SwizzleZ <= 3, "SwizzleZ template parameter out of range"); static_assert(SwizzleW <= 3, "SwizzleW template parameter out of range"); #if defined(JPH_USE_SSE) return _mm_shuffle_epi32(mValue, _MM_SHUFFLE(SwizzleW, SwizzleZ, SwizzleY, SwizzleX)); #elif defined(JPH_USE_NEON) return JPH_NEON_SHUFFLE_F32x4(mValue, mValue, SwizzleX, SwizzleY, SwizzleZ, SwizzleW); #else return UVec4(mU32[SwizzleX], mU32[SwizzleY], mU32[SwizzleZ], mU32[SwizzleW]); #endif } UVec4 UVec4::sZero() { #if defined(JPH_USE_SSE) return _mm_setzero_si128(); #elif defined(JPH_USE_NEON) return vdupq_n_u32(0); #else return UVec4(0, 0, 0, 0); #endif } UVec4 UVec4::sReplicate(uint32 inV) { #if defined(JPH_USE_SSE) return _mm_set1_epi32(int(inV)); #elif defined(JPH_USE_NEON) return vdupq_n_u32(inV); #else return UVec4(inV, inV, inV, inV); #endif } UVec4 UVec4::sLoadInt(const uint32 *inV) { #if defined(JPH_USE_SSE) return _mm_castps_si128(_mm_load_ss(reinterpret_cast<const float*>(inV))); #elif defined(JPH_USE_NEON) return vsetq_lane_u32(*inV, vdupq_n_u32(0), 0); #else return UVec4(*inV, 0, 0, 0); #endif } UVec4 UVec4::sLoadInt4(const uint32 *inV) { #if defined(JPH_USE_SSE) return _mm_loadu_si128(reinterpret_cast<const __m128i *>(inV)); #elif defined(JPH_USE_NEON) return vld1q_u32(inV); #else return UVec4(inV[0], inV[1], inV[2], inV[3]); #endif } UVec4 UVec4::sLoadInt4Aligned(const uint32 *inV) { #if defined(JPH_USE_SSE) return _mm_load_si128(reinterpret_cast<const __m128i *>(inV)); #elif defined(JPH_USE_NEON) return vld1q_u32(inV); // ARM doesn't make distinction between aligned or not #else return UVec4(inV[0], inV[1], inV[2], inV[3]); #endif } template <const int Scale> UVec4 UVec4::sGatherInt4(const uint32 *inBase, UVec4Arg inOffsets) { #ifdef JPH_USE_AVX2 return _mm_i32gather_epi32(reinterpret_cast<const int *>(inBase), inOffsets.mValue, Scale); #else return Vec4::sGatherFloat4<Scale>(reinterpret_cast<const float *>(inBase), inOffsets).ReinterpretAsInt(); #endif } UVec4 UVec4::sMin(UVec4Arg inV1, UVec4Arg inV2) { #if defined(JPH_USE_SSE4_1) return _mm_min_epu32(inV1.mValue, inV2.mValue); #elif defined(JPH_USE_NEON) return vminq_u32(inV1.mValue, inV2.mValue); #else UVec4 result; for (int i = 0; i < 4; i++) result.mU32[i] = min(inV1.mU32[i], inV2.mU32[i]); return result; #endif } UVec4 UVec4::sMax(UVec4Arg inV1, UVec4Arg inV2) { #if defined(JPH_USE_SSE4_1) return _mm_max_epu32(inV1.mValue, inV2.mValue); #elif defined(JPH_USE_NEON) return vmaxq_u32(inV1.mValue, inV2.mValue); #else UVec4 result; for (int i = 0; i < 4; i++) result.mU32[i] = max(inV1.mU32[i], 
inV2.mU32[i]); return result; #endif } UVec4 UVec4::sEquals(UVec4Arg inV1, UVec4Arg inV2) { #if defined(JPH_USE_SSE) return _mm_cmpeq_epi32(inV1.mValue, inV2.mValue); #elif defined(JPH_USE_NEON) return vceqq_u32(inV1.mValue, inV2.mValue); #else return UVec4(inV1.mU32[0] == inV2.mU32[0]? 0xffffffffu : 0, inV1.mU32[1] == inV2.mU32[1]? 0xffffffffu : 0, inV1.mU32[2] == inV2.mU32[2]? 0xffffffffu : 0, inV1.mU32[3] == inV2.mU32[3]? 0xffffffffu : 0); #endif } UVec4 UVec4::sSelect(UVec4Arg inV1, UVec4Arg inV2, UVec4Arg inControl) { #if defined(JPH_USE_SSE4_1) return _mm_castps_si128(_mm_blendv_ps(_mm_castsi128_ps(inV1.mValue), _mm_castsi128_ps(inV2.mValue), _mm_castsi128_ps(inControl.mValue))); #elif defined(JPH_USE_NEON) return vbslq_u32(vshrq_n_s32(inControl.mValue, 31), inV2.mValue, inV1.mValue); #else UVec4 result; for (int i = 0; i < 4; i++) result.mU32[i] = inControl.mU32[i] ? inV2.mU32[i] : inV1.mU32[i]; return result; #endif } UVec4 UVec4::sOr(UVec4Arg inV1, UVec4Arg inV2) { #if defined(JPH_USE_SSE) return _mm_or_si128(inV1.mValue, inV2.mValue); #elif defined(JPH_USE_NEON) return vorrq_u32(inV1.mValue, inV2.mValue); #else return UVec4(inV1.mU32[0] | inV2.mU32[0], inV1.mU32[1] | inV2.mU32[1], inV1.mU32[2] | inV2.mU32[2], inV1.mU32[3] | inV2.mU32[3]); #endif } UVec4 UVec4::sXor(UVec4Arg inV1, UVec4Arg inV2) { #if defined(JPH_USE_SSE) return _mm_xor_si128(inV1.mValue, inV2.mValue); #elif defined(JPH_USE_NEON) return veorq_u32(inV1.mValue, inV2.mValue); #else return UVec4(inV1.mU32[0] ^ inV2.mU32[0], inV1.mU32[1] ^ inV2.mU32[1], inV1.mU32[2] ^ inV2.mU32[2], inV1.mU32[3] ^ inV2.mU32[3]); #endif } UVec4 UVec4::sAnd(UVec4Arg inV1, UVec4Arg inV2) { #if defined(JPH_USE_SSE) return _mm_and_si128(inV1.mValue, inV2.mValue); #elif defined(JPH_USE_NEON) return vandq_u32(inV1.mValue, inV2.mValue); #else return UVec4(inV1.mU32[0] & inV2.mU32[0], inV1.mU32[1] & inV2.mU32[1], inV1.mU32[2] & inV2.mU32[2], inV1.mU32[3] & inV2.mU32[3]); #endif } UVec4 UVec4::sNot(UVec4Arg inV1) { #if defined(JPH_USE_AVX512) return _mm_ternarylogic_epi32(inV1.mValue, inV1.mValue, inV1.mValue, 0b01010101); #elif defined(JPH_USE_SSE) return sXor(inV1, sReplicate(0xffffffff)); #elif defined(JPH_USE_NEON) return vmvnq_u32(inV1.mValue); #else return UVec4(~inV1.mU32[0], ~inV1.mU32[1], ~inV1.mU32[2], ~inV1.mU32[3]); #endif } UVec4 UVec4::sSort4True(UVec4Arg inValue, UVec4Arg inIndex) { // If inValue.z is false then shift W to Z UVec4 v = UVec4::sSelect(inIndex.Swizzle<SWIZZLE_X, SWIZZLE_Y, SWIZZLE_W, SWIZZLE_W>(), inIndex, inValue.SplatZ()); // If inValue.y is false then shift Z and further to Y and further v = UVec4::sSelect(v.Swizzle<SWIZZLE_X, SWIZZLE_Z, SWIZZLE_W, SWIZZLE_W>(), v, inValue.SplatY()); // If inValue.x is false then shift Y and further to X and further v = UVec4::sSelect(v.Swizzle<SWIZZLE_Y, SWIZZLE_Z, SWIZZLE_W, SWIZZLE_W>(), v, inValue.SplatX()); return v; } UVec4 UVec4::operator * (UVec4Arg inV2) const { #if defined(JPH_USE_SSE4_1) return _mm_mullo_epi32(mValue, inV2.mValue); #elif defined(JPH_USE_NEON) return vmulq_u32(mValue, inV2.mValue); #else UVec4 result; for (int i = 0; i < 4; i++) result.mU32[i] = mU32[i] * inV2.mU32[i]; return result; #endif } UVec4 UVec4::operator + (UVec4Arg inV2) { #if defined(JPH_USE_SSE) return _mm_add_epi32(mValue, inV2.mValue); #elif defined(JPH_USE_NEON) return vaddq_u32(mValue, inV2.mValue); #else return UVec4(mU32[0] + inV2.mU32[0], mU32[1] + inV2.mU32[1], mU32[2] + inV2.mU32[2], mU32[3] + inV2.mU32[3]); #endif } UVec4 &UVec4::operator += (UVec4Arg inV2) { #if
defined(JPH_USE_SSE) mValue = _mm_add_epi32(mValue, inV2.mValue); #elif defined(JPH_USE_NEON) mValue = vaddq_u32(mValue, inV2.mValue); #else for (int i = 0; i < 4; ++i) mU32[i] += inV2.mU32[i]; #endif return *this; } UVec4 UVec4::SplatX() const { #if defined(JPH_USE_SSE) return _mm_shuffle_epi32(mValue, _MM_SHUFFLE(0, 0, 0, 0)); #elif defined(JPH_USE_NEON) return vdupq_laneq_u32(mValue, 0); #else return UVec4(mU32[0], mU32[0], mU32[0], mU32[0]); #endif } UVec4 UVec4::SplatY() const { #if defined(JPH_USE_SSE) return _mm_shuffle_epi32(mValue, _MM_SHUFFLE(1, 1, 1, 1)); #elif defined(JPH_USE_NEON) return vdupq_laneq_u32(mValue, 1); #else return UVec4(mU32[1], mU32[1], mU32[1], mU32[1]); #endif } UVec4 UVec4::SplatZ() const { #if defined(JPH_USE_SSE) return _mm_shuffle_epi32(mValue, _MM_SHUFFLE(2, 2, 2, 2)); #elif defined(JPH_USE_NEON) return vdupq_laneq_u32(mValue, 2); #else return UVec4(mU32[2], mU32[2], mU32[2], mU32[2]); #endif } UVec4 UVec4::SplatW() const { #if defined(JPH_USE_SSE) return _mm_shuffle_epi32(mValue, _MM_SHUFFLE(3, 3, 3, 3)); #elif defined(JPH_USE_NEON) return vdupq_laneq_u32(mValue, 3); #else return UVec4(mU32[3], mU32[3], mU32[3], mU32[3]); #endif } Vec4 UVec4::ToFloat() const { #if defined(JPH_USE_SSE) return _mm_cvtepi32_ps(mValue); #elif defined(JPH_USE_NEON) return vcvtq_f32_s32(mValue); #else return Vec4((float)mU32[0], (float)mU32[1], (float)mU32[2], (float)mU32[3]); #endif } Vec4 UVec4::ReinterpretAsFloat() const { #if defined(JPH_USE_SSE) return Vec4(_mm_castsi128_ps(mValue)); #elif defined(JPH_USE_NEON) return vreinterpretq_f32_s32(mValue); #else return *reinterpret_cast<const Vec4 *>(this); #endif } void UVec4::StoreInt4(uint32 *outV) const { #if defined(JPH_USE_SSE) _mm_storeu_si128(reinterpret_cast<__m128i *>(outV), mValue); #elif defined(JPH_USE_NEON) vst1q_u32(outV, mValue); #else for (int i = 0; i < 4; ++i) outV[i] = mU32[i]; #endif } void UVec4::StoreInt4Aligned(uint32 *outV) const { #if defined(JPH_USE_SSE) _mm_store_si128(reinterpret_cast<__m128i *>(outV), mValue); #elif defined(JPH_USE_NEON) vst1q_u32(outV, mValue); // ARM doesn't make distinction between aligned or not #else for (int i = 0; i < 4; ++i) outV[i] = mU32[i]; #endif } int UVec4::CountTrues() const { #if defined(JPH_USE_SSE) return CountBits(_mm_movemask_ps(_mm_castsi128_ps(mValue))); #elif defined(JPH_USE_NEON) return vaddvq_u32(vshrq_n_u32(mValue, 31)); #else return (mU32[0] >> 31) + (mU32[1] >> 31) + (mU32[2] >> 31) + (mU32[3] >> 31); #endif } int UVec4::GetTrues() const { #if defined(JPH_USE_SSE) return _mm_movemask_ps(_mm_castsi128_ps(mValue)); #elif defined(JPH_USE_NEON) int32x4_t shift = JPH_NEON_INT32x4(0, 1, 2, 3); return vaddvq_u32(vshlq_u32(vshrq_n_u32(mValue, 31), shift)); #else return (mU32[0] >> 31) | ((mU32[1] >> 31) << 1) | ((mU32[2] >> 31) << 2) | ((mU32[3] >> 31) << 3); #endif } bool UVec4::TestAnyTrue() const { return GetTrues() != 0; } bool UVec4::TestAnyXYZTrue() const { return (GetTrues() & 0b111) != 0; } bool UVec4::TestAllTrue() const { return GetTrues() == 0b1111; } bool UVec4::TestAllXYZTrue() const { return (GetTrues() & 0b111) == 0b111; } template <const uint Count> UVec4 UVec4::LogicalShiftLeft() const { static_assert(Count <= 31, "Invalid shift"); #if defined(JPH_USE_SSE) return _mm_slli_epi32(mValue, Count); #elif defined(JPH_USE_NEON) return vshlq_n_u32(mValue, Count); #else return UVec4(mU32[0] << Count, mU32[1] << Count, mU32[2] << Count, mU32[3] << Count); #endif } template <const uint Count> UVec4 UVec4::LogicalShiftRight() const { static_assert(Count <= 
31, "Invalid shift"); #if defined(JPH_USE_SSE) return _mm_srli_epi32(mValue, Count); #elif defined(JPH_USE_NEON) return vshrq_n_u32(mValue, Count); #else return UVec4(mU32[0] >> Count, mU32[1] >> Count, mU32[2] >> Count, mU32[3] >> Count); #endif } template <const uint Count> UVec4 UVec4::ArithmeticShiftRight() const { static_assert(Count <= 31, "Invalid shift"); #if defined(JPH_USE_SSE) return _mm_srai_epi32(mValue, Count); #elif defined(JPH_USE_NEON) return vshrq_n_s32(mValue, Count); #else return UVec4(uint32(int32_t(mU32[0]) >> Count), uint32(int32_t(mU32[1]) >> Count), uint32(int32_t(mU32[2]) >> Count), uint32(int32_t(mU32[3]) >> Count)); #endif } UVec4 UVec4::Expand4Uint16Lo() const { #if defined(JPH_USE_SSE) return _mm_unpacklo_epi16(mValue, _mm_castps_si128(_mm_setzero_ps())); #elif defined(JPH_USE_NEON) int16x4_t value = vget_low_s16(mValue); int16x4_t zero = vdup_n_s16(0); return vcombine_s16(vzip1_s16(value, zero), vzip2_s16(value, zero)); #else return UVec4(mU32[0] & 0xffff, (mU32[0] >> 16) & 0xffff, mU32[1] & 0xffff, (mU32[1] >> 16) & 0xffff); #endif } UVec4 UVec4::Expand4Uint16Hi() const { #if defined(JPH_USE_SSE) return _mm_unpackhi_epi16(mValue, _mm_castps_si128(_mm_setzero_ps())); #elif defined(JPH_USE_NEON) int16x4_t value = vget_high_s16(mValue); int16x4_t zero = vdup_n_s16(0); return vcombine_s16(vzip1_s16(value, zero), vzip2_s16(value, zero)); #else return UVec4(mU32[2] & 0xffff, (mU32[2] >> 16) & 0xffff, mU32[3] & 0xffff, (mU32[3] >> 16) & 0xffff); #endif } UVec4 UVec4::Expand4Byte0() const { #if defined(JPH_USE_SSE4_1) return _mm_shuffle_epi8(mValue, _mm_set_epi32(int(0xffffff03), int(0xffffff02), int(0xffffff01), int(0xffffff00))); #elif defined(JPH_USE_NEON) int8x16_t idx = JPH_NEON_INT8x16(0x00, 0x7f, 0x7f, 0x7f, 0x01, 0x7f, 0x7f, 0x7f, 0x02, 0x7f, 0x7f, 0x7f, 0x03, 0x7f, 0x7f, 0x7f); return vreinterpretq_u32_s8(vqtbl1q_s8(vreinterpretq_s8_u32(mValue), idx)); #else UVec4 result; for (int i = 0; i < 4; i++) result.mU32[i] = (mU32[0] >> (i * 8)) & 0xff; return result; #endif } UVec4 UVec4::Expand4Byte4() const { #if defined(JPH_USE_SSE4_1) return _mm_shuffle_epi8(mValue, _mm_set_epi32(int(0xffffff07), int(0xffffff06), int(0xffffff05), int(0xffffff04))); #elif defined(JPH_USE_NEON) int8x16_t idx = JPH_NEON_INT8x16(0x04, 0x7f, 0x7f, 0x7f, 0x05, 0x7f, 0x7f, 0x7f, 0x06, 0x7f, 0x7f, 0x7f, 0x07, 0x7f, 0x7f, 0x7f); return vreinterpretq_u32_s8(vqtbl1q_s8(vreinterpretq_s8_u32(mValue), idx)); #else UVec4 result; for (int i = 0; i < 4; i++) result.mU32[i] = (mU32[1] >> (i * 8)) & 0xff; return result; #endif } UVec4 UVec4::Expand4Byte8() const { #if defined(JPH_USE_SSE4_1) return _mm_shuffle_epi8(mValue, _mm_set_epi32(int(0xffffff0b), int(0xffffff0a), int(0xffffff09), int(0xffffff08))); #elif defined(JPH_USE_NEON) int8x16_t idx = JPH_NEON_INT8x16(0x08, 0x7f, 0x7f, 0x7f, 0x09, 0x7f, 0x7f, 0x7f, 0x0a, 0x7f, 0x7f, 0x7f, 0x0b, 0x7f, 0x7f, 0x7f); return vreinterpretq_u32_s8(vqtbl1q_s8(vreinterpretq_s8_u32(mValue), idx)); #else UVec4 result; for (int i = 0; i < 4; i++) result.mU32[i] = (mU32[2] >> (i * 8)) & 0xff; return result; #endif } UVec4 UVec4::Expand4Byte12() const { #if defined(JPH_USE_SSE4_1) return _mm_shuffle_epi8(mValue, _mm_set_epi32(int(0xffffff0f), int(0xffffff0e), int(0xffffff0d), int(0xffffff0c))); #elif defined(JPH_USE_NEON) int8x16_t idx = JPH_NEON_INT8x16(0x0c, 0x7f, 0x7f, 0x7f, 0x0d, 0x7f, 0x7f, 0x7f, 0x0e, 0x7f, 0x7f, 0x7f, 0x0f, 0x7f, 0x7f, 0x7f); return vreinterpretq_u32_s8(vqtbl1q_s8(vreinterpretq_s8_u32(mValue), idx)); #else UVec4 result; for (int i = 0; i < 
4; i++) result.mU32[i] = (mU32[3] >> (i * 8)) & 0xff; return result; #endif } UVec4 UVec4::ShiftComponents4Minus(int inCount) const { #if defined(JPH_USE_SSE4_1) return _mm_shuffle_epi8(mValue, sFourMinusXShuffle[inCount].mValue); #elif defined(JPH_USE_NEON) uint8x16_t idx = vreinterpretq_u8_u32(sFourMinusXShuffle[inCount].mValue); return vreinterpretq_u32_s8(vqtbl1q_s8(vreinterpretq_s8_u32(mValue), idx)); #else UVec4 result = UVec4::sZero(); for (int i = 0; i < inCount; i++) result.mU32[i] = mU32[i + 4 - inCount]; return result; #endif } JPH_NAMESPACE_END
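// --- Editor's note: a minimal sketch of the comparison-mask / branchless-select
// idiom the UVec4 code above implements, not part of the library source. mU32
// is the public component array the scalar fallbacks above already use; main()
// and the values are illustrative only.
#include <Jolt/Jolt.h>
#include <Jolt/Math/UVec4.h>

#include <cstdio>

int main()
{
	JPH::UVec4 a(1, 2, 3, 4);
	JPH::UVec4 b(1, 0, 3, 0);

	// Per-lane comparison: each lane becomes 0xffffffff (true) or 0 (false)
	JPH::UVec4 mask = JPH::UVec4::sEquals(a, b);

	// Branchless select: take inV2 where the control mask is set, else inV1
	JPH::UVec4 picked = JPH::UVec4::sSelect(JPH::UVec4::sReplicate(0), JPH::UVec4::sReplicate(7), mask);

	// Lanes 0 and 2 match, so CountTrues() == 2 and GetTrues() == 0b0101
	std::printf("count = %d, bits = 0x%x, picked[0] = %u\n",
		mask.CountTrues(), mask.GetTrues(), picked.mU32[0]);
	return 0;
}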
0
repos/c2z/use_cases/JoltPhysics/include
repos/c2z/use_cases/JoltPhysics/include/Math/Mat44.h
// Jolt Physics Library (https://github.com/jrouwe/JoltPhysics) // SPDX-FileCopyrightText: 2021 Jorrit Rouwe // SPDX-License-Identifier: MIT #pragma once #include <Jolt/Math/MathTypes.h> JPH_NAMESPACE_BEGIN /// Holds a 4x4 matrix of floats, but also supports operations on the 3x3 upper left part of the matrix. class [[nodiscard]] alignas(JPH_VECTOR_ALIGNMENT) Mat44 { public: JPH_OVERRIDE_NEW_DELETE // Underlying column type using Type = Vec4::Type; // Argument type using ArgType = Mat44Arg; /// Constructor Mat44() = default; ///< Intentionally not initialized for performance reasons JPH_INLINE Mat44(Vec4Arg inC1, Vec4Arg inC2, Vec4Arg inC3, Vec4Arg inC4); JPH_INLINE Mat44(Vec4Arg inC1, Vec4Arg inC2, Vec4Arg inC3, Vec3Arg inC4); Mat44(const Mat44 &inM2) = default; JPH_INLINE Mat44(Type inC1, Type inC2, Type inC3, Type inC4); /// Zero matrix static JPH_INLINE Mat44 sZero(); /// Identity matrix static JPH_INLINE Mat44 sIdentity(); /// Matrix filled with NaN's static JPH_INLINE Mat44 sNaN(); /// Load 16 floats from memory static JPH_INLINE Mat44 sLoadFloat4x4(const Float4 *inV); /// Load 16 floats from memory, 16 bytes aligned static JPH_INLINE Mat44 sLoadFloat4x4Aligned(const Float4 *inV); /// Rotate around X, Y or Z axis (angle in radians) static JPH_INLINE Mat44 sRotationX(float inX); static JPH_INLINE Mat44 sRotationY(float inY); static JPH_INLINE Mat44 sRotationZ(float inZ); /// Rotate around arbitrary axis static JPH_INLINE Mat44 sRotation(Vec3Arg inAxis, float inAngle); /// Rotate from quaternion static JPH_INLINE Mat44 sRotation(QuatArg inQuat); /// Get matrix that translates static JPH_INLINE Mat44 sTranslation(Vec3Arg inV); /// Get matrix that rotates and translates static JPH_INLINE Mat44 sRotationTranslation(QuatArg inR, Vec3Arg inT); /// Get inverse matrix of sRotationTranslation static JPH_INLINE Mat44 sInverseRotationTranslation(QuatArg inR, Vec3Arg inT); /// Get matrix that scales uniformly static JPH_INLINE Mat44 sScale(float inScale); /// Get matrix that scales (produces a matrix with (inV, 1) on its diagonal) static JPH_INLINE Mat44 sScale(Vec3Arg inV); /// Get outer product of inV1 and inV2 (equivalent to \f$inV1 \otimes inV2\f$) static JPH_INLINE Mat44 sOuterProduct(Vec3Arg inV1, Vec3Arg inV2); /// Get matrix that represents a cross product \f$A \times B = \text{sCrossProduct}(A) \: B\f$ static JPH_INLINE Mat44 sCrossProduct(Vec3Arg inV); /// Returns matrix ML so that \f$ML(q) \: p = q \: p\f$ (where p and q are quaternions) static JPH_INLINE Mat44 sQuatLeftMultiply(QuatArg inQ); /// Returns matrix MR so that \f$MR(q) \: p = p \: q\f$ (where p and q are quaternions) static JPH_INLINE Mat44 sQuatRightMultiply(QuatArg inQ); /// Returns a look-at matrix that transforms from world space to view space /// @param inPos Position of the camera /// @param inTarget Target of the camera /// @param inUp Up vector static JPH_INLINE Mat44 sLookAt(Vec3Arg inPos, Vec3Arg inTarget, Vec3Arg inUp); /// Get float component by element index JPH_INLINE float operator () (uint inRow, uint inColumn) const { JPH_ASSERT(inRow < 4); JPH_ASSERT(inColumn < 4); return mCol[inColumn].mF32[inRow]; } JPH_INLINE float & operator () (uint inRow, uint inColumn) { JPH_ASSERT(inRow < 4); JPH_ASSERT(inColumn < 4); return mCol[inColumn].mF32[inRow]; } /// Comparison JPH_INLINE bool operator == (Mat44Arg inM2) const; JPH_INLINE bool operator != (Mat44Arg inM2) const { return !(*this == inM2); } /// Test if two matrices are close JPH_INLINE bool IsClose(Mat44Arg inM2, float inMaxDistSq = 1.0e-12f) const; ///
Multiply matrix by matrix JPH_INLINE Mat44 operator * (Mat44Arg inM) const; /// Multiply vector by matrix JPH_INLINE Vec3 operator * (Vec3Arg inV) const; JPH_INLINE Vec4 operator * (Vec4Arg inV) const; /// Multiply vector by only 3x3 part of the matrix JPH_INLINE Vec3 Multiply3x3(Vec3Arg inV) const; /// Multiply vector by only 3x3 part of the transpose of the matrix (\f$result = this^T \: inV\f$) JPH_INLINE Vec3 Multiply3x3Transposed(Vec3Arg inV) const; /// Multiply 3x3 matrix by 3x3 matrix JPH_INLINE Mat44 Multiply3x3(Mat44Arg inM) const; /// Multiply transpose of 3x3 matrix by 3x3 matrix (\f$result = this^T \: inM\f$) JPH_INLINE Mat44 Multiply3x3LeftTransposed(Mat44Arg inM) const; /// Multiply 3x3 matrix by the transpose of a 3x3 matrix (\f$result = this \: inM^T\f$) JPH_INLINE Mat44 Multiply3x3RightTransposed(Mat44Arg inM) const; /// Multiply matrix with float JPH_INLINE Mat44 operator * (float inV) const; friend JPH_INLINE Mat44 operator * (float inV, Mat44Arg inM) { return inM * inV; } /// Multiply matrix with float JPH_INLINE Mat44 & operator *= (float inV); /// Per element addition of matrix JPH_INLINE Mat44 operator + (Mat44Arg inM) const; /// Negate JPH_INLINE Mat44 operator - () const; /// Per element subtraction of matrix JPH_INLINE Mat44 operator - (Mat44Arg inM) const; /// Per element addition of matrix JPH_INLINE Mat44 & operator += (Mat44Arg inM); /// Access to the columns JPH_INLINE Vec3 GetAxisX() const { return Vec3(mCol[0]); } JPH_INLINE void SetAxisX(Vec3Arg inV) { mCol[0] = Vec4(inV, 0.0f); } JPH_INLINE Vec3 GetAxisY() const { return Vec3(mCol[1]); } JPH_INLINE void SetAxisY(Vec3Arg inV) { mCol[1] = Vec4(inV, 0.0f); } JPH_INLINE Vec3 GetAxisZ() const { return Vec3(mCol[2]); } JPH_INLINE void SetAxisZ(Vec3Arg inV) { mCol[2] = Vec4(inV, 0.0f); } JPH_INLINE Vec3 GetTranslation() const { return Vec3(mCol[3]); } JPH_INLINE void SetTranslation(Vec3Arg inV) { mCol[3] = Vec4(inV, 1.0f); } JPH_INLINE Vec3 GetDiagonal3() const { return Vec3(mCol[0][0], mCol[1][1], mCol[2][2]); } JPH_INLINE void SetDiagonal3(Vec3Arg inV) { mCol[0][0] = inV.GetX(); mCol[1][1] = inV.GetY(); mCol[2][2] = inV.GetZ(); } JPH_INLINE Vec4 GetDiagonal4() const { return Vec4(mCol[0][0], mCol[1][1], mCol[2][2], mCol[3][3]); } JPH_INLINE void SetDiagonal4(Vec4Arg inV) { mCol[0][0] = inV.GetX(); mCol[1][1] = inV.GetY(); mCol[2][2] = inV.GetZ(); mCol[3][3] = inV.GetW(); } JPH_INLINE Vec3 GetColumn3(uint inCol) const { JPH_ASSERT(inCol < 4); return Vec3(mCol[inCol]); } JPH_INLINE void SetColumn3(uint inCol, Vec3Arg inV) { JPH_ASSERT(inCol < 4); mCol[inCol] = Vec4(inV, inCol == 3? 
1.0f : 0.0f); } JPH_INLINE Vec4 GetColumn4(uint inCol) const { JPH_ASSERT(inCol < 4); return mCol[inCol]; } JPH_INLINE void SetColumn4(uint inCol, Vec4Arg inV) { JPH_ASSERT(inCol < 4); mCol[inCol] = inV; } /// Store matrix to memory JPH_INLINE void StoreFloat4x4(Float4 *outV) const; /// Transpose matrix JPH_INLINE Mat44 Transposed() const; /// Transpose 3x3 subpart of matrix JPH_INLINE Mat44 Transposed3x3() const; /// Inverse 4x4 matrix JPH_INLINE Mat44 Inversed() const; /// Inverse 4x4 matrix when it only contains rotation and translation JPH_INLINE Mat44 InversedRotationTranslation() const; /// Get the determinant of a 3x3 matrix JPH_INLINE float GetDeterminant3x3() const; /// Get the adjoint of a 3x3 matrix JPH_INLINE Mat44 Adjointed3x3() const; /// Inverse 3x3 matrix JPH_INLINE Mat44 Inversed3x3() const; /// Get rotation part only (note: retains the first 3 values from the bottom row) JPH_INLINE Mat44 GetRotation() const; /// Get rotation part only (note: also clears the bottom row) JPH_INLINE Mat44 GetRotationSafe() const; /// Updates the rotation part of this matrix (the first 3 columns) JPH_INLINE void SetRotation(Mat44Arg inRotation); /// Convert to quaternion JPH_INLINE Quat GetQuaternion() const; /// Get matrix that transforms a direction with the same transform as this matrix (length is not preserved) JPH_INLINE Mat44 GetDirectionPreservingMatrix() const { return GetRotation().Inversed3x3().Transposed3x3(); } /// Pre multiply by translation matrix: result = this * Mat44::sTranslation(inTranslation) JPH_INLINE Mat44 PreTranslated(Vec3Arg inTranslation) const; /// Post multiply by translation matrix: result = Mat44::sTranslation(inTranslation) * this (i.e. add inTranslation to the 4-th column) JPH_INLINE Mat44 PostTranslated(Vec3Arg inTranslation) const; /// Scale a matrix: result = this * Mat44::sScale(inScale) JPH_INLINE Mat44 PreScaled(Vec3Arg inScale) const; /// Scale a matrix: result = Mat44::sScale(inScale) * this JPH_INLINE Mat44 PostScaled(Vec3Arg inScale) const; /// Decompose a matrix into a rotation & translation part and into a scale part so that: /// this = return_value * Mat44::sScale(outScale). /// This equation only holds when the matrix is orthogonal, if it is not the returned matrix /// will be made orthogonal using the modified Gram-Schmidt algorithm (see: https://en.wikipedia.org/wiki/Gram%E2%80%93Schmidt_process) JPH_INLINE Mat44 Decompose(Vec3 &outScale) const; #ifndef JPH_DOUBLE_PRECISION /// In single precision mode just return the matrix itself JPH_INLINE Mat44 ToMat44() const { return *this; } #endif // !JPH_DOUBLE_PRECISION /// To String friend ostream & operator << (ostream &inStream, Mat44Arg inM) { inStream << inM.mCol[0] << ", " << inM.mCol[1] << ", " << inM.mCol[2] << ", " << inM.mCol[3]; return inStream; } private: Vec4 mCol[4]; ///< Column }; static_assert(is_trivial<Mat44>(), "Is supposed to be a trivial type!"); JPH_NAMESPACE_END #include "Mat44.inl"
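// --- Editor's note: a minimal sketch of the rotation/translation helpers
// declared above, not part of the library source. It assumes <Jolt/Jolt.h>
// must come first and that including <Jolt/Math/Quat.h> pulls in the Vec3 and
// Quat types used here; main() and the values are illustrative only.
#include <Jolt/Jolt.h>
#include <Jolt/Math/Mat44.h>
#include <Jolt/Math/Quat.h>

#include <cstdio>

int main()
{
	using namespace JPH;

	// A rigid-body transform: quarter turn around Y followed by a translation
	Mat44 m = Mat44::sRotationTranslation(Quat::sRotation(Vec3(0.0f, 1.0f, 0.0f), 0.5f * JPH_PI), Vec3(1.0f, 2.0f, 3.0f));

	// For a pure rotation + translation this is cheaper than the general Inversed()
	Mat44 inv = m.InversedRotationTranslation();

	// Round-tripping a point through m and its inverse recovers the original
	Vec3 p = inv * (m * Vec3(4.0f, 5.0f, 6.0f));
	std::printf("p = (%g, %g, %g)\n", p.GetX(), p.GetY(), p.GetZ());
	return 0;
}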
0
repos/c2z/use_cases/JoltPhysics/include
repos/c2z/use_cases/JoltPhysics/include/Math/Quat.h
// Jolt Physics Library (https://github.com/jrouwe/JoltPhysics) // SPDX-FileCopyrightText: 2021 Jorrit Rouwe // SPDX-License-Identifier: MIT #pragma once #include <Jolt/Math/Vec3.h> #include <Jolt/Math/Vec4.h> JPH_NAMESPACE_BEGIN /// Quaternion class, quaternions are 4 dimensional vectors which can describe rotations in 3 dimensional /// space if their length is 1. /// /// They are written as: /// /// \f$q = w + x \: i + y \: j + z \: k\f$ /// /// or in vector notation: /// /// \f$q = [w, v] = [w, x, y, z]\f$ /// /// Where: /// /// w = the real part /// v = the imaginary part, (x, y, z) /// /// Note that we store the quaternion in a Vec4 as [x, y, z, w] because that makes /// it easy to extract the rotation axis of the quaternion: /// /// q = [cos(angle / 2), sin(angle / 2) * rotation_axis] class [[nodiscard]] alignas(JPH_VECTOR_ALIGNMENT) Quat { public: JPH_OVERRIDE_NEW_DELETE ///@name Constructors ///@{ inline Quat() = default; ///< Intentionally not initialized for performance reasons Quat(const Quat &inRHS) = default; inline Quat(float inX, float inY, float inZ, float inW) : mValue(inX, inY, inZ, inW) { } inline explicit Quat(Vec4Arg inV) : mValue(inV) { } ///@} ///@name Tests ///@{ /// Check if two quaternions are exactly equal inline bool operator == (QuatArg inRHS) const { return mValue == inRHS.mValue; } /// Check if two quaternions are different inline bool operator != (QuatArg inRHS) const { return mValue != inRHS.mValue; } /// If this quaternion is close to inRHS. Note that q and -q represent the same rotation, this is not checked here. inline bool IsClose(QuatArg inRHS, float inMaxDistSq = 1.0e-12f) const { return mValue.IsClose(inRHS.mValue, inMaxDistSq); } /// If the length of this quaternion is 1 +/- inTolerance inline bool IsNormalized(float inTolerance = 1.0e-5f) const { return mValue.IsNormalized(inTolerance); } /// If any component of this quaternion is a NaN (not a number) inline bool IsNaN() const { return mValue.IsNaN(); } ///@} ///@name Get components ///@{ /// Get X component (imaginary part i) JPH_INLINE float GetX() const { return mValue.GetX(); } /// Get Y component (imaginary part j) JPH_INLINE float GetY() const { return mValue.GetY(); } /// Get Z component (imaginary part k) JPH_INLINE float GetZ() const { return mValue.GetZ(); } /// Get W component (real part) JPH_INLINE float GetW() const { return mValue.GetW(); } /// Get the imaginary part of the quaternion JPH_INLINE Vec3 GetXYZ() const { return Vec3(mValue); } /// Get the quaternion as a Vec4 JPH_INLINE Vec4 GetXYZW() const { return mValue; } ///@} ///@name Default quaternions ///@{ /// @return [0, 0, 0, 0] JPH_INLINE static Quat sZero() { return Quat(Vec4::sZero()); } /// @return [1, 0, 0, 0] (or in storage format Quat(0, 0, 0, 1)) JPH_INLINE static Quat sIdentity() { return Quat(0, 0, 0, 1); } ///@} /// Rotation from axis and angle JPH_INLINE static Quat sRotation(Vec3Arg inAxis, float inAngle); /// Get axis and angle that represents this quaternion, outAngle will always be in the range \f$[0, \pi]\f$ JPH_INLINE void GetAxisAngle(Vec3 &outAxis, float &outAngle) const; /// Create quaternion that rotates a vector from the direction of inFrom to the direction of inTo along the shortest path /// @see https://www.euclideanspace.com/maths/algebra/vectors/angleBetween/index.htm JPH_INLINE static Quat sFromTo(Vec3Arg inFrom, Vec3Arg inTo); /// Random unit quaternion template <class Random> inline static Quat sRandom(Random &inRandom); /// Conversion from Euler angles inline static Quat sEulerAngles(Vec3Arg 
inAngles); /// Conversion to Euler angles inline Vec3 GetEulerAngles() const; ///@name Length / normalization operations ///@{ /// Squared length of quaternion. /// @return Squared length of quaternion (\f$|v|^2\f$) JPH_INLINE float LengthSq() const { return mValue.LengthSq(); } /// Length of quaternion. /// @return Length of quaternion (\f$|v|\f$) JPH_INLINE float Length() const { return mValue.Length(); } /// Normalize the quaternion (make it length 1) JPH_INLINE Quat Normalized() const { return Quat(mValue.Normalized()); } ///@} ///@name Additions / multiplications ///@{ JPH_INLINE void operator += (QuatArg inRHS) { mValue += inRHS.mValue; } JPH_INLINE void operator -= (QuatArg inRHS) { mValue -= inRHS.mValue; } JPH_INLINE void operator *= (float inValue) { mValue *= inValue; } JPH_INLINE void operator /= (float inValue) { mValue /= inValue; } JPH_INLINE Quat operator - () const { return Quat(-mValue); } JPH_INLINE Quat operator + (QuatArg inRHS) const { return Quat(mValue + inRHS.mValue); } JPH_INLINE Quat operator - (QuatArg inRHS) const { return Quat(mValue - inRHS.mValue); } JPH_INLINE Quat operator * (QuatArg inRHS) const; JPH_INLINE Quat operator * (float inValue) const { return Quat(mValue * inValue); } inline friend Quat operator * (float inValue, QuatArg inRHS) { return Quat(inRHS.mValue * inValue); } JPH_INLINE Quat operator / (float inValue) const { return Quat(mValue / inValue); } ///@} /// Rotate a vector by this quaternion JPH_INLINE Vec3 operator * (Vec3Arg inValue) const; /// Rotate a vector by the inverse of this quaternion JPH_INLINE Vec3 InverseRotate(Vec3Arg inValue) const; /// Rotate the vector (1, 0, 0) with this quaternion JPH_INLINE Vec3 RotateAxisX() const; /// Rotate the vector (0, 1, 0) with this quaternion JPH_INLINE Vec3 RotateAxisY() const; /// Rotate the vector (0, 0, 1) with this quaternion JPH_INLINE Vec3 RotateAxisZ() const; /// Dot product JPH_INLINE float Dot(QuatArg inRHS) const { return mValue.Dot(inRHS.mValue); } /// The conjugate [w, -x, -y, -z] is the same as the inverse for unit quaternions JPH_INLINE Quat Conjugated() const { return Quat(Vec4::sXor(mValue, UVec4(0x80000000, 0x80000000, 0x80000000, 0).ReinterpretAsFloat())); } /// Get inverse quaternion JPH_INLINE Quat Inversed() const { return Conjugated() / Length(); } /// Ensures that the W component is positive by negating the entire quaternion if it is not. This is useful when you want to store a quaternion as a 3 vector by discarding W and reconstructing it as sqrt(1 - x^2 - y^2 - z^2). JPH_INLINE Quat EnsureWPositive() const { return Quat(Vec4::sXor(mValue, Vec4::sAnd(mValue.SplatW(), UVec4::sReplicate(0x80000000).ReinterpretAsFloat()))); } /// Get a quaternion that is perpendicular to this quaternion JPH_INLINE Quat GetPerpendicular() const { return Quat(Vec4(1, -1, 1, -1) * mValue.Swizzle<SWIZZLE_Y, SWIZZLE_X, SWIZZLE_W, SWIZZLE_Z>()); } /// Get rotation angle around inAxis (uses Swing Twist Decomposition to get the twist quaternion and uses q(axis, angle) = [cos(angle / 2), axis * sin(angle / 2)]) JPH_INLINE float GetRotationAngle(Vec3Arg inAxis) const { return GetW() == 0.0f? JPH_PI : 2.0f * ATan(GetXYZ().Dot(inAxis) / GetW()); } /// Swing Twist Decomposition: any quaternion can be split up as: /// /// \f[q = q_{swing} \: q_{twist}\f] /// /// where \f$q_{twist}\f$ rotates only around axis v.
/// /// \f$q_{twist}\f$ is: /// /// \f[q_{twist} = \frac{[q_w, q_{ijk} \cdot v \: v]}{\left|[q_w, q_{ijk} \cdot v \: v]\right|}\f] /// /// where q_w is the real part of the quaternion and q_i the imaginary part (a 3 vector). /// /// The swing can then be calculated as: /// /// \f[q_{swing} = q \: q_{twist}^* \f] /// /// Where \f$q_{twist}^*\f$ = complex conjugate of \f$q_{twist}\f$ JPH_INLINE Quat GetTwist(Vec3Arg inAxis) const; /// Decomposes quaternion into swing and twist component: /// /// \f$q = q_{swing} \: q_{twist}\f$ /// /// where \f$q_{swing} \: \hat{x} = q_{twist} \: \hat{y} = q_{twist} \: \hat{z} = 0\f$ /// /// In other words: /// /// - \f$q_{twist}\f$ only rotates around the X-axis. /// - \f$q_{swing}\f$ only rotates around the Y and Z-axis. /// /// @see Gino van den Bergen - Rotational Joint Limits in Quaternion Space - GDC 2016 JPH_INLINE void GetSwingTwist(Quat &outSwing, Quat &outTwist) const; /// Linear interpolation between two quaternions (for small steps). /// @param inFraction is in the range [0, 1] /// @param inDestination The destination quaternion /// @return (1 - inFraction) * this + fraction * inDestination JPH_INLINE Quat LERP(QuatArg inDestination, float inFraction) const; /// Spherical linear interpolation between two quaternions. /// @param inFraction is in the range [0, 1] /// @param inDestination The destination quaternion /// @return When fraction is zero this quaternion is returned, when fraction is 1 inDestination is returned. /// When fraction is between 0 and 1 an interpolation along the shortest path is returned. JPH_INLINE Quat SLERP(QuatArg inDestination, float inFraction) const; /// Load 3 floats from memory (X, Y and Z component and then calculates W) reads 32 bits extra which it doesn't use static JPH_INLINE Quat sLoadFloat3Unsafe(const Float3 &inV); /// Store 3 as floats to memory (X, Y and Z component) JPH_INLINE void StoreFloat3(Float3 *outV) const; /// To String friend ostream & operator << (ostream &inStream, QuatArg inQ) { inStream << inQ.mValue; return inStream; } /// 4 vector that stores [x, y, z, w] parts of the quaternion Vec4 mValue; }; static_assert(is_trivial<Quat>(), "Is supposed to be a trivial type!"); JPH_NAMESPACE_END #include "Quat.inl"
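// --- Editor's note: a minimal sketch of the Quat API declared above, not part
// of the library source; main() and the values are illustrative only, and
// <Jolt/Jolt.h> is assumed to be required before any other Jolt header.
#include <Jolt/Jolt.h>
#include <Jolt/Math/Quat.h>

#include <cstdio>

int main()
{
	using namespace JPH;

	// A half turn around Z maps (1, 0, 0) to (-1, 0, 0)
	Quat q = Quat::sRotation(Vec3(0.0f, 0.0f, 1.0f), JPH_PI);
	Vec3 v = q * Vec3(1.0f, 0.0f, 0.0f);

	// InverseRotate applies the opposite rotation, recovering (1, 0, 0)
	Vec3 back = q.InverseRotate(v);

	// SLERP halfway from identity to q gives a quarter turn, so w = cos(pi/4)
	Quat h = Quat::sIdentity().SLERP(q, 0.5f);

	std::printf("v = (%g, %g, %g), back.x = %g, h.w = %g\n",
		v.GetX(), v.GetY(), v.GetZ(), back.GetX(), h.GetW());
	return 0;
}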
0
repos/c2z/use_cases/JoltPhysics/include
repos/c2z/use_cases/JoltPhysics/include/Math/DVec3.inl
// Jolt Physics Library (https://github.com/jrouwe/JoltPhysics) // SPDX-FileCopyrightText: 2021 Jorrit Rouwe // SPDX-License-Identifier: MIT #pragma once #include <Jolt/Core/HashCombine.h> // Create a std::hash for DVec3 JPH_MAKE_HASHABLE(JPH::DVec3, t.GetX(), t.GetY(), t.GetZ()) JPH_NAMESPACE_BEGIN DVec3::DVec3(Vec3Arg inRHS) { #if defined(JPH_USE_AVX) mValue = _mm256_cvtps_pd(inRHS.mValue); #elif defined(JPH_USE_SSE) mValue.mLow = _mm_cvtps_pd(inRHS.mValue); mValue.mHigh = _mm_cvtps_pd(_mm_shuffle_ps(inRHS.mValue, inRHS.mValue, _MM_SHUFFLE(2, 2, 2, 2))); #elif defined(JPH_USE_NEON) mValue.val[0] = vcvt_f64_f32(vget_low_f32(inRHS.mValue)); mValue.val[1] = vcvt_high_f64_f32(inRHS.mValue); #else mF64[0] = (double)inRHS.GetX(); mF64[1] = (double)inRHS.GetY(); mF64[2] = (double)inRHS.GetZ(); #ifdef JPH_FLOATING_POINT_EXCEPTIONS_ENABLED mF64[3] = mF64[2]; #endif #endif } DVec3::DVec3(Vec4Arg inRHS) : DVec3(Vec3(inRHS)) { } DVec3::DVec3(double inX, double inY, double inZ) { #if defined(JPH_USE_AVX) mValue = _mm256_set_pd(inZ, inZ, inY, inX); // Assure Z and W are the same #elif defined(JPH_USE_SSE) mValue.mLow = _mm_set_pd(inY, inX); mValue.mHigh = _mm_set_pd1(inZ); #elif defined(JPH_USE_NEON) mValue.val[0] = vcombine_f64(vcreate_f64(*reinterpret_cast<uint64 *>(&inX)), vcreate_f64(*reinterpret_cast<uint64 *>(&inY))); mValue.val[1] = vdupq_n_f64(inZ); #else mF64[0] = inX; mF64[1] = inY; mF64[2] = inZ; #ifdef JPH_FLOATING_POINT_EXCEPTIONS_ENABLED mF64[3] = mF64[2]; #endif #endif } DVec3::DVec3(const Double3 &inV) { #if defined(JPH_USE_AVX) Type x = _mm256_castpd128_pd256(_mm_load_sd(&inV.x)); Type y = _mm256_castpd128_pd256(_mm_load_sd(&inV.y)); Type z = _mm256_broadcast_sd(&inV.z); Type xy = _mm256_unpacklo_pd(x, y); mValue = _mm256_blend_pd(xy, z, 0b1100); // Assure Z and W are the same #elif defined(JPH_USE_SSE) mValue.mLow = _mm_load_pd(&inV.x); mValue.mHigh = _mm_set_pd1(inV.z); #elif defined(JPH_USE_NEON) mValue.val[0] = vld1q_f64(&inV.x); mValue.val[1] = vdupq_n_f64(inV.z); #else mF64[0] = inV.x; mF64[1] = inV.y; mF64[2] = inV.z; #ifdef JPH_FLOATING_POINT_EXCEPTIONS_ENABLED mF64[3] = mF64[2]; #endif #endif } void DVec3::CheckW() const { #ifdef JPH_FLOATING_POINT_EXCEPTIONS_ENABLED // Avoid asserts when both components are NaN JPH_ASSERT(reinterpret_cast<const uint64 *>(mF64)[2] == reinterpret_cast<const uint64 *>(mF64)[3]); #endif // JPH_FLOATING_POINT_EXCEPTIONS_ENABLED } /// Internal helper function that ensures that the Z component is replicated to the W component to prevent divisions by zero DVec3::Type DVec3::sFixW(TypeArg inValue) { #ifdef JPH_FLOATING_POINT_EXCEPTIONS_ENABLED #if defined(JPH_USE_AVX) return _mm256_shuffle_pd(inValue, inValue, 2); #elif defined(JPH_USE_SSE) Type value; value.mLow = inValue.mLow; value.mHigh = _mm_shuffle_pd(inValue.mHigh, inValue.mHigh, 0); return value; #elif defined(JPH_USE_NEON) Type value; value.val[0] = inValue.val[0]; value.val[1] = vdupq_laneq_f64(inValue.val[1], 0); return value; #else Type value; value.mData[0] = inValue.mData[0]; value.mData[1] = inValue.mData[1]; value.mData[2] = inValue.mData[2]; value.mData[3] = inValue.mData[2]; return value; #endif #else return inValue; #endif // JPH_FLOATING_POINT_EXCEPTIONS_ENABLED } DVec3 DVec3::sZero() { #if defined(JPH_USE_AVX) return _mm256_setzero_pd(); #elif defined(JPH_USE_SSE) __m128d zero = _mm_setzero_pd(); return DVec3({ zero, zero }); #elif defined(JPH_USE_NEON) float64x2_t zero = vdupq_n_f64(0.0); return DVec3({ zero, zero }); #else return DVec3(0, 0, 0); #endif } DVec3 
DVec3::sReplicate(double inV)
{
#if defined(JPH_USE_AVX)
    return _mm256_set1_pd(inV);
#elif defined(JPH_USE_SSE)
    __m128d value = _mm_set1_pd(inV);
    return DVec3({ value, value });
#elif defined(JPH_USE_NEON)
    float64x2_t value = vdupq_n_f64(inV);
    return DVec3({ value, value });
#else
    return DVec3(inV, inV, inV);
#endif
}

DVec3 DVec3::sNaN()
{
    return sReplicate(numeric_limits<double>::quiet_NaN());
}

DVec3 DVec3::sLoadDouble3Unsafe(const Double3 &inV)
{
#if defined(JPH_USE_AVX)
    Type v = _mm256_loadu_pd(&inV.x);
#elif defined(JPH_USE_SSE)
    Type v;
    v.mLow = _mm_loadu_pd(&inV.x);
    v.mHigh = _mm_set1_pd(inV.z);
#elif defined(JPH_USE_NEON)
    Type v = vld1q_f64_x2(&inV.x);
#else
    Type v = { inV.x, inV.y, inV.z };
#endif
    return sFixW(v);
}

void DVec3::StoreDouble3(Double3 *outV) const
{
    outV->x = mF64[0];
    outV->y = mF64[1];
    outV->z = mF64[2];
}

DVec3::operator Vec3() const
{
#if defined(JPH_USE_AVX)
    return _mm256_cvtpd_ps(mValue);
#elif defined(JPH_USE_SSE)
    __m128 low = _mm_cvtpd_ps(mValue.mLow);
    __m128 high = _mm_cvtpd_ps(mValue.mHigh);
    return _mm_shuffle_ps(low, high, _MM_SHUFFLE(1, 0, 1, 0));
#elif defined(JPH_USE_NEON)
    return vcvt_high_f32_f64(vcvtx_f32_f64(mValue.val[0]), mValue.val[1]);
#else
    return Vec3((float)GetX(), (float)GetY(), (float)GetZ());
#endif
}

DVec3 DVec3::sMin(DVec3Arg inV1, DVec3Arg inV2)
{
#if defined(JPH_USE_AVX)
    return _mm256_min_pd(inV1.mValue, inV2.mValue);
#elif defined(JPH_USE_SSE)
    return DVec3({ _mm_min_pd(inV1.mValue.mLow, inV2.mValue.mLow), _mm_min_pd(inV1.mValue.mHigh, inV2.mValue.mHigh) });
#elif defined(JPH_USE_NEON)
    return DVec3({ vminq_f64(inV1.mValue.val[0], inV2.mValue.val[0]), vminq_f64(inV1.mValue.val[1], inV2.mValue.val[1]) });
#else
    return DVec3(min(inV1.mF64[0], inV2.mF64[0]), min(inV1.mF64[1], inV2.mF64[1]), min(inV1.mF64[2], inV2.mF64[2]));
#endif
}

DVec3 DVec3::sMax(DVec3Arg inV1, DVec3Arg inV2)
{
#if defined(JPH_USE_AVX)
    return _mm256_max_pd(inV1.mValue, inV2.mValue);
#elif defined(JPH_USE_SSE)
    return DVec3({ _mm_max_pd(inV1.mValue.mLow, inV2.mValue.mLow), _mm_max_pd(inV1.mValue.mHigh, inV2.mValue.mHigh) });
#elif defined(JPH_USE_NEON)
    return DVec3({ vmaxq_f64(inV1.mValue.val[0], inV2.mValue.val[0]), vmaxq_f64(inV1.mValue.val[1], inV2.mValue.val[1]) });
#else
    return DVec3(max(inV1.mF64[0], inV2.mF64[0]), max(inV1.mF64[1], inV2.mF64[1]), max(inV1.mF64[2], inV2.mF64[2]));
#endif
}

DVec3 DVec3::sClamp(DVec3Arg inV, DVec3Arg inMin, DVec3Arg inMax)
{
    return sMax(sMin(inV, inMax), inMin);
}

DVec3 DVec3::sEquals(DVec3Arg inV1, DVec3Arg inV2)
{
#if defined(JPH_USE_AVX)
    return _mm256_cmp_pd(inV1.mValue, inV2.mValue, _CMP_EQ_OQ);
#elif defined(JPH_USE_SSE)
    return DVec3({ _mm_cmpeq_pd(inV1.mValue.mLow, inV2.mValue.mLow), _mm_cmpeq_pd(inV1.mValue.mHigh, inV2.mValue.mHigh) });
#elif defined(JPH_USE_NEON)
    return DVec3({ vreinterpretq_u64_f64(vceqq_f64(inV1.mValue.val[0], inV2.mValue.val[0])), vreinterpretq_u64_f64(vceqq_f64(inV1.mValue.val[1], inV2.mValue.val[1])) });
#else
    return DVec3(inV1.mF64[0] == inV2.mF64[0]? cTrue : cFalse, inV1.mF64[1] == inV2.mF64[1]? cTrue : cFalse, inV1.mF64[2] == inV2.mF64[2]? cTrue : cFalse);
#endif
}

DVec3 DVec3::sLess(DVec3Arg inV1, DVec3Arg inV2)
{
#if defined(JPH_USE_AVX)
    return _mm256_cmp_pd(inV1.mValue, inV2.mValue, _CMP_LT_OQ);
#elif defined(JPH_USE_SSE)
    return DVec3({ _mm_cmplt_pd(inV1.mValue.mLow, inV2.mValue.mLow), _mm_cmplt_pd(inV1.mValue.mHigh, inV2.mValue.mHigh) });
#elif defined(JPH_USE_NEON)
    return DVec3({ vreinterpretq_u64_f64(vcltq_f64(inV1.mValue.val[0], inV2.mValue.val[0])), vreinterpretq_u64_f64(vcltq_f64(inV1.mValue.val[1], inV2.mValue.val[1])) });
#else
    return DVec3(inV1.mF64[0] < inV2.mF64[0]? cTrue : cFalse, inV1.mF64[1] < inV2.mF64[1]? cTrue : cFalse, inV1.mF64[2] < inV2.mF64[2]? cTrue : cFalse);
#endif
}

DVec3 DVec3::sLessOrEqual(DVec3Arg inV1, DVec3Arg inV2)
{
#if defined(JPH_USE_AVX)
    return _mm256_cmp_pd(inV1.mValue, inV2.mValue, _CMP_LE_OQ);
#elif defined(JPH_USE_SSE)
    return DVec3({ _mm_cmple_pd(inV1.mValue.mLow, inV2.mValue.mLow), _mm_cmple_pd(inV1.mValue.mHigh, inV2.mValue.mHigh) });
#elif defined(JPH_USE_NEON)
    return DVec3({ vreinterpretq_u64_f64(vcleq_f64(inV1.mValue.val[0], inV2.mValue.val[0])), vreinterpretq_u64_f64(vcleq_f64(inV1.mValue.val[1], inV2.mValue.val[1])) });
#else
    return DVec3(inV1.mF64[0] <= inV2.mF64[0]? cTrue : cFalse, inV1.mF64[1] <= inV2.mF64[1]? cTrue : cFalse, inV1.mF64[2] <= inV2.mF64[2]? cTrue : cFalse);
#endif
}

DVec3 DVec3::sGreater(DVec3Arg inV1, DVec3Arg inV2)
{
#if defined(JPH_USE_AVX)
    return _mm256_cmp_pd(inV1.mValue, inV2.mValue, _CMP_GT_OQ);
#elif defined(JPH_USE_SSE)
    return DVec3({ _mm_cmpgt_pd(inV1.mValue.mLow, inV2.mValue.mLow), _mm_cmpgt_pd(inV1.mValue.mHigh, inV2.mValue.mHigh) });
#elif defined(JPH_USE_NEON)
    return DVec3({ vreinterpretq_u64_f64(vcgtq_f64(inV1.mValue.val[0], inV2.mValue.val[0])), vreinterpretq_u64_f64(vcgtq_f64(inV1.mValue.val[1], inV2.mValue.val[1])) });
#else
    return DVec3(inV1.mF64[0] > inV2.mF64[0]? cTrue : cFalse, inV1.mF64[1] > inV2.mF64[1]? cTrue : cFalse, inV1.mF64[2] > inV2.mF64[2]? cTrue : cFalse);
#endif
}

DVec3 DVec3::sGreaterOrEqual(DVec3Arg inV1, DVec3Arg inV2)
{
#if defined(JPH_USE_AVX)
    return _mm256_cmp_pd(inV1.mValue, inV2.mValue, _CMP_GE_OQ);
#elif defined(JPH_USE_SSE)
    return DVec3({ _mm_cmpge_pd(inV1.mValue.mLow, inV2.mValue.mLow), _mm_cmpge_pd(inV1.mValue.mHigh, inV2.mValue.mHigh) });
#elif defined(JPH_USE_NEON)
    return DVec3({ vreinterpretq_u64_f64(vcgeq_f64(inV1.mValue.val[0], inV2.mValue.val[0])), vreinterpretq_u64_f64(vcgeq_f64(inV1.mValue.val[1], inV2.mValue.val[1])) });
#else
    return DVec3(inV1.mF64[0] >= inV2.mF64[0]? cTrue : cFalse, inV1.mF64[1] >= inV2.mF64[1]? cTrue : cFalse, inV1.mF64[2] >= inV2.mF64[2]? cTrue : cFalse);
#endif
}

DVec3 DVec3::sFusedMultiplyAdd(DVec3Arg inMul1, DVec3Arg inMul2, DVec3Arg inAdd)
{
#if defined(JPH_USE_AVX)
#ifdef JPH_USE_FMADD
    return _mm256_fmadd_pd(inMul1.mValue, inMul2.mValue, inAdd.mValue);
#else
    return _mm256_add_pd(_mm256_mul_pd(inMul1.mValue, inMul2.mValue), inAdd.mValue);
#endif
#elif defined(JPH_USE_NEON)
    return DVec3({ vmlaq_f64(inAdd.mValue.val[0], inMul1.mValue.val[0], inMul2.mValue.val[0]), vmlaq_f64(inAdd.mValue.val[1], inMul1.mValue.val[1], inMul2.mValue.val[1]) });
#else
    return inMul1 * inMul2 + inAdd;
#endif
}

DVec3 DVec3::sSelect(DVec3Arg inV1, DVec3Arg inV2, DVec3Arg inControl)
{
#if defined(JPH_USE_AVX)
    return _mm256_blendv_pd(inV1.mValue, inV2.mValue, inControl.mValue);
#elif defined(JPH_USE_SSE4_1)
    Type v = { _mm_blendv_pd(inV1.mValue.mLow, inV2.mValue.mLow, inControl.mValue.mLow), _mm_blendv_pd(inV1.mValue.mHigh, inV2.mValue.mHigh, inControl.mValue.mHigh) };
    return sFixW(v);
#elif defined(JPH_USE_NEON)
    Type v = { vbslq_f64(vshrq_n_s64(inControl.mValue.val[0], 63), inV2.mValue.val[0], inV1.mValue.val[0]), vbslq_f64(vshrq_n_s64(inControl.mValue.val[1], 63), inV2.mValue.val[1], inV1.mValue.val[1]) };
    return sFixW(v);
#else
    DVec3 result;
    for (int i = 0; i < 3; i++)
        result.mF64[i] = BitCast<uint64>(inControl.mF64[i])? inV2.mF64[i] : inV1.mF64[i];
#ifdef JPH_FLOATING_POINT_EXCEPTIONS_ENABLED
    result.mF64[3] = result.mF64[2];
#endif // JPH_FLOATING_POINT_EXCEPTIONS_ENABLED
    return result;
#endif
}

DVec3 DVec3::sOr(DVec3Arg inV1, DVec3Arg inV2)
{
#if defined(JPH_USE_AVX)
    return _mm256_or_pd(inV1.mValue, inV2.mValue);
#elif defined(JPH_USE_SSE)
    return DVec3({ _mm_or_pd(inV1.mValue.mLow, inV2.mValue.mLow), _mm_or_pd(inV1.mValue.mHigh, inV2.mValue.mHigh) });
#elif defined(JPH_USE_NEON)
    return DVec3({ vorrq_s64(inV1.mValue.val[0], inV2.mValue.val[0]), vorrq_s64(inV1.mValue.val[1], inV2.mValue.val[1]) });
#else
    return DVec3(BitCast<double>(BitCast<uint64>(inV1.mF64[0]) | BitCast<uint64>(inV2.mF64[0])), BitCast<double>(BitCast<uint64>(inV1.mF64[1]) | BitCast<uint64>(inV2.mF64[1])), BitCast<double>(BitCast<uint64>(inV1.mF64[2]) | BitCast<uint64>(inV2.mF64[2])));
#endif
}

DVec3 DVec3::sXor(DVec3Arg inV1, DVec3Arg inV2)
{
#if defined(JPH_USE_AVX)
    return _mm256_xor_pd(inV1.mValue, inV2.mValue);
#elif defined(JPH_USE_SSE)
    return DVec3({ _mm_xor_pd(inV1.mValue.mLow, inV2.mValue.mLow), _mm_xor_pd(inV1.mValue.mHigh, inV2.mValue.mHigh) });
#elif defined(JPH_USE_NEON)
    return DVec3({ veorq_s64(inV1.mValue.val[0], inV2.mValue.val[0]), veorq_s64(inV1.mValue.val[1], inV2.mValue.val[1]) });
#else
    return DVec3(BitCast<double>(BitCast<uint64>(inV1.mF64[0]) ^ BitCast<uint64>(inV2.mF64[0])), BitCast<double>(BitCast<uint64>(inV1.mF64[1]) ^ BitCast<uint64>(inV2.mF64[1])), BitCast<double>(BitCast<uint64>(inV1.mF64[2]) ^ BitCast<uint64>(inV2.mF64[2])));
#endif
}

DVec3 DVec3::sAnd(DVec3Arg inV1, DVec3Arg inV2)
{
#if defined(JPH_USE_AVX)
    return _mm256_and_pd(inV1.mValue, inV2.mValue);
#elif defined(JPH_USE_SSE)
    return DVec3({ _mm_and_pd(inV1.mValue.mLow, inV2.mValue.mLow), _mm_and_pd(inV1.mValue.mHigh, inV2.mValue.mHigh) });
#elif defined(JPH_USE_NEON)
    return DVec3({ vandq_s64(inV1.mValue.val[0], inV2.mValue.val[0]), vandq_s64(inV1.mValue.val[1], inV2.mValue.val[1]) });
#else
    return DVec3(BitCast<double>(BitCast<uint64>(inV1.mF64[0]) & BitCast<uint64>(inV2.mF64[0])), BitCast<double>(BitCast<uint64>(inV1.mF64[1]) & BitCast<uint64>(inV2.mF64[1])), BitCast<double>(BitCast<uint64>(inV1.mF64[2]) & BitCast<uint64>(inV2.mF64[2])));
#endif
}

int DVec3::GetTrues() const
{
#if defined(JPH_USE_AVX)
    return _mm256_movemask_pd(mValue) & 0x7;
#elif defined(JPH_USE_SSE)
    return (_mm_movemask_pd(mValue.mLow) + (_mm_movemask_pd(mValue.mHigh) << 2)) & 0x7;
#else
    return int((BitCast<uint64>(mF64[0]) >> 63) | ((BitCast<uint64>(mF64[1]) >> 63) << 1) | ((BitCast<uint64>(mF64[2]) >> 63) << 2));
#endif
}

bool DVec3::TestAnyTrue() const
{
    return GetTrues() != 0;
}

bool DVec3::TestAllTrue() const
{
    return GetTrues() == 0x7;
}

bool DVec3::operator == (DVec3Arg inV2) const
{
    return sEquals(*this, inV2).TestAllTrue();
}

bool DVec3::IsClose(DVec3Arg inV2, double inMaxDistSq) const
{
    return (inV2 - *this).LengthSq() <= inMaxDistSq;
}

bool DVec3::IsNearZero(double inMaxDistSq) const
{
    return LengthSq() <= inMaxDistSq;
}

DVec3 DVec3::operator * (DVec3Arg inV2) const
{
#if defined(JPH_USE_AVX)
    return _mm256_mul_pd(mValue, inV2.mValue);
#elif defined(JPH_USE_SSE)
    return DVec3({ _mm_mul_pd(mValue.mLow, inV2.mValue.mLow), _mm_mul_pd(mValue.mHigh, inV2.mValue.mHigh) });
#elif defined(JPH_USE_NEON)
    return DVec3({ vmulq_f64(mValue.val[0], inV2.mValue.val[0]), vmulq_f64(mValue.val[1], inV2.mValue.val[1]) });
#else
    return DVec3(mF64[0] * inV2.mF64[0], mF64[1] * inV2.mF64[1], mF64[2] * inV2.mF64[2]);
#endif
}

DVec3 DVec3::operator * (double inV2) const
{
#if defined(JPH_USE_AVX)
    return _mm256_mul_pd(mValue, _mm256_set1_pd(inV2));
#elif defined(JPH_USE_SSE)
    __m128d v = _mm_set1_pd(inV2);
    return DVec3({ _mm_mul_pd(mValue.mLow, v), _mm_mul_pd(mValue.mHigh, v) });
#elif defined(JPH_USE_NEON)
    return DVec3({ vmulq_n_f64(mValue.val[0], inV2), vmulq_n_f64(mValue.val[1], inV2) });
#else
    return DVec3(mF64[0] * inV2, mF64[1] * inV2, mF64[2] * inV2);
#endif
}

DVec3 operator * (double inV1, DVec3Arg inV2)
{
#if defined(JPH_USE_AVX)
    return _mm256_mul_pd(_mm256_set1_pd(inV1), inV2.mValue);
#elif defined(JPH_USE_SSE)
    __m128d v = _mm_set1_pd(inV1);
    return DVec3({ _mm_mul_pd(v, inV2.mValue.mLow), _mm_mul_pd(v, inV2.mValue.mHigh) });
#elif defined(JPH_USE_NEON)
    return DVec3({ vmulq_n_f64(inV2.mValue.val[0], inV1), vmulq_n_f64(inV2.mValue.val[1], inV1) });
#else
    return DVec3(inV1 * inV2.mF64[0], inV1 * inV2.mF64[1], inV1 * inV2.mF64[2]);
#endif
}

DVec3 DVec3::operator / (double inV2) const
{
#if defined(JPH_USE_AVX)
    return _mm256_div_pd(mValue, _mm256_set1_pd(inV2));
#elif defined(JPH_USE_SSE)
    __m128d v = _mm_set1_pd(inV2);
    return DVec3({ _mm_div_pd(mValue.mLow, v), _mm_div_pd(mValue.mHigh, v) });
#elif defined(JPH_USE_NEON)
    float64x2_t v = vdupq_n_f64(inV2);
    return DVec3({ vdivq_f64(mValue.val[0], v), vdivq_f64(mValue.val[1], v) });
#else
    return DVec3(mF64[0] / inV2, mF64[1] / inV2, mF64[2] / inV2);
#endif
}

DVec3 &DVec3::operator *= (double inV2)
{
#if defined(JPH_USE_AVX)
    mValue = _mm256_mul_pd(mValue, _mm256_set1_pd(inV2));
#elif defined(JPH_USE_SSE)
    __m128d v = _mm_set1_pd(inV2);
    mValue.mLow = _mm_mul_pd(mValue.mLow, v);
    mValue.mHigh = _mm_mul_pd(mValue.mHigh, v);
#elif defined(JPH_USE_NEON)
    mValue.val[0] = vmulq_n_f64(mValue.val[0], inV2);
    mValue.val[1] = vmulq_n_f64(mValue.val[1], inV2);
#else
    for (int i = 0; i < 3; ++i)
        mF64[i] *= inV2;
#ifdef JPH_FLOATING_POINT_EXCEPTIONS_ENABLED
    mF64[3] = mF64[2];
#endif
#endif
    return *this;
}

DVec3 &DVec3::operator *= (DVec3Arg inV2)
{
#if defined(JPH_USE_AVX)
    mValue = _mm256_mul_pd(mValue, inV2.mValue);
#elif defined(JPH_USE_SSE)
    mValue.mLow = _mm_mul_pd(mValue.mLow, inV2.mValue.mLow);
    mValue.mHigh = _mm_mul_pd(mValue.mHigh, inV2.mValue.mHigh);
#elif defined(JPH_USE_NEON)
    mValue.val[0] = vmulq_f64(mValue.val[0], inV2.mValue.val[0]);
    mValue.val[1] = vmulq_f64(mValue.val[1], inV2.mValue.val[1]);
#else
    for (int i = 0; i < 3; ++i)
        mF64[i] *= inV2.mF64[i];
#ifdef JPH_FLOATING_POINT_EXCEPTIONS_ENABLED
    mF64[3] = mF64[2];
#endif
#endif
    return *this;
}

DVec3 &DVec3::operator /= (double inV2)
{
#if defined(JPH_USE_AVX)
    mValue = _mm256_div_pd(mValue, _mm256_set1_pd(inV2));
#elif defined(JPH_USE_SSE)
    __m128d v = _mm_set1_pd(inV2);
    mValue.mLow = _mm_div_pd(mValue.mLow, v);
    mValue.mHigh = _mm_div_pd(mValue.mHigh, v);
#elif defined(JPH_USE_NEON)
    float64x2_t v = vdupq_n_f64(inV2);
    mValue.val[0] = vdivq_f64(mValue.val[0], v);
    mValue.val[1] = vdivq_f64(mValue.val[1], v);
#else
    for (int i = 0; i < 3; ++i)
        mF64[i] /= inV2;
#ifdef JPH_FLOATING_POINT_EXCEPTIONS_ENABLED
    mF64[3] = mF64[2];
#endif
#endif
    return *this;
}

DVec3 DVec3::operator + (Vec3Arg inV2) const
{
#if defined(JPH_USE_AVX)
    return _mm256_add_pd(mValue, _mm256_cvtps_pd(inV2.mValue));
#elif defined(JPH_USE_SSE)
    return DVec3({ _mm_add_pd(mValue.mLow, _mm_cvtps_pd(inV2.mValue)), _mm_add_pd(mValue.mHigh, _mm_cvtps_pd(_mm_shuffle_ps(inV2.mValue, inV2.mValue, _MM_SHUFFLE(2, 2, 2, 2)))) });
#elif defined(JPH_USE_NEON)
    return DVec3({ vaddq_f64(mValue.val[0], vcvt_f64_f32(vget_low_f32(inV2.mValue))), vaddq_f64(mValue.val[1], vcvt_high_f64_f32(inV2.mValue)) });
#else
    return DVec3(mF64[0] + inV2.mF32[0], mF64[1] + inV2.mF32[1], mF64[2] + inV2.mF32[2]);
#endif
}

DVec3 DVec3::operator + (DVec3Arg inV2) const
{
#if defined(JPH_USE_AVX)
    return _mm256_add_pd(mValue, inV2.mValue);
#elif defined(JPH_USE_SSE)
    return DVec3({ _mm_add_pd(mValue.mLow, inV2.mValue.mLow), _mm_add_pd(mValue.mHigh, inV2.mValue.mHigh) });
#elif defined(JPH_USE_NEON)
    return DVec3({ vaddq_f64(mValue.val[0], inV2.mValue.val[0]), vaddq_f64(mValue.val[1], inV2.mValue.val[1]) });
#else
    return DVec3(mF64[0] + inV2.mF64[0], mF64[1] + inV2.mF64[1], mF64[2] + inV2.mF64[2]);
#endif
}

DVec3 &DVec3::operator += (Vec3Arg inV2)
{
#if defined(JPH_USE_AVX)
    mValue = _mm256_add_pd(mValue, _mm256_cvtps_pd(inV2.mValue));
#elif defined(JPH_USE_SSE)
    mValue.mLow = _mm_add_pd(mValue.mLow, _mm_cvtps_pd(inV2.mValue));
    mValue.mHigh = _mm_add_pd(mValue.mHigh, _mm_cvtps_pd(_mm_shuffle_ps(inV2.mValue, inV2.mValue, _MM_SHUFFLE(2, 2, 2, 2))));
#elif defined(JPH_USE_NEON)
    mValue.val[0] = vaddq_f64(mValue.val[0], vcvt_f64_f32(vget_low_f32(inV2.mValue)));
    mValue.val[1] = vaddq_f64(mValue.val[1], vcvt_high_f64_f32(inV2.mValue));
#else
    for (int i = 0; i < 3; ++i)
        mF64[i] += inV2.mF32[i];
#ifdef JPH_FLOATING_POINT_EXCEPTIONS_ENABLED
    mF64[3] = mF64[2];
#endif
#endif
    return *this;
}

DVec3 &DVec3::operator += (DVec3Arg inV2)
{
#if defined(JPH_USE_AVX)
    mValue = _mm256_add_pd(mValue, inV2.mValue);
#elif defined(JPH_USE_SSE)
    mValue.mLow = _mm_add_pd(mValue.mLow, inV2.mValue.mLow);
    mValue.mHigh = _mm_add_pd(mValue.mHigh, inV2.mValue.mHigh);
#elif defined(JPH_USE_NEON)
    mValue.val[0] = vaddq_f64(mValue.val[0], inV2.mValue.val[0]);
    mValue.val[1] = vaddq_f64(mValue.val[1], inV2.mValue.val[1]);
#else
    for (int i = 0; i < 3; ++i)
        mF64[i] += inV2.mF64[i];
#ifdef JPH_FLOATING_POINT_EXCEPTIONS_ENABLED
    mF64[3] = mF64[2];
#endif
#endif
    return *this;
}

DVec3 DVec3::operator - () const
{
#if defined(JPH_USE_AVX)
    return _mm256_sub_pd(_mm256_setzero_pd(), mValue);
#elif defined(JPH_USE_SSE)
    __m128d zero = _mm_setzero_pd();
    return DVec3({ _mm_sub_pd(zero, mValue.mLow), _mm_sub_pd(zero, mValue.mHigh) });
#elif defined(JPH_USE_NEON)
    return DVec3({ vnegq_f64(mValue.val[0]), vnegq_f64(mValue.val[1]) });
#else
    return DVec3(-mF64[0], -mF64[1], -mF64[2]);
#endif
}

DVec3 DVec3::operator - (Vec3Arg inV2) const
{
#if defined(JPH_USE_AVX)
    return _mm256_sub_pd(mValue, _mm256_cvtps_pd(inV2.mValue));
#elif defined(JPH_USE_SSE)
    return DVec3({ _mm_sub_pd(mValue.mLow, _mm_cvtps_pd(inV2.mValue)), _mm_sub_pd(mValue.mHigh, _mm_cvtps_pd(_mm_shuffle_ps(inV2.mValue, inV2.mValue, _MM_SHUFFLE(2, 2, 2, 2)))) });
#elif defined(JPH_USE_NEON)
    return DVec3({ vsubq_f64(mValue.val[0], vcvt_f64_f32(vget_low_f32(inV2.mValue))), vsubq_f64(mValue.val[1], vcvt_high_f64_f32(inV2.mValue)) });
#else
    return DVec3(mF64[0] - inV2.mF32[0], mF64[1] - inV2.mF32[1], mF64[2] - inV2.mF32[2]);
#endif
}

DVec3 DVec3::operator - (DVec3Arg inV2) const
{
#if defined(JPH_USE_AVX)
    return _mm256_sub_pd(mValue, inV2.mValue);
#elif defined(JPH_USE_SSE)
    return DVec3({ _mm_sub_pd(mValue.mLow, inV2.mValue.mLow), _mm_sub_pd(mValue.mHigh, inV2.mValue.mHigh) });
#elif defined(JPH_USE_NEON)
    return DVec3({ vsubq_f64(mValue.val[0], inV2.mValue.val[0]), vsubq_f64(mValue.val[1], inV2.mValue.val[1]) });
#else
    return DVec3(mF64[0] - inV2.mF64[0], mF64[1] - inV2.mF64[1], mF64[2] - inV2.mF64[2]);
#endif
}

DVec3 &DVec3::operator -= (Vec3Arg inV2)
{
#if defined(JPH_USE_AVX)
    mValue = _mm256_sub_pd(mValue, _mm256_cvtps_pd(inV2.mValue));
#elif defined(JPH_USE_SSE)
    mValue.mLow = _mm_sub_pd(mValue.mLow, _mm_cvtps_pd(inV2.mValue));
    mValue.mHigh = _mm_sub_pd(mValue.mHigh, _mm_cvtps_pd(_mm_shuffle_ps(inV2.mValue, inV2.mValue, _MM_SHUFFLE(2, 2, 2, 2))));
#elif defined(JPH_USE_NEON)
    mValue.val[0] = vsubq_f64(mValue.val[0], vcvt_f64_f32(vget_low_f32(inV2.mValue)));
    mValue.val[1] = vsubq_f64(mValue.val[1], vcvt_high_f64_f32(inV2.mValue));
#else
    for (int i = 0; i < 3; ++i)
        mF64[i] -= inV2.mF32[i];
#ifdef JPH_FLOATING_POINT_EXCEPTIONS_ENABLED
    mF64[3] = mF64[2];
#endif
#endif
    return *this;
}

DVec3 &DVec3::operator -= (DVec3Arg inV2)
{
#if defined(JPH_USE_AVX)
    mValue = _mm256_sub_pd(mValue, inV2.mValue);
#elif defined(JPH_USE_SSE)
    mValue.mLow = _mm_sub_pd(mValue.mLow, inV2.mValue.mLow);
    mValue.mHigh = _mm_sub_pd(mValue.mHigh, inV2.mValue.mHigh);
#elif defined(JPH_USE_NEON)
    mValue.val[0] = vsubq_f64(mValue.val[0], inV2.mValue.val[0]);
    mValue.val[1] = vsubq_f64(mValue.val[1], inV2.mValue.val[1]);
#else
    for (int i = 0; i < 3; ++i)
        mF64[i] -= inV2.mF64[i];
#ifdef JPH_FLOATING_POINT_EXCEPTIONS_ENABLED
    mF64[3] = mF64[2];
#endif
#endif
    return *this;
}

DVec3 DVec3::operator / (DVec3Arg inV2) const
{
    inV2.CheckW();
#if defined(JPH_USE_AVX)
    return _mm256_div_pd(mValue, inV2.mValue);
#elif defined(JPH_USE_SSE)
    return DVec3({ _mm_div_pd(mValue.mLow, inV2.mValue.mLow), _mm_div_pd(mValue.mHigh, inV2.mValue.mHigh) });
#elif defined(JPH_USE_NEON)
    return DVec3({ vdivq_f64(mValue.val[0], inV2.mValue.val[0]), vdivq_f64(mValue.val[1], inV2.mValue.val[1]) });
#else
    return DVec3(mF64[0] / inV2.mF64[0], mF64[1] / inV2.mF64[1], mF64[2] / inV2.mF64[2]);
#endif
}

DVec3 DVec3::Abs() const
{
#if defined(JPH_USE_AVX512)
    return _mm256_range_pd(mValue, mValue, 0b1000);
#elif defined(JPH_USE_AVX)
    return _mm256_max_pd(_mm256_sub_pd(_mm256_setzero_pd(), mValue), mValue);
#elif defined(JPH_USE_SSE)
    __m128d zero = _mm_setzero_pd();
    return DVec3({ _mm_max_pd(_mm_sub_pd(zero, mValue.mLow), mValue.mLow), _mm_max_pd(_mm_sub_pd(zero, mValue.mHigh), mValue.mHigh) });
#elif defined(JPH_USE_NEON)
    return DVec3({ vabsq_f64(mValue.val[0]), vabsq_f64(mValue.val[1]) });
#else
    return DVec3(abs(mF64[0]), abs(mF64[1]), abs(mF64[2]));
#endif
}

DVec3 DVec3::Reciprocal() const
{
    return sReplicate(1.0) / mValue;
}

DVec3 DVec3::Cross(DVec3Arg inV2) const
{
#if defined(JPH_USE_AVX2)
    __m256d t1 = _mm256_permute4x64_pd(inV2.mValue, _MM_SHUFFLE(0, 0, 2, 1)); // Assure Z and W are the same
    t1 = _mm256_mul_pd(t1, mValue);
    __m256d t2 = _mm256_permute4x64_pd(mValue, _MM_SHUFFLE(0, 0, 2, 1)); // Assure Z and W are the same
    t2 = _mm256_mul_pd(t2, inV2.mValue);
    __m256d t3 = _mm256_sub_pd(t1, t2);
    return _mm256_permute4x64_pd(t3, _MM_SHUFFLE(0, 0, 2, 1)); // Assure Z and W are the same
#else
    return DVec3(mF64[1] * inV2.mF64[2] - mF64[2] * inV2.mF64[1], mF64[2] * inV2.mF64[0] - mF64[0] * inV2.mF64[2], mF64[0] * inV2.mF64[1] - mF64[1] * inV2.mF64[0]);
#endif
}

double DVec3::Dot(DVec3Arg inV2) const
{
#if defined(JPH_USE_AVX)
    __m256d mul = _mm256_mul_pd(mValue, inV2.mValue);
    __m128d xy = _mm256_castpd256_pd128(mul);
    __m128d yx = _mm_shuffle_pd(xy, xy, 1);
    __m128d sum = _mm_add_pd(xy, yx);
    __m128d zw = _mm256_extractf128_pd(mul, 1);
    sum = _mm_add_pd(sum, zw);
    return _mm_cvtsd_f64(sum);
#elif defined(JPH_USE_SSE)
    __m128d xy = _mm_mul_pd(mValue.mLow, inV2.mValue.mLow);
    __m128d yx = _mm_shuffle_pd(xy, xy, 1);
    __m128d sum = _mm_add_pd(xy, yx);
    __m128d z = _mm_mul_sd(mValue.mHigh, inV2.mValue.mHigh);
    sum = _mm_add_pd(sum, z);
    return _mm_cvtsd_f64(sum);
#elif defined(JPH_USE_NEON)
    float64x2_t mul_low = vmulq_f64(mValue.val[0], inV2.mValue.val[0]);
    float64x2_t mul_high = vmulq_f64(mValue.val[1], inV2.mValue.val[1]);
    return vaddvq_f64(mul_low) + vgetq_lane_f64(mul_high, 0);
#else
    double dot = 0.0;
    for (int i = 0; i < 3; i++)
        dot += mF64[i] * inV2.mF64[i];
    return dot;
#endif
}

double DVec3::LengthSq() const
{
    return Dot(*this);
}

DVec3 DVec3::Sqrt() const
{
#if defined(JPH_USE_AVX)
    return _mm256_sqrt_pd(mValue);
#elif defined(JPH_USE_SSE)
    return DVec3({ _mm_sqrt_pd(mValue.mLow), _mm_sqrt_pd(mValue.mHigh) });
#elif defined(JPH_USE_NEON)
    return DVec3({ vsqrtq_f64(mValue.val[0]), vsqrtq_f64(mValue.val[1]) });
#else
    return DVec3(sqrt(mF64[0]), sqrt(mF64[1]), sqrt(mF64[2]));
#endif
}

double DVec3::Length() const
{
    return sqrt(Dot(*this));
}

DVec3 DVec3::Normalized() const
{
    return *this / Length();
}

bool DVec3::IsNormalized(double inTolerance) const
{
    return abs(LengthSq() - 1.0) <= inTolerance;
}

bool DVec3::IsNaN() const
{
#if defined(JPH_USE_AVX)
    return (_mm256_movemask_pd(_mm256_cmp_pd(mValue, mValue, _CMP_UNORD_Q)) & 0x7) != 0;
#elif defined(JPH_USE_SSE)
    return ((_mm_movemask_pd(_mm_cmpunord_pd(mValue.mLow, mValue.mLow)) + (_mm_movemask_pd(_mm_cmpunord_pd(mValue.mHigh, mValue.mHigh)) << 2)) & 0x7) != 0;
#else
    return isnan(mF64[0]) || isnan(mF64[1]) || isnan(mF64[2]);
#endif
}

DVec3 DVec3::GetSign() const
{
#if defined(JPH_USE_AVX)
    __m256d minus_one = _mm256_set1_pd(-1.0);
    __m256d one = _mm256_set1_pd(1.0);
    return _mm256_or_pd(_mm256_and_pd(mValue, minus_one), one);
#elif defined(JPH_USE_SSE)
    __m128d minus_one = _mm_set1_pd(-1.0);
    __m128d one = _mm_set1_pd(1.0);
    return DVec3({ _mm_or_pd(_mm_and_pd(mValue.mLow, minus_one), one), _mm_or_pd(_mm_and_pd(mValue.mHigh, minus_one), one) });
#elif defined(JPH_USE_NEON)
    float64x2_t minus_one = vdupq_n_f64(-1.0f);
    float64x2_t one = vdupq_n_f64(1.0f);
    return DVec3({ vorrq_s64(vandq_s64(mValue.val[0], minus_one), one), vorrq_s64(vandq_s64(mValue.val[1], minus_one), one) });
#else
    return DVec3(std::signbit(mF64[0])? -1.0 : 1.0, std::signbit(mF64[1])? -1.0 : 1.0, std::signbit(mF64[2])? -1.0 : 1.0);
#endif
}

DVec3 DVec3::PrepareRoundToZero() const
{
    // Float has 23 bit mantissa, double 52 bit mantissa => we lose 29 bits when converting from double to float
    constexpr uint64 cDoubleToFloatMantissaLoss = (1U << 29) - 1;

#if defined(JPH_USE_AVX)
    return _mm256_and_pd(mValue, _mm256_castsi256_pd(_mm256_set1_epi64x(int64_t(~cDoubleToFloatMantissaLoss))));
#elif defined(JPH_USE_SSE)
    __m128d mask = _mm_castsi128_pd(_mm_set1_epi64x(int64_t(~cDoubleToFloatMantissaLoss)));
    return DVec3({ _mm_and_pd(mValue.mLow, mask), _mm_and_pd(mValue.mHigh, mask) });
#elif defined(JPH_USE_NEON)
    float64x2_t mask = vreinterpretq_f64_u64(vdupq_n_u64(~cDoubleToFloatMantissaLoss));
    return DVec3({ vandq_s64(mValue.val[0], mask), vandq_s64(mValue.val[1], mask) });
#else
    double x = BitCast<double>(BitCast<uint64>(mF64[0]) & ~cDoubleToFloatMantissaLoss);
    double y = BitCast<double>(BitCast<uint64>(mF64[1]) & ~cDoubleToFloatMantissaLoss);
    double z = BitCast<double>(BitCast<uint64>(mF64[2]) & ~cDoubleToFloatMantissaLoss);
    return DVec3(x, y, z);
#endif
}

DVec3 DVec3::PrepareRoundToInf() const
{
    // Float has 23 bit mantissa, double 52 bit mantissa => we lose 29 bits when converting from double to float
    constexpr uint64 cDoubleToFloatMantissaLoss = (1U << 29) - 1;

#if defined(JPH_USE_AVX)
    __m256i mantissa_loss = _mm256_set1_epi64x(cDoubleToFloatMantissaLoss);
    __m256d value_and_mantissa_loss = _mm256_and_pd(mValue, _mm256_castsi256_pd(mantissa_loss));
    __m256d is_zero = _mm256_cmp_pd(value_and_mantissa_loss, _mm256_setzero_pd(), _CMP_EQ_OQ);
    __m256d value_or_mantissa_loss = _mm256_or_pd(mValue, _mm256_castsi256_pd(mantissa_loss));
    return _mm256_blendv_pd(value_or_mantissa_loss, mValue, is_zero);
#elif defined(JPH_USE_SSE4_1)
    __m128i mantissa_loss = _mm_set1_epi64x(cDoubleToFloatMantissaLoss);
    __m128d zero = _mm_setzero_pd();
    __m128d value_and_mantissa_loss_low = _mm_and_pd(mValue.mLow, _mm_castsi128_pd(mantissa_loss));
    __m128d is_zero_low = _mm_cmpeq_pd(value_and_mantissa_loss_low, zero);
    __m128d value_or_mantissa_loss_low = _mm_or_pd(mValue.mLow, _mm_castsi128_pd(mantissa_loss));
    __m128d value_and_mantissa_loss_high = _mm_and_pd(mValue.mHigh, _mm_castsi128_pd(mantissa_loss));
    __m128d is_zero_high = _mm_cmpeq_pd(value_and_mantissa_loss_high, zero);
    __m128d value_or_mantissa_loss_high = _mm_or_pd(mValue.mHigh, _mm_castsi128_pd(mantissa_loss));
    return DVec3({ _mm_blendv_pd(value_or_mantissa_loss_low, mValue.mLow, is_zero_low), _mm_blendv_pd(value_or_mantissa_loss_high, mValue.mHigh, is_zero_high) });
#elif defined(JPH_USE_NEON)
    float64x2_t mantissa_loss = vreinterpretq_f64_u64(vdupq_n_u64(cDoubleToFloatMantissaLoss));
    float64x2_t zero = vdupq_n_f64(0.0);
    float64x2_t value_and_mantissa_loss_low = vandq_s64(mValue.val[0], mantissa_loss);
    float64x2_t is_zero_low = vceqq_f64(value_and_mantissa_loss_low, zero);
    float64x2_t value_or_mantissa_loss_low = vorrq_s64(mValue.val[0], mantissa_loss);
    float64x2_t value_and_mantissa_loss_high = vandq_s64(mValue.val[1], mantissa_loss);
    float64x2_t value_low = vbslq_f64(is_zero_low, mValue.val[0], value_or_mantissa_loss_low);
    float64x2_t is_zero_high = vceqq_f64(value_and_mantissa_loss_high, zero);
    float64x2_t value_or_mantissa_loss_high = vorrq_s64(mValue.val[1], mantissa_loss);
    float64x2_t value_high = vbslq_f64(is_zero_high, mValue.val[1], value_or_mantissa_loss_high);
    return DVec3({ value_low, value_high });
#else
    uint64 ux = BitCast<uint64>(mF64[0]);
    uint64 uy = BitCast<uint64>(mF64[1]);
    uint64 uz = BitCast<uint64>(mF64[2]);
    double x = BitCast<double>((ux & cDoubleToFloatMantissaLoss) == 0? ux : (ux | cDoubleToFloatMantissaLoss));
    double y = BitCast<double>((uy & cDoubleToFloatMantissaLoss) == 0? uy : (uy | cDoubleToFloatMantissaLoss));
    double z = BitCast<double>((uz & cDoubleToFloatMantissaLoss) == 0? uz : (uz | cDoubleToFloatMantissaLoss));
    return DVec3(x, y, z);
#endif
}

Vec3 DVec3::ToVec3RoundDown() const
{
    DVec3 to_zero = PrepareRoundToZero();
    DVec3 to_inf = PrepareRoundToInf();
    return Vec3(DVec3::sSelect(to_zero, to_inf, DVec3::sLess(*this, DVec3::sZero())));
}

Vec3 DVec3::ToVec3RoundUp() const
{
    DVec3 to_zero = PrepareRoundToZero();
    DVec3 to_inf = PrepareRoundToInf();
    return Vec3(DVec3::sSelect(to_inf, to_zero, DVec3::sLess(*this, DVec3::sZero())));
}

JPH_NAMESPACE_END
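// A minimal usage sketch (illustrative only, not part of the library): the
// comparison functions above return a per-component mask (cTrue / cFalse), which
// combines with sSelect for branchless per-component logic, exactly as
// ToVec3RoundDown / ToVec3RoundUp pick between the two prepared roundings.
// The function name AbsDiff below is hypothetical; it assumes a translation
// unit that includes Jolt/Math/DVec3.h.
//
//    // Branchless absolute difference, computed per component
//    DVec3 AbsDiff(DVec3Arg inA, DVec3Arg inB)
//    {
//        DVec3 diff = inA - inB;
//        // Where diff < 0 the control mask is cTrue and -diff is selected, else diff
//        return DVec3::sSelect(diff, -diff, DVec3::sLess(diff, DVec3::sZero()));
//    }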
0
repos/c2z/use_cases/JoltPhysics/include
repos/c2z/use_cases/JoltPhysics/include/Math/FindRoot.h
// Jolt Physics Library (https://github.com/jrouwe/JoltPhysics)
// SPDX-FileCopyrightText: 2021 Jorrit Rouwe
// SPDX-License-Identifier: MIT

#pragma once

JPH_NAMESPACE_BEGIN

/// Find the roots of \f$inA \: x^2 + inB \: x + inC = 0\f$.
/// @return The number of roots, actual roots in outX1 and outX2.
/// If number of roots returned is 1 then outX1 == outX2.
template <typename T>
inline int FindRoot(const T inA, const T inB, const T inC, T &outX1, T &outX2)
{
    // Check if this is a linear equation
    if (inA == T(0))
    {
        // Check if this is a constant equation
        if (inB == T(0))
            return 0;

        // Linear equation with 1 solution
        outX1 = outX2 = -inC / inB;
        return 1;
    }

    // See Numerical Recipes in C, Chapter 5.6 Quadratic and Cubic Equations
    T det = Square(inB) - T(4) * inA * inC;
    if (det < T(0))
        return 0;
    T q = (inB + Sign(inB) * sqrt(det)) / T(-2);
    outX1 = q / inA;
    if (q == T(0))
    {
        outX2 = outX1;
        return 1;
    }
    outX2 = inC / q;
    return 2;
}

JPH_NAMESPACE_END
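// A minimal usage sketch (illustrative only, not part of the library): shows the
// contract of FindRoot for a quadratic with two real roots.
//
//    float x1, x2;
//    int n = FindRoot(1.0f, -5.0f, 6.0f, x1, x2);    // x^2 - 5x + 6 = 0
//    // n == 2; the roots are 3 and 2 (x1 = q / a, x2 = c / q)
//
// Note the numerically stable form: q = (b + sign(b) * sqrt(det)) / -2 avoids the
// cancellation the textbook formula (-b +/- sqrt(det)) / 2a suffers when
// b^2 >> 4ac; the second root then follows from the product of roots, c / a.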
0
repos/c2z/use_cases/JoltPhysics/include
repos/c2z/use_cases/JoltPhysics/include/Math/Mat44.inl
// Jolt Physics Library (https://github.com/jrouwe/JoltPhysics) // SPDX-FileCopyrightText: 2021 Jorrit Rouwe // SPDX-License-Identifier: MIT #pragma once #include <Jolt/Math/Vec3.h> #include <Jolt/Math/Vec4.h> #include <Jolt/Math/Quat.h> JPH_NAMESPACE_BEGIN #define JPH_EL(r, c) mCol[c].mF32[r] Mat44::Mat44(Vec4Arg inC1, Vec4Arg inC2, Vec4Arg inC3, Vec4Arg inC4) : mCol { inC1, inC2, inC3, inC4 } { } Mat44::Mat44(Vec4Arg inC1, Vec4Arg inC2, Vec4Arg inC3, Vec3Arg inC4) : mCol { inC1, inC2, inC3, Vec4(inC4, 1.0f) } { } Mat44::Mat44(Type inC1, Type inC2, Type inC3, Type inC4) : mCol { inC1, inC2, inC3, inC4 } { } Mat44 Mat44::sZero() { return Mat44(Vec4::sZero(), Vec4::sZero(), Vec4::sZero(), Vec4::sZero()); } Mat44 Mat44::sIdentity() { return Mat44(Vec4(1, 0, 0, 0), Vec4(0, 1, 0, 0), Vec4(0, 0, 1, 0), Vec4(0, 0, 0, 1)); } Mat44 Mat44::sNaN() { return Mat44(Vec4::sNaN(), Vec4::sNaN(), Vec4::sNaN(), Vec4::sNaN()); } Mat44 Mat44::sLoadFloat4x4(const Float4 *inV) { Mat44 result; for (int c = 0; c < 4; ++c) result.mCol[c] = Vec4::sLoadFloat4(inV + c); return result; } Mat44 Mat44::sLoadFloat4x4Aligned(const Float4 *inV) { Mat44 result; for (int c = 0; c < 4; ++c) result.mCol[c] = Vec4::sLoadFloat4Aligned(inV + c); return result; } Mat44 Mat44::sRotationX(float inX) { Vec4 sv, cv; Vec4::sReplicate(inX).SinCos(sv, cv); float s = sv.GetX(), c = cv.GetX(); return Mat44(Vec4(1, 0, 0, 0), Vec4(0, c, s, 0), Vec4(0, -s, c, 0), Vec4(0, 0, 0, 1)); } Mat44 Mat44::sRotationY(float inY) { Vec4 sv, cv; Vec4::sReplicate(inY).SinCos(sv, cv); float s = sv.GetX(), c = cv.GetX(); return Mat44(Vec4(c, 0, -s, 0), Vec4(0, 1, 0, 0), Vec4(s, 0, c, 0), Vec4(0, 0, 0, 1)); } Mat44 Mat44::sRotationZ(float inZ) { Vec4 sv, cv; Vec4::sReplicate(inZ).SinCos(sv, cv); float s = sv.GetX(), c = cv.GetX(); return Mat44(Vec4(c, s, 0, 0), Vec4(-s, c, 0, 0), Vec4(0, 0, 1, 0), Vec4(0, 0, 0, 1)); } Mat44 Mat44::sRotation(QuatArg inQuat) { JPH_ASSERT(inQuat.IsNormalized()); // See: https://en.wikipedia.org/wiki/Quaternions_and_spatial_rotation section 'Quaternion-derived rotation matrix' #ifdef JPH_USE_SSE4_1 __m128 xyzw = inQuat.mValue.mValue; __m128 two_xyzw = _mm_add_ps(xyzw, xyzw); __m128 yzxw = _mm_shuffle_ps(xyzw, xyzw, _MM_SHUFFLE(3, 0, 2, 1)); __m128 two_yzxw = _mm_add_ps(yzxw, yzxw); __m128 zxyw = _mm_shuffle_ps(xyzw, xyzw, _MM_SHUFFLE(3, 1, 0, 2)); __m128 two_zxyw = _mm_add_ps(zxyw, zxyw); __m128 wwww = _mm_shuffle_ps(xyzw, xyzw, _MM_SHUFFLE(3, 3, 3, 3)); __m128 diagonal = _mm_sub_ps(_mm_sub_ps(_mm_set1_ps(1.0f), _mm_mul_ps(two_yzxw, yzxw)), _mm_mul_ps(two_zxyw, zxyw)); // (1 - 2 y^2 - 2 z^2, 1 - 2 x^2 - 2 z^2, 1 - 2 x^2 - 2 y^2, 1 - 4 w^2) __m128 plus = _mm_add_ps(_mm_mul_ps(two_xyzw, zxyw), _mm_mul_ps(two_yzxw, wwww)); // 2 * (xz + yw, xy + zw, yz + xw, ww) __m128 minus = _mm_sub_ps(_mm_mul_ps(two_yzxw, xyzw), _mm_mul_ps(two_zxyw, wwww)); // 2 * (xy - zw, yz - xw, xz - yw, 0) // Workaround for compiler changing _mm_sub_ps(_mm_mul_ps(...), ...) 
into a fused multiply sub instruction, resulting in w not being 0 // There doesn't appear to be a reliable way to turn this off in Clang minus = _mm_insert_ps(minus, minus, 0b1000); __m128 col0 = _mm_blend_ps(_mm_blend_ps(plus, diagonal, 0b0001), minus, 0b1100); // (1 - 2 y^2 - 2 z^2, 2 xy + 2 zw, 2 xz - 2 yw, 0) __m128 col1 = _mm_blend_ps(_mm_blend_ps(diagonal, minus, 0b1001), plus, 0b0100); // (2 xy - 2 zw, 1 - 2 x^2 - 2 z^2, 2 yz + 2 xw, 0) __m128 col2 = _mm_blend_ps(_mm_blend_ps(minus, plus, 0b0001), diagonal, 0b0100); // (2 xz + 2 yw, 2 yz - 2 xw, 1 - 2 x^2 - 2 y^2, 0) __m128 col3 = _mm_set_ps(1, 0, 0, 0); return Mat44(col0, col1, col2, col3); #else float x = inQuat.GetX(); float y = inQuat.GetY(); float z = inQuat.GetZ(); float w = inQuat.GetW(); float tx = x + x; // Note: Using x + x instead of 2.0f * x to force this function to return the same value as the SSE4.1 version across platforms. float ty = y + y; float tz = z + z; float xx = tx * x; float yy = ty * y; float zz = tz * z; float xy = tx * y; float xz = tx * z; float xw = tx * w; float yz = ty * z; float yw = ty * w; float zw = tz * w; return Mat44(Vec4((1.0f - yy) - zz, xy + zw, xz - yw, 0.0f), // Note: Added extra brackets to force this function to return the same value as the SSE4.1 version across platforms. Vec4(xy - zw, (1.0f - zz) - xx, yz + xw, 0.0f), Vec4(xz + yw, yz - xw, (1.0f - xx) - yy, 0.0f), Vec4(0.0f, 0.0f, 0.0f, 1.0f)); #endif } Mat44 Mat44::sRotation(Vec3Arg inAxis, float inAngle) { return sRotation(Quat::sRotation(inAxis, inAngle)); } Mat44 Mat44::sTranslation(Vec3Arg inV) { return Mat44(Vec4(1, 0, 0, 0), Vec4(0, 1, 0, 0), Vec4(0, 0, 1, 0), Vec4(inV, 1)); } Mat44 Mat44::sRotationTranslation(QuatArg inR, Vec3Arg inT) { Mat44 m = sRotation(inR); m.SetTranslation(inT); return m; } Mat44 Mat44::sInverseRotationTranslation(QuatArg inR, Vec3Arg inT) { Mat44 m = sRotation(inR.Conjugated()); m.SetTranslation(-m.Multiply3x3(inT)); return m; } Mat44 Mat44::sScale(float inScale) { return Mat44(Vec4(inScale, 0, 0, 0), Vec4(0, inScale, 0, 0), Vec4(0, 0, inScale, 0), Vec4(0, 0, 0, 1)); } Mat44 Mat44::sScale(Vec3Arg inV) { return Mat44(Vec4(inV.GetX(), 0, 0, 0), Vec4(0, inV.GetY(), 0, 0), Vec4(0, 0, inV.GetZ(), 0), Vec4(0, 0, 0, 1)); } Mat44 Mat44::sOuterProduct(Vec3Arg inV1, Vec3Arg inV2) { Vec4 v1(inV1, 0); return Mat44(v1 * inV2.SplatX(), v1 * inV2.SplatY(), v1 * inV2.SplatZ(), Vec4(0, 0, 0, 1)); } Mat44 Mat44::sCrossProduct(Vec3Arg inV) { #ifdef JPH_USE_SSE4_1 // Zero out the W component __m128 zero = _mm_setzero_ps(); __m128 v = _mm_blend_ps(inV.mValue, zero, 0b1000); // Negate __m128 min_v = _mm_sub_ps(zero, v); return Mat44( _mm_shuffle_ps(v, min_v, _MM_SHUFFLE(3, 1, 2, 3)), // [0, z, -y, 0] _mm_shuffle_ps(min_v, v, _MM_SHUFFLE(3, 0, 3, 2)), // [-z, 0, x, 0] _mm_blend_ps(_mm_shuffle_ps(v, v, _MM_SHUFFLE(3, 3, 3, 1)), _mm_shuffle_ps(min_v, min_v, _MM_SHUFFLE(3, 3, 0, 3)), 0b0010), // [y, -x, 0, 0] Vec4(0, 0, 0, 1)); #else float x = inV.GetX(); float y = inV.GetY(); float z = inV.GetZ(); return Mat44( Vec4(0, z, -y, 0), Vec4(-z, 0, x, 0), Vec4(y, -x, 0, 0), Vec4(0, 0, 0, 1)); #endif } Mat44 Mat44::sLookAt(Vec3Arg inPos, Vec3Arg inTarget, Vec3Arg inUp) { Vec3 direction = (inTarget - inPos).NormalizedOr(-Vec3::sAxisZ()); Vec3 right = direction.Cross(inUp).NormalizedOr(Vec3::sAxisX()); Vec3 up = right.Cross(direction); return Mat44(Vec4(right, 0), Vec4(up, 0), Vec4(-direction, 0), Vec4(inPos, 1)).InversedRotationTranslation(); } bool Mat44::operator == (Mat44Arg inM2) const { return UVec4::sAnd( 
UVec4::sAnd(Vec4::sEquals(mCol[0], inM2.mCol[0]), Vec4::sEquals(mCol[1], inM2.mCol[1])), UVec4::sAnd(Vec4::sEquals(mCol[2], inM2.mCol[2]), Vec4::sEquals(mCol[3], inM2.mCol[3])) ).TestAllTrue(); } bool Mat44::IsClose(Mat44Arg inM2, float inMaxDistSq) const { for (int i = 0; i < 4; ++i) if (!mCol[i].IsClose(inM2.mCol[i], inMaxDistSq)) return false; return true; } Mat44 Mat44::operator * (Mat44Arg inM) const { Mat44 result; #if defined(JPH_USE_SSE) for (int i = 0; i < 4; ++i) { __m128 c = inM.mCol[i].mValue; __m128 t = _mm_mul_ps(mCol[0].mValue, _mm_shuffle_ps(c, c, _MM_SHUFFLE(0, 0, 0, 0))); t = _mm_add_ps(t, _mm_mul_ps(mCol[1].mValue, _mm_shuffle_ps(c, c, _MM_SHUFFLE(1, 1, 1, 1)))); t = _mm_add_ps(t, _mm_mul_ps(mCol[2].mValue, _mm_shuffle_ps(c, c, _MM_SHUFFLE(2, 2, 2, 2)))); t = _mm_add_ps(t, _mm_mul_ps(mCol[3].mValue, _mm_shuffle_ps(c, c, _MM_SHUFFLE(3, 3, 3, 3)))); result.mCol[i].mValue = t; } #elif defined(JPH_USE_NEON) for (int i = 0; i < 4; ++i) { Type c = inM.mCol[i].mValue; Type t = vmulq_f32(mCol[0].mValue, vdupq_laneq_f32(c, 0)); t = vmlaq_f32(t, mCol[1].mValue, vdupq_laneq_f32(c, 1)); t = vmlaq_f32(t, mCol[2].mValue, vdupq_laneq_f32(c, 2)); t = vmlaq_f32(t, mCol[3].mValue, vdupq_laneq_f32(c, 3)); result.mCol[i].mValue = t; } #else for (int i = 0; i < 4; ++i) result.mCol[i] = mCol[0] * inM.mCol[i].mF32[0] + mCol[1] * inM.mCol[i].mF32[1] + mCol[2] * inM.mCol[i].mF32[2] + mCol[3] * inM.mCol[i].mF32[3]; #endif return result; } Vec3 Mat44::operator * (Vec3Arg inV) const { #if defined(JPH_USE_SSE) __m128 t = _mm_mul_ps(mCol[0].mValue, _mm_shuffle_ps(inV.mValue, inV.mValue, _MM_SHUFFLE(0, 0, 0, 0))); t = _mm_add_ps(t, _mm_mul_ps(mCol[1].mValue, _mm_shuffle_ps(inV.mValue, inV.mValue, _MM_SHUFFLE(1, 1, 1, 1)))); t = _mm_add_ps(t, _mm_mul_ps(mCol[2].mValue, _mm_shuffle_ps(inV.mValue, inV.mValue, _MM_SHUFFLE(2, 2, 2, 2)))); t = _mm_add_ps(t, mCol[3].mValue); return Vec3::sFixW(t); #elif defined(JPH_USE_NEON) Type t = vmulq_f32(mCol[0].mValue, vdupq_laneq_f32(inV.mValue, 0)); t = vmlaq_f32(t, mCol[1].mValue, vdupq_laneq_f32(inV.mValue, 1)); t = vmlaq_f32(t, mCol[2].mValue, vdupq_laneq_f32(inV.mValue, 2)); t = vaddq_f32(t, mCol[3].mValue); // Don't combine this with the first mul into a fused multiply add, causes precision issues return Vec3::sFixW(t); #else return Vec3( mCol[0].mF32[0] * inV.mF32[0] + mCol[1].mF32[0] * inV.mF32[1] + mCol[2].mF32[0] * inV.mF32[2] + mCol[3].mF32[0], mCol[0].mF32[1] * inV.mF32[0] + mCol[1].mF32[1] * inV.mF32[1] + mCol[2].mF32[1] * inV.mF32[2] + mCol[3].mF32[1], mCol[0].mF32[2] * inV.mF32[0] + mCol[1].mF32[2] * inV.mF32[1] + mCol[2].mF32[2] * inV.mF32[2] + mCol[3].mF32[2]); #endif } Vec4 Mat44::operator * (Vec4Arg inV) const { #if defined(JPH_USE_SSE) __m128 t = _mm_mul_ps(mCol[0].mValue, _mm_shuffle_ps(inV.mValue, inV.mValue, _MM_SHUFFLE(0, 0, 0, 0))); t = _mm_add_ps(t, _mm_mul_ps(mCol[1].mValue, _mm_shuffle_ps(inV.mValue, inV.mValue, _MM_SHUFFLE(1, 1, 1, 1)))); t = _mm_add_ps(t, _mm_mul_ps(mCol[2].mValue, _mm_shuffle_ps(inV.mValue, inV.mValue, _MM_SHUFFLE(2, 2, 2, 2)))); t = _mm_add_ps(t, _mm_mul_ps(mCol[3].mValue, _mm_shuffle_ps(inV.mValue, inV.mValue, _MM_SHUFFLE(3, 3, 3, 3)))); return t; #elif defined(JPH_USE_NEON) Type t = vmulq_f32(mCol[0].mValue, vdupq_laneq_f32(inV.mValue, 0)); t = vmlaq_f32(t, mCol[1].mValue, vdupq_laneq_f32(inV.mValue, 1)); t = vmlaq_f32(t, mCol[2].mValue, vdupq_laneq_f32(inV.mValue, 2)); t = vmlaq_f32(t, mCol[3].mValue, vdupq_laneq_f32(inV.mValue, 3)); return t; #else return Vec4( mCol[0].mF32[0] * inV.mF32[0] + mCol[1].mF32[0] * 
inV.mF32[1] + mCol[2].mF32[0] * inV.mF32[2] + mCol[3].mF32[0] * inV.mF32[3], mCol[0].mF32[1] * inV.mF32[0] + mCol[1].mF32[1] * inV.mF32[1] + mCol[2].mF32[1] * inV.mF32[2] + mCol[3].mF32[1] * inV.mF32[3], mCol[0].mF32[2] * inV.mF32[0] + mCol[1].mF32[2] * inV.mF32[1] + mCol[2].mF32[2] * inV.mF32[2] + mCol[3].mF32[2] * inV.mF32[3], mCol[0].mF32[3] * inV.mF32[0] + mCol[1].mF32[3] * inV.mF32[1] + mCol[2].mF32[3] * inV.mF32[2] + mCol[3].mF32[3] * inV.mF32[3]); #endif } Vec3 Mat44::Multiply3x3(Vec3Arg inV) const { #if defined(JPH_USE_SSE) __m128 t = _mm_mul_ps(mCol[0].mValue, _mm_shuffle_ps(inV.mValue, inV.mValue, _MM_SHUFFLE(0, 0, 0, 0))); t = _mm_add_ps(t, _mm_mul_ps(mCol[1].mValue, _mm_shuffle_ps(inV.mValue, inV.mValue, _MM_SHUFFLE(1, 1, 1, 1)))); t = _mm_add_ps(t, _mm_mul_ps(mCol[2].mValue, _mm_shuffle_ps(inV.mValue, inV.mValue, _MM_SHUFFLE(2, 2, 2, 2)))); return Vec3::sFixW(t); #elif defined(JPH_USE_NEON) Type t = vmulq_f32(mCol[0].mValue, vdupq_laneq_f32(inV.mValue, 0)); t = vmlaq_f32(t, mCol[1].mValue, vdupq_laneq_f32(inV.mValue, 1)); t = vmlaq_f32(t, mCol[2].mValue, vdupq_laneq_f32(inV.mValue, 2)); return Vec3::sFixW(t); #else return Vec3( mCol[0].mF32[0] * inV.mF32[0] + mCol[1].mF32[0] * inV.mF32[1] + mCol[2].mF32[0] * inV.mF32[2], mCol[0].mF32[1] * inV.mF32[0] + mCol[1].mF32[1] * inV.mF32[1] + mCol[2].mF32[1] * inV.mF32[2], mCol[0].mF32[2] * inV.mF32[0] + mCol[1].mF32[2] * inV.mF32[1] + mCol[2].mF32[2] * inV.mF32[2]); #endif } Vec3 Mat44::Multiply3x3Transposed(Vec3Arg inV) const { #if defined(JPH_USE_SSE4_1) __m128 x = _mm_dp_ps(mCol[0].mValue, inV.mValue, 0x7f); __m128 y = _mm_dp_ps(mCol[1].mValue, inV.mValue, 0x7f); __m128 xy = _mm_blend_ps(x, y, 0b0010); __m128 z = _mm_dp_ps(mCol[2].mValue, inV.mValue, 0x7f); __m128 xyzz = _mm_blend_ps(xy, z, 0b1100); return xyzz; #else return Transposed3x3().Multiply3x3(inV); #endif } Mat44 Mat44::Multiply3x3(Mat44Arg inM) const { JPH_ASSERT(mCol[0][3] == 0.0f); JPH_ASSERT(mCol[1][3] == 0.0f); JPH_ASSERT(mCol[2][3] == 0.0f); Mat44 result; #if defined(JPH_USE_SSE) for (int i = 0; i < 3; ++i) { __m128 c = inM.mCol[i].mValue; __m128 t = _mm_mul_ps(mCol[0].mValue, _mm_shuffle_ps(c, c, _MM_SHUFFLE(0, 0, 0, 0))); t = _mm_add_ps(t, _mm_mul_ps(mCol[1].mValue, _mm_shuffle_ps(c, c, _MM_SHUFFLE(1, 1, 1, 1)))); t = _mm_add_ps(t, _mm_mul_ps(mCol[2].mValue, _mm_shuffle_ps(c, c, _MM_SHUFFLE(2, 2, 2, 2)))); result.mCol[i].mValue = t; } #elif defined(JPH_USE_NEON) for (int i = 0; i < 3; ++i) { Type c = inM.mCol[i].mValue; Type t = vmulq_f32(mCol[0].mValue, vdupq_laneq_f32(c, 0)); t = vmlaq_f32(t, mCol[1].mValue, vdupq_laneq_f32(c, 1)); t = vmlaq_f32(t, mCol[2].mValue, vdupq_laneq_f32(c, 2)); result.mCol[i].mValue = t; } #else for (int i = 0; i < 3; ++i) result.mCol[i] = mCol[0] * inM.mCol[i].mF32[0] + mCol[1] * inM.mCol[i].mF32[1] + mCol[2] * inM.mCol[i].mF32[2]; #endif result.mCol[3] = Vec4(0, 0, 0, 1); return result; } Mat44 Mat44::Multiply3x3LeftTransposed(Mat44Arg inM) const { // Transpose left hand side Mat44 trans = Transposed3x3(); // Do 3x3 matrix multiply Mat44 result; result.mCol[0] = trans.mCol[0] * inM.mCol[0].SplatX() + trans.mCol[1] * inM.mCol[0].SplatY() + trans.mCol[2] * inM.mCol[0].SplatZ(); result.mCol[1] = trans.mCol[0] * inM.mCol[1].SplatX() + trans.mCol[1] * inM.mCol[1].SplatY() + trans.mCol[2] * inM.mCol[1].SplatZ(); result.mCol[2] = trans.mCol[0] * inM.mCol[2].SplatX() + trans.mCol[1] * inM.mCol[2].SplatY() + trans.mCol[2] * inM.mCol[2].SplatZ(); result.mCol[3] = Vec4(0, 0, 0, 1); return result; } Mat44 
Mat44::Multiply3x3RightTransposed(Mat44Arg inM) const { JPH_ASSERT(mCol[0][3] == 0.0f); JPH_ASSERT(mCol[1][3] == 0.0f); JPH_ASSERT(mCol[2][3] == 0.0f); Mat44 result; result.mCol[0] = mCol[0] * inM.mCol[0].SplatX() + mCol[1] * inM.mCol[1].SplatX() + mCol[2] * inM.mCol[2].SplatX(); result.mCol[1] = mCol[0] * inM.mCol[0].SplatY() + mCol[1] * inM.mCol[1].SplatY() + mCol[2] * inM.mCol[2].SplatY(); result.mCol[2] = mCol[0] * inM.mCol[0].SplatZ() + mCol[1] * inM.mCol[1].SplatZ() + mCol[2] * inM.mCol[2].SplatZ(); result.mCol[3] = Vec4(0, 0, 0, 1); return result; } Mat44 Mat44::operator * (float inV) const { Vec4 multiplier = Vec4::sReplicate(inV); Mat44 result; for (int c = 0; c < 4; ++c) result.mCol[c] = mCol[c] * multiplier; return result; } Mat44 &Mat44::operator *= (float inV) { for (int c = 0; c < 4; ++c) mCol[c] *= inV; return *this; } Mat44 Mat44::operator + (Mat44Arg inM) const { Mat44 result; for (int i = 0; i < 4; ++i) result.mCol[i] = mCol[i] + inM.mCol[i]; return result; } Mat44 Mat44::operator - () const { Mat44 result; for (int i = 0; i < 4; ++i) result.mCol[i] = -mCol[i]; return result; } Mat44 Mat44::operator - (Mat44Arg inM) const { Mat44 result; for (int i = 0; i < 4; ++i) result.mCol[i] = mCol[i] - inM.mCol[i]; return result; } Mat44 &Mat44::operator += (Mat44Arg inM) { for (int c = 0; c < 4; ++c) mCol[c] += inM.mCol[c]; return *this; } void Mat44::StoreFloat4x4(Float4 *outV) const { for (int c = 0; c < 4; ++c) mCol[c].StoreFloat4(outV + c); } Mat44 Mat44::Transposed() const { #if defined(JPH_USE_SSE) __m128 tmp1 = _mm_shuffle_ps(mCol[0].mValue, mCol[1].mValue, _MM_SHUFFLE(1, 0, 1, 0)); __m128 tmp3 = _mm_shuffle_ps(mCol[0].mValue, mCol[1].mValue, _MM_SHUFFLE(3, 2, 3, 2)); __m128 tmp2 = _mm_shuffle_ps(mCol[2].mValue, mCol[3].mValue, _MM_SHUFFLE(1, 0, 1, 0)); __m128 tmp4 = _mm_shuffle_ps(mCol[2].mValue, mCol[3].mValue, _MM_SHUFFLE(3, 2, 3, 2)); Mat44 result; result.mCol[0].mValue = _mm_shuffle_ps(tmp1, tmp2, _MM_SHUFFLE(2, 0, 2, 0)); result.mCol[1].mValue = _mm_shuffle_ps(tmp1, tmp2, _MM_SHUFFLE(3, 1, 3, 1)); result.mCol[2].mValue = _mm_shuffle_ps(tmp3, tmp4, _MM_SHUFFLE(2, 0, 2, 0)); result.mCol[3].mValue = _mm_shuffle_ps(tmp3, tmp4, _MM_SHUFFLE(3, 1, 3, 1)); return result; #elif defined(JPH_USE_NEON) float32x4x2_t tmp1 = vzipq_f32(mCol[0].mValue, mCol[2].mValue); float32x4x2_t tmp2 = vzipq_f32(mCol[1].mValue, mCol[3].mValue); float32x4x2_t tmp3 = vzipq_f32(tmp1.val[0], tmp2.val[0]); float32x4x2_t tmp4 = vzipq_f32(tmp1.val[1], tmp2.val[1]); Mat44 result; result.mCol[0].mValue = tmp3.val[0]; result.mCol[1].mValue = tmp3.val[1]; result.mCol[2].mValue = tmp4.val[0]; result.mCol[3].mValue = tmp4.val[1]; return result; #else Mat44 result; for (int c = 0; c < 4; ++c) for (int r = 0; r < 4; ++r) result.mCol[r].mF32[c] = mCol[c].mF32[r]; return result; #endif } Mat44 Mat44::Transposed3x3() const { #if defined(JPH_USE_SSE) __m128 zero = _mm_setzero_ps(); __m128 tmp1 = _mm_shuffle_ps(mCol[0].mValue, mCol[1].mValue, _MM_SHUFFLE(1, 0, 1, 0)); __m128 tmp3 = _mm_shuffle_ps(mCol[0].mValue, mCol[1].mValue, _MM_SHUFFLE(3, 2, 3, 2)); __m128 tmp2 = _mm_shuffle_ps(mCol[2].mValue, zero, _MM_SHUFFLE(1, 0, 1, 0)); __m128 tmp4 = _mm_shuffle_ps(mCol[2].mValue, zero, _MM_SHUFFLE(3, 2, 3, 2)); Mat44 result; result.mCol[0].mValue = _mm_shuffle_ps(tmp1, tmp2, _MM_SHUFFLE(2, 0, 2, 0)); result.mCol[1].mValue = _mm_shuffle_ps(tmp1, tmp2, _MM_SHUFFLE(3, 1, 3, 1)); result.mCol[2].mValue = _mm_shuffle_ps(tmp3, tmp4, _MM_SHUFFLE(2, 0, 2, 0)); #elif defined(JPH_USE_NEON) float32x4x2_t tmp1 = 
vzipq_f32(mCol[0].mValue, mCol[2].mValue); float32x4x2_t tmp2 = vzipq_f32(mCol[1].mValue, vdupq_n_f32(0)); float32x4x2_t tmp3 = vzipq_f32(tmp1.val[0], tmp2.val[0]); float32x4x2_t tmp4 = vzipq_f32(tmp1.val[1], tmp2.val[1]); Mat44 result; result.mCol[0].mValue = tmp3.val[0]; result.mCol[1].mValue = tmp3.val[1]; result.mCol[2].mValue = tmp4.val[0]; #else Mat44 result; for (int c = 0; c < 3; ++c) { for (int r = 0; r < 3; ++r) result.mCol[c].mF32[r] = mCol[r].mF32[c]; result.mCol[c].mF32[3] = 0; } #endif result.mCol[3] = Vec4(0, 0, 0, 1); return result; } Mat44 Mat44::Inversed() const { #if defined(JPH_USE_SSE) // Algorithm from: http://download.intel.com/design/PentiumIII/sml/24504301.pdf // Streaming SIMD Extensions - Inverse of 4x4 Matrix // Adapted to load data using _mm_shuffle_ps instead of loading from memory // Replaced _mm_rcp_ps with _mm_div_ps for better accuracy __m128 tmp1 = _mm_shuffle_ps(mCol[0].mValue, mCol[1].mValue, _MM_SHUFFLE(1, 0, 1, 0)); __m128 row1 = _mm_shuffle_ps(mCol[2].mValue, mCol[3].mValue, _MM_SHUFFLE(1, 0, 1, 0)); __m128 row0 = _mm_shuffle_ps(tmp1, row1, _MM_SHUFFLE(2, 0, 2, 0)); row1 = _mm_shuffle_ps(row1, tmp1, _MM_SHUFFLE(3, 1, 3, 1)); tmp1 = _mm_shuffle_ps(mCol[0].mValue, mCol[1].mValue, _MM_SHUFFLE(3, 2, 3, 2)); __m128 row3 = _mm_shuffle_ps(mCol[2].mValue, mCol[3].mValue, _MM_SHUFFLE(3, 2, 3, 2)); __m128 row2 = _mm_shuffle_ps(tmp1, row3, _MM_SHUFFLE(2, 0, 2, 0)); row3 = _mm_shuffle_ps(row3, tmp1, _MM_SHUFFLE(3, 1, 3, 1)); tmp1 = _mm_mul_ps(row2, row3); tmp1 = _mm_shuffle_ps(tmp1, tmp1, _MM_SHUFFLE(2, 3, 0, 1)); __m128 minor0 = _mm_mul_ps(row1, tmp1); __m128 minor1 = _mm_mul_ps(row0, tmp1); tmp1 = _mm_shuffle_ps(tmp1, tmp1, _MM_SHUFFLE(1, 0, 3, 2)); minor0 = _mm_sub_ps(_mm_mul_ps(row1, tmp1), minor0); minor1 = _mm_sub_ps(_mm_mul_ps(row0, tmp1), minor1); minor1 = _mm_shuffle_ps(minor1, minor1, _MM_SHUFFLE(1, 0, 3, 2)); tmp1 = _mm_mul_ps(row1, row2); tmp1 = _mm_shuffle_ps(tmp1, tmp1, _MM_SHUFFLE(2, 3, 0, 1)); minor0 = _mm_add_ps(_mm_mul_ps(row3, tmp1), minor0); __m128 minor3 = _mm_mul_ps(row0, tmp1); tmp1 = _mm_shuffle_ps(tmp1, tmp1, _MM_SHUFFLE(1, 0, 3, 2)); minor0 = _mm_sub_ps(minor0, _mm_mul_ps(row3, tmp1)); minor3 = _mm_sub_ps(_mm_mul_ps(row0, tmp1), minor3); minor3 = _mm_shuffle_ps(minor3, minor3, _MM_SHUFFLE(1, 0, 3, 2)); tmp1 = _mm_mul_ps(_mm_shuffle_ps(row1, row1, _MM_SHUFFLE(1, 0, 3, 2)), row3); tmp1 = _mm_shuffle_ps(tmp1, tmp1, _MM_SHUFFLE(2, 3, 0, 1)); row2 = _mm_shuffle_ps(row2, row2, _MM_SHUFFLE(1, 0, 3, 2)); minor0 = _mm_add_ps(_mm_mul_ps(row2, tmp1), minor0); __m128 minor2 = _mm_mul_ps(row0, tmp1); tmp1 = _mm_shuffle_ps(tmp1, tmp1, _MM_SHUFFLE(1, 0, 3, 2)); minor0 = _mm_sub_ps(minor0, _mm_mul_ps(row2, tmp1)); minor2 = _mm_sub_ps(_mm_mul_ps(row0, tmp1), minor2); minor2 = _mm_shuffle_ps(minor2, minor2, _MM_SHUFFLE(1, 0, 3, 2)); tmp1 = _mm_mul_ps(row0, row1); tmp1 = _mm_shuffle_ps(tmp1, tmp1, _MM_SHUFFLE(2, 3, 0, 1)); minor2 = _mm_add_ps(_mm_mul_ps(row3, tmp1), minor2); minor3 = _mm_sub_ps(_mm_mul_ps(row2, tmp1), minor3); tmp1 = _mm_shuffle_ps(tmp1, tmp1, _MM_SHUFFLE(1, 0, 3, 2)); minor2 = _mm_sub_ps(_mm_mul_ps(row3, tmp1), minor2); minor3 = _mm_sub_ps(minor3, _mm_mul_ps(row2, tmp1)); tmp1 = _mm_mul_ps(row0, row3); tmp1 = _mm_shuffle_ps(tmp1, tmp1, _MM_SHUFFLE(2, 3, 0, 1)); minor1 = _mm_sub_ps(minor1, _mm_mul_ps(row2, tmp1)); minor2 = _mm_add_ps(_mm_mul_ps(row1, tmp1), minor2); tmp1 = _mm_shuffle_ps(tmp1, tmp1, _MM_SHUFFLE(1, 0, 3, 2)); minor1 = _mm_add_ps(_mm_mul_ps(row2, tmp1), minor1); minor2 = _mm_sub_ps(minor2, _mm_mul_ps(row1, tmp1)); tmp1 = 
_mm_mul_ps(row0, row2); tmp1 = _mm_shuffle_ps(tmp1, tmp1, _MM_SHUFFLE(2, 3, 0, 1)); minor1 = _mm_add_ps(_mm_mul_ps(row3, tmp1), minor1); minor3 = _mm_sub_ps(minor3, _mm_mul_ps(row1, tmp1)); tmp1 = _mm_shuffle_ps(tmp1, tmp1, _MM_SHUFFLE(1, 0, 3, 2)); minor1 = _mm_sub_ps(minor1, _mm_mul_ps(row3, tmp1)); minor3 = _mm_add_ps(_mm_mul_ps(row1, tmp1), minor3); __m128 det = _mm_mul_ps(row0, minor0); det = _mm_add_ps(_mm_shuffle_ps(det, det, _MM_SHUFFLE(2, 3, 0, 1)), det); // Original code did (x + z) + (y + w), changed to (x + y) + (z + w) to match the ARM code below and make the result cross platform deterministic det = _mm_add_ss(_mm_shuffle_ps(det, det, _MM_SHUFFLE(1, 0, 3, 2)), det); det = _mm_div_ss(_mm_set_ss(1.0f), det); det = _mm_shuffle_ps(det, det, _MM_SHUFFLE(0, 0, 0, 0)); Mat44 result; result.mCol[0].mValue = _mm_mul_ps(det, minor0); result.mCol[1].mValue = _mm_mul_ps(det, minor1); result.mCol[2].mValue = _mm_mul_ps(det, minor2); result.mCol[3].mValue = _mm_mul_ps(det, minor3); return result; #elif defined(JPH_USE_NEON) // Adapted from the SSE version, there's surprising few articles about efficient ways of calculating an inverse for ARM on the internet Type tmp1 = JPH_NEON_SHUFFLE_F32x4(mCol[0].mValue, mCol[1].mValue, 0, 1, 4, 5); Type row1 = JPH_NEON_SHUFFLE_F32x4(mCol[2].mValue, mCol[3].mValue, 0, 1, 4, 5); Type row0 = JPH_NEON_SHUFFLE_F32x4(tmp1, row1, 0, 2, 4, 6); row1 = JPH_NEON_SHUFFLE_F32x4(row1, tmp1, 1, 3, 5, 7); tmp1 = JPH_NEON_SHUFFLE_F32x4(mCol[0].mValue, mCol[1].mValue, 2, 3, 6, 7); Type row3 = JPH_NEON_SHUFFLE_F32x4(mCol[2].mValue, mCol[3].mValue, 2, 3, 6, 7); Type row2 = JPH_NEON_SHUFFLE_F32x4(tmp1, row3, 0, 2, 4, 6); row3 = JPH_NEON_SHUFFLE_F32x4(row3, tmp1, 1, 3, 5, 7); tmp1 = vmulq_f32(row2, row3); tmp1 = JPH_NEON_SHUFFLE_F32x4(tmp1, tmp1, 1, 0, 3, 2); Type minor0 = vmulq_f32(row1, tmp1); Type minor1 = vmulq_f32(row0, tmp1); tmp1 = JPH_NEON_SHUFFLE_F32x4(tmp1, tmp1, 2, 3, 0, 1); minor0 = vsubq_f32(vmulq_f32(row1, tmp1), minor0); minor1 = vsubq_f32(vmulq_f32(row0, tmp1), minor1); minor1 = JPH_NEON_SHUFFLE_F32x4(minor1, minor1, 2, 3, 0, 1); tmp1 = vmulq_f32(row1, row2); tmp1 = JPH_NEON_SHUFFLE_F32x4(tmp1, tmp1, 1, 0, 3, 2); minor0 = vaddq_f32(vmulq_f32(row3, tmp1), minor0); Type minor3 = vmulq_f32(row0, tmp1); tmp1 = JPH_NEON_SHUFFLE_F32x4(tmp1, tmp1, 2, 3, 0, 1); minor0 = vsubq_f32(minor0, vmulq_f32(row3, tmp1)); minor3 = vsubq_f32(vmulq_f32(row0, tmp1), minor3); minor3 = JPH_NEON_SHUFFLE_F32x4(minor3, minor3, 2, 3, 0, 1); tmp1 = JPH_NEON_SHUFFLE_F32x4(row1, row1, 2, 3, 0, 1); tmp1 = vmulq_f32(tmp1, row3); tmp1 = JPH_NEON_SHUFFLE_F32x4(tmp1, tmp1, 1, 0, 3, 2); row2 = JPH_NEON_SHUFFLE_F32x4(row2, row2, 2, 3, 0, 1); minor0 = vaddq_f32(vmulq_f32(row2, tmp1), minor0); Type minor2 = vmulq_f32(row0, tmp1); tmp1 = JPH_NEON_SHUFFLE_F32x4(tmp1, tmp1, 2, 3, 0, 1); minor0 = vsubq_f32(minor0, vmulq_f32(row2, tmp1)); minor2 = vsubq_f32(vmulq_f32(row0, tmp1), minor2); minor2 = JPH_NEON_SHUFFLE_F32x4(minor2, minor2, 2, 3, 0, 1); tmp1 = vmulq_f32(row0, row1); tmp1 = JPH_NEON_SHUFFLE_F32x4(tmp1, tmp1, 1, 0, 3, 2); minor2 = vaddq_f32(vmulq_f32(row3, tmp1), minor2); minor3 = vsubq_f32(vmulq_f32(row2, tmp1), minor3); tmp1 = JPH_NEON_SHUFFLE_F32x4(tmp1, tmp1, 2, 3, 0, 1); minor2 = vsubq_f32(vmulq_f32(row3, tmp1), minor2); minor3 = vsubq_f32(minor3, vmulq_f32(row2, tmp1)); tmp1 = vmulq_f32(row0, row3); tmp1 = JPH_NEON_SHUFFLE_F32x4(tmp1, tmp1, 1, 0, 3, 2); minor1 = vsubq_f32(minor1, vmulq_f32(row2, tmp1)); minor2 = vaddq_f32(vmulq_f32(row1, tmp1), minor2); tmp1 = 
JPH_NEON_SHUFFLE_F32x4(tmp1, tmp1, 2, 3, 0, 1); minor1 = vaddq_f32(vmulq_f32(row2, tmp1), minor1); minor2 = vsubq_f32(minor2, vmulq_f32(row1, tmp1)); tmp1 = vmulq_f32(row0, row2); tmp1 = JPH_NEON_SHUFFLE_F32x4(tmp1, tmp1, 1, 0, 3, 2); minor1 = vaddq_f32(vmulq_f32(row3, tmp1), minor1); minor3 = vsubq_f32(minor3, vmulq_f32(row1, tmp1)); tmp1 = JPH_NEON_SHUFFLE_F32x4(tmp1, tmp1, 2, 3, 0, 1); minor1 = vsubq_f32(minor1, vmulq_f32(row3, tmp1)); minor3 = vaddq_f32(vmulq_f32(row1, tmp1), minor3); Type det = vmulq_f32(row0, minor0); det = vdupq_n_f32(vaddvq_f32(det)); det = vdivq_f32(vdupq_n_f32(1.0f), det); Mat44 result; result.mCol[0].mValue = vmulq_f32(det, minor0); result.mCol[1].mValue = vmulq_f32(det, minor1); result.mCol[2].mValue = vmulq_f32(det, minor2); result.mCol[3].mValue = vmulq_f32(det, minor3); return result; #else float m00 = JPH_EL(0, 0), m10 = JPH_EL(1, 0), m20 = JPH_EL(2, 0), m30 = JPH_EL(3, 0); float m01 = JPH_EL(0, 1), m11 = JPH_EL(1, 1), m21 = JPH_EL(2, 1), m31 = JPH_EL(3, 1); float m02 = JPH_EL(0, 2), m12 = JPH_EL(1, 2), m22 = JPH_EL(2, 2), m32 = JPH_EL(3, 2); float m03 = JPH_EL(0, 3), m13 = JPH_EL(1, 3), m23 = JPH_EL(2, 3), m33 = JPH_EL(3, 3); float m10211120 = m10 * m21 - m11 * m20; float m10221220 = m10 * m22 - m12 * m20; float m10231320 = m10 * m23 - m13 * m20; float m10311130 = m10 * m31 - m11 * m30; float m10321230 = m10 * m32 - m12 * m30; float m10331330 = m10 * m33 - m13 * m30; float m11221221 = m11 * m22 - m12 * m21; float m11231321 = m11 * m23 - m13 * m21; float m11321231 = m11 * m32 - m12 * m31; float m11331331 = m11 * m33 - m13 * m31; float m12231322 = m12 * m23 - m13 * m22; float m12331332 = m12 * m33 - m13 * m32; float m20312130 = m20 * m31 - m21 * m30; float m20322230 = m20 * m32 - m22 * m30; float m20332330 = m20 * m33 - m23 * m30; float m21322231 = m21 * m32 - m22 * m31; float m21332331 = m21 * m33 - m23 * m31; float m22332332 = m22 * m33 - m23 * m32; Vec4 col0(m11 * m22332332 - m12 * m21332331 + m13 * m21322231, -m10 * m22332332 + m12 * m20332330 - m13 * m20322230, m10 * m21332331 - m11 * m20332330 + m13 * m20312130, -m10 * m21322231 + m11 * m20322230 - m12 * m20312130); Vec4 col1(-m01 * m22332332 + m02 * m21332331 - m03 * m21322231, m00 * m22332332 - m02 * m20332330 + m03 * m20322230, -m00 * m21332331 + m01 * m20332330 - m03 * m20312130, m00 * m21322231 - m01 * m20322230 + m02 * m20312130); Vec4 col2(m01 * m12331332 - m02 * m11331331 + m03 * m11321231, -m00 * m12331332 + m02 * m10331330 - m03 * m10321230, m00 * m11331331 - m01 * m10331330 + m03 * m10311130, -m00 * m11321231 + m01 * m10321230 - m02 * m10311130); Vec4 col3(-m01 * m12231322 + m02 * m11231321 - m03 * m11221221, m00 * m12231322 - m02 * m10231320 + m03 * m10221220, -m00 * m11231321 + m01 * m10231320 - m03 * m10211120, m00 * m11221221 - m01 * m10221220 + m02 * m10211120); float det = m00 * col0.mF32[0] + m01 * col0.mF32[1] + m02 * col0.mF32[2] + m03 * col0.mF32[3]; return Mat44(col0 / det, col1 / det, col2 / det, col3 / det); #endif } Mat44 Mat44::InversedRotationTranslation() const { Mat44 m = Transposed3x3(); m.SetTranslation(-m.Multiply3x3(GetTranslation())); return m; } float Mat44::GetDeterminant3x3() const { return GetAxisX().Dot(GetAxisY().Cross(GetAxisZ())); } Mat44 Mat44::Adjointed3x3() const { // Adapted from Inversed() to remove 4th column and the division by the determinant // Note: This can be optimized. 
JPH_ASSERT(mCol[0][3] == 0.0f); JPH_ASSERT(mCol[1][3] == 0.0f); JPH_ASSERT(mCol[2][3] == 0.0f); #if defined(JPH_USE_SSE) __m128 tmp1 = _mm_shuffle_ps(mCol[0].mValue, mCol[1].mValue, _MM_SHUFFLE(1, 0, 1, 0)); __m128 row1 = _mm_shuffle_ps(mCol[2].mValue, _mm_setzero_ps(), _MM_SHUFFLE(1, 0, 1, 0)); __m128 row0 = _mm_shuffle_ps(tmp1, row1, _MM_SHUFFLE(2, 0, 2, 0)); row1 = _mm_shuffle_ps(row1, tmp1, _MM_SHUFFLE(3, 1, 3, 1)); tmp1 = _mm_shuffle_ps(mCol[0].mValue, mCol[1].mValue, _MM_SHUFFLE(3, 2, 3, 2)); __m128 row3 = _mm_shuffle_ps(mCol[2].mValue, _mm_set_ps(1, 0, 0, 0), _MM_SHUFFLE(3, 2, 3, 2)); __m128 row2 = _mm_shuffle_ps(tmp1, row3, _MM_SHUFFLE(2, 0, 2, 0)); row3 = _mm_shuffle_ps(row3, tmp1, _MM_SHUFFLE(3, 1, 3, 1)); tmp1 = _mm_mul_ps(row2, row3); tmp1 = _mm_shuffle_ps(tmp1, tmp1, _MM_SHUFFLE(2, 3, 0, 1)); __m128 minor0 = _mm_mul_ps(row1, tmp1); __m128 minor1 = _mm_mul_ps(row0, tmp1); tmp1 = _mm_shuffle_ps(tmp1, tmp1, _MM_SHUFFLE(1, 0, 3, 2)); minor0 = _mm_sub_ps(_mm_mul_ps(row1, tmp1), minor0); minor1 = _mm_sub_ps(_mm_mul_ps(row0, tmp1), minor1); minor1 = _mm_shuffle_ps(minor1, minor1, _MM_SHUFFLE(1, 0, 3, 2)); tmp1 = _mm_mul_ps(row1, row2); tmp1 = _mm_shuffle_ps(tmp1, tmp1, _MM_SHUFFLE(2, 3, 0, 1)); minor0 = _mm_add_ps(_mm_mul_ps(row3, tmp1), minor0); tmp1 = _mm_shuffle_ps(tmp1, tmp1, _MM_SHUFFLE(1, 0, 3, 2)); minor0 = _mm_sub_ps(minor0, _mm_mul_ps(row3, tmp1)); tmp1 = _mm_mul_ps(_mm_shuffle_ps(row1, row1, _MM_SHUFFLE(1, 0, 3, 2)), row3); tmp1 = _mm_shuffle_ps(tmp1, tmp1, _MM_SHUFFLE(2, 3, 0, 1)); row2 = _mm_shuffle_ps(row2, row2, _MM_SHUFFLE(1, 0, 3, 2)); minor0 = _mm_add_ps(_mm_mul_ps(row2, tmp1), minor0); __m128 minor2 = _mm_mul_ps(row0, tmp1); tmp1 = _mm_shuffle_ps(tmp1, tmp1, _MM_SHUFFLE(1, 0, 3, 2)); minor0 = _mm_sub_ps(minor0, _mm_mul_ps(row2, tmp1)); minor2 = _mm_sub_ps(_mm_mul_ps(row0, tmp1), minor2); minor2 = _mm_shuffle_ps(minor2, minor2, _MM_SHUFFLE(1, 0, 3, 2)); tmp1 = _mm_mul_ps(row0, row1); tmp1 = _mm_shuffle_ps(tmp1, tmp1, _MM_SHUFFLE(2, 3, 0, 1)); minor2 = _mm_add_ps(_mm_mul_ps(row3, tmp1), minor2); tmp1 = _mm_shuffle_ps(tmp1, tmp1, _MM_SHUFFLE(1, 0, 3, 2)); minor2 = _mm_sub_ps(_mm_mul_ps(row3, tmp1), minor2); tmp1 = _mm_mul_ps(row0, row3); tmp1 = _mm_shuffle_ps(tmp1, tmp1, _MM_SHUFFLE(2, 3, 0, 1)); minor1 = _mm_sub_ps(minor1, _mm_mul_ps(row2, tmp1)); minor2 = _mm_add_ps(_mm_mul_ps(row1, tmp1), minor2); tmp1 = _mm_shuffle_ps(tmp1, tmp1, _MM_SHUFFLE(1, 0, 3, 2)); minor1 = _mm_add_ps(_mm_mul_ps(row2, tmp1), minor1); minor2 = _mm_sub_ps(minor2, _mm_mul_ps(row1, tmp1)); tmp1 = _mm_mul_ps(row0, row2); tmp1 = _mm_shuffle_ps(tmp1, tmp1, _MM_SHUFFLE(2, 3, 0, 1)); minor1 = _mm_add_ps(_mm_mul_ps(row3, tmp1), minor1); tmp1 = _mm_shuffle_ps(tmp1, tmp1, _MM_SHUFFLE(1, 0, 3, 2)); minor1 = _mm_sub_ps(minor1, _mm_mul_ps(row3, tmp1)); Mat44 result; result.mCol[0].mValue = minor0; result.mCol[1].mValue = minor1; result.mCol[2].mValue = minor2; result.mCol[3] = Vec4(0, 0, 0, 1); return result; #elif defined(JPH_USE_NEON) Type v0001 = vsetq_lane_f32(1, vdupq_n_f32(0), 3); Type tmp1 = JPH_NEON_SHUFFLE_F32x4(mCol[0].mValue, mCol[1].mValue, 0, 1, 4, 5); Type row1 = JPH_NEON_SHUFFLE_F32x4(mCol[2].mValue, v0001, 0, 1, 4, 5); Type row0 = JPH_NEON_SHUFFLE_F32x4(tmp1, row1, 0, 2, 4, 6); row1 = JPH_NEON_SHUFFLE_F32x4(row1, tmp1, 1, 3, 5, 7); tmp1 = JPH_NEON_SHUFFLE_F32x4(mCol[0].mValue, mCol[1].mValue, 2, 3, 6, 7); Type row3 = JPH_NEON_SHUFFLE_F32x4(mCol[2].mValue, v0001, 2, 3, 6, 7); Type row2 = JPH_NEON_SHUFFLE_F32x4(tmp1, row3, 0, 2, 4, 6); row3 = JPH_NEON_SHUFFLE_F32x4(row3, tmp1, 1, 3, 5, 7); 
tmp1 = vmulq_f32(row2, row3); tmp1 = JPH_NEON_SHUFFLE_F32x4(tmp1, tmp1, 1, 0, 3, 2); Type minor0 = vmulq_f32(row1, tmp1); Type minor1 = vmulq_f32(row0, tmp1); tmp1 = JPH_NEON_SHUFFLE_F32x4(tmp1, tmp1, 2, 3, 0, 1); minor0 = vsubq_f32(vmulq_f32(row1, tmp1), minor0); minor1 = vsubq_f32(vmulq_f32(row0, tmp1), minor1); minor1 = JPH_NEON_SHUFFLE_F32x4(minor1, minor1, 2, 3, 0, 1); tmp1 = vmulq_f32(row1, row2); tmp1 = JPH_NEON_SHUFFLE_F32x4(tmp1, tmp1, 1, 0, 3, 2); minor0 = vaddq_f32(vmulq_f32(row3, tmp1), minor0); tmp1 = JPH_NEON_SHUFFLE_F32x4(tmp1, tmp1, 2, 3, 0, 1); minor0 = vsubq_f32(minor0, vmulq_f32(row3, tmp1)); tmp1 = JPH_NEON_SHUFFLE_F32x4(row1, row1, 2, 3, 0, 1); tmp1 = vmulq_f32(tmp1, row3); tmp1 = JPH_NEON_SHUFFLE_F32x4(tmp1, tmp1, 1, 0, 3, 2); row2 = JPH_NEON_SHUFFLE_F32x4(row2, row2, 2, 3, 0, 1); minor0 = vaddq_f32(vmulq_f32(row2, tmp1), minor0); Type minor2 = vmulq_f32(row0, tmp1); tmp1 = JPH_NEON_SHUFFLE_F32x4(tmp1, tmp1, 2, 3, 0, 1); minor0 = vsubq_f32(minor0, vmulq_f32(row2, tmp1)); minor2 = vsubq_f32(vmulq_f32(row0, tmp1), minor2); minor2 = JPH_NEON_SHUFFLE_F32x4(minor2, minor2, 2, 3, 0, 1); tmp1 = vmulq_f32(row0, row1); tmp1 = JPH_NEON_SHUFFLE_F32x4(tmp1, tmp1, 1, 0, 3, 2); minor2 = vaddq_f32(vmulq_f32(row3, tmp1), minor2); tmp1 = JPH_NEON_SHUFFLE_F32x4(tmp1, tmp1, 2, 3, 0, 1); minor2 = vsubq_f32(vmulq_f32(row3, tmp1), minor2); tmp1 = vmulq_f32(row0, row3); tmp1 = JPH_NEON_SHUFFLE_F32x4(tmp1, tmp1, 1, 0, 3, 2); minor1 = vsubq_f32(minor1, vmulq_f32(row2, tmp1)); minor2 = vaddq_f32(vmulq_f32(row1, tmp1), minor2); tmp1 = JPH_NEON_SHUFFLE_F32x4(tmp1, tmp1, 2, 3, 0, 1); minor1 = vaddq_f32(vmulq_f32(row2, tmp1), minor1); minor2 = vsubq_f32(minor2, vmulq_f32(row1, tmp1)); tmp1 = vmulq_f32(row0, row2); tmp1 = JPH_NEON_SHUFFLE_F32x4(tmp1, tmp1, 1, 0, 3, 2); minor1 = vaddq_f32(vmulq_f32(row3, tmp1), minor1); tmp1 = JPH_NEON_SHUFFLE_F32x4(tmp1, tmp1, 2, 3, 0, 1); minor1 = vsubq_f32(minor1, vmulq_f32(row3, tmp1)); Mat44 result; result.mCol[0].mValue = minor0; result.mCol[1].mValue = minor1; result.mCol[2].mValue = minor2; result.mCol[3].mValue = v0001; return result; #else return Mat44( Vec4(JPH_EL(1, 1) * JPH_EL(2, 2) - JPH_EL(1, 2) * JPH_EL(2, 1), JPH_EL(1, 2) * JPH_EL(2, 0) - JPH_EL(1, 0) * JPH_EL(2, 2), JPH_EL(1, 0) * JPH_EL(2, 1) - JPH_EL(1, 1) * JPH_EL(2, 0), 0), Vec4(JPH_EL(0, 2) * JPH_EL(2, 1) - JPH_EL(0, 1) * JPH_EL(2, 2), JPH_EL(0, 0) * JPH_EL(2, 2) - JPH_EL(0, 2) * JPH_EL(2, 0), JPH_EL(0, 1) * JPH_EL(2, 0) - JPH_EL(0, 0) * JPH_EL(2, 1), 0), Vec4(JPH_EL(0, 1) * JPH_EL(1, 2) - JPH_EL(0, 2) * JPH_EL(1, 1), JPH_EL(0, 2) * JPH_EL(1, 0) - JPH_EL(0, 0) * JPH_EL(1, 2), JPH_EL(0, 0) * JPH_EL(1, 1) - JPH_EL(0, 1) * JPH_EL(1, 0), 0), Vec4(0, 0, 0, 1)); #endif } Mat44 Mat44::Inversed3x3() const { // Adapted from Inversed() to remove 4th column // Note: This can be optimized. 
JPH_ASSERT(mCol[0][3] == 0.0f); JPH_ASSERT(mCol[1][3] == 0.0f); JPH_ASSERT(mCol[2][3] == 0.0f); #if defined(JPH_USE_SSE) __m128 tmp1 = _mm_shuffle_ps(mCol[0].mValue, mCol[1].mValue, _MM_SHUFFLE(1, 0, 1, 0)); __m128 row1 = _mm_shuffle_ps(mCol[2].mValue, _mm_setzero_ps(), _MM_SHUFFLE(1, 0, 1, 0)); __m128 row0 = _mm_shuffle_ps(tmp1, row1, _MM_SHUFFLE(2, 0, 2, 0)); row1 = _mm_shuffle_ps(row1, tmp1, _MM_SHUFFLE(3, 1, 3, 1)); tmp1 = _mm_shuffle_ps(mCol[0].mValue, mCol[1].mValue, _MM_SHUFFLE(3, 2, 3, 2)); __m128 row3 = _mm_shuffle_ps(mCol[2].mValue, _mm_set_ps(1, 0, 0, 0), _MM_SHUFFLE(3, 2, 3, 2)); __m128 row2 = _mm_shuffle_ps(tmp1, row3, _MM_SHUFFLE(2, 0, 2, 0)); row3 = _mm_shuffle_ps(row3, tmp1, _MM_SHUFFLE(3, 1, 3, 1)); tmp1 = _mm_mul_ps(row2, row3); tmp1 = _mm_shuffle_ps(tmp1, tmp1, _MM_SHUFFLE(2, 3, 0, 1)); __m128 minor0 = _mm_mul_ps(row1, tmp1); __m128 minor1 = _mm_mul_ps(row0, tmp1); tmp1 = _mm_shuffle_ps(tmp1, tmp1, _MM_SHUFFLE(1, 0, 3, 2)); minor0 = _mm_sub_ps(_mm_mul_ps(row1, tmp1), minor0); minor1 = _mm_sub_ps(_mm_mul_ps(row0, tmp1), minor1); minor1 = _mm_shuffle_ps(minor1, minor1, _MM_SHUFFLE(1, 0, 3, 2)); tmp1 = _mm_mul_ps(row1, row2); tmp1 = _mm_shuffle_ps(tmp1, tmp1, _MM_SHUFFLE(2, 3, 0, 1)); minor0 = _mm_add_ps(_mm_mul_ps(row3, tmp1), minor0); tmp1 = _mm_shuffle_ps(tmp1, tmp1, _MM_SHUFFLE(1, 0, 3, 2)); minor0 = _mm_sub_ps(minor0, _mm_mul_ps(row3, tmp1)); tmp1 = _mm_mul_ps(_mm_shuffle_ps(row1, row1, _MM_SHUFFLE(1, 0, 3, 2)), row3); tmp1 = _mm_shuffle_ps(tmp1, tmp1, _MM_SHUFFLE(2, 3, 0, 1)); row2 = _mm_shuffle_ps(row2, row2, _MM_SHUFFLE(1, 0, 3, 2)); minor0 = _mm_add_ps(_mm_mul_ps(row2, tmp1), minor0); __m128 minor2 = _mm_mul_ps(row0, tmp1); tmp1 = _mm_shuffle_ps(tmp1, tmp1, _MM_SHUFFLE(1, 0, 3, 2)); minor0 = _mm_sub_ps(minor0, _mm_mul_ps(row2, tmp1)); minor2 = _mm_sub_ps(_mm_mul_ps(row0, tmp1), minor2); minor2 = _mm_shuffle_ps(minor2, minor2, _MM_SHUFFLE(1, 0, 3, 2)); tmp1 = _mm_mul_ps(row0, row1); tmp1 = _mm_shuffle_ps(tmp1, tmp1, _MM_SHUFFLE(2, 3, 0, 1)); minor2 = _mm_add_ps(_mm_mul_ps(row3, tmp1), minor2); tmp1 = _mm_shuffle_ps(tmp1, tmp1, _MM_SHUFFLE(1, 0, 3, 2)); minor2 = _mm_sub_ps(_mm_mul_ps(row3, tmp1), minor2); tmp1 = _mm_mul_ps(row0, row3); tmp1 = _mm_shuffle_ps(tmp1, tmp1, _MM_SHUFFLE(2, 3, 0, 1)); minor1 = _mm_sub_ps(minor1, _mm_mul_ps(row2, tmp1)); minor2 = _mm_add_ps(_mm_mul_ps(row1, tmp1), minor2); tmp1 = _mm_shuffle_ps(tmp1, tmp1, _MM_SHUFFLE(1, 0, 3, 2)); minor1 = _mm_add_ps(_mm_mul_ps(row2, tmp1), minor1); minor2 = _mm_sub_ps(minor2, _mm_mul_ps(row1, tmp1)); tmp1 = _mm_mul_ps(row0, row2); tmp1 = _mm_shuffle_ps(tmp1, tmp1, _MM_SHUFFLE(2, 3, 0, 1)); minor1 = _mm_add_ps(_mm_mul_ps(row3, tmp1), minor1); tmp1 = _mm_shuffle_ps(tmp1, tmp1, _MM_SHUFFLE(1, 0, 3, 2)); minor1 = _mm_sub_ps(minor1, _mm_mul_ps(row3, tmp1)); __m128 det = _mm_mul_ps(row0, minor0); det = _mm_add_ps(_mm_shuffle_ps(det, det, _MM_SHUFFLE(2, 3, 0, 1)), det); // Original code did (x + z) + (y + w), changed to (x + y) + (z + w) to match the ARM code below and make the result cross platform deterministic det = _mm_add_ss(_mm_shuffle_ps(det, det, _MM_SHUFFLE(1, 0, 3, 2)), det); det = _mm_div_ss(_mm_set_ss(1.0f), det); det = _mm_shuffle_ps(det, det, _MM_SHUFFLE(0, 0, 0, 0)); Mat44 result; result.mCol[0].mValue = _mm_mul_ps(det, minor0); result.mCol[1].mValue = _mm_mul_ps(det, minor1); result.mCol[2].mValue = _mm_mul_ps(det, minor2); result.mCol[3] = Vec4(0, 0, 0, 1); return result; #elif defined(JPH_USE_NEON) Type v0001 = vsetq_lane_f32(1, vdupq_n_f32(0), 3); Type tmp1 = 
JPH_NEON_SHUFFLE_F32x4(mCol[0].mValue, mCol[1].mValue, 0, 1, 4, 5); Type row1 = JPH_NEON_SHUFFLE_F32x4(mCol[2].mValue, v0001, 0, 1, 4, 5); Type row0 = JPH_NEON_SHUFFLE_F32x4(tmp1, row1, 0, 2, 4, 6); row1 = JPH_NEON_SHUFFLE_F32x4(row1, tmp1, 1, 3, 5, 7); tmp1 = JPH_NEON_SHUFFLE_F32x4(mCol[0].mValue, mCol[1].mValue, 2, 3, 6, 7); Type row3 = JPH_NEON_SHUFFLE_F32x4(mCol[2].mValue, v0001, 2, 3, 6, 7); Type row2 = JPH_NEON_SHUFFLE_F32x4(tmp1, row3, 0, 2, 4, 6); row3 = JPH_NEON_SHUFFLE_F32x4(row3, tmp1, 1, 3, 5, 7); tmp1 = vmulq_f32(row2, row3); tmp1 = JPH_NEON_SHUFFLE_F32x4(tmp1, tmp1, 1, 0, 3, 2); Type minor0 = vmulq_f32(row1, tmp1); Type minor1 = vmulq_f32(row0, tmp1); tmp1 = JPH_NEON_SHUFFLE_F32x4(tmp1, tmp1, 2, 3, 0, 1); minor0 = vsubq_f32(vmulq_f32(row1, tmp1), minor0); minor1 = vsubq_f32(vmulq_f32(row0, tmp1), minor1); minor1 = JPH_NEON_SHUFFLE_F32x4(minor1, minor1, 2, 3, 0, 1); tmp1 = vmulq_f32(row1, row2); tmp1 = JPH_NEON_SHUFFLE_F32x4(tmp1, tmp1, 1, 0, 3, 2); minor0 = vaddq_f32(vmulq_f32(row3, tmp1), minor0); tmp1 = JPH_NEON_SHUFFLE_F32x4(tmp1, tmp1, 2, 3, 0, 1); minor0 = vsubq_f32(minor0, vmulq_f32(row3, tmp1)); tmp1 = JPH_NEON_SHUFFLE_F32x4(row1, row1, 2, 3, 0, 1); tmp1 = vmulq_f32(tmp1, row3); tmp1 = JPH_NEON_SHUFFLE_F32x4(tmp1, tmp1, 1, 0, 3, 2); row2 = JPH_NEON_SHUFFLE_F32x4(row2, row2, 2, 3, 0, 1); minor0 = vaddq_f32(vmulq_f32(row2, tmp1), minor0); Type minor2 = vmulq_f32(row0, tmp1); tmp1 = JPH_NEON_SHUFFLE_F32x4(tmp1, tmp1, 2, 3, 0, 1); minor0 = vsubq_f32(minor0, vmulq_f32(row2, tmp1)); minor2 = vsubq_f32(vmulq_f32(row0, tmp1), minor2); minor2 = JPH_NEON_SHUFFLE_F32x4(minor2, minor2, 2, 3, 0, 1); tmp1 = vmulq_f32(row0, row1); tmp1 = JPH_NEON_SHUFFLE_F32x4(tmp1, tmp1, 1, 0, 3, 2); minor2 = vaddq_f32(vmulq_f32(row3, tmp1), minor2); tmp1 = JPH_NEON_SHUFFLE_F32x4(tmp1, tmp1, 2, 3, 0, 1); minor2 = vsubq_f32(vmulq_f32(row3, tmp1), minor2); tmp1 = vmulq_f32(row0, row3); tmp1 = JPH_NEON_SHUFFLE_F32x4(tmp1, tmp1, 1, 0, 3, 2); minor1 = vsubq_f32(minor1, vmulq_f32(row2, tmp1)); minor2 = vaddq_f32(vmulq_f32(row1, tmp1), minor2); tmp1 = JPH_NEON_SHUFFLE_F32x4(tmp1, tmp1, 2, 3, 0, 1); minor1 = vaddq_f32(vmulq_f32(row2, tmp1), minor1); minor2 = vsubq_f32(minor2, vmulq_f32(row1, tmp1)); tmp1 = vmulq_f32(row0, row2); tmp1 = JPH_NEON_SHUFFLE_F32x4(tmp1, tmp1, 1, 0, 3, 2); minor1 = vaddq_f32(vmulq_f32(row3, tmp1), minor1); tmp1 = JPH_NEON_SHUFFLE_F32x4(tmp1, tmp1, 2, 3, 0, 1); minor1 = vsubq_f32(minor1, vmulq_f32(row3, tmp1)); Type det = vmulq_f32(row0, minor0); det = vdupq_n_f32(vaddvq_f32(det)); det = vdivq_f32(vdupq_n_f32(1.0f), det); Mat44 result; result.mCol[0].mValue = vmulq_f32(det, minor0); result.mCol[1].mValue = vmulq_f32(det, minor1); result.mCol[2].mValue = vmulq_f32(det, minor2); result.mCol[3].mValue = v0001; return result; #else float det = GetDeterminant3x3(); return Mat44( Vec4((JPH_EL(1, 1) * JPH_EL(2, 2) - JPH_EL(1, 2) * JPH_EL(2, 1)) / det, (JPH_EL(1, 2) * JPH_EL(2, 0) - JPH_EL(1, 0) * JPH_EL(2, 2)) / det, (JPH_EL(1, 0) * JPH_EL(2, 1) - JPH_EL(1, 1) * JPH_EL(2, 0)) / det, 0), Vec4((JPH_EL(0, 2) * JPH_EL(2, 1) - JPH_EL(0, 1) * JPH_EL(2, 2)) / det, (JPH_EL(0, 0) * JPH_EL(2, 2) - JPH_EL(0, 2) * JPH_EL(2, 0)) / det, (JPH_EL(0, 1) * JPH_EL(2, 0) - JPH_EL(0, 0) * JPH_EL(2, 1)) / det, 0), Vec4((JPH_EL(0, 1) * JPH_EL(1, 2) - JPH_EL(0, 2) * JPH_EL(1, 1)) / det, (JPH_EL(0, 2) * JPH_EL(1, 0) - JPH_EL(0, 0) * JPH_EL(1, 2)) / det, (JPH_EL(0, 0) * JPH_EL(1, 1) - JPH_EL(0, 1) * JPH_EL(1, 0)) / det, 0), Vec4(0, 0, 0, 1)); #endif } Quat Mat44::GetQuaternion() const { JPH_ASSERT(mCol[3] == 
Vec4(0, 0, 0, 1)); float tr = mCol[0].mF32[0] + mCol[1].mF32[1] + mCol[2].mF32[2]; if (tr >= 0.0f) { float s = sqrt(tr + 1.0f); float is = 0.5f / s; return Quat( (mCol[1].mF32[2] - mCol[2].mF32[1]) * is, (mCol[2].mF32[0] - mCol[0].mF32[2]) * is, (mCol[0].mF32[1] - mCol[1].mF32[0]) * is, 0.5f * s); } else { int i = 0; if (mCol[1].mF32[1] > mCol[0].mF32[0]) i = 1; if (mCol[2].mF32[2] > mCol[i].mF32[i]) i = 2; if (i == 0) { float s = sqrt(mCol[0].mF32[0] - (mCol[1].mF32[1] + mCol[2].mF32[2]) + 1); float is = 0.5f / s; return Quat( 0.5f * s, (mCol[1].mF32[0] + mCol[0].mF32[1]) * is, (mCol[0].mF32[2] + mCol[2].mF32[0]) * is, (mCol[1].mF32[2] - mCol[2].mF32[1]) * is); } else if (i == 1) { float s = sqrt(mCol[1].mF32[1] - (mCol[2].mF32[2] + mCol[0].mF32[0]) + 1); float is = 0.5f / s; return Quat( (mCol[1].mF32[0] + mCol[0].mF32[1]) * is, 0.5f * s, (mCol[2].mF32[1] + mCol[1].mF32[2]) * is, (mCol[2].mF32[0] - mCol[0].mF32[2]) * is); } else { JPH_ASSERT(i == 2); float s = sqrt(mCol[2].mF32[2] - (mCol[0].mF32[0] + mCol[1].mF32[1]) + 1); float is = 0.5f / s; return Quat( (mCol[0].mF32[2] + mCol[2].mF32[0]) * is, (mCol[2].mF32[1] + mCol[1].mF32[2]) * is, 0.5f * s, (mCol[0].mF32[1] - mCol[1].mF32[0]) * is); } } } Mat44 Mat44::sQuatLeftMultiply(QuatArg inQ) { return Mat44( Vec4(1, 1, -1, -1) * inQ.mValue.Swizzle<SWIZZLE_W, SWIZZLE_Z, SWIZZLE_Y, SWIZZLE_X>(), Vec4(-1, 1, 1, -1) * inQ.mValue.Swizzle<SWIZZLE_Z, SWIZZLE_W, SWIZZLE_X, SWIZZLE_Y>(), Vec4(1, -1, 1, -1) * inQ.mValue.Swizzle<SWIZZLE_Y, SWIZZLE_X, SWIZZLE_W, SWIZZLE_Z>(), inQ.mValue); } Mat44 Mat44::sQuatRightMultiply(QuatArg inQ) { return Mat44( Vec4(1, -1, 1, -1) * inQ.mValue.Swizzle<SWIZZLE_W, SWIZZLE_Z, SWIZZLE_Y, SWIZZLE_X>(), Vec4(1, 1, -1, -1) * inQ.mValue.Swizzle<SWIZZLE_Z, SWIZZLE_W, SWIZZLE_X, SWIZZLE_Y>(), Vec4(-1, 1, 1, -1) * inQ.mValue.Swizzle<SWIZZLE_Y, SWIZZLE_X, SWIZZLE_W, SWIZZLE_Z>(), inQ.mValue); } Mat44 Mat44::GetRotation() const { JPH_ASSERT(mCol[0][3] == 0.0f); JPH_ASSERT(mCol[1][3] == 0.0f); JPH_ASSERT(mCol[2][3] == 0.0f); return Mat44(mCol[0], mCol[1], mCol[2], Vec4(0, 0, 0, 1)); } Mat44 Mat44::GetRotationSafe() const { #if defined(JPH_USE_AVX512) return Mat44(_mm_maskz_mov_ps(0b0111, mCol[0].mValue), _mm_maskz_mov_ps(0b0111, mCol[1].mValue), _mm_maskz_mov_ps(0b0111, mCol[2].mValue), Vec4(0, 0, 0, 1)); #elif defined(JPH_USE_SSE4_1) __m128 zero = _mm_setzero_ps(); return Mat44(_mm_blend_ps(mCol[0].mValue, zero, 8), _mm_blend_ps(mCol[1].mValue, zero, 8), _mm_blend_ps(mCol[2].mValue, zero, 8), Vec4(0, 0, 0, 1)); #elif defined(JPH_USE_NEON) return Mat44(vsetq_lane_f32(0, mCol[0].mValue, 3), vsetq_lane_f32(0, mCol[1].mValue, 3), vsetq_lane_f32(0, mCol[2].mValue, 3), Vec4(0, 0, 0, 1)); #else return Mat44(Vec4(mCol[0].mF32[0], mCol[0].mF32[1], mCol[0].mF32[2], 0), Vec4(mCol[1].mF32[0], mCol[1].mF32[1], mCol[1].mF32[2], 0), Vec4(mCol[2].mF32[0], mCol[2].mF32[1], mCol[2].mF32[2], 0), Vec4(0, 0, 0, 1)); #endif } void Mat44::SetRotation(Mat44Arg inRotation) { mCol[0] = inRotation.mCol[0]; mCol[1] = inRotation.mCol[1]; mCol[2] = inRotation.mCol[2]; } Mat44 Mat44::PreTranslated(Vec3Arg inTranslation) const { return Mat44(mCol[0], mCol[1], mCol[2], Vec4(GetTranslation() + Multiply3x3(inTranslation), 1)); } Mat44 Mat44::PostTranslated(Vec3Arg inTranslation) const { return Mat44(mCol[0], mCol[1], mCol[2], Vec4(GetTranslation() + inTranslation, 1)); } Mat44 Mat44::PreScaled(Vec3Arg inScale) const { return Mat44(inScale.GetX() * mCol[0], inScale.GetY() * mCol[1], inScale.GetZ() * mCol[2], mCol[3]); } Mat44 Mat44::PostScaled(Vec3Arg inScale) 
const { Vec4 scale(inScale, 1); return Mat44(scale * mCol[0], scale * mCol[1], scale * mCol[2], scale * mCol[3]); } Mat44 Mat44::Decompose(Vec3 &outScale) const { // Start the modified Gram-Schmidt algorithm // X axis will just be normalized Vec3 x = GetAxisX(); // Make Y axis perpendicular to X Vec3 y = GetAxisY(); float x_dot_x = x.LengthSq(); y -= (x.Dot(y) / x_dot_x) * x; // Make Z axis perpendicular to X Vec3 z = GetAxisZ(); z -= (x.Dot(z) / x_dot_x) * x; // Make Z axis perpendicular to Y float y_dot_y = y.LengthSq(); z -= (y.Dot(z) / y_dot_y) * y; // Determine the scale float z_dot_z = z.LengthSq(); outScale = Vec3(x_dot_x, y_dot_y, z_dot_z).Sqrt(); // If the resulting x, y and z vectors don't form a right handed matrix, flip the z axis. if (x.Cross(y).Dot(z) < 0.0f) outScale.SetZ(-outScale.GetZ()); // Determine the rotation and translation return Mat44(Vec4(x / outScale.GetX(), 0), Vec4(y / outScale.GetY(), 0), Vec4(z / outScale.GetZ(), 0), GetColumn4(3)); } #undef JPH_EL JPH_NAMESPACE_END
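The non-SIMD #else branches above build the inverse from cofactors divided by the determinant. A minimal scalar sketch of the same adjugate-over-determinant formula, written for a plain row-major 3x3 array (standalone illustration with made-up names, not part of Jolt):

#include <cassert>
#include <cmath>

// out = inverse of the row-major 3x3 matrix a: transposed cofactors scaled by 1 / det(a)
static void Inverse3x3(const float a[3][3], float out[3][3])
{
    float det = a[0][0] * (a[1][1] * a[2][2] - a[1][2] * a[2][1])
              - a[0][1] * (a[1][0] * a[2][2] - a[1][2] * a[2][0])
              + a[0][2] * (a[1][0] * a[2][1] - a[1][1] * a[2][0]);
    assert(std::abs(det) > 1.0e-12f); // like Inversed3x3(), assumes an invertible matrix
    float s = 1.0f / det;
    out[0][0] = s * (a[1][1] * a[2][2] - a[1][2] * a[2][1]);
    out[0][1] = s * (a[0][2] * a[2][1] - a[0][1] * a[2][2]);
    out[0][2] = s * (a[0][1] * a[1][2] - a[0][2] * a[1][1]);
    out[1][0] = s * (a[1][2] * a[2][0] - a[1][0] * a[2][2]);
    out[1][1] = s * (a[0][0] * a[2][2] - a[0][2] * a[2][0]);
    out[1][2] = s * (a[0][2] * a[1][0] - a[0][0] * a[1][2]);
    out[2][0] = s * (a[1][0] * a[2][1] - a[1][1] * a[2][0]);
    out[2][1] = s * (a[0][1] * a[2][0] - a[0][0] * a[2][1]);
    out[2][2] = s * (a[0][0] * a[1][1] - a[0][1] * a[1][0]);
}

The SIMD paths above compute the same nine cofactors with shuffles and fold the division into a broadcast reciprocal of the determinant.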
0
repos/c2z/use_cases/JoltPhysics/include
repos/c2z/use_cases/JoltPhysics/include/Math/UVec4.h
// Jolt Physics Library (https://github.com/jrouwe/JoltPhysics) // SPDX-FileCopyrightText: 2021 Jorrit Rouwe // SPDX-License-Identifier: MIT #pragma once #include <Jolt/Math/Vec4.h> JPH_NAMESPACE_BEGIN class [[nodiscard]] alignas(JPH_VECTOR_ALIGNMENT) UVec4 { public: JPH_OVERRIDE_NEW_DELETE // Underlying vector type #if defined(JPH_USE_SSE) using Type = __m128i; #elif defined(JPH_USE_NEON) using Type = uint32x4_t; #else using Type = struct { uint32 mData[4]; }; #endif /// Constructor UVec4() = default; ///< Intentionally not initialized for performance reasons UVec4(const UVec4 &inRHS) = default; JPH_INLINE UVec4(Type inRHS) : mValue(inRHS) { } /// Create a vector from 4 integer components JPH_INLINE UVec4(uint32 inX, uint32 inY, uint32 inZ, uint32 inW); /// Comparison JPH_INLINE bool operator == (UVec4Arg inV2) const; JPH_INLINE bool operator != (UVec4Arg inV2) const { return !(*this == inV2); } /// Swizzle the elements in inV template<uint32 SwizzleX, uint32 SwizzleY, uint32 SwizzleZ, uint32 SwizzleW> JPH_INLINE UVec4 Swizzle() const; /// Vector with all zeros static JPH_INLINE UVec4 sZero(); /// Replicate int inV across all components static JPH_INLINE UVec4 sReplicate(uint32 inV); /// Load 1 int from memory and place it in the X component, zeros Y, Z and W static JPH_INLINE UVec4 sLoadInt(const uint32 *inV); /// Load 4 ints from memory static JPH_INLINE UVec4 sLoadInt4(const uint32 *inV); /// Load 4 ints from memory, aligned to 16 bytes static JPH_INLINE UVec4 sLoadInt4Aligned(const uint32 *inV); /// Gather 4 ints from memory at inBase + inOffsets[i] * Scale template <const int Scale> static JPH_INLINE UVec4 sGatherInt4(const uint32 *inBase, UVec4Arg inOffsets); /// Return the minimum value of each of the components static JPH_INLINE UVec4 sMin(UVec4Arg inV1, UVec4Arg inV2); /// Return the maximum of each of the components static JPH_INLINE UVec4 sMax(UVec4Arg inV1, UVec4Arg inV2); /// Equals (component wise) static JPH_INLINE UVec4 sEquals(UVec4Arg inV1, UVec4Arg inV2); /// Component wise select, returns inV1 when highest bit of inControl = 0 and inV2 when highest bit of inControl = 1 static JPH_INLINE UVec4 sSelect(UVec4Arg inV1, UVec4Arg inV2, UVec4Arg inControl); /// Logical or (component wise) static JPH_INLINE UVec4 sOr(UVec4Arg inV1, UVec4Arg inV2); /// Logical xor (component wise) static JPH_INLINE UVec4 sXor(UVec4Arg inV1, UVec4Arg inV2); /// Logical and (component wise) static JPH_INLINE UVec4 sAnd(UVec4Arg inV1, UVec4Arg inV2); /// Logical not (component wise) static JPH_INLINE UVec4 sNot(UVec4Arg inV1); /// Sorts the elements in inIndex so that the values that correspond to trues in inValue are the first elements. /// The remaining elements will be set to inValue.w. /// I.e. if inValue = (true, false, true, false) and inIndex = (1, 2, 3, 4) the function returns (1, 3, 4, 4). 
static JPH_INLINE UVec4 sSort4True(UVec4Arg inValue, UVec4Arg inIndex); /// Get individual components #if defined(JPH_USE_SSE) JPH_INLINE uint32 GetX() const { return (uint32)_mm_cvtsi128_si32(mValue); } JPH_INLINE uint32 GetY() const { return mU32[1]; } JPH_INLINE uint32 GetZ() const { return mU32[2]; } JPH_INLINE uint32 GetW() const { return mU32[3]; } #elif defined(JPH_USE_NEON) JPH_INLINE uint32 GetX() const { return vgetq_lane_u32(mValue, 0); } JPH_INLINE uint32 GetY() const { return vgetq_lane_u32(mValue, 1); } JPH_INLINE uint32 GetZ() const { return vgetq_lane_u32(mValue, 2); } JPH_INLINE uint32 GetW() const { return vgetq_lane_u32(mValue, 3); } #else JPH_INLINE uint32 GetX() const { return mU32[0]; } JPH_INLINE uint32 GetY() const { return mU32[1]; } JPH_INLINE uint32 GetZ() const { return mU32[2]; } JPH_INLINE uint32 GetW() const { return mU32[3]; } #endif /// Set individual components JPH_INLINE void SetX(uint32 inX) { mU32[0] = inX; } JPH_INLINE void SetY(uint32 inY) { mU32[1] = inY; } JPH_INLINE void SetZ(uint32 inZ) { mU32[2] = inZ; } JPH_INLINE void SetW(uint32 inW) { mU32[3] = inW; } /// Get component by index JPH_INLINE uint32 operator [] (uint inCoordinate) const { JPH_ASSERT(inCoordinate < 4); return mU32[inCoordinate]; } JPH_INLINE uint32 & operator [] (uint inCoordinate) { JPH_ASSERT(inCoordinate < 4); return mU32[inCoordinate]; } /// Multiplies each of the 4 integer components with the corresponding component of inV2 (discards any overflow) JPH_INLINE UVec4 operator * (UVec4Arg inV2) const; /// Adds the corresponding component of inV2 to each integer component (discards any overflow) JPH_INLINE UVec4 operator + (UVec4Arg inV2); /// Add two integer vectors (component wise) JPH_INLINE UVec4 & operator += (UVec4Arg inV2); /// Replicate the X component to all components JPH_INLINE UVec4 SplatX() const; /// Replicate the Y component to all components JPH_INLINE UVec4 SplatY() const; /// Replicate the Z component to all components JPH_INLINE UVec4 SplatZ() const; /// Replicate the W component to all components JPH_INLINE UVec4 SplatW() const; /// Convert each component from an int to a float JPH_INLINE Vec4 ToFloat() const; /// Reinterpret UVec4 as a Vec4 (doesn't change the bits) JPH_INLINE Vec4 ReinterpretAsFloat() const; /// Store 4 ints to memory JPH_INLINE void StoreInt4(uint32 *outV) const; /// Store 4 ints to memory, aligned to 16 bytes JPH_INLINE void StoreInt4Aligned(uint32 *outV) const; /// Test if any of the components are true (true is when highest bit of component is set) JPH_INLINE bool TestAnyTrue() const; /// Test if any of X, Y or Z components are true (true is when highest bit of component is set) JPH_INLINE bool TestAnyXYZTrue() const; /// Test if all components are true (true is when highest bit of component is set) JPH_INLINE bool TestAllTrue() const; /// Test if X, Y and Z components are true (true is when highest bit of component is set) JPH_INLINE bool TestAllXYZTrue() const; /// Count the number of components that are true (true is when highest bit of component is set) JPH_INLINE int CountTrues() const; /// Store if X is true in bit 0, Y in bit 1, Z in bit 2 and W in bit 3 (true is when highest bit of component is set) JPH_INLINE int GetTrues() const; /// Shift all components by Count bits to the left (filling with zeros from the right) template <const uint Count> JPH_INLINE UVec4 LogicalShiftLeft() const; /// Shift all components by Count bits to the right (filling with zeros from the left) template <const uint Count> JPH_INLINE UVec4 LogicalShiftRight() const; /// Shift all components by
Count bits to the right (shifting in the value of the highest bit) template <const uint Count> JPH_INLINE UVec4 ArithmeticShiftRight() const; /// Takes the lower 4 16 bits and expands them to X, Y, Z and W JPH_INLINE UVec4 Expand4Uint16Lo() const; /// Takes the upper 4 16 bits and expands them to X, Y, Z and W JPH_INLINE UVec4 Expand4Uint16Hi() const; /// Takes byte 0 .. 3 and expands them to X, Y, Z and W JPH_INLINE UVec4 Expand4Byte0() const; /// Takes byte 4 .. 7 and expands them to X, Y, Z and W JPH_INLINE UVec4 Expand4Byte4() const; /// Takes byte 8 .. 11 and expands them to X, Y, Z and W JPH_INLINE UVec4 Expand4Byte8() const; /// Takes byte 12 .. 15 and expands them to X, Y, Z and W JPH_INLINE UVec4 Expand4Byte12() const; /// Shift vector components by 4 - Count floats to the left, so if Count = 1 the resulting vector is (W, 0, 0, 0), when Count = 3 the resulting vector is (Y, Z, W, 0) JPH_INLINE UVec4 ShiftComponents4Minus(int inCount) const; /// To String friend ostream & operator << (ostream &inStream, UVec4Arg inV) { inStream << inV.mU32[0] << ", " << inV.mU32[1] << ", " << inV.mU32[2] << ", " << inV.mU32[3]; return inStream; } union { Type mValue; uint32 mU32[4]; }; private: static const UVec4 sFourMinusXShuffle[]; }; static_assert(is_trivial<UVec4>(), "Is supposed to be a trivial type!"); JPH_NAMESPACE_END #include "UVec4.inl"
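The sSort4True contract documented above (indices of true lanes compacted to the front, the remainder padded with the W index) can be pinned down with a scalar reference; this is a hypothetical standalone helper, not the SIMD path Jolt actually uses:

#include <cstdint>

// A lane is "true" when its highest bit is set, matching UVec4's convention
static void Sort4TrueRef(const uint32_t value[4], const uint32_t index[4], uint32_t out[4])
{
    int n = 0;
    for (int i = 0; i < 4; ++i)
        if (value[i] & 0x80000000u)
            out[n++] = index[i]; // keep the indices of true lanes, in order
    while (n < 4)
        out[n++] = index[3];     // pad with the W component
}

// Matches the example in the comment above: value = (true, false, true, false)
// and index = (1, 2, 3, 4) yields (1, 3, 4, 4).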
0
repos/c2z/use_cases/JoltPhysics/include
repos/c2z/use_cases/JoltPhysics/include/Math/EigenValueSymmetric.h
// Jolt Physics Library (https://github.com/jrouwe/JoltPhysics) // SPDX-FileCopyrightText: 2021 Jorrit Rouwe // SPDX-License-Identifier: MIT #pragma once #include <Jolt/Core/FPFlushDenormals.h> JPH_NAMESPACE_BEGIN /// Function to determine the eigen vectors and values of an N x N real symmetric matrix /// by Jacobi transformations. This method is most suitable for N < 10. /// /// Taken and adapted from Numerical Recipes paragraph 11.1 /// /// An eigen vector is a vector v for which \f$A \: v = \lambda \: v\f$ /// /// Where: /// A: A square matrix. /// \f$\lambda\f$: a non-zero constant value. /// /// @see https://en.wikipedia.org/wiki/Eigenvalues_and_eigenvectors /// /// Matrix is a matrix type, which has dimensions N x N. /// @param inMatrix is the matrix of which to return the eigenvalues and vectors /// @param outEigVec will contain a matrix whose columns contain the normalized eigenvectors (must be identity before call) /// @param outEigVal will contain the eigenvalues template <class Vector, class Matrix> bool EigenValueSymmetric(const Matrix &inMatrix, Matrix &outEigVec, Vector &outEigVal) { // This algorithm works with very small numbers and can trigger invalid float exceptions when not flushing denormals FPFlushDenormals flush_denormals; (void)flush_denormals; // Maximum number of sweeps to make const int cMaxSweeps = 50; // Get problem dimension const uint n = inMatrix.GetRows(); // Make sure the dimensions are right JPH_ASSERT(inMatrix.GetRows() == n); JPH_ASSERT(inMatrix.GetCols() == n); JPH_ASSERT(outEigVec.GetRows() == n); JPH_ASSERT(outEigVec.GetCols() == n); JPH_ASSERT(outEigVal.GetRows() == n); JPH_ASSERT(outEigVec.IsIdentity()); // Get the matrix in a so we can mess with it Matrix a = inMatrix; Vector b, z; for (uint ip = 0; ip < n; ++ip) { // Initialize b to diagonal of a b[ip] = a(ip, ip); // Initialize output to diagonal of a outEigVal[ip] = a(ip, ip); // Reset z z[ip] = 0.0f; } for (int sweep = 0; sweep < cMaxSweeps; ++sweep) { // Get the sum of the off-diagonal elements of a float sm = 0.0f; for (uint ip = 0; ip < n - 1; ++ip) for (uint iq = ip + 1; iq < n; ++iq) sm += abs(a(ip, iq)); // Normal return, convergence to machine underflow if (sm == 0.0f) { // Sanity checks #ifdef JPH_ENABLE_ASSERTS for (uint c = 0; c < n; ++c) { // Check if the eigenvector is normalized JPH_ASSERT(outEigVec.GetColumn(c).IsNormalized()); // Check if inMatrix * eigen_vector = eigen_value * eigen_vector Vector mat_eigvec = inMatrix * outEigVec.GetColumn(c); Vector eigval_eigvec = outEigVal[c] * outEigVec.GetColumn(c); JPH_ASSERT(mat_eigvec.IsClose(eigval_eigvec, max(mat_eigvec.LengthSq(), eigval_eigvec.LengthSq()) * 1.0e-6f)); } #endif // Success return true; } // On the first four sweeps (sweep is zero based) use a fraction of the sum of the off-diagonal elements as threshold float tresh = sweep < 4? 
0.2f * sm / Square(n) : 0.0f; for (uint ip = 0; ip < n - 1; ++ip) for (uint iq = ip + 1; iq < n; ++iq) { float g = 100.0f * abs(a(ip, iq)); // After four sweeps, skip the rotation if the off-diagonal element is small if (sweep > 4 && abs(outEigVal[ip]) + g == abs(outEigVal[ip]) && abs(outEigVal[iq]) + g == abs(outEigVal[iq])) { a(ip, iq) = 0.0f; } else if (abs(a(ip, iq)) > tresh) { float h = outEigVal[iq] - outEigVal[ip]; float t; if (abs(h) + g == abs(h)) { t = a(ip, iq) / h; } else { float theta = 0.5f * h / a(ip, iq); // Warning: Can become inf if a(ip, iq) too small t = 1.0f / (abs(theta) + sqrt(1.0f + theta * theta)); // Warning: Squaring large value can make it inf if (theta < 0.0f) t = -t; } float c = 1.0f / sqrt(1.0f + t * t); float s = t * c; float tau = s / (1.0f + c); h = t * a(ip, iq); a(ip, iq) = 0.0f; // !Modification from Numerical Recipes! // h can become infinite due to numerical overflow, this only happens when a(ip, iq) is very small // so we can safely set a(ip, iq) to zero and skip the rotation, see lines marked with 'Warning' above. if (!isnan(h)) { z[ip] -= h; z[iq] += h; outEigVal[ip] -= h; outEigVal[iq] += h; #define JPH_EVS_ROTATE(a, i, j, k, l) \ g = a(i, j), \ h = a(k, l), \ a(i, j) = g - s * (h + g * tau), \ a(k, l) = h + s * (g - h * tau) uint j; for (j = 0; j < ip; ++j) JPH_EVS_ROTATE(a, j, ip, j, iq); for (j = ip + 1; j < iq; ++j) JPH_EVS_ROTATE(a, ip, j, j, iq); for (j = iq + 1; j < n; ++j) JPH_EVS_ROTATE(a, ip, j, iq, j); for (j = 0; j < n; ++j) JPH_EVS_ROTATE(outEigVec, j, ip, j, iq); #undef JPH_EVS_ROTATE } } } // Update eigenvalues with the sum of ta_pq and reinitialize z for (uint ip = 0; ip < n; ++ip) { b[ip] += z[ip]; outEigVal[ip] = b[ip]; z[ip] = 0.0f; } } // Failure JPH_ASSERT(false, "Too many iterations"); return false; } JPH_NAMESPACE_END
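Each rotation inside the sweep above is chosen so that one off-diagonal element becomes exactly zero. A minimal sketch of that step for a 2x2 symmetric matrix, reusing the same theta and t formulas (standalone illustration, not part of Jolt):

#include <cmath>

// Diagonalize [[app, apq], [apq, aqq]] with a single Jacobi rotation (requires apq != 0)
static void Jacobi2x2(float app, float apq, float aqq, float &outLp, float &outLq)
{
    float theta = 0.5f * (aqq - app) / apq;  // cot of twice the rotation angle
    float t = 1.0f / (std::abs(theta) + std::sqrt(1.0f + theta * theta));
    if (theta < 0.0f)
        t = -t;                              // smaller root, keeps the rotation angle <= PI / 4
    outLp = app - t * apq;                   // same update the sweep applies to outEigVal[ip]
    outLq = aqq + t * apq;                   // and to outEigVal[iq]
}

// Sanity checks: outLp + outLq equals the trace app + aqq, and
// outLp * outLq equals the determinant app * aqq - apq * apq.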
0
repos/c2z/use_cases/JoltPhysics/include
repos/c2z/use_cases/JoltPhysics/include/Math/Vec4.inl
// Jolt Physics Library (https://github.com/jrouwe/JoltPhysics) // SPDX-FileCopyrightText: 2021 Jorrit Rouwe // SPDX-License-Identifier: MIT #include <Jolt/Math/Trigonometry.h> #include <Jolt/Math/Vec3.h> #include <Jolt/Math/UVec4.h> JPH_NAMESPACE_BEGIN // Constructor Vec4::Vec4(Vec3Arg inRHS) : mValue(inRHS.mValue) { } Vec4::Vec4(Vec3Arg inRHS, float inW) { #if defined(JPH_USE_SSE4_1) mValue = _mm_blend_ps(inRHS.mValue, _mm_set1_ps(inW), 8); #elif defined(JPH_USE_NEON) mValue = vsetq_lane_f32(inW, inRHS.mValue, 3); #else for (int i = 0; i < 3; i++) mF32[i] = inRHS.mF32[i]; mF32[3] = inW; #endif } Vec4::Vec4(float inX, float inY, float inZ, float inW) { #if defined(JPH_USE_SSE) mValue = _mm_set_ps(inW, inZ, inY, inX); #elif defined(JPH_USE_NEON) uint32x2_t xy = vcreate_f32(static_cast<uint64>(*reinterpret_cast<uint32 *>(&inX)) | (static_cast<uint64>(*reinterpret_cast<uint32 *>(&inY)) << 32)); uint32x2_t zw = vcreate_f32(static_cast<uint64>(*reinterpret_cast<uint32* >(&inZ)) | (static_cast<uint64>(*reinterpret_cast<uint32 *>(&inW)) << 32)); mValue = vcombine_f32(xy, zw); #else mF32[0] = inX; mF32[1] = inY; mF32[2] = inZ; mF32[3] = inW; #endif } template<uint32 SwizzleX, uint32 SwizzleY, uint32 SwizzleZ, uint32 SwizzleW> Vec4 Vec4::Swizzle() const { static_assert(SwizzleX <= 3, "SwizzleX template parameter out of range"); static_assert(SwizzleY <= 3, "SwizzleY template parameter out of range"); static_assert(SwizzleZ <= 3, "SwizzleZ template parameter out of range"); static_assert(SwizzleW <= 3, "SwizzleW template parameter out of range"); #if defined(JPH_USE_SSE) return _mm_shuffle_ps(mValue, mValue, _MM_SHUFFLE(SwizzleW, SwizzleZ, SwizzleY, SwizzleX)); #elif defined(JPH_USE_NEON) return JPH_NEON_SHUFFLE_F32x4(mValue, mValue, SwizzleX, SwizzleY, SwizzleZ, SwizzleW); #else return Vec4(mF32[SwizzleX], mF32[SwizzleY], mF32[SwizzleZ], mF32[SwizzleW]); #endif } Vec4 Vec4::sZero() { #if defined(JPH_USE_SSE) return _mm_setzero_ps(); #elif defined(JPH_USE_NEON) return vdupq_n_f32(0); #else return Vec4(0, 0, 0, 0); #endif } Vec4 Vec4::sReplicate(float inV) { #if defined(JPH_USE_SSE) return _mm_set1_ps(inV); #elif defined(JPH_USE_NEON) return vdupq_n_f32(inV); #else return Vec4(inV, inV, inV, inV); #endif } Vec4 Vec4::sNaN() { return sReplicate(numeric_limits<float>::quiet_NaN()); } Vec4 Vec4::sLoadFloat4(const Float4 *inV) { #if defined(JPH_USE_SSE) return _mm_loadu_ps(&inV->x); #elif defined(JPH_USE_NEON) return vld1q_f32(&inV->x); #else return Vec4(inV->x, inV->y, inV->z, inV->w); #endif } Vec4 Vec4::sLoadFloat4Aligned(const Float4 *inV) { #if defined(JPH_USE_SSE) return _mm_load_ps(&inV->x); #elif defined(JPH_USE_NEON) return vld1q_f32(&inV->x); #else return Vec4(inV->x, inV->y, inV->z, inV->w); #endif } template <const int Scale> Vec4 Vec4::sGatherFloat4(const float *inBase, UVec4Arg inOffsets) { #if defined(JPH_USE_SSE) #ifdef JPH_USE_AVX2 return _mm_i32gather_ps(inBase, inOffsets.mValue, Scale); #else const uint8 *base = reinterpret_cast<const uint8 *>(inBase); Type x = _mm_load_ss(reinterpret_cast<const float *>(base + inOffsets.GetX() * Scale)); Type y = _mm_load_ss(reinterpret_cast<const float *>(base + inOffsets.GetY() * Scale)); Type xy = _mm_unpacklo_ps(x, y); Type z = _mm_load_ss(reinterpret_cast<const float *>(base + inOffsets.GetZ() * Scale)); Type w = _mm_load_ss(reinterpret_cast<const float *>(base + inOffsets.GetW() * Scale)); Type zw = _mm_unpacklo_ps(z, w); return _mm_movelh_ps(xy, zw); #endif #else const uint8 *base = reinterpret_cast<const uint8 *>(inBase); float x = 
*reinterpret_cast<const float *>(base + inOffsets.GetX() * Scale); float y = *reinterpret_cast<const float *>(base + inOffsets.GetY() * Scale); float z = *reinterpret_cast<const float *>(base + inOffsets.GetZ() * Scale); float w = *reinterpret_cast<const float *>(base + inOffsets.GetW() * Scale); return Vec4(x, y, z, w); #endif } Vec4 Vec4::sMin(Vec4Arg inV1, Vec4Arg inV2) { #if defined(JPH_USE_SSE) return _mm_min_ps(inV1.mValue, inV2.mValue); #elif defined(JPH_USE_NEON) return vminq_f32(inV1.mValue, inV2.mValue); #else return Vec4(min(inV1.mF32[0], inV2.mF32[0]), min(inV1.mF32[1], inV2.mF32[1]), min(inV1.mF32[2], inV2.mF32[2]), min(inV1.mF32[3], inV2.mF32[3])); #endif } Vec4 Vec4::sMax(Vec4Arg inV1, Vec4Arg inV2) { #if defined(JPH_USE_SSE) return _mm_max_ps(inV1.mValue, inV2.mValue); #elif defined(JPH_USE_NEON) return vmaxq_f32(inV1.mValue, inV2.mValue); #else return Vec4(max(inV1.mF32[0], inV2.mF32[0]), max(inV1.mF32[1], inV2.mF32[1]), max(inV1.mF32[2], inV2.mF32[2]), max(inV1.mF32[3], inV2.mF32[3])); #endif } UVec4 Vec4::sEquals(Vec4Arg inV1, Vec4Arg inV2) { #if defined(JPH_USE_SSE) return _mm_castps_si128(_mm_cmpeq_ps(inV1.mValue, inV2.mValue)); #elif defined(JPH_USE_NEON) return vceqq_f32(inV1.mValue, inV2.mValue); #else return UVec4(inV1.mF32[0] == inV2.mF32[0]? 0xffffffffu : 0, inV1.mF32[1] == inV2.mF32[1]? 0xffffffffu : 0, inV1.mF32[2] == inV2.mF32[2]? 0xffffffffu : 0, inV1.mF32[3] == inV2.mF32[3]? 0xffffffffu : 0); #endif } UVec4 Vec4::sLess(Vec4Arg inV1, Vec4Arg inV2) { #if defined(JPH_USE_SSE) return _mm_castps_si128(_mm_cmplt_ps(inV1.mValue, inV2.mValue)); #elif defined(JPH_USE_NEON) return vcltq_f32(inV1.mValue, inV2.mValue); #else return UVec4(inV1.mF32[0] < inV2.mF32[0]? 0xffffffffu : 0, inV1.mF32[1] < inV2.mF32[1]? 0xffffffffu : 0, inV1.mF32[2] < inV2.mF32[2]? 0xffffffffu : 0, inV1.mF32[3] < inV2.mF32[3]? 0xffffffffu : 0); #endif } UVec4 Vec4::sLessOrEqual(Vec4Arg inV1, Vec4Arg inV2) { #if defined(JPH_USE_SSE) return _mm_castps_si128(_mm_cmple_ps(inV1.mValue, inV2.mValue)); #elif defined(JPH_USE_NEON) return vcleq_f32(inV1.mValue, inV2.mValue); #else return UVec4(inV1.mF32[0] <= inV2.mF32[0]? 0xffffffffu : 0, inV1.mF32[1] <= inV2.mF32[1]? 0xffffffffu : 0, inV1.mF32[2] <= inV2.mF32[2]? 0xffffffffu : 0, inV1.mF32[3] <= inV2.mF32[3]? 0xffffffffu : 0); #endif } UVec4 Vec4::sGreater(Vec4Arg inV1, Vec4Arg inV2) { #if defined(JPH_USE_SSE) return _mm_castps_si128(_mm_cmpgt_ps(inV1.mValue, inV2.mValue)); #elif defined(JPH_USE_NEON) return vcgtq_f32(inV1.mValue, inV2.mValue); #else return UVec4(inV1.mF32[0] > inV2.mF32[0]? 0xffffffffu : 0, inV1.mF32[1] > inV2.mF32[1]? 0xffffffffu : 0, inV1.mF32[2] > inV2.mF32[2]? 0xffffffffu : 0, inV1.mF32[3] > inV2.mF32[3]? 0xffffffffu : 0); #endif } UVec4 Vec4::sGreaterOrEqual(Vec4Arg inV1, Vec4Arg inV2) { #if defined(JPH_USE_SSE) return _mm_castps_si128(_mm_cmpge_ps(inV1.mValue, inV2.mValue)); #elif defined(JPH_USE_NEON) return vcgeq_f32(inV1.mValue, inV2.mValue); #else return UVec4(inV1.mF32[0] >= inV2.mF32[0]? 0xffffffffu : 0, inV1.mF32[1] >= inV2.mF32[1]? 0xffffffffu : 0, inV1.mF32[2] >= inV2.mF32[2]? 0xffffffffu : 0, inV1.mF32[3] >= inV2.mF32[3]? 
0xffffffffu : 0); #endif } Vec4 Vec4::sFusedMultiplyAdd(Vec4Arg inMul1, Vec4Arg inMul2, Vec4Arg inAdd) { #if defined(JPH_USE_SSE) #ifdef JPH_USE_FMADD return _mm_fmadd_ps(inMul1.mValue, inMul2.mValue, inAdd.mValue); #else return _mm_add_ps(_mm_mul_ps(inMul1.mValue, inMul2.mValue), inAdd.mValue); #endif #elif defined(JPH_USE_NEON) return vmlaq_f32(inAdd.mValue, inMul1.mValue, inMul2.mValue); #else return Vec4(inMul1.mF32[0] * inMul2.mF32[0] + inAdd.mF32[0], inMul1.mF32[1] * inMul2.mF32[1] + inAdd.mF32[1], inMul1.mF32[2] * inMul2.mF32[2] + inAdd.mF32[2], inMul1.mF32[3] * inMul2.mF32[3] + inAdd.mF32[3]); #endif } Vec4 Vec4::sSelect(Vec4Arg inV1, Vec4Arg inV2, UVec4Arg inControl) { #if defined(JPH_USE_SSE4_1) return _mm_blendv_ps(inV1.mValue, inV2.mValue, _mm_castsi128_ps(inControl.mValue)); #elif defined(JPH_USE_NEON) return vbslq_f32(vshrq_n_s32(inControl.mValue, 31), inV2.mValue, inV1.mValue); #else Vec4 result; for (int i = 0; i < 4; i++) result.mF32[i] = inControl.mU32[i] ? inV2.mF32[i] : inV1.mF32[i]; return result; #endif } Vec4 Vec4::sOr(Vec4Arg inV1, Vec4Arg inV2) { #if defined(JPH_USE_SSE) return _mm_or_ps(inV1.mValue, inV2.mValue); #elif defined(JPH_USE_NEON) return vorrq_s32(inV1.mValue, inV2.mValue); #else return UVec4::sOr(inV1.ReinterpretAsInt(), inV2.ReinterpretAsInt()).ReinterpretAsFloat(); #endif } Vec4 Vec4::sXor(Vec4Arg inV1, Vec4Arg inV2) { #if defined(JPH_USE_SSE) return _mm_xor_ps(inV1.mValue, inV2.mValue); #elif defined(JPH_USE_NEON) return veorq_s32(inV1.mValue, inV2.mValue); #else return UVec4::sXor(inV1.ReinterpretAsInt(), inV2.ReinterpretAsInt()).ReinterpretAsFloat(); #endif } Vec4 Vec4::sAnd(Vec4Arg inV1, Vec4Arg inV2) { #if defined(JPH_USE_SSE) return _mm_and_ps(inV1.mValue, inV2.mValue); #elif defined(JPH_USE_NEON) return vandq_s32(inV1.mValue, inV2.mValue); #else return UVec4::sAnd(inV1.ReinterpretAsInt(), inV2.ReinterpretAsInt()).ReinterpretAsFloat(); #endif } void Vec4::sSort4(Vec4 &ioValue, UVec4 &ioIndex) { // Pass 1, test 1st vs 3rd, 2nd vs 4th Vec4 v1 = ioValue.Swizzle<SWIZZLE_Z, SWIZZLE_W, SWIZZLE_X, SWIZZLE_Y>(); UVec4 i1 = ioIndex.Swizzle<SWIZZLE_Z, SWIZZLE_W, SWIZZLE_X, SWIZZLE_Y>(); UVec4 c1 = sLess(ioValue, v1).Swizzle<SWIZZLE_Z, SWIZZLE_W, SWIZZLE_Z, SWIZZLE_W>(); ioValue = sSelect(ioValue, v1, c1); ioIndex = UVec4::sSelect(ioIndex, i1, c1); // Pass 2, test 1st vs 2nd, 3rd vs 4th Vec4 v2 = ioValue.Swizzle<SWIZZLE_Y, SWIZZLE_X, SWIZZLE_W, SWIZZLE_Z>(); UVec4 i2 = ioIndex.Swizzle<SWIZZLE_Y, SWIZZLE_X, SWIZZLE_W, SWIZZLE_Z>(); UVec4 c2 = sLess(ioValue, v2).Swizzle<SWIZZLE_Y, SWIZZLE_Y, SWIZZLE_W, SWIZZLE_W>(); ioValue = sSelect(ioValue, v2, c2); ioIndex = UVec4::sSelect(ioIndex, i2, c2); // Pass 3, test 2nd vs 3rd component Vec4 v3 = ioValue.Swizzle<SWIZZLE_X, SWIZZLE_Z, SWIZZLE_Y, SWIZZLE_W>(); UVec4 i3 = ioIndex.Swizzle<SWIZZLE_X, SWIZZLE_Z, SWIZZLE_Y, SWIZZLE_W>(); UVec4 c3 = sLess(ioValue, v3).Swizzle<SWIZZLE_X, SWIZZLE_Z, SWIZZLE_Z, SWIZZLE_W>(); ioValue = sSelect(ioValue, v3, c3); ioIndex = UVec4::sSelect(ioIndex, i3, c3); } void Vec4::sSort4Reverse(Vec4 &ioValue, UVec4 &ioIndex) { // Pass 1, test 1st vs 3rd, 2nd vs 4th Vec4 v1 = ioValue.Swizzle<SWIZZLE_Z, SWIZZLE_W, SWIZZLE_X, SWIZZLE_Y>(); UVec4 i1 = ioIndex.Swizzle<SWIZZLE_Z, SWIZZLE_W, SWIZZLE_X, SWIZZLE_Y>(); UVec4 c1 = sGreater(ioValue, v1).Swizzle<SWIZZLE_Z, SWIZZLE_W, SWIZZLE_Z, SWIZZLE_W>(); ioValue = sSelect(ioValue, v1, c1); ioIndex = UVec4::sSelect(ioIndex, i1, c1); // Pass 2, test 1st vs 2nd, 3rd vs 4th Vec4 v2 = ioValue.Swizzle<SWIZZLE_Y, SWIZZLE_X, SWIZZLE_W, SWIZZLE_Z>(); UVec4 i2 
= ioIndex.Swizzle<SWIZZLE_Y, SWIZZLE_X, SWIZZLE_W, SWIZZLE_Z>(); UVec4 c2 = sGreater(ioValue, v2).Swizzle<SWIZZLE_Y, SWIZZLE_Y, SWIZZLE_W, SWIZZLE_W>(); ioValue = sSelect(ioValue, v2, c2); ioIndex = UVec4::sSelect(ioIndex, i2, c2); // Pass 3, test 2nd vs 3rd component Vec4 v3 = ioValue.Swizzle<SWIZZLE_X, SWIZZLE_Z, SWIZZLE_Y, SWIZZLE_W>(); UVec4 i3 = ioIndex.Swizzle<SWIZZLE_X, SWIZZLE_Z, SWIZZLE_Y, SWIZZLE_W>(); UVec4 c3 = sGreater(ioValue, v3).Swizzle<SWIZZLE_X, SWIZZLE_Z, SWIZZLE_Z, SWIZZLE_W>(); ioValue = sSelect(ioValue, v3, c3); ioIndex = UVec4::sSelect(ioIndex, i3, c3); } bool Vec4::operator == (Vec4Arg inV2) const { return sEquals(*this, inV2).TestAllTrue(); } bool Vec4::IsClose(Vec4Arg inV2, float inMaxDistSq) const { return (inV2 - *this).LengthSq() <= inMaxDistSq; } bool Vec4::IsNormalized(float inTolerance) const { return abs(LengthSq() - 1.0f) <= inTolerance; } bool Vec4::IsNaN() const { #if defined(JPH_USE_AVX512) return _mm_fpclass_ps_mask(mValue, 0b10000001) != 0; #elif defined(JPH_USE_SSE) return _mm_movemask_ps(_mm_cmpunord_ps(mValue, mValue)) != 0; #elif defined(JPH_USE_NEON) uint32x4_t is_equal = vceqq_f32(mValue, mValue); // If a number is not equal to itself it's a NaN return vaddvq_u32(vshrq_n_u32(is_equal, 31)) != 4; #else return isnan(mF32[0]) || isnan(mF32[1]) || isnan(mF32[2]) || isnan(mF32[3]); #endif } Vec4 Vec4::operator * (Vec4Arg inV2) const { #if defined(JPH_USE_SSE) return _mm_mul_ps(mValue, inV2.mValue); #elif defined(JPH_USE_NEON) return vmulq_f32(mValue, inV2.mValue); #else return Vec4(mF32[0] * inV2.mF32[0], mF32[1] * inV2.mF32[1], mF32[2] * inV2.mF32[2], mF32[3] * inV2.mF32[3]); #endif } Vec4 Vec4::operator * (float inV2) const { #if defined(JPH_USE_SSE) return _mm_mul_ps(mValue, _mm_set1_ps(inV2)); #elif defined(JPH_USE_NEON) return vmulq_n_f32(mValue, inV2); #else return Vec4(mF32[0] * inV2, mF32[1] * inV2, mF32[2] * inV2, mF32[3] * inV2); #endif } /// Multiply vector with float Vec4 operator * (float inV1, Vec4Arg inV2) { #if defined(JPH_USE_SSE) return _mm_mul_ps(_mm_set1_ps(inV1), inV2.mValue); #elif defined(JPH_USE_NEON) return vmulq_n_f32(inV2.mValue, inV1); #else return Vec4(inV1 * inV2.mF32[0], inV1 * inV2.mF32[1], inV1 * inV2.mF32[2], inV1 * inV2.mF32[3]); #endif } Vec4 Vec4::operator / (float inV2) const { #if defined(JPH_USE_SSE) return _mm_div_ps(mValue, _mm_set1_ps(inV2)); #elif defined(JPH_USE_NEON) return vdivq_f32(mValue, vdupq_n_f32(inV2)); #else return Vec4(mF32[0] / inV2, mF32[1] / inV2, mF32[2] / inV2, mF32[3] / inV2); #endif } Vec4 &Vec4::operator *= (float inV2) { #if defined(JPH_USE_SSE) mValue = _mm_mul_ps(mValue, _mm_set1_ps(inV2)); #elif defined(JPH_USE_NEON) mValue = vmulq_n_f32(mValue, inV2); #else for (int i = 0; i < 4; ++i) mF32[i] *= inV2; #endif return *this; } Vec4 &Vec4::operator *= (Vec4Arg inV2) { #if defined(JPH_USE_SSE) mValue = _mm_mul_ps(mValue, inV2.mValue); #elif defined(JPH_USE_NEON) mValue = vmulq_f32(mValue, inV2.mValue); #else for (int i = 0; i < 4; ++i) mF32[i] *= inV2.mF32[i]; #endif return *this; } Vec4 &Vec4::operator /= (float inV2) { #if defined(JPH_USE_SSE) mValue = _mm_div_ps(mValue, _mm_set1_ps(inV2)); #elif defined(JPH_USE_NEON) mValue = vdivq_f32(mValue, vdupq_n_f32(inV2)); #else for (int i = 0; i < 4; ++i) mF32[i] /= inV2; #endif return *this; } Vec4 Vec4::operator + (Vec4Arg inV2) const { #if defined(JPH_USE_SSE) return _mm_add_ps(mValue, inV2.mValue); #elif defined(JPH_USE_NEON) return vaddq_f32(mValue, inV2.mValue); #else return Vec4(mF32[0] + inV2.mF32[0], mF32[1] + inV2.mF32[1], mF32[2] + 
inV2.mF32[2], mF32[3] + inV2.mF32[3]); #endif } Vec4 &Vec4::operator += (Vec4Arg inV2) { #if defined(JPH_USE_SSE) mValue = _mm_add_ps(mValue, inV2.mValue); #elif defined(JPH_USE_NEON) mValue = vaddq_f32(mValue, inV2.mValue); #else for (int i = 0; i < 4; ++i) mF32[i] += inV2.mF32[i]; #endif return *this; } Vec4 Vec4::operator - () const { #if defined(JPH_USE_SSE) return _mm_sub_ps(_mm_setzero_ps(), mValue); #elif defined(JPH_USE_NEON) return vnegq_f32(mValue); #else return Vec4(-mF32[0], -mF32[1], -mF32[2], -mF32[3]); #endif } Vec4 Vec4::operator - (Vec4Arg inV2) const { #if defined(JPH_USE_SSE) return _mm_sub_ps(mValue, inV2.mValue); #elif defined(JPH_USE_NEON) return vsubq_f32(mValue, inV2.mValue); #else return Vec4(mF32[0] - inV2.mF32[0], mF32[1] - inV2.mF32[1], mF32[2] - inV2.mF32[2], mF32[3] - inV2.mF32[3]); #endif } Vec4 &Vec4::operator -= (Vec4Arg inV2) { #if defined(JPH_USE_SSE) mValue = _mm_sub_ps(mValue, inV2.mValue); #elif defined(JPH_USE_NEON) mValue = vsubq_f32(mValue, inV2.mValue); #else for (int i = 0; i < 4; ++i) mF32[i] -= inV2.mF32[i]; #endif return *this; } Vec4 Vec4::operator / (Vec4Arg inV2) const { #if defined(JPH_USE_SSE) return _mm_div_ps(mValue, inV2.mValue); #elif defined(JPH_USE_NEON) return vdivq_f32(mValue, inV2.mValue); #else return Vec4(mF32[0] / inV2.mF32[0], mF32[1] / inV2.mF32[1], mF32[2] / inV2.mF32[2], mF32[3] / inV2.mF32[3]); #endif } Vec4 Vec4::SplatX() const { #if defined(JPH_USE_SSE) return _mm_shuffle_ps(mValue, mValue, _MM_SHUFFLE(0, 0, 0, 0)); #elif defined(JPH_USE_NEON) return vdupq_laneq_f32(mValue, 0); #else return Vec4(mF32[0], mF32[0], mF32[0], mF32[0]); #endif } Vec4 Vec4::SplatY() const { #if defined(JPH_USE_SSE) return _mm_shuffle_ps(mValue, mValue, _MM_SHUFFLE(1, 1, 1, 1)); #elif defined(JPH_USE_NEON) return vdupq_laneq_f32(mValue, 1); #else return Vec4(mF32[1], mF32[1], mF32[1], mF32[1]); #endif } Vec4 Vec4::SplatZ() const { #if defined(JPH_USE_SSE) return _mm_shuffle_ps(mValue, mValue, _MM_SHUFFLE(2, 2, 2, 2)); #elif defined(JPH_USE_NEON) return vdupq_laneq_f32(mValue, 2); #else return Vec4(mF32[2], mF32[2], mF32[2], mF32[2]); #endif } Vec4 Vec4::SplatW() const { #if defined(JPH_USE_SSE) return _mm_shuffle_ps(mValue, mValue, _MM_SHUFFLE(3, 3, 3, 3)); #elif defined(JPH_USE_NEON) return vdupq_laneq_f32(mValue, 3); #else return Vec4(mF32[3], mF32[3], mF32[3], mF32[3]); #endif } Vec4 Vec4::Abs() const { #if defined(JPH_USE_AVX512) return _mm_range_ps(mValue, mValue, 0b1000); #elif defined(JPH_USE_SSE) return _mm_max_ps(_mm_sub_ps(_mm_setzero_ps(), mValue), mValue); #elif defined(JPH_USE_NEON) return vabsq_f32(mValue); #else return Vec4(abs(mF32[0]), abs(mF32[1]), abs(mF32[2]), abs(mF32[3])); #endif } Vec4 Vec4::Reciprocal() const { return sReplicate(1.0f) / mValue; } Vec4 Vec4::DotV(Vec4Arg inV2) const { #if defined(JPH_USE_SSE4_1) return _mm_dp_ps(mValue, inV2.mValue, 0xff); #elif defined(JPH_USE_NEON) float32x4_t mul = vmulq_f32(mValue, inV2.mValue); return vdupq_n_f32(vaddvq_f32(mul)); #else float dot = 0.0f; for (int i = 0; i < 4; i++) dot += mF32[i] * inV2.mF32[i]; return Vec4::sReplicate(dot); #endif } float Vec4::Dot(Vec4Arg inV2) const { #if defined(JPH_USE_SSE4_1) return _mm_cvtss_f32(_mm_dp_ps(mValue, inV2.mValue, 0xff)); #elif defined(JPH_USE_NEON) float32x4_t mul = vmulq_f32(mValue, inV2.mValue); return vaddvq_f32(mul); #else float dot = 0.0f; for (int i = 0; i < 4; i++) dot += mF32[i] * inV2.mF32[i]; return dot; #endif } float Vec4::LengthSq() const { #if defined(JPH_USE_SSE4_1) return _mm_cvtss_f32(_mm_dp_ps(mValue, mValue, 
0xff)); #elif defined(JPH_USE_NEON) float32x4_t mul = vmulq_f32(mValue, mValue); return vaddvq_f32(mul); #else float len_sq = 0.0f; for (int i = 0; i < 4; i++) len_sq += mF32[i] * mF32[i]; return len_sq; #endif } float Vec4::Length() const { #if defined(JPH_USE_SSE4_1) return _mm_cvtss_f32(_mm_sqrt_ss(_mm_dp_ps(mValue, mValue, 0xff))); #elif defined(JPH_USE_NEON) float32x4_t mul = vmulq_f32(mValue, mValue); float32x2_t sum = vdup_n_f32(vaddvq_f32(mul)); return vget_lane_f32(vsqrt_f32(sum), 0); #else return sqrt(LengthSq()); #endif } Vec4 Vec4::Sqrt() const { #if defined(JPH_USE_SSE) return _mm_sqrt_ps(mValue); #elif defined(JPH_USE_NEON) return vsqrtq_f32(mValue); #else return Vec4(sqrt(mF32[0]), sqrt(mF32[1]), sqrt(mF32[2]), sqrt(mF32[3])); #endif } Vec4 Vec4::GetSign() const { #if defined(JPH_USE_AVX512) return _mm_fixupimm_ps(mValue, mValue, _mm_set1_epi32(0xA9A90A00), 0); #elif defined(JPH_USE_SSE) Type minus_one = _mm_set1_ps(-1.0f); Type one = _mm_set1_ps(1.0f); return _mm_or_ps(_mm_and_ps(mValue, minus_one), one); #elif defined(JPH_USE_NEON) Type minus_one = vdupq_n_f32(-1.0f); Type one = vdupq_n_f32(1.0f); return vorrq_s32(vandq_s32(mValue, minus_one), one); #else return Vec4(signbit(mF32[0])? -1.0f : 1.0f, signbit(mF32[1])? -1.0f : 1.0f, signbit(mF32[2])? -1.0f : 1.0f, signbit(mF32[3])? -1.0f : 1.0f); #endif } Vec4 Vec4::Normalized() const { #if defined(JPH_USE_SSE4_1) return _mm_div_ps(mValue, _mm_sqrt_ps(_mm_dp_ps(mValue, mValue, 0xff))); #elif defined(JPH_USE_NEON) float32x4_t mul = vmulq_f32(mValue, mValue); float32x4_t sum = vdupq_n_f32(vaddvq_f32(mul)); return vdivq_f32(mValue, vsqrtq_f32(sum)); #else return *this / Length(); #endif } void Vec4::StoreFloat4(Float4 *outV) const { #if defined(JPH_USE_SSE) _mm_storeu_ps(&outV->x, mValue); #elif defined(JPH_USE_NEON) vst1q_f32(&outV->x, mValue); #else for (int i = 0; i < 4; ++i) (&outV->x)[i] = mF32[i]; #endif } UVec4 Vec4::ToInt() const { #if defined(JPH_USE_SSE) return _mm_cvttps_epi32(mValue); #elif defined(JPH_USE_NEON) return vcvtq_u32_f32(mValue); #else return UVec4(uint32(mF32[0]), uint32(mF32[1]), uint32(mF32[2]), uint32(mF32[3])); #endif } UVec4 Vec4::ReinterpretAsInt() const { #if defined(JPH_USE_SSE) return UVec4(_mm_castps_si128(mValue)); #elif defined(JPH_USE_NEON) return vreinterpretq_u32_f32(mValue); #else return *reinterpret_cast<const UVec4 *>(this); #endif } int Vec4::GetSignBits() const { #if defined(JPH_USE_SSE) return _mm_movemask_ps(mValue); #elif defined(JPH_USE_NEON) int32x4_t shift = JPH_NEON_INT32x4(0, 1, 2, 3); return vaddvq_u32(vshlq_u32(vshrq_n_u32(vreinterpretq_u32_f32(mValue), 31), shift)); #else return (signbit(mF32[0])? 1 : 0) | (signbit(mF32[1])? 2 : 0) | (signbit(mF32[2])? 4 : 0) | (signbit(mF32[3])? 8 : 0); #endif } float Vec4::ReduceMin() const { Vec4 v = sMin(mValue, Swizzle<SWIZZLE_Y, SWIZZLE_UNUSED, SWIZZLE_W, SWIZZLE_UNUSED>()); v = sMin(v, v.Swizzle<SWIZZLE_Z, SWIZZLE_UNUSED, SWIZZLE_UNUSED, SWIZZLE_UNUSED>()); return v.GetX(); } float Vec4::ReduceMax() const { Vec4 v = sMax(mValue, Swizzle<SWIZZLE_Y, SWIZZLE_UNUSED, SWIZZLE_W, SWIZZLE_UNUSED>()); v = sMax(v, v.Swizzle<SWIZZLE_Z, SWIZZLE_UNUSED, SWIZZLE_UNUSED, SWIZZLE_UNUSED>()); return v.GetX(); } void Vec4::SinCos(Vec4 &outSin, Vec4 &outCos) const { // Implementation based on sinf.c from the cephes library, combines sinf and cosf in a single function, changes octants to quadrants and vectorizes it // Original implementation by Stephen L. 
Moshier (See: http://www.moshier.net/) // Make argument positive and remember sign for sin only since cos is symmetric around x = 0 (highest bit of a float is the sign bit) UVec4 sin_sign = UVec4::sAnd(ReinterpretAsInt(), UVec4::sReplicate(0x80000000U)); Vec4 x = Vec4::sXor(*this, sin_sign.ReinterpretAsFloat()); // x / (PI / 2) rounded to nearest int gives us the quadrant closest to x UVec4 quadrant = (0.6366197723675814f * x + Vec4::sReplicate(0.5f)).ToInt(); // Make x relative to the closest quadrant. // This does x = x - quadrant * PI / 2 using a two-step Cody-Waite argument reduction. // This improves the accuracy of the result by avoiding loss of significant bits in the subtraction. // We start with x = x - quadrant * PI / 2, PI / 2 in hexadecimal notation is 0x3fc90fdb, we remove the lowest 16 bits to // get 0x3fc90000 (= 1.5703125) this means we can now multiply with a number of up to 2^16 without losing any bits. // This leaves us with: x = (x - quadrant * 1.5703125) - quadrant * (PI / 2 - 1.5703125). // PI / 2 - 1.5703125 in hexadecimal is 0x39fdaa22, stripping the lowest 12 bits we get 0x39fda000 (= 0.0004837512969970703125) // This leaves us with: x = ((x - quadrant * 1.5703125) - quadrant * 0.0004837512969970703125) - quadrant * (PI / 2 - 1.5703125 - 0.0004837512969970703125) // See: https://stackoverflow.com/questions/42455143/sine-cosine-modular-extended-precision-arithmetic // After this we have x in the range [-PI / 4, PI / 4]. Vec4 float_quadrant = quadrant.ToFloat(); x = ((x - float_quadrant * 1.5703125f) - float_quadrant * 0.0004837512969970703125f) - float_quadrant * 7.549789948768648e-8f; // Calculate x2 = x^2 Vec4 x2 = x * x; // Taylor expansion: // Cos(x) = 1 - x^2/2! + x^4/4! - x^6/6! + x^8/8! + ... = (((x2/8! - 1/6!) * x2 + 1/4!) * x2 - 1/2!) * x2 + 1 Vec4 taylor_cos = ((2.443315711809948e-5f * x2 - Vec4::sReplicate(1.388731625493765e-3f)) * x2 + Vec4::sReplicate(4.166664568298827e-2f)) * x2 * x2 - 0.5f * x2 + Vec4::sReplicate(1.0f); // Sin(x) = x - x^3/3! + x^5/5! - x^7/7! + ... = ((-x2/7! + 1/5!) * x2 - 1/3!) * x2 * x + x Vec4 taylor_sin = ((-1.9515295891e-4f * x2 + Vec4::sReplicate(8.3321608736e-3f)) * x2 - Vec4::sReplicate(1.6666654611e-1f)) * x2 * x + x; // The lowest 2 bits of quadrant indicate the quadrant that we are in. // Let x be the original input value and x' our value that has been mapped to the range [-PI / 4, PI / 4]. // Since cos(x) = sin(x + PI / 2) and since we want to use the Taylor expansion as close as possible to 0, // we can alternate between using the Taylor expansion for sin and cos according to the following table: // // quadrant sin(x) cos(x) // XXX00b sin(x') cos(x') // XXX01b cos(x') -sin(x') // XXX10b -sin(x') -cos(x') // XXX11b -cos(x') sin(x') // // So: sin_sign = bit2, cos_sign = bit1 ^ bit2, bit1 determines if we use sin or cos Taylor expansion UVec4 bit1 = quadrant.LogicalShiftLeft<31>(); UVec4 bit2 = UVec4::sAnd(quadrant.LogicalShiftLeft<30>(), UVec4::sReplicate(0x80000000U)); // Select which one of the results is sin and which one is cos Vec4 s = Vec4::sSelect(taylor_sin, taylor_cos, bit1); Vec4 c = Vec4::sSelect(taylor_cos, taylor_sin, bit1); // Update the signs sin_sign = UVec4::sXor(sin_sign, bit2); UVec4 cos_sign = UVec4::sXor(bit1, bit2); // Correct the signs outSin = Vec4::sXor(s, sin_sign.ReinterpretAsFloat()); outCos = Vec4::sXor(c, cos_sign.ReinterpretAsFloat()); } Vec4 Vec4::Tan() const { // Implementation based on tanf.c from the cephes library, see Vec4::SinCos for further details // Original implementation by Stephen L. 
Moshier (See: http://www.moshier.net/) // Make argument positive UVec4 tan_sign = UVec4::sAnd(ReinterpretAsInt(), UVec4::sReplicate(0x80000000U)); Vec4 x = Vec4::sXor(*this, tan_sign.ReinterpretAsFloat()); // x / (PI / 2) rounded to nearest int gives us the quadrant closest to x UVec4 quadrant = (0.6366197723675814f * x + Vec4::sReplicate(0.5f)).ToInt(); // Remap x to range [-PI / 4, PI / 4], see Vec4::SinCos Vec4 float_quadrant = quadrant.ToFloat(); x = ((x - float_quadrant * 1.5703125f) - float_quadrant * 0.0004837512969970703125f) - float_quadrant * 7.549789948768648e-8f; // Calculate x2 = x^2 Vec4 x2 = x * x; // Roughly equivalent to the Taylor expansion: // Tan(x) = x + x^3/3 + 2*x^5/15 + 17*x^7/315 + 62*x^9/2835 + ... Vec4 tan = (((((9.38540185543e-3f * x2 + Vec4::sReplicate(3.11992232697e-3f)) * x2 + Vec4::sReplicate(2.44301354525e-2f)) * x2 + Vec4::sReplicate(5.34112807005e-2f)) * x2 + Vec4::sReplicate(1.33387994085e-1f)) * x2 + Vec4::sReplicate(3.33331568548e-1f)) * x2 * x + x; // For the 2nd and 4th quadrant we need to invert the value UVec4 bit1 = quadrant.LogicalShiftLeft<31>(); tan = Vec4::sSelect(tan, Vec4::sReplicate(-1.0f) / (tan JPH_IF_FLOATING_POINT_EXCEPTIONS_ENABLED(+ Vec4::sReplicate(FLT_MIN))), bit1); // Add small epsilon to prevent div by zero, works because tan is always positive // Put the sign back return Vec4::sXor(tan, tan_sign.ReinterpretAsFloat()); } Vec4 Vec4::ASin() const { // Implementation based on asinf.c from the cephes library // Original implementation by Stephen L. Moshier (See: http://www.moshier.net/) // Make argument positive UVec4 asin_sign = UVec4::sAnd(ReinterpretAsInt(), UVec4::sReplicate(0x80000000U)); Vec4 a = Vec4::sXor(*this, asin_sign.ReinterpretAsFloat()); // ASin is not defined outside the range [-1, 1] but it often happens that a value is slightly above 1 so we just clamp here a = Vec4::sMin(a, Vec4::sReplicate(1.0f)); // When |x| <= 0.5 we use the asin approximation as is Vec4 z1 = a * a; Vec4 x1 = a; // When |x| > 0.5 we use the identity asin(x) = PI / 2 - 2 * asin(sqrt((1 - x) / 2)) Vec4 z2 = 0.5f * (Vec4::sReplicate(1.0f) - a); Vec4 x2 = z2.Sqrt(); // Select which of the two situations we have UVec4 greater = Vec4::sGreater(a, Vec4::sReplicate(0.5f)); Vec4 z = Vec4::sSelect(z1, z2, greater); Vec4 x = Vec4::sSelect(x1, x2, greater); // Polynomial approximation of asin z = ((((4.2163199048e-2f * z + Vec4::sReplicate(2.4181311049e-2f)) * z + Vec4::sReplicate(4.5470025998e-2f)) * z + Vec4::sReplicate(7.4953002686e-2f)) * z + Vec4::sReplicate(1.6666752422e-1f)) * z * x + x; // If |x| > 0.5 we need to apply the remainder of the identity above z = Vec4::sSelect(z, Vec4::sReplicate(0.5f * JPH_PI) - (z + z), greater); // Put the sign back return Vec4::sXor(z, asin_sign.ReinterpretAsFloat()); } Vec4 Vec4::ACos() const { // Not the most accurate, but simple return Vec4::sReplicate(0.5f * JPH_PI) - ASin(); } Vec4 Vec4::ATan() const { // Implementation based on atanf.c from the cephes library // Original implementation by Stephen L. 
Moshier (See: http://www.moshier.net/) // Make argument positive UVec4 atan_sign = UVec4::sAnd(ReinterpretAsInt(), UVec4::sReplicate(0x80000000U)); Vec4 x = Vec4::sXor(*this, atan_sign.ReinterpretAsFloat()); Vec4 y = Vec4::sZero(); // If x > Tan(PI / 8) UVec4 greater1 = Vec4::sGreater(x, Vec4::sReplicate(0.4142135623730950f)); Vec4 x1 = (x - Vec4::sReplicate(1.0f)) / (x + Vec4::sReplicate(1.0f)); // If x > Tan(3 * PI / 8) UVec4 greater2 = Vec4::sGreater(x, Vec4::sReplicate(2.414213562373095f)); Vec4 x2 = Vec4::sReplicate(-1.0f) / (x JPH_IF_FLOATING_POINT_EXCEPTIONS_ENABLED(+ Vec4::sReplicate(FLT_MIN))); // Add small epsilon to prevent div by zero, works because x is always positive // Apply first if x = Vec4::sSelect(x, x1, greater1); y = Vec4::sSelect(y, Vec4::sReplicate(0.25f * JPH_PI), greater1); // Apply second if x = Vec4::sSelect(x, x2, greater2); y = Vec4::sSelect(y, Vec4::sReplicate(0.5f * JPH_PI), greater2); // Polynomial approximation Vec4 z = x * x; y += (((8.05374449538e-2f * z - Vec4::sReplicate(1.38776856032e-1f)) * z + Vec4::sReplicate(1.99777106478e-1f)) * z - Vec4::sReplicate(3.33329491539e-1f)) * z * x + x; // Put the sign back return Vec4::sXor(y, atan_sign.ReinterpretAsFloat()); } Vec4 Vec4::sATan2(Vec4Arg inY, Vec4Arg inX) { UVec4 sign_mask = UVec4::sReplicate(0x80000000U); // Determine absolute value and sign of y UVec4 y_sign = UVec4::sAnd(inY.ReinterpretAsInt(), sign_mask); Vec4 y_abs = Vec4::sXor(inY, y_sign.ReinterpretAsFloat()); // Determine absolute value and sign of x UVec4 x_sign = UVec4::sAnd(inX.ReinterpretAsInt(), sign_mask); Vec4 x_abs = Vec4::sXor(inX, x_sign.ReinterpretAsFloat()); // Always divide smallest / largest to avoid dividing by zero UVec4 x_is_numerator = Vec4::sLess(x_abs, y_abs); Vec4 numerator = Vec4::sSelect(y_abs, x_abs, x_is_numerator); Vec4 denominator = Vec4::sSelect(x_abs, y_abs, x_is_numerator); Vec4 atan = (numerator / denominator).ATan(); // If we calculated x / y instead of y / x the result is PI / 2 - result (note that this is true because we know the result is positive because the input was positive) atan = Vec4::sSelect(atan, Vec4::sReplicate(0.5f * JPH_PI) - atan, x_is_numerator); // Now we need to map to the correct quadrant // x_sign y_sign result // +1 +1 atan // -1 +1 -atan + PI // -1 -1 atan - PI // +1 -1 -atan // This can be written as: x_sign * y_sign * (atan - (x_sign < 0? PI : 0)) atan -= Vec4::sAnd(x_sign.ArithmeticShiftRight<31>().ReinterpretAsFloat(), Vec4::sReplicate(JPH_PI)); atan = Vec4::sXor(atan, UVec4::sXor(x_sign, y_sign).ReinterpretAsFloat()); return atan; } JPH_NAMESPACE_END
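The Cody-Waite reduction that SinCos and Tan rely on is easiest to see in scalar form: PI / 2 is split into pieces whose leading parts are exactly representable, so quadrant * PI / 2 can be subtracted in stages without destroying the low bits of x. A standalone sketch assuming a non-negative input (the vectorized code strips the sign first); the constants are the ones quoted in the comments above:

#include <cmath>
#include <cstdio>

// Returns x reduced to roughly [-PI / 4, PI / 4]; outQuadrant gets round(x / (PI / 2))
static float ReduceQuadrant(float x, int &outQuadrant)
{
    outQuadrant = (int)(0.6366197723675814f * x + 0.5f); // 2 / PI, rounding valid for x >= 0
    float q = (float)outQuadrant;
    return ((x - q * 1.5703125f) - q * 0.0004837512969970703125f) - q * 7.549789948768648e-8f;
}

int main()
{
    int quadrant;
    float r = ReduceQuadrant(100.0f, quadrant);
    // sin(100) equals sin(r + quadrant * PI / 2); the two printed values should agree closely
    std::printf("%.6f %.6f\n", std::sin(100.0), std::sin(r + quadrant * 1.5707963267948966));
    return 0;
}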
0
repos/c2z/use_cases/JoltPhysics/include
repos/c2z/use_cases/JoltPhysics/include/Math/DMat44.inl
// Jolt Physics Library (https://github.com/jrouwe/JoltPhysics)
// SPDX-FileCopyrightText: 2021 Jorrit Rouwe
// SPDX-License-Identifier: MIT

#pragma once

#include <Jolt/Math/DVec3.h>

JPH_NAMESPACE_BEGIN

DMat44::DMat44(Vec4Arg inC1, Vec4Arg inC2, Vec4Arg inC3, DVec3Arg inC4) :
    mCol { inC1, inC2, inC3 },
    mCol3(inC4)
{
}

DMat44::DMat44(Type inC1, Type inC2, Type inC3, DTypeArg inC4) :
    mCol { inC1, inC2, inC3 },
    mCol3(inC4)
{
}

DMat44::DMat44(Mat44Arg inM) :
    mCol { inM.GetColumn4(0), inM.GetColumn4(1), inM.GetColumn4(2) },
    mCol3(inM.GetTranslation())
{
}

DMat44::DMat44(Mat44Arg inRot, DVec3Arg inT) :
    mCol { inRot.GetColumn4(0), inRot.GetColumn4(1), inRot.GetColumn4(2) },
    mCol3(inT)
{
}

DMat44 DMat44::sZero()
{
    return DMat44(Vec4::sZero(), Vec4::sZero(), Vec4::sZero(), DVec3::sZero());
}

DMat44 DMat44::sIdentity()
{
    return DMat44(Vec4(1, 0, 0, 0), Vec4(0, 1, 0, 0), Vec4(0, 0, 1, 0), DVec3::sZero());
}

DMat44 DMat44::sInverseRotationTranslation(QuatArg inR, DVec3Arg inT)
{
    Mat44 m = Mat44::sRotation(inR.Conjugated());
    DMat44 dm(m, DVec3::sZero());
    dm.SetTranslation(-dm.Multiply3x3(inT));
    return dm;
}

bool DMat44::operator == (DMat44Arg inM2) const
{
    return mCol[0] == inM2.mCol[0]
        && mCol[1] == inM2.mCol[1]
        && mCol[2] == inM2.mCol[2]
        && mCol3 == inM2.mCol3;
}

bool DMat44::IsClose(DMat44Arg inM2, float inMaxDistSq) const
{
    for (int i = 0; i < 3; ++i)
        if (!mCol[i].IsClose(inM2.mCol[i], inMaxDistSq))
            return false;
    return mCol3.IsClose(inM2.mCol3, double(inMaxDistSq));
}

DVec3 DMat44::operator * (Vec3Arg inV) const
{
#if defined(JPH_USE_AVX)
    __m128 t = _mm_mul_ps(mCol[0].mValue, _mm_shuffle_ps(inV.mValue, inV.mValue, _MM_SHUFFLE(0, 0, 0, 0)));
    t = _mm_add_ps(t, _mm_mul_ps(mCol[1].mValue, _mm_shuffle_ps(inV.mValue, inV.mValue, _MM_SHUFFLE(1, 1, 1, 1))));
    t = _mm_add_ps(t, _mm_mul_ps(mCol[2].mValue, _mm_shuffle_ps(inV.mValue, inV.mValue, _MM_SHUFFLE(2, 2, 2, 2))));
    return DVec3::sFixW(_mm256_add_pd(mCol3.mValue, _mm256_cvtps_pd(t)));
#elif defined(JPH_USE_SSE)
    __m128 t = _mm_mul_ps(mCol[0].mValue, _mm_shuffle_ps(inV.mValue, inV.mValue, _MM_SHUFFLE(0, 0, 0, 0)));
    t = _mm_add_ps(t, _mm_mul_ps(mCol[1].mValue, _mm_shuffle_ps(inV.mValue, inV.mValue, _MM_SHUFFLE(1, 1, 1, 1))));
    t = _mm_add_ps(t, _mm_mul_ps(mCol[2].mValue, _mm_shuffle_ps(inV.mValue, inV.mValue, _MM_SHUFFLE(2, 2, 2, 2))));
    __m128d low = _mm_add_pd(mCol3.mValue.mLow, _mm_cvtps_pd(t));
    __m128d high = _mm_add_pd(mCol3.mValue.mHigh, _mm_cvtps_pd(_mm_shuffle_ps(t, t, _MM_SHUFFLE(2, 2, 2, 2))));
    return DVec3({ low, high });
#elif defined(JPH_USE_NEON)
    float32x4_t t = vmulq_f32(mCol[0].mValue, vdupq_laneq_f32(inV.mValue, 0));
    t = vmlaq_f32(t, mCol[1].mValue, vdupq_laneq_f32(inV.mValue, 1));
    t = vmlaq_f32(t, mCol[2].mValue, vdupq_laneq_f32(inV.mValue, 2));
    float64x2_t low = vaddq_f64(mCol3.mValue.val[0], vcvt_f64_f32(vget_low_f32(t)));
    float64x2_t high = vaddq_f64(mCol3.mValue.val[1], vcvt_high_f64_f32(t));
    return DVec3::sFixW({ low, high });
#else
    return DVec3(
        mCol3.mF64[0] + double(mCol[0].mF32[0] * inV.mF32[0] + mCol[1].mF32[0] * inV.mF32[1] + mCol[2].mF32[0] * inV.mF32[2]),
        mCol3.mF64[1] + double(mCol[0].mF32[1] * inV.mF32[0] + mCol[1].mF32[1] * inV.mF32[1] + mCol[2].mF32[1] * inV.mF32[2]),
        mCol3.mF64[2] + double(mCol[0].mF32[2] * inV.mF32[0] + mCol[1].mF32[2] * inV.mF32[1] + mCol[2].mF32[2] * inV.mF32[2]));
#endif
}

DVec3 DMat44::operator * (DVec3Arg inV) const
{
#if defined(JPH_USE_AVX)
    __m256d t = _mm256_add_pd(mCol3.mValue, _mm256_mul_pd(_mm256_cvtps_pd(mCol[0].mValue), _mm256_set1_pd(inV.mF64[0])));
    t = _mm256_add_pd(t, _mm256_mul_pd(_mm256_cvtps_pd(mCol[1].mValue), _mm256_set1_pd(inV.mF64[1])));
    t = _mm256_add_pd(t, _mm256_mul_pd(_mm256_cvtps_pd(mCol[2].mValue), _mm256_set1_pd(inV.mF64[2])));
    return DVec3::sFixW(t);
#elif defined(JPH_USE_SSE)
    __m128d xxxx = _mm_set1_pd(inV.mF64[0]);
    __m128d yyyy = _mm_set1_pd(inV.mF64[1]);
    __m128d zzzz = _mm_set1_pd(inV.mF64[2]);
    __m128 col0 = mCol[0].mValue;
    __m128 col1 = mCol[1].mValue;
    __m128 col2 = mCol[2].mValue;
    __m128d t_low = _mm_add_pd(mCol3.mValue.mLow, _mm_mul_pd(_mm_cvtps_pd(col0), xxxx));
    t_low = _mm_add_pd(t_low, _mm_mul_pd(_mm_cvtps_pd(col1), yyyy));
    t_low = _mm_add_pd(t_low, _mm_mul_pd(_mm_cvtps_pd(col2), zzzz));
    __m128d t_high = _mm_add_pd(mCol3.mValue.mHigh, _mm_mul_pd(_mm_cvtps_pd(_mm_shuffle_ps(col0, col0, _MM_SHUFFLE(2, 2, 2, 2))), xxxx));
    t_high = _mm_add_pd(t_high, _mm_mul_pd(_mm_cvtps_pd(_mm_shuffle_ps(col1, col1, _MM_SHUFFLE(2, 2, 2, 2))), yyyy));
    t_high = _mm_add_pd(t_high, _mm_mul_pd(_mm_cvtps_pd(_mm_shuffle_ps(col2, col2, _MM_SHUFFLE(2, 2, 2, 2))), zzzz));
    return DVec3({ t_low, t_high });
#elif defined(JPH_USE_NEON)
    float64x2_t xxxx = vdupq_laneq_f64(inV.mValue.val[0], 0);
    float64x2_t yyyy = vdupq_laneq_f64(inV.mValue.val[0], 1);
    float64x2_t zzzz = vdupq_laneq_f64(inV.mValue.val[1], 0);
    float32x4_t col0 = mCol[0].mValue;
    float32x4_t col1 = mCol[1].mValue;
    float32x4_t col2 = mCol[2].mValue;
    float64x2_t t_low = vaddq_f64(mCol3.mValue.val[0], vmulq_f64(vcvt_f64_f32(vget_low_f32(col0)), xxxx));
    t_low = vaddq_f64(t_low, vmulq_f64(vcvt_f64_f32(vget_low_f32(col1)), yyyy));
    t_low = vaddq_f64(t_low, vmulq_f64(vcvt_f64_f32(vget_low_f32(col2)), zzzz));
    float64x2_t t_high = vaddq_f64(mCol3.mValue.val[1], vmulq_f64(vcvt_high_f64_f32(col0), xxxx));
    t_high = vaddq_f64(t_high, vmulq_f64(vcvt_high_f64_f32(col1), yyyy));
    t_high = vaddq_f64(t_high, vmulq_f64(vcvt_high_f64_f32(col2), zzzz));
    return DVec3::sFixW({ t_low, t_high });
#else
    return DVec3(
        mCol3.mF64[0] + double(mCol[0].mF32[0]) * inV.mF64[0] + double(mCol[1].mF32[0]) * inV.mF64[1] + double(mCol[2].mF32[0]) * inV.mF64[2],
        mCol3.mF64[1] + double(mCol[0].mF32[1]) * inV.mF64[0] + double(mCol[1].mF32[1]) * inV.mF64[1] + double(mCol[2].mF32[1]) * inV.mF64[2],
        mCol3.mF64[2] + double(mCol[0].mF32[2]) * inV.mF64[0] + double(mCol[1].mF32[2]) * inV.mF64[1] + double(mCol[2].mF32[2]) * inV.mF64[2]);
#endif
}

DVec3 DMat44::Multiply3x3(DVec3Arg inV) const
{
#if defined(JPH_USE_AVX)
    __m256d t = _mm256_mul_pd(_mm256_cvtps_pd(mCol[0].mValue), _mm256_set1_pd(inV.mF64[0]));
    t = _mm256_add_pd(t, _mm256_mul_pd(_mm256_cvtps_pd(mCol[1].mValue), _mm256_set1_pd(inV.mF64[1])));
    t = _mm256_add_pd(t, _mm256_mul_pd(_mm256_cvtps_pd(mCol[2].mValue), _mm256_set1_pd(inV.mF64[2])));
    return DVec3::sFixW(t);
#elif defined(JPH_USE_SSE)
    __m128d xxxx = _mm_set1_pd(inV.mF64[0]);
    __m128d yyyy = _mm_set1_pd(inV.mF64[1]);
    __m128d zzzz = _mm_set1_pd(inV.mF64[2]);
    __m128 col0 = mCol[0].mValue;
    __m128 col1 = mCol[1].mValue;
    __m128 col2 = mCol[2].mValue;
    __m128d t_low = _mm_mul_pd(_mm_cvtps_pd(col0), xxxx);
    t_low = _mm_add_pd(t_low, _mm_mul_pd(_mm_cvtps_pd(col1), yyyy));
    t_low = _mm_add_pd(t_low, _mm_mul_pd(_mm_cvtps_pd(col2), zzzz));
    __m128d t_high = _mm_mul_pd(_mm_cvtps_pd(_mm_shuffle_ps(col0, col0, _MM_SHUFFLE(2, 2, 2, 2))), xxxx);
    t_high = _mm_add_pd(t_high, _mm_mul_pd(_mm_cvtps_pd(_mm_shuffle_ps(col1, col1, _MM_SHUFFLE(2, 2, 2, 2))), yyyy));
    t_high = _mm_add_pd(t_high, _mm_mul_pd(_mm_cvtps_pd(_mm_shuffle_ps(col2, col2, _MM_SHUFFLE(2, 2, 2, 2))), zzzz));
    return DVec3({ t_low, t_high });
#elif defined(JPH_USE_NEON)
    float64x2_t xxxx = vdupq_laneq_f64(inV.mValue.val[0], 0);
    float64x2_t yyyy = vdupq_laneq_f64(inV.mValue.val[0], 1);
    float64x2_t zzzz = vdupq_laneq_f64(inV.mValue.val[1], 0);
    float32x4_t col0 = mCol[0].mValue;
    float32x4_t col1 = mCol[1].mValue;
    float32x4_t col2 = mCol[2].mValue;
    float64x2_t t_low = vmulq_f64(vcvt_f64_f32(vget_low_f32(col0)), xxxx);
    t_low = vaddq_f64(t_low, vmulq_f64(vcvt_f64_f32(vget_low_f32(col1)), yyyy));
    t_low = vaddq_f64(t_low, vmulq_f64(vcvt_f64_f32(vget_low_f32(col2)), zzzz));
    float64x2_t t_high = vmulq_f64(vcvt_high_f64_f32(col0), xxxx);
    t_high = vaddq_f64(t_high, vmulq_f64(vcvt_high_f64_f32(col1), yyyy));
    t_high = vaddq_f64(t_high, vmulq_f64(vcvt_high_f64_f32(col2), zzzz));
    return DVec3::sFixW({ t_low, t_high });
#else
    return DVec3(
        double(mCol[0].mF32[0]) * inV.mF64[0] + double(mCol[1].mF32[0]) * inV.mF64[1] + double(mCol[2].mF32[0]) * inV.mF64[2],
        double(mCol[0].mF32[1]) * inV.mF64[0] + double(mCol[1].mF32[1]) * inV.mF64[1] + double(mCol[2].mF32[1]) * inV.mF64[2],
        double(mCol[0].mF32[2]) * inV.mF64[0] + double(mCol[1].mF32[2]) * inV.mF64[1] + double(mCol[2].mF32[2]) * inV.mF64[2]);
#endif
}

DMat44 DMat44::operator * (Mat44Arg inM) const
{
    DMat44 result;

    // Rotation part
#if defined(JPH_USE_SSE)
    for (int i = 0; i < 3; ++i)
    {
        __m128 c = inM.GetColumn4(i).mValue;
        __m128 t = _mm_mul_ps(mCol[0].mValue, _mm_shuffle_ps(c, c, _MM_SHUFFLE(0, 0, 0, 0)));
        t = _mm_add_ps(t, _mm_mul_ps(mCol[1].mValue, _mm_shuffle_ps(c, c, _MM_SHUFFLE(1, 1, 1, 1))));
        t = _mm_add_ps(t, _mm_mul_ps(mCol[2].mValue, _mm_shuffle_ps(c, c, _MM_SHUFFLE(2, 2, 2, 2))));
        result.mCol[i].mValue = t;
    }
#elif defined(JPH_USE_NEON)
    for (int i = 0; i < 3; ++i)
    {
        Type c = inM.GetColumn4(i).mValue;
        Type t = vmulq_f32(mCol[0].mValue, vdupq_laneq_f32(c, 0));
        t = vmlaq_f32(t, mCol[1].mValue, vdupq_laneq_f32(c, 1));
        t = vmlaq_f32(t, mCol[2].mValue, vdupq_laneq_f32(c, 2));
        result.mCol[i].mValue = t;
    }
#else
    for (int i = 0; i < 3; ++i)
    {
        Vec4 coli = inM.GetColumn4(i);
        result.mCol[i] = mCol[0] * coli.mF32[0] + mCol[1] * coli.mF32[1] + mCol[2] * coli.mF32[2];
    }
#endif

    // Translation part
    result.mCol3 = *this * inM.GetTranslation();

    return result;
}

DMat44 DMat44::operator * (DMat44Arg inM) const
{
    DMat44 result;

    // Rotation part
#if defined(JPH_USE_SSE)
    for (int i = 0; i < 3; ++i)
    {
        __m128 c = inM.mCol[i].mValue;
        __m128 t = _mm_mul_ps(mCol[0].mValue, _mm_shuffle_ps(c, c, _MM_SHUFFLE(0, 0, 0, 0)));
        t = _mm_add_ps(t, _mm_mul_ps(mCol[1].mValue, _mm_shuffle_ps(c, c, _MM_SHUFFLE(1, 1, 1, 1))));
        t = _mm_add_ps(t, _mm_mul_ps(mCol[2].mValue, _mm_shuffle_ps(c, c, _MM_SHUFFLE(2, 2, 2, 2))));
        result.mCol[i].mValue = t;
    }
#elif defined(JPH_USE_NEON)
    for (int i = 0; i < 3; ++i)
    {
        Type c = inM.GetColumn4(i).mValue;
        Type t = vmulq_f32(mCol[0].mValue, vdupq_laneq_f32(c, 0));
        t = vmlaq_f32(t, mCol[1].mValue, vdupq_laneq_f32(c, 1));
        t = vmlaq_f32(t, mCol[2].mValue, vdupq_laneq_f32(c, 2));
        result.mCol[i].mValue = t;
    }
#else
    for (int i = 0; i < 3; ++i)
    {
        Vec4 coli = inM.mCol[i];
        result.mCol[i] = mCol[0] * coli.mF32[0] + mCol[1] * coli.mF32[1] + mCol[2] * coli.mF32[2];
    }
#endif

    // Translation part
    result.mCol3 = *this * inM.GetTranslation();

    return result;
}

void DMat44::SetRotation(Mat44Arg inRotation)
{
    mCol[0] = inRotation.GetColumn4(0);
    mCol[1] = inRotation.GetColumn4(1);
    mCol[2] = inRotation.GetColumn4(2);
}

DMat44 DMat44::PreScaled(Vec3Arg inScale) const
{
    return DMat44(inScale.GetX() * mCol[0], inScale.GetY() * mCol[1], inScale.GetZ() * mCol[2], mCol3);
}

DMat44 DMat44::PostScaled(Vec3Arg inScale) const
{
    Vec4 scale(inScale, 1);
    return DMat44(scale * mCol[0], scale * mCol[1], scale * mCol[2], DVec3(scale) * mCol3);
}

DMat44 DMat44::PreTranslated(Vec3Arg inTranslation) const
{
    return DMat44(mCol[0], mCol[1], mCol[2], GetTranslation() + Multiply3x3(inTranslation));
}

DMat44 DMat44::PreTranslated(DVec3Arg inTranslation) const
{
    return DMat44(mCol[0], mCol[1], mCol[2], GetTranslation() + Multiply3x3(inTranslation));
}

DMat44 DMat44::PostTranslated(Vec3Arg inTranslation) const
{
    return DMat44(mCol[0], mCol[1], mCol[2], GetTranslation() + inTranslation);
}

DMat44 DMat44::PostTranslated(DVec3Arg inTranslation) const
{
    return DMat44(mCol[0], mCol[1], mCol[2], GetTranslation() + inTranslation);
}

DMat44 DMat44::Inversed() const
{
    DMat44 m(GetRotation().Inversed3x3());
    m.mCol3 = -m.Multiply3x3(mCol3);
    return m;
}

DMat44 DMat44::InversedRotationTranslation() const
{
    DMat44 m(GetRotation().Transposed3x3());
    m.mCol3 = -m.Multiply3x3(mCol3);
    return m;
}

JPH_NAMESPACE_END
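Usage note (a minimal sketch, not from the library itself; it assumes the usual Jolt setup where <Jolt/Jolt.h> is included before any other Jolt header): DMat44 keeps the rotation in floats and only the translation in doubles, so a large world offset survives the round trip below.

#include <Jolt/Jolt.h>
#include <Jolt/Math/DMat44.h>

using namespace JPH;

DVec3 RoundTripPoint()
{
    // Rotation stored as floats, translation stored as doubles
    DMat44 transform = DMat44::sRotationTranslation(Quat::sRotation(Vec3::sAxisY(), 0.5f * JPH_PI), DVec3(1.0e9, 0, 0));

    // operator * adds the double precision translation after the float 3x3 multiply
    DVec3 world = transform * Vec3(1, 2, 3);

    // Cheap inverse for rigid transforms (transposed rotation, negated translation)
    return transform.InversedRotationTranslation() * world; // ~(1, 2, 3)
}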
0
repos/c2z/use_cases/JoltPhysics/include
repos/c2z/use_cases/JoltPhysics/include/Math/Vec4.h
// Jolt Physics Library (https://github.com/jrouwe/JoltPhysics)
// SPDX-FileCopyrightText: 2021 Jorrit Rouwe
// SPDX-License-Identifier: MIT

#pragma once

#include <Jolt/Math/Float4.h>
#include <Jolt/Math/Swizzle.h>
#include <Jolt/Math/MathTypes.h>

JPH_NAMESPACE_BEGIN

class [[nodiscard]] alignas(JPH_VECTOR_ALIGNMENT) Vec4
{
public:
    JPH_OVERRIDE_NEW_DELETE

    // Underlying vector type
#if defined(JPH_USE_SSE)
    using Type = __m128;
#elif defined(JPH_USE_NEON)
    using Type = float32x4_t;
#else
    using Type = struct { float mData[4]; };
#endif

    /// Constructor
    Vec4() = default; ///< Intentionally not initialized for performance reasons
    Vec4(const Vec4 &inRHS) = default;
    explicit JPH_INLINE Vec4(Vec3Arg inRHS); ///< WARNING: W component undefined!
    JPH_INLINE Vec4(Vec3Arg inRHS, float inW);
    JPH_INLINE Vec4(Type inRHS) : mValue(inRHS) { }

    /// Create a vector from 4 components
    JPH_INLINE Vec4(float inX, float inY, float inZ, float inW);

    /// Vector with all zeros
    static JPH_INLINE Vec4 sZero();

    /// Vector with all NaN's
    static JPH_INLINE Vec4 sNaN();

    /// Replicate inV across all components
    static JPH_INLINE Vec4 sReplicate(float inV);

    /// Load 4 floats from memory
    static JPH_INLINE Vec4 sLoadFloat4(const Float4 *inV);

    /// Load 4 floats from memory, 16 bytes aligned
    static JPH_INLINE Vec4 sLoadFloat4Aligned(const Float4 *inV);

    /// Gather 4 floats from memory at inBase + inOffsets[i] * Scale
    template <const int Scale>
    static JPH_INLINE Vec4 sGatherFloat4(const float *inBase, UVec4Arg inOffsets);

    /// Return the minimum value of each of the components
    static JPH_INLINE Vec4 sMin(Vec4Arg inV1, Vec4Arg inV2);

    /// Return the maximum of each of the components
    static JPH_INLINE Vec4 sMax(Vec4Arg inV1, Vec4Arg inV2);

    /// Equals (component wise)
    static JPH_INLINE UVec4 sEquals(Vec4Arg inV1, Vec4Arg inV2);

    /// Less than (component wise)
    static JPH_INLINE UVec4 sLess(Vec4Arg inV1, Vec4Arg inV2);

    /// Less than or equal (component wise)
    static JPH_INLINE UVec4 sLessOrEqual(Vec4Arg inV1, Vec4Arg inV2);

    /// Greater than (component wise)
    static JPH_INLINE UVec4 sGreater(Vec4Arg inV1, Vec4Arg inV2);

    /// Greater than or equal (component wise)
    static JPH_INLINE UVec4 sGreaterOrEqual(Vec4Arg inV1, Vec4Arg inV2);

    /// Calculates inMul1 * inMul2 + inAdd
    static JPH_INLINE Vec4 sFusedMultiplyAdd(Vec4Arg inMul1, Vec4Arg inMul2, Vec4Arg inAdd);

    /// Component wise select, returns inV1 when highest bit of inControl = 0 and inV2 when highest bit of inControl = 1
    static JPH_INLINE Vec4 sSelect(Vec4Arg inV1, Vec4Arg inV2, UVec4Arg inControl);

    /// Logical or (component wise)
    static JPH_INLINE Vec4 sOr(Vec4Arg inV1, Vec4Arg inV2);

    /// Logical xor (component wise)
    static JPH_INLINE Vec4 sXor(Vec4Arg inV1, Vec4Arg inV2);

    /// Logical and (component wise)
    static JPH_INLINE Vec4 sAnd(Vec4Arg inV1, Vec4Arg inV2);

    /// Sort the four elements of ioValue and sort ioIndex at the same time.
    /// Based on a sorting network: http://en.wikipedia.org/wiki/Sorting_network
    static JPH_INLINE void sSort4(Vec4 &ioValue, UVec4 &ioIndex);

    /// Reverse sort the four elements of ioValue (highest first) and sort ioIndex at the same time.
    /// Based on a sorting network: http://en.wikipedia.org/wiki/Sorting_network
    static JPH_INLINE void sSort4Reverse(Vec4 &ioValue, UVec4 &ioIndex);

    /// Get individual components
#if defined(JPH_USE_SSE)
    JPH_INLINE float GetX() const { return _mm_cvtss_f32(mValue); }
    JPH_INLINE float GetY() const { return mF32[1]; }
    JPH_INLINE float GetZ() const { return mF32[2]; }
    JPH_INLINE float GetW() const { return mF32[3]; }
#elif defined(JPH_USE_NEON)
    JPH_INLINE float GetX() const { return vgetq_lane_f32(mValue, 0); }
    JPH_INLINE float GetY() const { return vgetq_lane_f32(mValue, 1); }
    JPH_INLINE float GetZ() const { return vgetq_lane_f32(mValue, 2); }
    JPH_INLINE float GetW() const { return vgetq_lane_f32(mValue, 3); }
#else
    JPH_INLINE float GetX() const { return mF32[0]; }
    JPH_INLINE float GetY() const { return mF32[1]; }
    JPH_INLINE float GetZ() const { return mF32[2]; }
    JPH_INLINE float GetW() const { return mF32[3]; }
#endif

    /// Set individual components
    JPH_INLINE void SetX(float inX) { mF32[0] = inX; }
    JPH_INLINE void SetY(float inY) { mF32[1] = inY; }
    JPH_INLINE void SetZ(float inZ) { mF32[2] = inZ; }
    JPH_INLINE void SetW(float inW) { mF32[3] = inW; }

    /// Get float component by index
    JPH_INLINE float operator [] (uint inCoordinate) const { JPH_ASSERT(inCoordinate < 4); return mF32[inCoordinate]; }
    JPH_INLINE float & operator [] (uint inCoordinate) { JPH_ASSERT(inCoordinate < 4); return mF32[inCoordinate]; }

    /// Comparison
    JPH_INLINE bool operator == (Vec4Arg inV2) const;
    JPH_INLINE bool operator != (Vec4Arg inV2) const { return !(*this == inV2); }

    /// Test if two vectors are close
    JPH_INLINE bool IsClose(Vec4Arg inV2, float inMaxDistSq = 1.0e-12f) const;

    /// Test if vector is normalized
    JPH_INLINE bool IsNormalized(float inTolerance = 1.0e-6f) const;

    /// Test if vector contains NaN elements
    JPH_INLINE bool IsNaN() const;

    /// Multiply two float vectors (component wise)
    JPH_INLINE Vec4 operator * (Vec4Arg inV2) const;

    /// Multiply vector with float
    JPH_INLINE Vec4 operator * (float inV2) const;

    /// Multiply vector with float
    friend JPH_INLINE Vec4 operator * (float inV1, Vec4Arg inV2);

    /// Divide vector by float
    JPH_INLINE Vec4 operator / (float inV2) const;

    /// Multiply vector with float
    JPH_INLINE Vec4 & operator *= (float inV2);

    /// Multiply vector with vector
    JPH_INLINE Vec4 & operator *= (Vec4Arg inV2);

    /// Divide vector by float
    JPH_INLINE Vec4 & operator /= (float inV2);

    /// Add two float vectors (component wise)
    JPH_INLINE Vec4 operator + (Vec4Arg inV2) const;

    /// Add two float vectors (component wise)
    JPH_INLINE Vec4 & operator += (Vec4Arg inV2);

    /// Negate
    JPH_INLINE Vec4 operator - () const;

    /// Subtract two float vectors (component wise)
    JPH_INLINE Vec4 operator - (Vec4Arg inV2) const;

    /// Subtract two float vectors (component wise)
    JPH_INLINE Vec4 & operator -= (Vec4Arg inV2);

    /// Divide (component wise)
    JPH_INLINE Vec4 operator / (Vec4Arg inV2) const;

    /// Swizzle the elements in inV
    template<uint32 SwizzleX, uint32 SwizzleY, uint32 SwizzleZ, uint32 SwizzleW>
    JPH_INLINE Vec4 Swizzle() const;

    /// Replicate the X component to all components
    JPH_INLINE Vec4 SplatX() const;

    /// Replicate the Y component to all components
    JPH_INLINE Vec4 SplatY() const;

    /// Replicate the Z component to all components
    JPH_INLINE Vec4 SplatZ() const;

    /// Replicate the W component to all components
    JPH_INLINE Vec4 SplatW() const;

    /// Return the absolute value of each of the components
    JPH_INLINE Vec4 Abs() const;

    /// Reciprocal vector (1 / value) for each of the components
    JPH_INLINE Vec4 Reciprocal() const;

    /// Dot product, returns the dot product in X, Y and Z components
    JPH_INLINE Vec4 DotV(Vec4Arg inV2) const;

    /// Dot product
    JPH_INLINE float Dot(Vec4Arg inV2) const;

    /// Squared length of vector
    JPH_INLINE float LengthSq() const;

    /// Length of vector
    JPH_INLINE float Length() const;

    /// Normalize vector
    JPH_INLINE Vec4 Normalized() const;

    /// Store 4 floats to memory
    JPH_INLINE void StoreFloat4(Float4 *outV) const;

    /// Convert each component from a float to an int
    JPH_INLINE UVec4 ToInt() const;

    /// Reinterpret Vec4 as a UVec4 (doesn't change the bits)
    JPH_INLINE UVec4 ReinterpretAsInt() const;

    /// Store if X is negative in bit 0, Y in bit 1, Z in bit 2 and W in bit 3
    JPH_INLINE int GetSignBits() const;

    /// Get the minimum of X, Y, Z and W
    JPH_INLINE float ReduceMin() const;

    /// Get the maximum of X, Y, Z and W
    JPH_INLINE float ReduceMax() const;

    /// Component wise square root
    JPH_INLINE Vec4 Sqrt() const;

    /// Get vector that contains the sign of each element (returns 1.0f if positive, -1.0f if negative)
    JPH_INLINE Vec4 GetSign() const;

    /// Calculate the sine and cosine for each element of this vector (input in radians)
    inline void SinCos(Vec4 &outSin, Vec4 &outCos) const;

    /// Calculate the tangent for each element of this vector (input in radians)
    inline Vec4 Tan() const;

    /// Calculate the arc sine for each element of this vector (returns value in the range [-PI / 2, PI / 2])
    /// Note that all input values will be clamped to the range [-1, 1] and this function will not return NaNs like std::asin
    inline Vec4 ASin() const;

    /// Calculate the arc cosine for each element of this vector (returns value in the range [0, PI])
    /// Note that all input values will be clamped to the range [-1, 1] and this function will not return NaNs like std::acos
    inline Vec4 ACos() const;

    /// Calculate the arc tangent for each element of this vector (returns value in the range [-PI / 2, PI / 2])
    inline Vec4 ATan() const;

    /// Calculate the arc tangent of y / x using the signs of the arguments to determine the correct quadrant (returns value in the range [-PI, PI])
    inline static Vec4 sATan2(Vec4Arg inY, Vec4Arg inX);

    /// To String
    friend ostream & operator << (ostream &inStream, Vec4Arg inV)
    {
        inStream << inV.mF32[0] << ", " << inV.mF32[1] << ", " << inV.mF32[2] << ", " << inV.mF32[3];
        return inStream;
    }

    union
    {
        Type mValue;
        float mF32[4];
    };
};

static_assert(is_trivial<Vec4>(), "Is supposed to be a trivial type!");

JPH_NAMESPACE_END

#include "Vec4.inl"
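A short sketch (typical usage, not part of the header) exercising the component wise helpers and the sorting network declared above:

#include <Jolt/Jolt.h>
#include <Jolt/Math/Vec4.h>
#include <Jolt/Math/UVec4.h>

using namespace JPH;

float Example()
{
    Vec4 a(1, 2, 3, 4);

    // fma = a * 0.5 + a, computed component wise
    Vec4 fma = Vec4::sFusedMultiplyAdd(a, Vec4::sReplicate(0.5f), a);

    // Sort 4 values and carry their original indices along
    Vec4 values(3, 1, 4, 2);
    UVec4 indices(0, 1, 2, 3);
    Vec4::sSort4(values, indices); // values = (1, 2, 3, 4), indices = (1, 3, 0, 2)

    return fma.Dot(values);
}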
0
repos/c2z/use_cases/JoltPhysics/include
repos/c2z/use_cases/JoltPhysics/include/Math/HalfFloat.h
// Jolt Physics Library (https://github.com/jrouwe/JoltPhysics)
// SPDX-FileCopyrightText: 2021 Jorrit Rouwe
// SPDX-License-Identifier: MIT

#pragma once

#include <Jolt/Math/Vec4.h>

JPH_NAMESPACE_BEGIN

using HalfFloat = uint16;

// Define half float constant values
static constexpr HalfFloat HALF_FLT_MAX = 0x7bff;
static constexpr HalfFloat HALF_FLT_MAX_NEGATIVE = 0xfbff;
static constexpr HalfFloat HALF_FLT_INF = 0x7c00;
static constexpr HalfFloat HALF_FLT_INF_NEGATIVE = 0xfc00;
static constexpr HalfFloat HALF_FLT_NANQ = 0x7e00;
static constexpr HalfFloat HALF_FLT_NANQ_NEGATIVE = 0xfe00;

namespace HalfFloatConversion {

// Layout of a float
static constexpr int FLOAT_SIGN_POS = 31;
static constexpr int FLOAT_EXPONENT_POS = 23;
static constexpr int FLOAT_EXPONENT_BITS = 8;
static constexpr int FLOAT_EXPONENT_MASK = (1 << FLOAT_EXPONENT_BITS) - 1;
static constexpr int FLOAT_EXPONENT_BIAS = 127;
static constexpr int FLOAT_MANTISSA_BITS = 23;
static constexpr int FLOAT_MANTISSA_MASK = (1 << FLOAT_MANTISSA_BITS) - 1;
static constexpr int FLOAT_EXPONENT_AND_MANTISSA_MASK = FLOAT_MANTISSA_MASK + (FLOAT_EXPONENT_MASK << FLOAT_EXPONENT_POS);

// Layout of half float
static constexpr int HALF_FLT_SIGN_POS = 15;
static constexpr int HALF_FLT_EXPONENT_POS = 10;
static constexpr int HALF_FLT_EXPONENT_BITS = 5;
static constexpr int HALF_FLT_EXPONENT_MASK = (1 << HALF_FLT_EXPONENT_BITS) - 1;
static constexpr int HALF_FLT_EXPONENT_BIAS = 15;
static constexpr int HALF_FLT_MANTISSA_BITS = 10;
static constexpr int HALF_FLT_MANTISSA_MASK = (1 << HALF_FLT_MANTISSA_BITS) - 1;
static constexpr int HALF_FLT_EXPONENT_AND_MANTISSA_MASK = HALF_FLT_MANTISSA_MASK + (HALF_FLT_EXPONENT_MASK << HALF_FLT_EXPONENT_POS);

/// Define half-float rounding modes
enum ERoundingMode
{
    ROUND_TO_NEG_INF, ///< Round to negative infinity
    ROUND_TO_POS_INF, ///< Round to positive infinity
    ROUND_TO_NEAREST, ///< Round to nearest value
};

/// Convert a float (32-bits) to a half float (16-bits), fallback version when no intrinsics available
template <int RoundingMode>
inline HalfFloat FromFloatFallback(float inV)
{
    // Reinterpret the float as an uint32
    uint32 value = BitCast<uint32>(inV);

    // Extract exponent
    uint32 exponent = (value >> FLOAT_EXPONENT_POS) & FLOAT_EXPONENT_MASK;

    // Extract mantissa
    uint32 mantissa = value & FLOAT_MANTISSA_MASK;

    // Extract the sign and move it into the right spot for the half float (so we can just or it in at the end)
    HalfFloat hf_sign = HalfFloat(value >> (FLOAT_SIGN_POS - HALF_FLT_SIGN_POS)) & (1 << HALF_FLT_SIGN_POS);

    // Check NaN or INF
    if (exponent == FLOAT_EXPONENT_MASK) // NaN or INF
        return hf_sign | (mantissa == 0? HALF_FLT_INF : HALF_FLT_NANQ);

    // Rebias the exponent for half floats
    int rebiased_exponent = int(exponent) - FLOAT_EXPONENT_BIAS + HALF_FLT_EXPONENT_BIAS;

    // Check overflow to infinity
    if (rebiased_exponent >= HALF_FLT_EXPONENT_MASK)
    {
        bool round_up = RoundingMode == ROUND_TO_NEAREST || (hf_sign == 0) == (RoundingMode == ROUND_TO_POS_INF);
        return hf_sign | (round_up? HALF_FLT_INF : HALF_FLT_MAX);
    }

    // Check underflow to zero
    if (rebiased_exponent < -HALF_FLT_MANTISSA_BITS)
    {
        bool round_up = RoundingMode != ROUND_TO_NEAREST && (hf_sign == 0) == (RoundingMode == ROUND_TO_POS_INF) && (value & FLOAT_EXPONENT_AND_MANTISSA_MASK) != 0;
        return hf_sign | (round_up? 1 : 0);
    }

    HalfFloat hf_exponent;
    int shift;
    if (rebiased_exponent <= 0)
    {
        // Underflow to denormalized number
        hf_exponent = 0;
        mantissa |= 1 << FLOAT_MANTISSA_BITS; // Add the implicit 1 bit to the mantissa
        shift = FLOAT_MANTISSA_BITS - HALF_FLT_MANTISSA_BITS + 1 - rebiased_exponent;
    }
    else
    {
        // Normal half float
        hf_exponent = HalfFloat(rebiased_exponent << HALF_FLT_EXPONENT_POS);
        shift = FLOAT_MANTISSA_BITS - HALF_FLT_MANTISSA_BITS;
    }

    // Compose the half float
    HalfFloat hf_mantissa = HalfFloat(mantissa >> shift);
    HalfFloat hf = hf_sign | hf_exponent | hf_mantissa;

    // Calculate the remaining bits that we're discarding
    uint remainder = mantissa & ((1 << shift) - 1);

    if constexpr (RoundingMode == ROUND_TO_NEAREST)
    {
        // Round to nearest
        uint round_threshold = 1 << (shift - 1);
        if (remainder > round_threshold // Above threshold, we must always round
            || (remainder == round_threshold && (hf_mantissa & 1))) // When equal, round to nearest even
            hf++; // May overflow to infinity
    }
    else
    {
        // Round up or down (truncate) depending on the rounding mode
        bool round_up = (hf_sign == 0) == (RoundingMode == ROUND_TO_POS_INF) && remainder != 0;
        if (round_up)
            hf++; // May overflow to infinity
    }

    return hf;
}

/// Convert a float (32-bits) to a half float (16-bits)
template <int RoundingMode>
JPH_INLINE HalfFloat FromFloat(float inV)
{
#ifdef JPH_USE_F16C
    union
    {
        __m128i u128;
        HalfFloat u16[8];
    } hf;
    __m128 val = _mm_load_ss(&inV);
    switch (RoundingMode)
    {
    case ROUND_TO_NEG_INF:
        hf.u128 = _mm_cvtps_ph(val, _MM_FROUND_TO_NEG_INF);
        break;
    case ROUND_TO_POS_INF:
        hf.u128 = _mm_cvtps_ph(val, _MM_FROUND_TO_POS_INF);
        break;
    case ROUND_TO_NEAREST:
        hf.u128 = _mm_cvtps_ph(val, _MM_FROUND_TO_NEAREST_INT);
        break;
    }
    return hf.u16[0];
#else
    return FromFloatFallback<RoundingMode>(inV);
#endif
}

/// Convert 4 half floats (lower 64 bits) to floats, fallback version when no intrinsics available
inline Vec4 ToFloatFallback(UVec4Arg inValue)
{
    // Unpack half floats to 4 uint32's
    UVec4 value = inValue.Expand4Uint16Lo();

    // Normal half float path, extract the exponent and mantissa, shift them into place and update the exponent bias
    UVec4 exponent_mantissa = UVec4::sAnd(value, UVec4::sReplicate(HALF_FLT_EXPONENT_AND_MANTISSA_MASK)).LogicalShiftLeft<FLOAT_EXPONENT_POS - HALF_FLT_EXPONENT_POS>() + UVec4::sReplicate((FLOAT_EXPONENT_BIAS - HALF_FLT_EXPONENT_BIAS) << FLOAT_EXPONENT_POS);

    // Denormalized half float path, renormalize the float
    UVec4 exponent_mantissa_denormalized = ((exponent_mantissa + UVec4::sReplicate(1 << FLOAT_EXPONENT_POS)).ReinterpretAsFloat() - UVec4::sReplicate((FLOAT_EXPONENT_BIAS - HALF_FLT_EXPONENT_BIAS + 1) << FLOAT_EXPONENT_POS).ReinterpretAsFloat()).ReinterpretAsInt();

    // NaN / INF path, set all exponent bits
    UVec4 exponent_mantissa_nan_inf = UVec4::sOr(exponent_mantissa, UVec4::sReplicate(FLOAT_EXPONENT_MASK << FLOAT_EXPONENT_POS));

    // Get the exponent to determine which of the paths we should take
    UVec4 exponent_mask = UVec4::sReplicate(HALF_FLT_EXPONENT_MASK << HALF_FLT_EXPONENT_POS);
    UVec4 exponent = UVec4::sAnd(value, exponent_mask);
    UVec4 is_denormalized = UVec4::sEquals(exponent, UVec4::sZero());
    UVec4 is_nan_inf = UVec4::sEquals(exponent, exponent_mask);

    // Select the correct result
    UVec4 result_exponent_mantissa = UVec4::sSelect(UVec4::sSelect(exponent_mantissa, exponent_mantissa_nan_inf, is_nan_inf), exponent_mantissa_denormalized, is_denormalized);

    // Extract the sign bit and shift it to the left
    UVec4 sign = UVec4::sAnd(value, UVec4::sReplicate(1 << HALF_FLT_SIGN_POS)).LogicalShiftLeft<FLOAT_SIGN_POS - HALF_FLT_SIGN_POS>();

    // Construct the float
    return UVec4::sOr(sign, result_exponent_mantissa).ReinterpretAsFloat();
}

/// Convert 4 half floats (lower 64 bits) to floats
JPH_INLINE Vec4 ToFloat(UVec4Arg inValue)
{
#if defined(JPH_USE_F16C)
    return _mm_cvtph_ps(inValue.mValue);
#elif defined(JPH_USE_NEON)
    return vcvt_f32_f16(vreinterpret_f16_f32(vget_low_f32(inValue.mValue)));
#else
    return ToFloatFallback(inValue);
#endif
}

} // HalfFloatConversion

JPH_NAMESPACE_END
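Sketch (not part of the header): a float to half to float round trip with the helpers above. UVec4::sReplicate puts the half float in every lane so ToFloat can expand it from the first one.

#include <Jolt/Jolt.h>
#include <Jolt/Math/HalfFloat.h>

using namespace JPH;

float RoundTrip(float inValue)
{
    HalfFloat hf = HalfFloatConversion::FromFloat<HalfFloatConversion::ROUND_TO_NEAREST>(inValue);
    return HalfFloatConversion::ToFloat(UVec4::sReplicate(hf)).GetX();
}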
0
repos/c2z/use_cases/JoltPhysics/include
repos/c2z/use_cases/JoltPhysics/include/Math/UVec8.inl
// Jolt Physics Library (https://github.com/jrouwe/JoltPhysics)
// SPDX-FileCopyrightText: 2021 Jorrit Rouwe
// SPDX-License-Identifier: MIT

JPH_NAMESPACE_BEGIN

UVec8::UVec8(UVec4Arg inLo, UVec4Arg inHi) :
    mValue(_mm256_insertf128_si256(_mm256_castsi128_si256(inLo.mValue), inHi.mValue, 1))
{
}

bool UVec8::operator == (UVec8Arg inV2) const
{
    return sEquals(*this, inV2).TestAllTrue();
}

UVec8 UVec8::sReplicate(uint32 inV)
{
    return _mm256_set1_epi32(int(inV));
}

UVec8 UVec8::sSplatX(UVec4Arg inV)
{
    return _mm256_set1_epi32(inV.GetX());
}

UVec8 UVec8::sSplatY(UVec4Arg inV)
{
    return _mm256_set1_epi32(inV.GetY());
}

UVec8 UVec8::sSplatZ(UVec4Arg inV)
{
    return _mm256_set1_epi32(inV.GetZ());
}

UVec8 UVec8::sEquals(UVec8Arg inV1, UVec8Arg inV2)
{
#ifdef JPH_USE_AVX2
    return _mm256_cmpeq_epi32(inV1.mValue, inV2.mValue);
#else
    return UVec8(UVec4::sEquals(inV1.LowerVec4(), inV2.LowerVec4()), UVec4::sEquals(inV1.UpperVec4(), inV2.UpperVec4()));
#endif
}

UVec8 UVec8::sSelect(UVec8Arg inV1, UVec8Arg inV2, UVec8Arg inControl)
{
    return _mm256_castps_si256(_mm256_blendv_ps(_mm256_castsi256_ps(inV1.mValue), _mm256_castsi256_ps(inV2.mValue), _mm256_castsi256_ps(inControl.mValue)));
}

UVec8 UVec8::sOr(UVec8Arg inV1, UVec8Arg inV2)
{
    return _mm256_castps_si256(_mm256_or_ps(_mm256_castsi256_ps(inV1.mValue), _mm256_castsi256_ps(inV2.mValue)));
}

UVec8 UVec8::sXor(UVec8Arg inV1, UVec8Arg inV2)
{
    return _mm256_castps_si256(_mm256_xor_ps(_mm256_castsi256_ps(inV1.mValue), _mm256_castsi256_ps(inV2.mValue)));
}

UVec8 UVec8::sAnd(UVec8Arg inV1, UVec8Arg inV2)
{
    return _mm256_castps_si256(_mm256_and_ps(_mm256_castsi256_ps(inV1.mValue), _mm256_castsi256_ps(inV2.mValue)));
}

template<uint32 SwizzleX, uint32 SwizzleY, uint32 SwizzleZ, uint32 SwizzleW>
UVec8 UVec8::Swizzle() const
{
    static_assert(SwizzleX <= 3, "SwizzleX template parameter out of range");
    static_assert(SwizzleY <= 3, "SwizzleY template parameter out of range");
    static_assert(SwizzleZ <= 3, "SwizzleZ template parameter out of range");
    static_assert(SwizzleW <= 3, "SwizzleW template parameter out of range");

    return _mm256_castps_si256(_mm256_shuffle_ps(_mm256_castsi256_ps(mValue), _mm256_castsi256_ps(mValue), _MM_SHUFFLE(SwizzleW, SwizzleZ, SwizzleY, SwizzleX)));
}

bool UVec8::TestAnyTrue() const
{
    return _mm256_movemask_ps(_mm256_castsi256_ps(mValue)) != 0;
}

bool UVec8::TestAllTrue() const
{
    return _mm256_movemask_ps(_mm256_castsi256_ps(mValue)) == 0xff;
}

UVec4 UVec8::LowerVec4() const
{
    return _mm256_castsi256_si128(mValue);
}

UVec4 UVec8::UpperVec4() const
{
    return _mm_castps_si128(_mm256_extractf128_ps(_mm256_castsi256_ps(mValue), 1));
}

Vec8 UVec8::ToFloat() const
{
    return _mm256_cvtepi32_ps(mValue);
}

template <const uint Count>
UVec8 UVec8::LogicalShiftLeft() const
{
    static_assert(Count <= 31, "Invalid shift");

#ifdef JPH_USE_AVX2
    return _mm256_slli_epi32(mValue, Count);
#else
    return UVec8(LowerVec4().LogicalShiftLeft<Count>(), UpperVec4().LogicalShiftLeft<Count>());
#endif
}

template <const uint Count>
UVec8 UVec8::LogicalShiftRight() const
{
    static_assert(Count <= 31, "Invalid shift");

#ifdef JPH_USE_AVX2
    return _mm256_srli_epi32(mValue, Count);
#else
    return UVec8(LowerVec4().LogicalShiftRight<Count>(), UpperVec4().LogicalShiftRight<Count>());
#endif
}

template <const uint Count>
UVec8 UVec8::ArithmeticShiftRight() const
{
    static_assert(Count <= 31, "Invalid shift");

#ifdef JPH_USE_AVX2
    return _mm256_srai_epi32(mValue, Count);
#else
    return UVec8(LowerVec4().ArithmeticShiftRight<Count>(), UpperVec4().ArithmeticShiftRight<Count>());
#endif
}

JPH_NAMESPACE_END
0
repos/c2z/use_cases/JoltPhysics/include
repos/c2z/use_cases/JoltPhysics/include/Math/Real.h
// Jolt Physics Library (https://github.com/jrouwe/JoltPhysics)
// SPDX-FileCopyrightText: 2022 Jorrit Rouwe
// SPDX-License-Identifier: MIT

#pragma once

#include <Jolt/Math/DVec3.h>
#include <Jolt/Math/DMat44.h>

JPH_NAMESPACE_BEGIN

#ifdef JPH_DOUBLE_PRECISION

// Define real to double
using Real = double;
using Real3 = Double3;
using RVec3 = DVec3;
using RVec3Arg = DVec3Arg;
using RMat44 = DMat44;
using RMat44Arg = DMat44Arg;

#define JPH_RVECTOR_ALIGNMENT JPH_DVECTOR_ALIGNMENT

#else

// Define real to float
using Real = float;
using Real3 = Float3;
using RVec3 = Vec3;
using RVec3Arg = Vec3Arg;
using RMat44 = Mat44;
using RMat44Arg = Mat44Arg;

#define JPH_RVECTOR_ALIGNMENT JPH_VECTOR_ALIGNMENT

#endif // JPH_DOUBLE_PRECISION

// Put the 'real' operator in a namespace so that users can opt in to use it:
// using namespace JPH::literals;
namespace literals {
    constexpr Real operator "" _r (long double inValue) { return Real(inValue); }
};

JPH_NAMESPACE_END
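Sketch: code written against the Real aliases compiles in both precision modes; the _r literal below is a float in single precision builds and a double otherwise.

#include <Jolt/Jolt.h>
#include <Jolt/Math/Real.h>

using namespace JPH;
using namespace JPH::literals;

RVec3 Nudge(RVec3Arg inPos)
{
    // RVec3 is Vec3 or DVec3 depending on JPH_DOUBLE_PRECISION
    return inPos + RVec3(0.5_r, 0.0_r, 0.0_r);
}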
0
repos/c2z/use_cases/JoltPhysics/include
repos/c2z/use_cases/JoltPhysics/include/Math/Vector.h
// Jolt Physics Library (https://github.com/jrouwe/JoltPhysics)
// SPDX-FileCopyrightText: 2021 Jorrit Rouwe
// SPDX-License-Identifier: MIT

#pragma once

JPH_NAMESPACE_BEGIN

/// Templatized vector class
template <uint Rows>
class [[nodiscard]] Vector
{
public:
    /// Constructor
    inline Vector() = default;
    inline Vector(const Vector &inRHS) { *this = inRHS; }

    /// Dimensions
    inline uint GetRows() const { return Rows; }

    /// Vector with all zeros
    inline void SetZero()
    {
        for (uint r = 0; r < Rows; ++r)
            mF32[r] = 0.0f;
    }

    inline static Vector sZero() { Vector v; v.SetZero(); return v; }

    /// Copy a (part) of another vector into this vector
    template <class OtherVector>
    void CopyPart(const OtherVector &inV, uint inSourceRow, uint inNumRows, uint inDestRow)
    {
        for (uint r = 0; r < inNumRows; ++r)
            mF32[inDestRow + r] = inV[inSourceRow + r];
    }

    /// Get float component by index
    inline float operator [] (uint inCoordinate) const { JPH_ASSERT(inCoordinate < Rows); return mF32[inCoordinate]; }
    inline float & operator [] (uint inCoordinate) { JPH_ASSERT(inCoordinate < Rows); return mF32[inCoordinate]; }

    /// Comparison
    inline bool operator == (const Vector &inV2) const
    {
        for (uint r = 0; r < Rows; ++r)
            if (mF32[r] != inV2.mF32[r])
                return false;
        return true;
    }

    inline bool operator != (const Vector &inV2) const
    {
        for (uint r = 0; r < Rows; ++r)
            if (mF32[r] != inV2.mF32[r])
                return true;
        return false;
    }

    /// Test if vector consists of all zeros
    inline bool IsZero() const
    {
        for (uint r = 0; r < Rows; ++r)
            if (mF32[r] != 0.0f)
                return false;
        return true;
    }

    /// Test if two vectors are close to each other
    inline bool IsClose(const Vector &inV2, float inMaxDistSq = 1.0e-12f)
    {
        return (inV2 - *this).LengthSq() <= inMaxDistSq;
    }

    /// Assignment
    inline Vector & operator = (const Vector &inV2)
    {
        for (uint r = 0; r < Rows; ++r)
            mF32[r] = inV2.mF32[r];
        return *this;
    }

    /// Multiply vector with float
    inline Vector operator * (const float inV2) const
    {
        Vector v;
        for (uint r = 0; r < Rows; ++r)
            v.mF32[r] = mF32[r] * inV2;
        return v;
    }

    inline Vector & operator *= (const float inV2)
    {
        for (uint r = 0; r < Rows; ++r)
            mF32[r] *= inV2;
        return *this;
    }

    /// Multiply vector with float
    inline friend Vector operator * (const float inV1, const Vector &inV2)
    {
        return inV2 * inV1;
    }

    /// Divide vector by float
    inline Vector operator / (float inV2) const
    {
        Vector v;
        for (uint r = 0; r < Rows; ++r)
            v.mF32[r] = mF32[r] / inV2;
        return v;
    }

    /// Add two float vectors (component wise)
    inline Vector operator + (const Vector &inV2) const
    {
        Vector v;
        for (uint r = 0; r < Rows; ++r)
            v.mF32[r] = mF32[r] + inV2.mF32[r];
        return v;
    }

    inline Vector & operator += (const Vector &inV2)
    {
        for (uint r = 0; r < Rows; ++r)
            mF32[r] += inV2.mF32[r];
        return *this;
    }

    /// Negate
    inline Vector operator - () const
    {
        Vector v;
        for (uint r = 0; r < Rows; ++r)
            v.mF32[r] = -mF32[r];
        return v;
    }

    /// Subtract two float vectors (component wise)
    inline Vector operator - (const Vector &inV2) const
    {
        Vector v;
        for (uint r = 0; r < Rows; ++r)
            v.mF32[r] = mF32[r] - inV2.mF32[r];
        return v;
    }

    inline Vector & operator -= (const Vector &inV2)
    {
        for (uint r = 0; r < Rows; ++r)
            mF32[r] -= inV2.mF32[r];
        return *this;
    }

    /// Dot product
    inline float Dot(const Vector &inV2) const
    {
        float dot = 0.0f;
        for (uint r = 0; r < Rows; ++r)
            dot += mF32[r] * inV2.mF32[r];
        return dot;
    }

    /// Squared length of vector
    inline float LengthSq() const { return Dot(*this); }

    /// Length of vector
    inline float Length() const { return sqrt(LengthSq()); }

    /// Check if vector is normalized
    inline bool IsNormalized(float inToleranceSq = 1.0e-6f) { return abs(LengthSq() - 1.0f) <= inToleranceSq; }

    /// Normalize vector
    inline Vector Normalized() const { return *this / Length(); }

    /// To String
    friend ostream & operator << (ostream &inStream, const Vector &inV)
    {
        inStream << "[";
        for (uint i = 0; i < Rows - 1; ++i)
            inStream << inV.mF32[i] << ", ";
        inStream << inV.mF32[Rows - 1] << "]";
        return inStream;
    }

    float mF32[Rows];
};

JPH_NAMESPACE_END
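Sketch: Vector<N> is a plain fixed-size float vector (no SIMD) used by solver style code; all operations are simple loops over the rows.

#include <Jolt/Jolt.h>
#include <Jolt/Math/Vector.h>

using namespace JPH;

float Example()
{
    Vector<3> v = Vector<3>::sZero();
    v[0] = 3.0f;
    v[1] = 4.0f;

    Vector<3> w = v * 2.0f;       // component wise scale -> (6, 8, 0)
    return w.Length() + v.Dot(w); // 10 + 50
}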
0
repos/c2z/use_cases/JoltPhysics/include
repos/c2z/use_cases/JoltPhysics/include/Math/UVec8.h
// Jolt Physics Library (https://github.com/jrouwe/JoltPhysics)
// SPDX-FileCopyrightText: 2021 Jorrit Rouwe
// SPDX-License-Identifier: MIT

#pragma once

#include <Jolt/Math/Vec8.h>

JPH_NAMESPACE_BEGIN

class [[nodiscard]] UVec8
{
public:
    JPH_OVERRIDE_NEW_DELETE

    UVec8() = default; ///< Intentionally not initialized for performance reasons
    UVec8(const UVec8 &inRHS) = default;
    JPH_INLINE UVec8(__m256i inRHS) : mValue(inRHS) { }

    /// Set 256 bit vector from 2 128 bit vectors
    JPH_INLINE UVec8(UVec4Arg inLo, UVec4Arg inHi);

    /// Comparison
    JPH_INLINE bool operator == (UVec8Arg inV2) const;
    JPH_INLINE bool operator != (UVec8Arg inV2) const { return !(*this == inV2); }

    /// Replicate int across all components
    static JPH_INLINE UVec8 sReplicate(uint32 inV);

    /// Replicate the X component of inV to all components
    static JPH_INLINE UVec8 sSplatX(UVec4Arg inV);

    /// Replicate the Y component of inV to all components
    static JPH_INLINE UVec8 sSplatY(UVec4Arg inV);

    /// Replicate the Z component of inV to all components
    static JPH_INLINE UVec8 sSplatZ(UVec4Arg inV);

    /// Equals (component wise)
    static JPH_INLINE UVec8 sEquals(UVec8Arg inV1, UVec8Arg inV2);

    /// Component wise select, returns inV1 when highest bit of inControl = 0 and inV2 when highest bit of inControl = 1
    static JPH_INLINE UVec8 sSelect(UVec8Arg inV1, UVec8Arg inV2, UVec8Arg inControl);

    /// Logical or
    static JPH_INLINE UVec8 sOr(UVec8Arg inV1, UVec8Arg inV2);

    /// Logical xor
    static JPH_INLINE UVec8 sXor(UVec8Arg inV1, UVec8Arg inV2);

    /// Logical and
    static JPH_INLINE UVec8 sAnd(UVec8Arg inV1, UVec8Arg inV2);

    /// Get float component by index
    JPH_INLINE uint32 operator [] (uint inCoordinate) const { JPH_ASSERT(inCoordinate < 8); return mU32[inCoordinate]; }
    JPH_INLINE uint32 & operator [] (uint inCoordinate) { JPH_ASSERT(inCoordinate < 8); return mU32[inCoordinate]; }

    /// 256 bit variant of Vec::Swizzle (no cross 128 bit lane swizzle)
    template<uint32 SwizzleX, uint32 SwizzleY, uint32 SwizzleZ, uint32 SwizzleW>
    JPH_INLINE UVec8 Swizzle() const;

    /// Test if any of the components are true (true is when highest bit of component is set)
    JPH_INLINE bool TestAnyTrue() const;

    /// Test if all components are true (true is when highest bit of component is set)
    JPH_INLINE bool TestAllTrue() const;

    /// Fetch the lower 128 bit from a 256 bit variable
    JPH_INLINE UVec4 LowerVec4() const;

    /// Fetch the higher 128 bit from a 256 bit variable
    JPH_INLINE UVec4 UpperVec4() const;

    /// Converts int to float
    JPH_INLINE Vec8 ToFloat() const;

    /// Shift all components by Count bits to the left (filling with zeros from the left)
    template <const uint Count>
    JPH_INLINE UVec8 LogicalShiftLeft() const;

    /// Shift all components by Count bits to the right (filling with zeros from the right)
    template <const uint Count>
    JPH_INLINE UVec8 LogicalShiftRight() const;

    /// Shift all components by Count bits to the right (shifting in the value of the highest bit)
    template <const uint Count>
    JPH_INLINE UVec8 ArithmeticShiftRight() const;

    union
    {
        __m256i mValue;
        uint32 mU32[8];
    };
};

static_assert(is_trivial<UVec8>(), "Is supposed to be a trivial type!");

JPH_NAMESPACE_END

#include "UVec8.inl"
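Sketch (assumes an AVX capable build, since UVec8 wraps __m256i): the class is mostly used as an 8-wide boolean mask, for example:

#include <Jolt/Jolt.h>
#include <Jolt/Math/UVec8.h>

using namespace JPH;

bool AnyLaneEqual(UVec8Arg inA, UVec8Arg inB)
{
    // Each equal lane becomes all-ones; TestAnyTrue checks the sign bits
    return UVec8::sEquals(inA, inB).TestAnyTrue();
}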
0
repos/c2z/use_cases/JoltPhysics/include
repos/c2z/use_cases/JoltPhysics/include/Math/Trigonometry.h
// Jolt Physics Library (https://github.com/jrouwe/JoltPhysics)
// SPDX-FileCopyrightText: 2021 Jorrit Rouwe
// SPDX-License-Identifier: MIT

#pragma once

JPH_NAMESPACE_BEGIN

// Note that this file exists because std::sin etc. are not platform independent and will lead to non-deterministic simulation

/// Sine of x (input in radians)
JPH_INLINE float Sin(float inX)
{
    Vec4 s, c;
    Vec4::sReplicate(inX).SinCos(s, c);
    return s.GetX();
}

/// Cosine of x (input in radians)
JPH_INLINE float Cos(float inX)
{
    Vec4 s, c;
    Vec4::sReplicate(inX).SinCos(s, c);
    return c.GetX();
}

/// Tangent of x (input in radians)
JPH_INLINE float Tan(float inX)
{
    return Vec4::sReplicate(inX).Tan().GetX();
}

/// Arc sine of x (returns value in the range [-PI / 2, PI / 2])
/// Note that all input values will be clamped to the range [-1, 1] and this function will not return NaNs like std::asin
JPH_INLINE float ASin(float inX)
{
    return Vec4::sReplicate(inX).ASin().GetX();
}

/// Arc cosine of x (returns value in the range [0, PI])
/// Note that all input values will be clamped to the range [-1, 1] and this function will not return NaNs like std::acos
JPH_INLINE float ACos(float inX)
{
    return Vec4::sReplicate(inX).ACos().GetX();
}

/// Arc tangent of x (returns value in the range [-PI / 2, PI / 2])
JPH_INLINE float ATan(float inX)
{
    return Vec4::sReplicate(inX).ATan().GetX();
}

/// Arc tangent of y / x using the signs of the arguments to determine the correct quadrant (returns value in the range [-PI, PI])
JPH_INLINE float ATan2(float inY, float inX)
{
    return Vec4::sATan2(Vec4::sReplicate(inY), Vec4::sReplicate(inX)).GetX();
}

JPH_NAMESPACE_END
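Sketch: inside simulation code these wrappers should be used instead of <cmath> so results match across platforms, for example:

#include <Jolt/Jolt.h>
#include <Jolt/Math/Trigonometry.h>

using namespace JPH;

float HeadingAngle(float inDirX, float inDirY)
{
    return ATan2(inDirY, inDirX); // deterministic, in [-PI, PI]
}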
0
repos/c2z/use_cases/JoltPhysics/include
repos/c2z/use_cases/JoltPhysics/include/Math/DMat44.h
// Jolt Physics Library (https://github.com/jrouwe/JoltPhysics)
// SPDX-FileCopyrightText: 2022 Jorrit Rouwe
// SPDX-License-Identifier: MIT

#pragma once

#include <Jolt/Math/MathTypes.h>

JPH_NAMESPACE_BEGIN

/// Holds a 4x4 matrix of floats with the last column consisting of doubles
class [[nodiscard]] alignas(JPH_DVECTOR_ALIGNMENT) DMat44
{
public:
    JPH_OVERRIDE_NEW_DELETE

    // Underlying column type
    using Type = Vec4::Type;
    using DType = DVec3::Type;
    using DTypeArg = DVec3::TypeArg;

    // Argument type
    using ArgType = DMat44Arg;

    /// Constructor
    DMat44() = default; ///< Intentionally not initialized for performance reasons
    JPH_INLINE DMat44(Vec4Arg inC1, Vec4Arg inC2, Vec4Arg inC3, DVec3Arg inC4);
    DMat44(const DMat44 &inM2) = default;
    JPH_INLINE explicit DMat44(Mat44Arg inM);
    JPH_INLINE DMat44(Mat44Arg inRot, DVec3Arg inT);
    JPH_INLINE DMat44(Type inC1, Type inC2, Type inC3, DTypeArg inC4);

    /// Zero matrix
    static JPH_INLINE DMat44 sZero();

    /// Identity matrix
    static JPH_INLINE DMat44 sIdentity();

    /// Rotate from quaternion
    static JPH_INLINE DMat44 sRotation(QuatArg inQuat) { return DMat44(Mat44::sRotation(inQuat), DVec3::sZero()); }

    /// Get matrix that translates
    static JPH_INLINE DMat44 sTranslation(DVec3Arg inV) { return DMat44(Vec4(1, 0, 0, 0), Vec4(0, 1, 0, 0), Vec4(0, 0, 1, 0), inV); }

    /// Get matrix that rotates and translates
    static JPH_INLINE DMat44 sRotationTranslation(QuatArg inR, DVec3Arg inT) { return DMat44(Mat44::sRotation(inR), inT); }

    /// Get inverse matrix of sRotationTranslation
    static JPH_INLINE DMat44 sInverseRotationTranslation(QuatArg inR, DVec3Arg inT);

    /// Get matrix that scales (produces a matrix with (inV, 1) on its diagonal)
    static JPH_INLINE DMat44 sScale(Vec3Arg inV) { return DMat44(Mat44::sScale(inV), DVec3::sZero()); }

    /// Convert to Mat44 rounding to nearest
    JPH_INLINE Mat44 ToMat44() const { return Mat44(mCol[0], mCol[1], mCol[2], Vec3(mCol3)); }

    /// Comparison
    JPH_INLINE bool operator == (DMat44Arg inM2) const;
    JPH_INLINE bool operator != (DMat44Arg inM2) const { return !(*this == inM2); }

    /// Test if two matrices are close
    JPH_INLINE bool IsClose(DMat44Arg inM2, float inMaxDistSq = 1.0e-12f) const;

    /// Multiply matrix by matrix
    JPH_INLINE DMat44 operator * (Mat44Arg inM) const;

    /// Multiply matrix by matrix
    JPH_INLINE DMat44 operator * (DMat44Arg inM) const;

    /// Multiply vector by matrix
    JPH_INLINE DVec3 operator * (Vec3Arg inV) const;

    /// Multiply vector by matrix
    JPH_INLINE DVec3 operator * (DVec3Arg inV) const;

    /// Multiply vector by only 3x3 part of the matrix
    JPH_INLINE Vec3 Multiply3x3(Vec3Arg inV) const { return GetRotation().Multiply3x3(inV); }

    /// Multiply vector by only 3x3 part of the matrix
    JPH_INLINE DVec3 Multiply3x3(DVec3Arg inV) const;

    /// Multiply vector by only 3x3 part of the transpose of the matrix (\f$result = this^T \: inV\f$)
    JPH_INLINE Vec3 Multiply3x3Transposed(Vec3Arg inV) const { return GetRotation().Multiply3x3Transposed(inV); }

    /// Scale a matrix: result = this * Mat44::sScale(inScale)
    JPH_INLINE DMat44 PreScaled(Vec3Arg inScale) const;

    /// Scale a matrix: result = Mat44::sScale(inScale) * this
    JPH_INLINE DMat44 PostScaled(Vec3Arg inScale) const;

    /// Pre multiply by translation matrix: result = this * Mat44::sTranslation(inTranslation)
    JPH_INLINE DMat44 PreTranslated(Vec3Arg inTranslation) const;

    /// Pre multiply by translation matrix: result = this * Mat44::sTranslation(inTranslation)
    JPH_INLINE DMat44 PreTranslated(DVec3Arg inTranslation) const;

    /// Post multiply by translation matrix: result = Mat44::sTranslation(inTranslation) * this (i.e. add inTranslation to the 4-th column)
    JPH_INLINE DMat44 PostTranslated(Vec3Arg inTranslation) const;

    /// Post multiply by translation matrix: result = Mat44::sTranslation(inTranslation) * this (i.e. add inTranslation to the 4-th column)
    JPH_INLINE DMat44 PostTranslated(DVec3Arg inTranslation) const;

    /// Access to the columns
    JPH_INLINE Vec3 GetAxisX() const { return Vec3(mCol[0]); }
    JPH_INLINE void SetAxisX(Vec3Arg inV) { mCol[0] = Vec4(inV, 0.0f); }
    JPH_INLINE Vec3 GetAxisY() const { return Vec3(mCol[1]); }
    JPH_INLINE void SetAxisY(Vec3Arg inV) { mCol[1] = Vec4(inV, 0.0f); }
    JPH_INLINE Vec3 GetAxisZ() const { return Vec3(mCol[2]); }
    JPH_INLINE void SetAxisZ(Vec3Arg inV) { mCol[2] = Vec4(inV, 0.0f); }
    JPH_INLINE DVec3 GetTranslation() const { return mCol3; }
    JPH_INLINE void SetTranslation(DVec3Arg inV) { mCol3 = inV; }
    JPH_INLINE Vec3 GetColumn3(uint inCol) const { JPH_ASSERT(inCol < 3); return Vec3(mCol[inCol]); }
    JPH_INLINE void SetColumn3(uint inCol, Vec3Arg inV) { JPH_ASSERT(inCol < 3); mCol[inCol] = Vec4(inV, 0.0f); }
    JPH_INLINE Vec4 GetColumn4(uint inCol) const { JPH_ASSERT(inCol < 3); return mCol[inCol]; }
    JPH_INLINE void SetColumn4(uint inCol, Vec4Arg inV) { JPH_ASSERT(inCol < 3); mCol[inCol] = inV; }

    /// Inverse 4x4 matrix
    JPH_INLINE DMat44 Inversed() const;

    /// Inverse 4x4 matrix when it only contains rotation and translation
    JPH_INLINE DMat44 InversedRotationTranslation() const;

    /// Get rotation part only (note: retains the first 3 values from the bottom row)
    JPH_INLINE Mat44 GetRotation() const { return Mat44(mCol[0], mCol[1], mCol[2], Vec4(0, 0, 0, 1)); }

    /// Updates the rotation part of this matrix (the first 3 columns)
    JPH_INLINE void SetRotation(Mat44Arg inRotation);

    /// Convert to quaternion
    JPH_INLINE Quat GetQuaternion() const { return GetRotation().GetQuaternion(); }

    /// Get matrix that transforms a direction with the same transform as this matrix (length is not preserved)
    JPH_INLINE Mat44 GetDirectionPreservingMatrix() const { return GetRotation().Inversed3x3().Transposed3x3(); }

    /// Works identical to Mat44::Decompose
    JPH_INLINE DMat44 Decompose(Vec3 &outScale) const { return DMat44(GetRotation().Decompose(outScale), mCol3); }

    /// To String
    friend ostream & operator << (ostream &inStream, DMat44Arg inM)
    {
        inStream << inM.mCol[0] << ", " << inM.mCol[1] << ", " << inM.mCol[2] << ", " << inM.mCol3;
        return inStream;
    }

private:
    Vec4 mCol[3]; ///< Rotation columns
    DVec3 mCol3;  ///< Translation column, 4th element is assumed to be 1
};

static_assert(is_trivial<DMat44>(), "Is supposed to be a trivial type!");

JPH_NAMESPACE_END

#include "DMat44.inl"
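Usage note (a common pattern with double precision transforms, sketched here rather than taken from the library): subtract a camera origin while still in doubles, then collapse to a float Mat44 for rendering.

#include <Jolt/Jolt.h>
#include <Jolt/Math/DMat44.h>

using namespace JPH;

Mat44 ToCameraRelative(const DMat44 &inWorldTransform, DVec3Arg inCameraPos)
{
    // PostTranslated adds to the translation column, so this subtracts the
    // large world offset in double precision before ToMat44 rounds to floats
    return inWorldTransform.PostTranslated(-inCameraPos).ToMat44();
}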
0
repos/c2z/use_cases/JoltPhysics/include
repos/c2z/use_cases/JoltPhysics/include/Math/Float3.h
// Jolt Physics Library (https://github.com/jrouwe/JoltPhysics)
// SPDX-FileCopyrightText: 2021 Jorrit Rouwe
// SPDX-License-Identifier: MIT

#pragma once

#include <Jolt/Core/HashCombine.h>

JPH_NAMESPACE_BEGIN

/// Class that holds 3 floats. Used as a storage class. Convert to Vec3 for calculations.
class [[nodiscard]] Float3
{
public:
    JPH_OVERRIDE_NEW_DELETE

    Float3() = default; ///< Intentionally not initialized for performance reasons
    Float3(const Float3 &inRHS) = default;
    Float3(float inX, float inY, float inZ) : x(inX), y(inY), z(inZ) { }

    float operator [] (int inCoordinate) const
    {
        JPH_ASSERT(inCoordinate < 3);
        return *(&x + inCoordinate);
    }

    bool operator == (const Float3 &inRHS) const
    {
        return x == inRHS.x && y == inRHS.y && z == inRHS.z;
    }

    bool operator != (const Float3 &inRHS) const
    {
        return x != inRHS.x || y != inRHS.y || z != inRHS.z;
    }

    float x;
    float y;
    float z;
};

using VertexList = Array<Float3>;

static_assert(is_trivial<Float3>(), "Is supposed to be a trivial type!");

JPH_NAMESPACE_END

// Create a std::hash for Float3
JPH_MAKE_HASHABLE(JPH::Float3, t.x, t.y, t.z)
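Sketch (assumes Vec3's Float3 constructor and StoreFloat3 from Jolt/Math/Vec3.h, which are not shown in this file): Float3 is the compact storage form, Vec3 the register form.

#include <Jolt/Jolt.h>
#include <Jolt/Math/Float3.h>

using namespace JPH;

void ScaleVertices(VertexList &ioVertices, float inScale)
{
    for (Float3 &v : ioVertices)
    {
        Vec3 scaled = Vec3(v) * inScale; // load into SIMD registers and compute
        scaled.StoreFloat3(&v);          // store back to the 12 byte storage form
    }
}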
0
repos/c2z/use_cases/JoltPhysics/include
repos/c2z/use_cases/JoltPhysics/include/Math/GaussianElimination.h
// Jolt Physics Library (https://github.com/jrouwe/JoltPhysics)
// SPDX-FileCopyrightText: 2021 Jorrit Rouwe
// SPDX-License-Identifier: MIT

#pragma once

JPH_NAMESPACE_BEGIN

/// This function performs Gauss-Jordan elimination to solve a matrix equation.
/// A must be an NxN matrix and B must be an NxM matrix forming the equation A * x = B
/// on output B will contain x and A will be destroyed.
///
/// This code can be used for example to compute the inverse of a matrix.
/// Set A to the matrix to invert, set B to identity and let GaussianElimination solve
/// the equation, on return B will be the inverse of A. And A is destroyed.
///
/// Taken and adapted from Numerical Recipes in C paragraph 2.1
template <class MatrixA, class MatrixB>
bool GaussianElimination(MatrixA &ioA, MatrixB &ioB, float inTolerance = 1.0e-16f)
{
    // Get problem dimensions
    const uint n = ioA.GetCols();
    const uint m = ioB.GetCols();

    // Check matrix requirement
    JPH_ASSERT(ioA.GetRows() == n);
    JPH_ASSERT(ioB.GetRows() == n);

    // Create array for bookkeeping on pivoting
    int *ipiv = (int *)JPH_STACK_ALLOC(n * sizeof(int));
    memset(ipiv, 0, n * sizeof(int));

    for (uint i = 0; i < n; ++i)
    {
        // Initialize pivot element as the diagonal
        uint pivot_row = i, pivot_col = i;

        // Determine pivot element
        float largest_element = 0.0f;
        for (uint j = 0; j < n; ++j)
            if (ipiv[j] != 1)
                for (uint k = 0; k < n; ++k)
                {
                    if (ipiv[k] == 0)
                    {
                        float element = abs(ioA(j, k));
                        if (element >= largest_element)
                        {
                            largest_element = element;
                            pivot_row = j;
                            pivot_col = k;
                        }
                    }
                    else if (ipiv[k] > 1)
                    {
                        return false;
                    }
                }

        // Mark this column as used
        ++ipiv[pivot_col];

        // Exchange rows when needed so that the pivot element is at ioA(pivot_col, pivot_col) instead of at ioA(pivot_row, pivot_col)
        if (pivot_row != pivot_col)
        {
            for (uint j = 0; j < n; ++j)
                swap(ioA(pivot_row, j), ioA(pivot_col, j));
            for (uint j = 0; j < m; ++j)
                swap(ioB(pivot_row, j), ioB(pivot_col, j));
        }

        // Get diagonal element that we are about to set to 1
        float diagonal_element = ioA(pivot_col, pivot_col);
        if (abs(diagonal_element) < inTolerance)
            return false;

        // Divide the whole row by the pivot element, making ioA(pivot_col, pivot_col) = 1
        for (uint j = 0; j < n; ++j)
            ioA(pivot_col, j) /= diagonal_element;
        for (uint j = 0; j < m; ++j)
            ioB(pivot_col, j) /= diagonal_element;
        ioA(pivot_col, pivot_col) = 1.0f;

        // Next reduce the rows, except for the pivot one,
        // after this step the pivot_col column is zero except for the pivot element which is 1
        for (uint j = 0; j < n; ++j)
            if (j != pivot_col)
            {
                float element = ioA(j, pivot_col);
                for (uint k = 0; k < n; ++k)
                    ioA(j, k) -= ioA(pivot_col, k) * element;
                for (uint k = 0; k < m; ++k)
                    ioB(j, k) -= ioB(pivot_col, k) * element;
                ioA(j, pivot_col) = 0.0f;
            }
    }

    // Success
    return true;
}

JPH_NAMESPACE_END
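Sketch: inverting a matrix with GaussianElimination as the comment above describes. The Mat3x3 adapter below is a hypothetical stand-in for any type exposing GetRows/GetCols/operator(); Jolt's own fixed-size matrix class would also satisfy this interface.

#include <Jolt/Jolt.h>
#include <Jolt/Math/GaussianElimination.h>

using namespace JPH;

struct Mat3x3
{
    uint GetRows() const { return 3; }
    uint GetCols() const { return 3; }
    float & operator () (uint inRow, uint inCol) { return mElem[inRow][inCol]; }
    float mElem[3][3];
};

bool Invert(Mat3x3 &ioA, Mat3x3 &outInverse)
{
    // Set B to identity; on success B receives A^-1 and ioA is destroyed
    for (uint r = 0; r < 3; ++r)
        for (uint c = 0; c < 3; ++c)
            outInverse(r, c) = r == c? 1.0f : 0.0f;
    return GaussianElimination(ioA, outInverse);
}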
0
repos/c2z/use_cases/JoltPhysics/include
repos/c2z/use_cases/JoltPhysics/include/Skeleton/SkeletalAnimation.h
// Jolt Physics Library (https://github.com/jrouwe/JoltPhysics)
// SPDX-FileCopyrightText: 2021 Jorrit Rouwe
// SPDX-License-Identifier: MIT

#pragma once

#include <Jolt/Core/Reference.h>
#include <Jolt/ObjectStream/SerializableObject.h>

JPH_NAMESPACE_BEGIN

class SkeletonPose;

/// Resource for a skinned animation
class SkeletalAnimation : public RefTarget<SkeletalAnimation>
{
public:
    JPH_DECLARE_SERIALIZABLE_NON_VIRTUAL(SkeletalAnimation)

    /// Contains the current state of a joint, a local space transformation relative to its parent joint
    class JointState
    {
    public:
        JPH_DECLARE_SERIALIZABLE_NON_VIRTUAL(JointState)

        /// Convert from a local space matrix
        void FromMatrix(Mat44Arg inMatrix);

        /// Convert to matrix representation
        inline Mat44 ToMatrix() const { return Mat44::sRotationTranslation(mRotation, mTranslation); }

        Quat mRotation = Quat::sIdentity(); ///< Local space rotation of the joint
        Vec3 mTranslation = Vec3::sZero(); ///< Local space translation of the joint
    };

    /// Contains the state of a single joint at a particular time
    class Keyframe : public JointState
    {
    public:
        JPH_DECLARE_SERIALIZABLE_NON_VIRTUAL(Keyframe)

        float mTime = 0.0f; ///< Time of keyframe in seconds
    };

    using KeyframeVector = Array<Keyframe>;

    /// Contains the animation for a single joint
    class AnimatedJoint
    {
    public:
        JPH_DECLARE_SERIALIZABLE_NON_VIRTUAL(AnimatedJoint)

        String mJointName; ///< Name of the joint
        KeyframeVector mKeyframes; ///< List of keyframes over time
    };

    using AnimatedJointVector = Array<AnimatedJoint>;

    /// Get the length (in seconds) of this animation
    float GetDuration() const;

    /// Scale the size of all joints by inScale
    void ScaleJoints(float inScale);

    /// Get the (interpolated) joint transforms at time inTime
    void Sample(float inTime, SkeletonPose &ioPose) const;

    /// Get joint samples
    const AnimatedJointVector & GetAnimatedJoints() const { return mAnimatedJoints; }
    AnimatedJointVector & GetAnimatedJoints() { return mAnimatedJoints; }

private:
    AnimatedJointVector mAnimatedJoints; ///< List of joints and keyframes
    bool mIsLooping = true; ///< If this animation loops back to start
};

JPH_NAMESPACE_END
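Sketch: sampling an animation into a pose (SkeletonPose is declared in Jolt/Skeleton/SkeletonPose.h, shown further below in this set).

#include <Jolt/Jolt.h>
#include <Jolt/Skeleton/SkeletalAnimation.h>
#include <Jolt/Skeleton/SkeletonPose.h>

using namespace JPH;

void SamplePose(const SkeletalAnimation &inAnimation, const Skeleton *inSkeleton, float inTime, SkeletonPose &outPose)
{
    outPose.SetSkeleton(inSkeleton);
    inAnimation.Sample(inTime, outPose); // writes interpolated local space joint states
    outPose.CalculateJointMatrices();    // joint states -> model space matrices
}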
0
repos/c2z/use_cases/JoltPhysics/include
repos/c2z/use_cases/JoltPhysics/include/Skeleton/Skeleton.h
// Jolt Physics Library (https://github.com/jrouwe/JoltPhysics)
// SPDX-FileCopyrightText: 2021 Jorrit Rouwe
// SPDX-License-Identifier: MIT

#pragma once

#include <Jolt/Core/Reference.h>
#include <Jolt/Core/Result.h>
#include <Jolt/ObjectStream/SerializableObject.h>

JPH_NAMESPACE_BEGIN

class StreamIn;
class StreamOut;

/// Resource that contains the joint hierarchy for a skeleton
class Skeleton : public RefTarget<Skeleton>
{
public:
    JPH_DECLARE_SERIALIZABLE_NON_VIRTUAL(Skeleton)

    using SkeletonResult = Result<Ref<Skeleton>>;

    /// Declare internal structure for a joint
    class Joint
    {
    public:
        JPH_DECLARE_SERIALIZABLE_NON_VIRTUAL(Joint)

        Joint() = default;
        Joint(const string_view &inName, const string_view &inParentName, int inParentJointIndex) : mName(inName), mParentName(inParentName), mParentJointIndex(inParentJointIndex) { }

        String mName; ///< Name of the joint
        String mParentName; ///< Name of parent joint
        int mParentJointIndex = -1; ///< Index of parent joint (in mJoints) or -1 if it has no parent
    };

    using JointVector = Array<Joint>;

    ///@name Access to the joints
    ///@{
    const JointVector & GetJoints() const { return mJoints; }
    JointVector & GetJoints() { return mJoints; }
    int GetJointCount() const { return (int)mJoints.size(); }
    const Joint & GetJoint(int inJoint) const { return mJoints[inJoint]; }
    Joint & GetJoint(int inJoint) { return mJoints[inJoint]; }
    uint AddJoint(const string_view &inName, const string_view &inParentName = string_view()) { mJoints.emplace_back(inName, inParentName, -1); return (uint)mJoints.size() - 1; }
    uint AddJoint(const string_view &inName, int inParentIndex) { mJoints.emplace_back(inName, inParentIndex >= 0? mJoints[inParentIndex].mName : String(), inParentIndex); return (uint)mJoints.size() - 1; }
    ///@}

    /// Find joint by name
    int GetJointIndex(const string_view &inName) const;

    /// Fill in parent joint indices based on name
    void CalculateParentJointIndices();

    /// Many of the algorithms that use the Skeleton class require that parent joints are in the mJoints array before their children.
    /// This function returns true if this is the case, false if not.
    bool AreJointsCorrectlyOrdered() const;

    /// Saves the state of this object in binary form to inStream.
    void SaveBinaryState(StreamOut &inStream) const;

    /// Restore the state of this object from inStream.
    static SkeletonResult sRestoreFromBinaryState(StreamIn &inStream);

private:
    /// Joints
    JointVector mJoints;
};

JPH_NAMESPACE_END
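Sketch: building a small hierarchy by name and then resolving parent indices, using only the API declared above.

#include <Jolt/Jolt.h>
#include <Jolt/Skeleton/Skeleton.h>

using namespace JPH;

Ref<Skeleton> MakeArmSkeleton()
{
    Ref<Skeleton> skeleton = new Skeleton;
    skeleton->AddJoint("UpperArm");
    skeleton->AddJoint("LowerArm", "UpperArm");
    skeleton->AddJoint("Hand", "LowerArm");
    skeleton->CalculateParentJointIndices(); // fills mParentJointIndex from the names
    JPH_ASSERT(skeleton->AreJointsCorrectlyOrdered());
    return skeleton;
}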
0
repos/c2z/use_cases/JoltPhysics/include
repos/c2z/use_cases/JoltPhysics/include/Skeleton/SkeletonMapper.h
// Jolt Physics Library (https://github.com/jrouwe/JoltPhysics) // SPDX-FileCopyrightText: 2022 Jorrit Rouwe // SPDX-License-Identifier: MIT #pragma once #include <Jolt/Core/Reference.h> #include <Jolt/Skeleton/Skeleton.h> JPH_NAMESPACE_BEGIN /// Class that is able to map a low detail (ragdoll) skeleton to a high detail (animation) skeleton and vice versa class SkeletonMapper : public RefTarget<SkeletonMapper> { public: /// A joint that maps 1-on-1 to a joint in the other skeleton class Mapping { public: Mapping() = default; Mapping(int inJointIdx1, int inJointIdx2, Mat44Arg inJoint1To2) : mJointIdx1(inJointIdx1), mJointIdx2(inJointIdx2), mJoint1To2(inJoint1To2), mJoint2To1(inJoint1To2.Inversed()) { // Ensure bottom right element is 1 (numerical imprecision in the inverse can make this not so) mJoint2To1(3, 3) = 1.0f; } int mJointIdx1; ///< Index of joint from skeleton 1 int mJointIdx2; ///< Corresponding index of joint from skeleton 2 Mat44 mJoint1To2; ///< Transforms this joint from skeleton 1 to 2 Mat44 mJoint2To1; ///< Inverse of the transform above }; /// A joint chain that starts with a 1-on-1 mapped joint and ends with a 1-on-1 mapped joint with intermediate joints that cannot be mapped class Chain { public: Chain() = default; Chain(Array<int> &&inJointIndices1, Array<int> &&inJointIndices2) : mJointIndices1(std::move(inJointIndices1)), mJointIndices2(std::move(inJointIndices2)) { } Array<int> mJointIndices1; ///< Joint chain from skeleton 1 Array<int> mJointIndices2; ///< Corresponding joint chain from skeleton 2 }; /// Joints that could not be mapped from skeleton 1 to 2 class Unmapped { public: Unmapped() = default; Unmapped(int inJointIdx, int inParentJointIdx) : mJointIdx(inJointIdx), mParentJointIdx(inParentJointIdx) { } int mJointIdx; ///< Joint index of unmappable joint int mParentJointIdx; ///< Parent joint index of unmappable joint }; /// Joints that should have their translation locked (fixed) class Locked { public: int mJointIdx; ///< Joint index of joint with locked translation (in skeleton 2) int mParentJointIdx; ///< Parent joint index of joint with locked translation (in skeleton 2) Vec3 mTranslation; ///< Translation of neutral pose }; /// A function that is called to determine if a joint can be mapped from source to target skeleton using CanMapJoint = function<bool (const Skeleton *, int, const Skeleton *, int)>; /// Default function that checks if the names of the joints are equal static bool sDefaultCanMapJoint(const Skeleton *inSkeleton1, int inIndex1, const Skeleton *inSkeleton2, int inIndex2) { return inSkeleton1->GetJoint(inIndex1).mName == inSkeleton2->GetJoint(inIndex2).mName; } /// Initialize the skeleton mapper. Skeleton 1 should be the (low detail) ragdoll skeleton and skeleton 2 the (high detail) animation skeleton. /// We assume that each joint in skeleton 1 can be mapped to a joint in skeleton 2 (if not mapping from animation skeleton to ragdoll skeleton will be undefined). /// Skeleton 2 should have the same hierarchy as skeleton 1 but can contain extra joints between those in skeleton 1 and it can have extra joints at the root and leaves of the skeleton. /// @param inSkeleton1 Source skeleton to map from. /// @param inNeutralPose1 Neutral pose of the source skeleton (model space) /// @param inSkeleton2 Target skeleton to map to. /// @param inNeutralPose2 Neutral pose of the target skeleton (model space), inNeutralPose1 and inNeutralPose2 must match as closely as possible, preferably the position of the mappable joints should be identical. 
/// @param inCanMapJoint Function that checks if joints in skeleton 1 and skeleton 2 are equal. void Initialize(const Skeleton *inSkeleton1, const Mat44 *inNeutralPose1, const Skeleton *inSkeleton2, const Mat44 *inNeutralPose2, const CanMapJoint &inCanMapJoint = sDefaultCanMapJoint); /// This can be called so lock the translation of a specified set of joints in skeleton 2. /// Because constraints are never 100% rigid, there's always a little bit of stretch in the ragdoll when the ragdoll is under stress. /// Locking the translations of the pose will remove the visual stretch from the ragdoll but will introduce a difference between the /// physical simulation and the visual representation. /// @param inSkeleton2 Target skeleton to map to. /// @param inLockedTranslations An array of bools the size of inSkeleton2->GetJointCount(), for each joint indicating if the joint is locked. /// @param inNeutralPose2 Neutral pose to take reference translations from void LockTranslations(const Skeleton *inSkeleton2, const bool *inLockedTranslations, const Mat44 *inNeutralPose2); /// After Initialize(), this can be called to lock the translation of all joints in skeleton 2 below the first mapped joint to those of the neutral pose. /// Because constraints are never 100% rigid, there's always a little bit of stretch in the ragdoll when the ragdoll is under stress. /// Locking the translations of the pose will remove the visual stretch from the ragdoll but will introduce a difference between the /// physical simulation and the visual representation. /// @param inSkeleton2 Target skeleton to map to. /// @param inNeutralPose2 Neutral pose to take reference translations from void LockAllTranslations(const Skeleton *inSkeleton2, const Mat44 *inNeutralPose2); /// Map a pose. Joints that were directly mappable will be copied in model space from pose 1 to pose 2. Any joints that are only present in skeleton 2 /// will get their model space transform calculated through the local space transforms of pose 2. Joints that are part of a joint chain between two /// mapped joints will be reoriented towards the next joint in skeleton 1. This means that it is possible for unmapped joints to have some animation, /// but very extreme animation poses will show artifacts. /// @param inPose1ModelSpace Pose on skeleton 1 in model space /// @param inPose2LocalSpace Pose on skeleton 2 in local space (used for the joints that cannot be mapped) /// @param outPose2ModelSpace Model space pose on skeleton 2 (the output of the mapping) void Map(const Mat44 *inPose1ModelSpace, const Mat44 *inPose2LocalSpace, Mat44 *outPose2ModelSpace) const; /// Reverse map a pose, this will only use the mappings and not the chains (it assumes that all joints in skeleton 1 are mapped) /// @param inPose2ModelSpace Model space pose on skeleton 2 /// @param outPose1ModelSpace When the function returns this will contain the model space pose for skeleton 1 void MapReverse(const Mat44 *inPose2ModelSpace, Mat44 *outPose1ModelSpace) const; /// Search through the directly mapped joints (mMappings) and find inJoint1Idx, returns the corresponding Joint2Idx or -1 if not found. int GetMappedJointIdx(int inJoint1Idx) const; /// Search through the locked translations (mLockedTranslations) and find if joint inJoint2Idx is locked. 
bool IsJointTranslationLocked(int inJoint2Idx) const; using MappingVector = Array<Mapping>; using ChainVector = Array<Chain>; using UnmappedVector = Array<Unmapped>; using LockedVector = Array<Locked>; ///@name Access to the mapped joints ///@{ const MappingVector & GetMappings() const { return mMappings; } MappingVector & GetMappings() { return mMappings; } const ChainVector & GetChains() const { return mChains; } ChainVector & GetChains() { return mChains; } const UnmappedVector & GetUnmapped() const { return mUnmapped; } UnmappedVector & GetUnmapped() { return mUnmapped; } const LockedVector & GetLockedTranslations() const { return mLockedTranslations; } LockedVector & GetLockedTranslations() { return mLockedTranslations; } ///@} private: /// Joint mappings MappingVector mMappings; ChainVector mChains; UnmappedVector mUnmapped; ///< Joint indices that could not be mapped from 1 to 2 (these are indices in 2) LockedVector mLockedTranslations; }; JPH_NAMESPACE_END
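For orientation, here is a minimal usage sketch of the class above. It assumes two already-built skeletons and their neutral poses in model space (all inputs are hypothetical; skeleton construction is omitted) and relies on the default name-based joint matching:

#include <Jolt/Jolt.h>
#include <Jolt/Skeleton/SkeletonMapper.h>

using namespace JPH;

// inRagdoll / inAnim and the pose arrays are assumed valid; each Mat44 array
// holds one model space matrix per joint of the corresponding skeleton.
void MapRagdollToAnimation(const Skeleton *inRagdoll, const Mat44 *inNeutral1,
                           const Skeleton *inAnim, const Mat44 *inNeutral2,
                           const Mat44 *inRagdollPoseModelSpace,
                           const Mat44 *inAnimPoseLocalSpace,
                           Mat44 *outAnimPoseModelSpace)
{
    Ref<SkeletonMapper> mapper = new SkeletonMapper;
    mapper->Initialize(inRagdoll, inNeutral1, inAnim, inNeutral2); // sDefaultCanMapJoint: match by joint name
    mapper->LockAllTranslations(inAnim, inNeutral2);               // optional: hide constraint stretch
    mapper->Map(inRagdollPoseModelSpace, inAnimPoseLocalSpace, outAnimPoseModelSpace);
}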
0
repos/c2z/use_cases/JoltPhysics/include
repos/c2z/use_cases/JoltPhysics/include/Skeleton/SkeletonPose.h
// Jolt Physics Library (https://github.com/jrouwe/JoltPhysics) // SPDX-FileCopyrightText: 2021 Jorrit Rouwe // SPDX-License-Identifier: MIT #pragma once #include <Jolt/Skeleton/Skeleton.h> #include <Jolt/Skeleton/SkeletalAnimation.h> JPH_NAMESPACE_BEGIN #ifdef JPH_DEBUG_RENDERER class DebugRenderer; #endif // JPH_DEBUG_RENDERER /// Instance of a skeleton, contains the pose the current skeleton is in class SkeletonPose { public: JPH_OVERRIDE_NEW_DELETE using JointState = SkeletalAnimation::JointState; using JointStateVector = Array<JointState>; using Mat44Vector = Array<Mat44>; ///@name Skeleton ///@{ void SetSkeleton(const Skeleton *inSkeleton); const Skeleton * GetSkeleton() const { return mSkeleton; } ///@} /// Extra offset applied to the root (and therefore also to all of its children) void SetRootOffset(RVec3Arg inOffset) { mRootOffset = inOffset; } RVec3 GetRootOffset() const { return mRootOffset; } ///@name Properties of the joints ///@{ uint GetJointCount() const { return (uint)mJoints.size(); } const JointStateVector & GetJoints() const { return mJoints; } JointStateVector & GetJoints() { return mJoints; } const JointState & GetJoint(int inJoint) const { return mJoints[inJoint]; } JointState & GetJoint(int inJoint) { return mJoints[inJoint]; } ///@} ///@name Joint matrices ///@{ const Mat44Vector & GetJointMatrices() const { return mJointMatrices; } Mat44Vector & GetJointMatrices() { return mJointMatrices; } const Mat44 & GetJointMatrix(int inJoint) const { return mJointMatrices[inJoint]; } Mat44 & GetJointMatrix(int inJoint) { return mJointMatrices[inJoint]; } ///@} /// Convert the joint states to joint matrices void CalculateJointMatrices(); /// Convert joint matrices to joint states void CalculateJointStates(); /// Outputs the joint matrices in local space (ensure that outMatrices has GetJointCount() elements, assumes that values in GetJoints() is up to date) void CalculateLocalSpaceJointMatrices(Mat44 *outMatrices) const; #ifdef JPH_DEBUG_RENDERER /// Draw settings struct DrawSettings { bool mDrawJoints = true; bool mDrawJointOrientations = true; bool mDrawJointNames = false; }; /// Draw current pose void Draw(const DrawSettings &inDrawSettings, DebugRenderer *inRenderer, RMat44Arg inOffset = RMat44::sIdentity()) const; #endif // JPH_DEBUG_RENDERER private: RefConst<Skeleton> mSkeleton; ///< Skeleton definition RVec3 mRootOffset { RVec3::sZero() }; ///< Extra offset applied to the root (and therefore also to all of its children) JointStateVector mJoints; ///< Local joint orientations (local to parent Joint) Mat44Vector mJointMatrices; ///< Local joint matrices (local to world matrix) }; JPH_NAMESPACE_END
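A short sketch of the typical flow (hypothetical code, assuming a valid Skeleton with at least one joint): set the skeleton, edit a joint state, then derive the model space matrices. JointState is SkeletalAnimation::JointState, which is assumed here to carry a rotation and translation per joint.

#include <Jolt/Jolt.h>
#include <Jolt/Skeleton/SkeletonPose.h>

using namespace JPH;

void PoseExample(const Skeleton *inSkeleton)
{
    SkeletonPose pose;
    pose.SetSkeleton(inSkeleton);          // sizes the joint state / matrix arrays

    // Rotate the root joint a quarter turn around Y (assumes joint 0 exists)
    SkeletonPose::JointState &root = pose.GetJoint(0);
    root.mRotation = Quat::sRotation(Vec3::sAxisY(), 0.5f * JPH_PI);

    pose.CalculateJointMatrices();         // joint states -> model space matrices
    Mat44 root_transform = pose.GetJointMatrix(0);
    (void)root_transform;
}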
0
repos/c2z/use_cases/JoltPhysics/include
repos/c2z/use_cases/JoltPhysics/include/Core/Core.h
// Jolt Physics Library (https://github.com/jrouwe/JoltPhysics) // SPDX-FileCopyrightText: 2021 Jorrit Rouwe // SPDX-License-Identifier: MIT #pragma once // Determine platform #if defined(JPH_PLATFORM_BLUE) // Correct define already defined, this overrides everything else #elif defined(_WIN32) || defined(_WIN64) #include <winapifamily.h> #if WINAPI_FAMILY == WINAPI_FAMILY_APP #define JPH_PLATFORM_WINDOWS_UWP // Building for Universal Windows Platform #endif #define JPH_PLATFORM_WINDOWS #elif defined(__ANDROID__) // Android is linux too, so that's why we check it first #define JPH_PLATFORM_ANDROID #elif defined(__linux__) #define JPH_PLATFORM_LINUX #elif defined(__APPLE__) #include <TargetConditionals.h> #if defined(TARGET_OS_IPHONE) && !TARGET_OS_IPHONE #define JPH_PLATFORM_MACOS #else #define JPH_PLATFORM_IOS #endif #elif defined(__EMSCRIPTEN__) #define JPH_PLATFORM_WASM #endif // Platform helper macros #ifdef JPH_PLATFORM_ANDROID #define JPH_IF_NOT_ANDROID(x) #else #define JPH_IF_NOT_ANDROID(x) x #endif // Determine compiler #if defined(__clang__) #define JPH_COMPILER_CLANG #elif defined(__GNUC__) #define JPH_COMPILER_GCC #elif defined(_MSC_VER) #define JPH_COMPILER_MSVC #endif #if defined(__MINGW64__) || defined (__MINGW32__) #define JPH_COMPILER_MINGW #endif // Detect CPU architecture #if defined(__x86_64__) || defined(_M_X64) || defined(__i386__) || defined(_M_IX86) // X86 CPU architecture #define JPH_CPU_X86 #if defined(__x86_64__) || defined(_M_X64) #define JPH_CPU_ADDRESS_BITS 64 #else #define JPH_CPU_ADDRESS_BITS 32 #endif #define JPH_USE_SSE #define JPH_VECTOR_ALIGNMENT 16 #define JPH_DVECTOR_ALIGNMENT 32 // Detect enabled instruction sets #if defined(__AVX512F__) && defined(__AVX512VL__) && defined(__AVX512DQ__) && !defined(JPH_USE_AVX512) #define JPH_USE_AVX512 #endif #if (defined(__AVX2__) || defined(JPH_USE_AVX512)) && !defined(JPH_USE_AVX2) #define JPH_USE_AVX2 #endif #if (defined(__AVX__) || defined(JPH_USE_AVX2)) && !defined(JPH_USE_AVX) #define JPH_USE_AVX #endif #if (defined(__SSE4_2__) || defined(JPH_USE_AVX)) && !defined(JPH_USE_SSE4_2) #define JPH_USE_SSE4_2 #endif #if (defined(__SSE4_1__) || defined(JPH_USE_SSE4_2)) && !defined(JPH_USE_SSE4_1) #define JPH_USE_SSE4_1 #endif #if (defined(__F16C__) || defined(JPH_USE_AVX2)) && !defined(JPH_USE_F16C) #define JPH_USE_F16C #endif #if (defined(__LZCNT__) || defined(JPH_USE_AVX2)) && !defined(JPH_USE_LZCNT) #define JPH_USE_LZCNT #endif #if (defined(__BMI__) || defined(JPH_USE_AVX2)) && !defined(JPH_USE_TZCNT) #define JPH_USE_TZCNT #endif #ifndef JPH_CROSS_PLATFORM_DETERMINISTIC // FMA is not compatible with cross platform determinism #if defined(JPH_COMPILER_CLANG) || defined(JPH_COMPILER_GCC) #if defined(__FMA__) && !defined(JPH_USE_FMADD) #define JPH_USE_FMADD #endif #elif defined(JPH_COMPILER_MSVC) #if defined(__AVX2__) && !defined(JPH_USE_FMADD) // AVX2 also enables fused multiply add #define JPH_USE_FMADD #endif #else #error Undefined compiler #endif #endif #elif defined(__aarch64__) || defined(_M_ARM64) || defined(__arm__) || defined(_M_ARM) // ARM CPU architecture #define JPH_CPU_ARM #if defined(__aarch64__) || defined(_M_ARM64) #define JPH_CPU_ADDRESS_BITS 64 #define JPH_USE_NEON #define JPH_VECTOR_ALIGNMENT 16 #define JPH_DVECTOR_ALIGNMENT 32 #else #define JPH_CPU_ADDRESS_BITS 32 #define JPH_VECTOR_ALIGNMENT 8 // 32-bit ARM does not support aligning on the stack on 16 byte boundaries #define JPH_DVECTOR_ALIGNMENT 8 #endif #elif defined(JPH_PLATFORM_WASM) // WebAssembly CPU architecture #define JPH_CPU_WASM #define 
JPH_CPU_ADDRESS_BITS 32 #define JPH_VECTOR_ALIGNMENT 16 #define JPH_DVECTOR_ALIGNMENT 32 #define JPH_DISABLE_CUSTOM_ALLOCATOR #else #error Unsupported CPU architecture #endif // Pragmas to store / restore the warning state and to disable individual warnings #ifdef JPH_COMPILER_CLANG #define JPH_PRAGMA(x) _Pragma(#x) #define JPH_SUPPRESS_WARNING_PUSH JPH_PRAGMA(clang diagnostic push) #define JPH_SUPPRESS_WARNING_POP JPH_PRAGMA(clang diagnostic pop) #define JPH_CLANG_SUPPRESS_WARNING(w) JPH_PRAGMA(clang diagnostic ignored w) #else #define JPH_CLANG_SUPPRESS_WARNING(w) #endif #ifdef JPH_COMPILER_GCC #define JPH_PRAGMA(x) _Pragma(#x) #define JPH_SUPPRESS_WARNING_PUSH JPH_PRAGMA(GCC diagnostic push) #define JPH_SUPPRESS_WARNING_POP JPH_PRAGMA(GCC diagnostic pop) #define JPH_GCC_SUPPRESS_WARNING(w) JPH_PRAGMA(GCC diagnostic ignored w) #else #define JPH_GCC_SUPPRESS_WARNING(w) #endif #ifdef JPH_COMPILER_MSVC #define JPH_PRAGMA(x) __pragma(x) #define JPH_SUPPRESS_WARNING_PUSH JPH_PRAGMA(warning (push)) #define JPH_SUPPRESS_WARNING_POP JPH_PRAGMA(warning (pop)) #define JPH_MSVC_SUPPRESS_WARNING(w) JPH_PRAGMA(warning (disable : w)) #if _MSC_VER >= 1920 && _MSC_VER < 1930 #define JPH_MSVC2019_SUPPRESS_WARNING(w) JPH_MSVC_SUPPRESS_WARNING(w) #else #define JPH_MSVC2019_SUPPRESS_WARNING(w) #endif #else #define JPH_MSVC_SUPPRESS_WARNING(w) #define JPH_MSVC2019_SUPPRESS_WARNING(w) #endif // Disable common warnings triggered by Jolt when compiling with -Wall #define JPH_SUPPRESS_WARNINGS \ JPH_CLANG_SUPPRESS_WARNING("-Wc++98-compat") \ JPH_CLANG_SUPPRESS_WARNING("-Wc++98-compat-pedantic") \ JPH_CLANG_SUPPRESS_WARNING("-Wfloat-equal") \ JPH_CLANG_SUPPRESS_WARNING("-Wsign-conversion") \ JPH_CLANG_SUPPRESS_WARNING("-Wold-style-cast") \ JPH_CLANG_SUPPRESS_WARNING("-Wgnu-anonymous-struct") \ JPH_CLANG_SUPPRESS_WARNING("-Wnested-anon-types") \ JPH_CLANG_SUPPRESS_WARNING("-Wglobal-constructors") \ JPH_CLANG_SUPPRESS_WARNING("-Wexit-time-destructors") \ JPH_CLANG_SUPPRESS_WARNING("-Wnonportable-system-include-path") \ JPH_CLANG_SUPPRESS_WARNING("-Wlanguage-extension-token") \ JPH_CLANG_SUPPRESS_WARNING("-Wunused-parameter") \ JPH_CLANG_SUPPRESS_WARNING("-Wformat-nonliteral") \ JPH_CLANG_SUPPRESS_WARNING("-Wcovered-switch-default") \ JPH_CLANG_SUPPRESS_WARNING("-Wcast-align") \ JPH_CLANG_SUPPRESS_WARNING("-Winvalid-offsetof") \ JPH_CLANG_SUPPRESS_WARNING("-Wgnu-zero-variadic-macro-arguments") \ JPH_CLANG_SUPPRESS_WARNING("-Wdocumentation-unknown-command") \ JPH_CLANG_SUPPRESS_WARNING("-Wctad-maybe-unsupported") \ JPH_CLANG_SUPPRESS_WARNING("-Wdeprecated-copy") \ JPH_IF_NOT_ANDROID(JPH_CLANG_SUPPRESS_WARNING("-Wimplicit-int-float-conversion")) \ \ JPH_GCC_SUPPRESS_WARNING("-Wcomment") \ JPH_GCC_SUPPRESS_WARNING("-Winvalid-offsetof") \ JPH_GCC_SUPPRESS_WARNING("-Wclass-memaccess") \ \ JPH_MSVC_SUPPRESS_WARNING(4619) /* #pragma warning: there is no warning number 'XXXX' */ \ JPH_MSVC_SUPPRESS_WARNING(4514) /* 'X' : unreferenced inline function has been removed */ \ JPH_MSVC_SUPPRESS_WARNING(4710) /* 'X' : function not inlined */ \ JPH_MSVC_SUPPRESS_WARNING(4711) /* function 'X' selected for automatic inline expansion */ \ JPH_MSVC_SUPPRESS_WARNING(4820) /* 'X': 'Y' bytes padding added after data member 'Z' */ \ JPH_MSVC_SUPPRESS_WARNING(4100) /* 'X' : unreferenced formal parameter */ \ JPH_MSVC_SUPPRESS_WARNING(4626) /* 'X' : assignment operator was implicitly defined as deleted because a base class assignment operator is inaccessible or deleted */ \ JPH_MSVC_SUPPRESS_WARNING(5027) /* 'X' : move assignment operator 
was implicitly defined as deleted because a base class move assignment operator is inaccessible or deleted */ \ JPH_MSVC_SUPPRESS_WARNING(4365) /* 'argument' : conversion from 'X' to 'Y', signed / unsigned mismatch */ \ JPH_MSVC_SUPPRESS_WARNING(4324) /* 'X' : structure was padded due to alignment specifier */ \ JPH_MSVC_SUPPRESS_WARNING(4625) /* 'X' : copy constructor was implicitly defined as deleted because a base class copy constructor is inaccessible or deleted */ \ JPH_MSVC_SUPPRESS_WARNING(5026) /* 'X': move constructor was implicitly defined as deleted because a base class move constructor is inaccessible or deleted */ \ JPH_MSVC_SUPPRESS_WARNING(4623) /* 'X' : default constructor was implicitly defined as deleted */ \ JPH_MSVC_SUPPRESS_WARNING(4201) /* nonstandard extension used: nameless struct/union */ \ JPH_MSVC_SUPPRESS_WARNING(4371) /* 'X': layout of class may have changed from a previous version of the compiler due to better packing of member 'Y' */ \ JPH_MSVC_SUPPRESS_WARNING(5045) /* Compiler will insert Spectre mitigation for memory load if /Qspectre switch specified */ \ JPH_MSVC_SUPPRESS_WARNING(4583) /* 'X': destructor is not implicitly called */ \ JPH_MSVC_SUPPRESS_WARNING(4582) /* 'X': constructor is not implicitly called */ \ JPH_MSVC_SUPPRESS_WARNING(5219) /* implicit conversion from 'X' to 'Y', possible loss of data */ \ JPH_MSVC_SUPPRESS_WARNING(4826) /* Conversion from 'X *' to 'JPH::uint64' is sign-extended. This may cause unexpected runtime behavior. (32-bit) */ \ JPH_MSVC_SUPPRESS_WARNING(5264) /* 'X': 'const' variable is not used */ \ JPH_MSVC2019_SUPPRESS_WARNING(5246) /* the initialization of a subobject should be wrapped in braces */ // OS-specific includes #if defined(JPH_PLATFORM_WINDOWS) #define JPH_BREAKPOINT __debugbreak() #elif defined(JPH_PLATFORM_BLUE) // Configuration for a popular game console. // This file is not distributed because it would violate an NDA. // Creating one should only be a couple of minutes of work if you have the documentation for the platform // (you only need to define JPH_BREAKPOINT, JPH_PLATFORM_BLUE_GET_TICKS and JPH_PLATFORM_BLUE_GET_TICK_FREQUENCY and include the right header). 
#include <Jolt/Core/PlatformBlue.h> #elif defined(JPH_PLATFORM_LINUX) || defined(JPH_PLATFORM_ANDROID) || defined(JPH_PLATFORM_MACOS) || defined(JPH_PLATFORM_IOS) #if defined(JPH_CPU_X86) #define JPH_BREAKPOINT __asm volatile ("int $0x3") #elif defined(JPH_CPU_ARM) #define JPH_BREAKPOINT __builtin_trap() #endif #elif defined(JPH_PLATFORM_WASM) #define JPH_BREAKPOINT do { } while (false) // Not supported #else #error Unknown platform #endif // Crashes the application #define JPH_CRASH do { int *ptr = nullptr; *ptr = 0; } while (false) // Begin the JPH namespace #define JPH_NAMESPACE_BEGIN \ JPH_SUPPRESS_WARNING_PUSH \ JPH_SUPPRESS_WARNINGS \ namespace JPH { // End the JPH namespace #define JPH_NAMESPACE_END \ } \ JPH_SUPPRESS_WARNING_POP // Suppress warnings generated by the standard template library #define JPH_SUPPRESS_WARNINGS_STD_BEGIN \ JPH_SUPPRESS_WARNING_PUSH \ JPH_MSVC_SUPPRESS_WARNING(4619) \ JPH_MSVC_SUPPRESS_WARNING(4710) \ JPH_MSVC_SUPPRESS_WARNING(4711) \ JPH_MSVC_SUPPRESS_WARNING(4820) \ JPH_MSVC_SUPPRESS_WARNING(4514) \ JPH_MSVC_SUPPRESS_WARNING(5262) \ JPH_MSVC_SUPPRESS_WARNING(5264) #define JPH_SUPPRESS_WARNINGS_STD_END \ JPH_SUPPRESS_WARNING_POP // Standard C++ includes JPH_SUPPRESS_WARNINGS_STD_BEGIN #include <vector> #include <utility> #include <cmath> #include <sstream> #include <functional> #include <algorithm> JPH_SUPPRESS_WARNINGS_STD_END #include <limits.h> #include <float.h> #include <string.h> #if defined(JPH_USE_SSE) #include <immintrin.h> #elif defined(JPH_USE_NEON) #ifdef JPH_COMPILER_MSVC #include <intrin.h> #include <arm64_neon.h> #else #include <arm_neon.h> #endif #endif JPH_NAMESPACE_BEGIN // Commonly used STL types using std::pair; using std::min; using std::max; using std::abs; using std::sqrt; using std::ceil; using std::floor; using std::trunc; using std::round; using std::fmod; using std::swap; using std::size; using std::string; using std::string_view; using std::function; using std::numeric_limits; using std::isfinite; using std::isnan; using std::is_trivial; using std::is_trivially_constructible; using std::is_trivially_destructible; using std::ostream; using std::istream; // Standard types using uint = unsigned int; using uint8 = uint8_t; using uint16 = uint16_t; using uint32 = uint32_t; using uint64 = uint64_t; // Assert sizes of types static_assert(sizeof(uint) >= 4, "Invalid size of uint"); static_assert(sizeof(uint8) == 1, "Invalid size of uint8"); static_assert(sizeof(uint16) == 2, "Invalid size of uint16"); static_assert(sizeof(uint32) == 4, "Invalid size of uint32"); static_assert(sizeof(uint64) == 8, "Invalid size of uint64"); static_assert(sizeof(void *) == (JPH_CPU_ADDRESS_BITS == 64? 8 : 4), "Invalid size of pointer" ); // Define inline macro #if defined(JPH_COMPILER_CLANG) || defined(JPH_COMPILER_GCC) #define JPH_INLINE __inline__ __attribute__((always_inline)) #elif defined(JPH_COMPILER_MSVC) #define JPH_INLINE __forceinline #else #error Undefined #endif // Cache line size (used for aligning to cache line) #ifndef JPH_CACHE_LINE_SIZE #define JPH_CACHE_LINE_SIZE 64 #endif // Define macro to get current function name #if defined(JPH_COMPILER_CLANG) || defined(JPH_COMPILER_GCC) #define JPH_FUNCTION_NAME __PRETTY_FUNCTION__ #elif defined(JPH_COMPILER_MSVC) #define JPH_FUNCTION_NAME __FUNCTION__ #else #error Undefined #endif // Stack allocation #define JPH_STACK_ALLOC(n) alloca(n) // Shorthand for #ifdef _DEBUG / #endif #ifdef _DEBUG #define JPH_IF_DEBUG(...) __VA_ARGS__ #define JPH_IF_NOT_DEBUG(...) #else #define JPH_IF_DEBUG(...) 
#define JPH_IF_NOT_DEBUG(...) __VA_ARGS__ #endif // Shorthand for #ifdef JPH_FLOATING_POINT_EXCEPTIONS_ENABLED / #endif #ifdef JPH_FLOATING_POINT_EXCEPTIONS_ENABLED #define JPH_IF_FLOATING_POINT_EXCEPTIONS_ENABLED(...) __VA_ARGS__ #else #define JPH_IF_FLOATING_POINT_EXCEPTIONS_ENABLED(...) #endif // Helper macros to detect if we're running in single or double precision mode #ifdef JPH_DOUBLE_PRECISION #define JPH_IF_SINGLE_PRECISION(...) #define JPH_IF_SINGLE_PRECISION_ELSE(s, d) d #define JPH_IF_DOUBLE_PRECISION(...) __VA_ARGS__ #else #define JPH_IF_SINGLE_PRECISION(...) __VA_ARGS__ #define JPH_IF_SINGLE_PRECISION_ELSE(s, d) s #define JPH_IF_DOUBLE_PRECISION(...) #endif // Helper macro to detect if the debug renderer is active #ifdef JPH_DEBUG_RENDERER #define JPH_IF_DEBUG_RENDERER(...) __VA_ARGS__ #define JPH_IF_NOT_DEBUG_RENDERER(...) #else #define JPH_IF_DEBUG_RENDERER(...) #define JPH_IF_NOT_DEBUG_RENDERER(...) __VA_ARGS__ #endif // Macro to indicate that a parameter / variable is unused #define JPH_UNUSED(x) (void)x // Macro to enable floating point precise mode and to disable fused multiply add instructions #if defined(JPH_COMPILER_GCC) || defined(JPH_CROSS_PLATFORM_DETERMINISTIC) // We compile without -ffast-math and -ffp-contract=fast, so we don't need to disable anything #define JPH_PRECISE_MATH_ON #define JPH_PRECISE_MATH_OFF #elif defined(JPH_COMPILER_CLANG) // We compile without -ffast-math because it cannot be turned off for a single compilation unit // On clang 14 and later we can turn off float contraction through a pragma, so if FMA is on we can disable it through this macro #if __clang_major__ >= 14 && defined(JPH_USE_FMADD) #define JPH_PRECISE_MATH_ON \ _Pragma("clang fp contract(off)") #define JPH_PRECISE_MATH_OFF \ _Pragma("clang fp contract(on)") #else #define JPH_PRECISE_MATH_ON #define JPH_PRECISE_MATH_OFF #endif #elif defined(JPH_COMPILER_MSVC) // Unfortunately there is no way to push the state of fp_contract, so we have to assume it was turned on before JPH_PRECISE_MATH_ON #define JPH_PRECISE_MATH_ON \ __pragma(float_control(precise, on, push)) \ __pragma(fp_contract(off)) #define JPH_PRECISE_MATH_OFF \ __pragma(fp_contract(on)) \ __pragma(float_control(pop)) #else #error Undefined #endif JPH_NAMESPACE_END
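To illustrate how a few of these macros are meant to be combined, a small sketch follows (hypothetical user code, not from the library): STL includes go between the STD suppression macros, user code in the JPH namespace uses the BEGIN/END pair, and JPH_PRECISE_MATH_ON/OFF brackets code that must not be contracted to fused multiply-add.

#include <Jolt/Jolt.h>   // assumed umbrella include that provides Core.h

JPH_SUPPRESS_WARNINGS_STD_BEGIN
#include <map>           // STL headers compile cleanly despite the strict warning level
JPH_SUPPRESS_WARNINGS_STD_END

JPH_NAMESPACE_BEGIN      // opens namespace JPH with the library's warnings suppressed

JPH_PRECISE_MATH_ON      // no FMA contraction inside this region (where supported)
inline float PreciseMulAdd(float inA, float inB, float inC)
{
    return inA * inB + inC;
}
JPH_PRECISE_MATH_OFF

JPH_NAMESPACE_END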
0
repos/c2z/use_cases/JoltPhysics/include
repos/c2z/use_cases/JoltPhysics/include/Core/StaticArray.h
// Jolt Physics Library (https://github.com/jrouwe/JoltPhysics) // SPDX-FileCopyrightText: 2021 Jorrit Rouwe // SPDX-License-Identifier: MIT #pragma once JPH_NAMESPACE_BEGIN /// Simple variable length array backed by a fixed size buffer template <class T, uint N> class [[nodiscard]] StaticArray { public: using value_type = T; using size_type = uint; static constexpr uint Capacity = N; /// Default constructor StaticArray() = default; /// Constructor from initializer list explicit StaticArray(std::initializer_list<T> inList) { JPH_ASSERT(inList.size() <= N); for (typename std::initializer_list<T>::iterator i = inList.begin(); i != inList.end(); ++i) ::new (reinterpret_cast<T *>(&mElements[mSize++])) T(*i); } /// Copy constructor StaticArray(const StaticArray<T, N> &inRHS) { while (mSize < inRHS.mSize) { ::new (&mElements[mSize]) T(inRHS[mSize]); ++mSize; } } /// Destruct all elements ~StaticArray() { if constexpr (!is_trivially_destructible<T>()) for (T *e = reinterpret_cast<T *>(mElements), *end = e + mSize; e < end; ++e) e->~T(); } /// Destruct all elements and set length to zero void clear() { if constexpr (!is_trivially_destructible<T>()) for (T *e = reinterpret_cast<T *>(mElements), *end = e + mSize; e < end; ++e) e->~T(); mSize = 0; } /// Add element to the back of the array void push_back(const T &inElement) { JPH_ASSERT(mSize < N); ::new (&mElements[mSize++]) T(inElement); } /// Construct element at the back of the array template <class... A> void emplace_back(A &&... inElement) { JPH_ASSERT(mSize < N); ::new (&mElements[mSize++]) T(std::forward<A>(inElement)...); } /// Remove element from the back of the array void pop_back() { JPH_ASSERT(mSize > 0); reinterpret_cast<T &>(mElements[--mSize]).~T(); } /// Returns true if there are no elements in the array bool empty() const { return mSize == 0; } /// Returns amount of elements in the array size_type size() const { return mSize; } /// Returns maximum amount of elements the array can hold size_type capacity() const { return N; } /// Resize array to new length void resize(size_type inNewSize) { JPH_ASSERT(inNewSize <= N); if constexpr (!is_trivially_constructible<T>()) for (T *element = reinterpret_cast<T *>(mElements) + mSize, *element_end = reinterpret_cast<T *>(mElements) + inNewSize; element < element_end; ++element) ::new (element) T; if constexpr (!is_trivially_destructible<T>()) for (T *element = reinterpret_cast<T *>(mElements) + inNewSize, *element_end = reinterpret_cast<T *>(mElements) + mSize; element < element_end; ++element) element->~T(); mSize = inNewSize; } using const_iterator = const T *; /// Iterators const_iterator begin() const { return reinterpret_cast<const T *>(mElements); } const_iterator end() const { return reinterpret_cast<const T *>(mElements + mSize); } using iterator = T *; iterator begin() { return reinterpret_cast<T *>(mElements); } iterator end() { return reinterpret_cast<T *>(mElements + mSize); } const T * data() const { return reinterpret_cast<const T *>(mElements); } T * data() { return reinterpret_cast<T *>(mElements); } /// Access element T & operator [] (size_type inIdx) { JPH_ASSERT(inIdx < mSize); return reinterpret_cast<T &>(mElements[inIdx]); } const T & operator [] (size_type inIdx) const { JPH_ASSERT(inIdx < mSize); return reinterpret_cast<const T &>(mElements[inIdx]); } /// First element in the array const T & front() const { JPH_ASSERT(mSize > 0); return reinterpret_cast<const T &>(mElements[0]); } T & front() { JPH_ASSERT(mSize > 0); return reinterpret_cast<T &>(mElements[0]); } /// Last 
element in the array const T & back() const { JPH_ASSERT(mSize > 0); return reinterpret_cast<const T &>(mElements[mSize - 1]); } T & back() { JPH_ASSERT(mSize > 0); return reinterpret_cast<T &>(mElements[mSize - 1]); } /// Remove one element from the array void erase(const_iterator inIter) { size_type p = size_type(inIter - begin()); JPH_ASSERT(p < mSize); reinterpret_cast<T &>(mElements[p]).~T(); if (p + 1 < mSize) memmove(mElements + p, mElements + p + 1, (mSize - p - 1) * sizeof(T)); --mSize; } /// Remove multiple elements from the array void erase(const_iterator inBegin, const_iterator inEnd) { size_type p = size_type(inBegin - begin()); size_type n = size_type(inEnd - inBegin); JPH_ASSERT(inEnd <= end()); for (size_type i = 0; i < n; ++i) reinterpret_cast<T &>(mElements[p + i]).~T(); if (p + n < mSize) memmove(mElements + p, mElements + p + n, (mSize - p - n) * sizeof(T)); mSize -= n; } /// Assignment operator StaticArray<T, N> & operator = (const StaticArray<T, N> &inRHS) { size_type rhs_size = inRHS.size(); if ((void *)this != (void *)&inRHS) { clear(); while (mSize < rhs_size) { ::new (&mElements[mSize]) T(inRHS[mSize]); ++mSize; } } return *this; } /// Assignment operator with static array of different max length template <uint M> StaticArray<T, N> & operator = (const StaticArray<T, M> &inRHS) { size_type rhs_size = inRHS.size(); JPH_ASSERT(rhs_size <= N); if ((void *)this != (void *)&inRHS) { clear(); while (mSize < rhs_size) { ::new (&mElements[mSize]) T(inRHS[mSize]); ++mSize; } } return *this; } /// Comparing arrays bool operator == (const StaticArray<T, N> &inRHS) const { if (mSize != inRHS.mSize) return false; for (size_type i = 0; i < mSize; ++i) if (!(reinterpret_cast<const T &>(mElements[i]) == reinterpret_cast<const T &>(inRHS.mElements[i]))) return false; return true; } bool operator != (const StaticArray<T, N> &inRHS) const { if (mSize != inRHS.mSize) return true; for (size_type i = 0; i < mSize; ++i) if (reinterpret_cast<const T &>(mElements[i]) != reinterpret_cast<const T &>(inRHS.mElements[i])) return true; return false; } protected: struct alignas(T) Storage { uint8 mData[sizeof(T)]; }; static_assert(sizeof(T) == sizeof(Storage), "Mismatch in size"); static_assert(alignof(T) == alignof(Storage), "Mismatch in alignment"); size_type mSize = 0; Storage mElements[N]; }; JPH_NAMESPACE_END JPH_SUPPRESS_WARNING_PUSH JPH_CLANG_SUPPRESS_WARNING("-Wc++98-compat") namespace std { /// Declare std::hash for StaticArray template <class T, JPH::uint N> struct hash<JPH::StaticArray<T, N>> { size_t operator () (const JPH::StaticArray<T, N> &inRHS) const { std::size_t ret = 0; // Hash length first JPH::HashCombine(ret, inRHS.size()); // Then hash elements for (const T &t : inRHS) JPH::HashCombine(ret, t); return ret; } }; } JPH_SUPPRESS_WARNING_POP
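A brief sketch of typical use: unlike std::vector there is no heap allocation, the capacity N is fixed, and overrunning it trips an assert instead of growing.

#include <Jolt/Jolt.h>
#include <Jolt/Core/StaticArray.h>

using namespace JPH;

void StaticArrayExample()
{
    StaticArray<int, 8> values;        // storage for 8 ints lives inside the object
    for (int i = 0; i < 5; ++i)
        values.push_back(i * i);       // 0, 1, 4, 9, 16

    values.erase(values.begin() + 1);  // removes the '1', shifts the tail down

    int sum = 0;
    for (int v : values)               // begin()/end() allow range-based for
        sum += v;
    JPH_ASSERT(sum == 0 + 4 + 9 + 16);
}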
0
repos/c2z/use_cases/JoltPhysics/include
repos/c2z/use_cases/JoltPhysics/include/Core/UnorderedMap.h
// Jolt Physics Library (https://github.com/jrouwe/JoltPhysics) // SPDX-FileCopyrightText: 2021 Jorrit Rouwe // SPDX-License-Identifier: MIT #pragma once JPH_SUPPRESS_WARNINGS_STD_BEGIN #include <unordered_map> JPH_SUPPRESS_WARNINGS_STD_END JPH_NAMESPACE_BEGIN template <class Key, class T, class Hash = std::hash<Key>, class KeyEqual = std::equal_to<Key>> using UnorderedMap = std::unordered_map<Key, T, Hash, KeyEqual, STLAllocator<pair<const Key, T>>>; JPH_NAMESPACE_END
0
repos/c2z/use_cases/JoltPhysics/include
repos/c2z/use_cases/JoltPhysics/include/Core/Semaphore.h
// Jolt Physics Library (https://github.com/jrouwe/JoltPhysics) // SPDX-FileCopyrightText: 2023 Jorrit Rouwe // SPDX-License-Identifier: MIT #pragma once JPH_SUPPRESS_WARNINGS_STD_BEGIN #include <atomic> #include <mutex> #include <condition_variable> JPH_SUPPRESS_WARNINGS_STD_END JPH_NAMESPACE_BEGIN // Things we're using from STL using std::atomic; using std::mutex; using std::condition_variable; /// Implements a semaphore /// When we switch to C++20 we can use counting_semaphore to unify this class Semaphore { public: /// Constructor Semaphore(); ~Semaphore(); /// Release the semaphore, signalling the thread waiting on the barrier that there may be work void Release(uint inNumber = 1); /// Acquire the semaphore inNumber times void Acquire(uint inNumber = 1); /// Get the current value of the semaphore inline int GetValue() const { return mCount; } private: #ifdef JPH_PLATFORM_WINDOWS // On windows we use a semaphore object since it is more efficient than a lock and a condition variable alignas(JPH_CACHE_LINE_SIZE) atomic<int> mCount { 0 }; ///< We increment mCount for every release, to acquire we decrement the count. If the count is negative we know that we are waiting on the actual semaphore. void * mSemaphore; ///< The semaphore is an expensive construct so we only acquire/release it if we know that we need to wait/have waiting threads #else // Other platforms: Emulate a semaphore using a mutex, condition variable and count mutex mLock; condition_variable mWaitVariable; int mCount = 0; #endif }; JPH_NAMESPACE_END
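A minimal sketch of the intended signal/wait pattern between two threads (hypothetical example code):

#include <Jolt/Jolt.h>
#include <Jolt/Core/Semaphore.h>
#include <thread>

using namespace JPH;

void SemaphoreExample()
{
    Semaphore semaphore;

    std::thread worker([&semaphore]
    {
        // ... produce two work items ...
        semaphore.Release(2);          // signal that two items are ready
    });

    semaphore.Acquire(2);              // blocks until both releases have happened
    worker.join();
}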
0
repos/c2z/use_cases/JoltPhysics/include
repos/c2z/use_cases/JoltPhysics/include/Core/MutexArray.h
// Jolt Physics Library (https://github.com/jrouwe/JoltPhysics) // SPDX-FileCopyrightText: 2021 Jorrit Rouwe // SPDX-License-Identifier: MIT #pragma once #include <Jolt/Core/NonCopyable.h> JPH_NAMESPACE_BEGIN /// A mutex array protects a number of resources with a limited amount of mutexes. /// It uses hashing to find the mutex of a particular object. /// The idea is that if the amount of threads is much smaller than the amount of mutexes, there is a relatively small chance that two different objects map to the same mutex. template <class MutexType> class MutexArray : public NonCopyable { public: /// Constructor, constructs an empty mutex array that you need to initialize with Init() MutexArray() = default; /// Constructor, constructs an array with inNumMutexes entries explicit MutexArray(uint inNumMutexes) { Init(inNumMutexes); } /// Destructor ~MutexArray() { delete [] mMutexStorage; } /// Initialization /// @param inNumMutexes The amount of mutexes to allocate void Init(uint inNumMutexes) { JPH_ASSERT(mMutexStorage == nullptr); JPH_ASSERT(inNumMutexes > 0 && IsPowerOf2(inNumMutexes)); mMutexStorage = new MutexStorage[inNumMutexes]; mNumMutexes = inNumMutexes; } /// Get the number of mutexes that were allocated inline uint GetNumMutexes() const { return mNumMutexes; } /// Convert an object index to a mutex index inline uint32 GetMutexIndex(uint32 inObjectIndex) const { std::hash<uint32> hasher; return hasher(inObjectIndex) & (mNumMutexes - 1); } /// Get the mutex belonging to a certain object by index inline MutexType & GetMutexByObjectIndex(uint32 inObjectIndex) { return mMutexStorage[GetMutexIndex(inObjectIndex)].mMutex; } /// Get a mutex by index in the array inline MutexType & GetMutexByIndex(uint32 inMutexIndex) { return mMutexStorage[inMutexIndex].mMutex; } /// Lock all mutexes void LockAll() { JPH_PROFILE_FUNCTION(); MutexStorage *end = mMutexStorage + mNumMutexes; for (MutexStorage *m = mMutexStorage; m < end; ++m) m->mMutex.lock(); } /// Unlock all mutexes void UnlockAll() { JPH_PROFILE_FUNCTION(); MutexStorage *end = mMutexStorage + mNumMutexes; for (MutexStorage *m = mMutexStorage; m < end; ++m) m->mMutex.unlock(); } private: /// Align the mutex to a cache line to ensure there is no false sharing (this is platform dependent, we do this to be safe) struct alignas(JPH_CACHE_LINE_SIZE) MutexStorage { JPH_OVERRIDE_NEW_DELETE MutexType mMutex; }; MutexStorage * mMutexStorage = nullptr; uint mNumMutexes = 0; }; JPH_NAMESPACE_END
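Sketch of guarding many objects with a small pool of mutexes; std::mutex as the MutexType and the pool size are illustrative choices (the count must be a power of 2):

#include <Jolt/Jolt.h>
#include <Jolt/Core/MutexArray.h>
#include <mutex>

using namespace JPH;

// One pool shared by all objects; 64 mutexes protect any number of objects
static MutexArray<std::mutex> sObjectMutexes(64);

void TouchObject(uint32 inObjectIndex)
{
    // Hashing picks one of the 64 mutexes for this object index
    std::lock_guard<std::mutex> lock(sObjectMutexes.GetMutexByObjectIndex(inObjectIndex));
    // ... mutate the object's state here ...
}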
0
repos/c2z/use_cases/JoltPhysics/include
repos/c2z/use_cases/JoltPhysics/include/Core/Memory.h
// Jolt Physics Library (https://github.com/jrouwe/JoltPhysics) // SPDX-FileCopyrightText: 2021 Jorrit Rouwe // SPDX-License-Identifier: MIT #pragma once JPH_NAMESPACE_BEGIN #ifndef JPH_DISABLE_CUSTOM_ALLOCATOR // Normal memory allocation, must be at least 8 byte aligned on 32 bit platform and 16 byte aligned on 64 bit platform using AllocateFunction = void *(*)(size_t inSize); using FreeFunction = void (*)(void *inBlock); // Aligned memory allocation using AlignedAllocateFunction = void *(*)(size_t inSize, size_t inAlignment); using AlignedFreeFunction = void (*)(void *inBlock); // User defined allocation / free functions extern AllocateFunction Allocate; extern FreeFunction Free; extern AlignedAllocateFunction AlignedAllocate; extern AlignedFreeFunction AlignedFree; /// Register platform default allocation / free functions void RegisterDefaultAllocator(); /// Macro to override the new and delete functions #define JPH_OVERRIDE_NEW_DELETE \ JPH_INLINE void *operator new (size_t inCount) { return JPH::Allocate(inCount); } \ JPH_INLINE void operator delete (void *inPointer) noexcept { JPH::Free(inPointer); } \ JPH_INLINE void *operator new[] (size_t inCount) { return JPH::Allocate(inCount); } \ JPH_INLINE void operator delete[] (void *inPointer) noexcept { JPH::Free(inPointer); } \ JPH_INLINE void *operator new (size_t inCount, std::align_val_t inAlignment) { return JPH::AlignedAllocate(inCount, static_cast<size_t>(inAlignment)); } \ JPH_INLINE void operator delete (void *inPointer, std::align_val_t inAlignment) noexcept { JPH::AlignedFree(inPointer); } \ JPH_INLINE void *operator new[] (size_t inCount, std::align_val_t inAlignment) { return JPH::AlignedAllocate(inCount, static_cast<size_t>(inAlignment)); } \ JPH_INLINE void operator delete[] (void *inPointer, std::align_val_t inAlignment) noexcept { JPH::AlignedFree(inPointer); } #else // Directly define the allocation functions void *Allocate(size_t inSize); void Free(void *inBlock); void *AlignedAllocate(size_t inSize, size_t inAlignment); void AlignedFree(void *inBlock); // Don't implement allocator registering inline void RegisterDefaultAllocator() { } // Don't override new/delete #define JPH_OVERRIDE_NEW_DELETE #endif // !JPH_DISABLE_CUSTOM_ALLOCATOR JPH_NAMESPACE_END
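When the custom allocator is enabled, an application can either call RegisterDefaultAllocator() or assign its own hooks before using the library. A sketch with malloc-based hooks (hypothetical code; note the alignment contract stated in the header, and that the aligned pair must be hooked as well):

#include <Jolt/Jolt.h>
#include <Jolt/Core/Memory.h>
#include <cstdlib>

#ifndef JPH_DISABLE_CUSTOM_ALLOCATOR
static void *MyAllocate(size_t inSize) { return malloc(inSize); } // assumes malloc satisfies the alignment contract
static void MyFree(void *inBlock)      { free(inBlock); }

void InstallAllocator()
{
    JPH::Allocate = MyAllocate;
    JPH::Free = MyFree;
    // JPH::AlignedAllocate / JPH::AlignedFree must be assigned too, e.g. with a
    // platform aligned-allocation pair; omitted here for brevity.
}
#endif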
0
repos/c2z/use_cases/JoltPhysics/include
repos/c2z/use_cases/JoltPhysics/include/Core/FPException.h
// Jolt Physics Library (https://github.com/jrouwe/JoltPhysics) // SPDX-FileCopyrightText: 2021 Jorrit Rouwe // SPDX-License-Identifier: MIT #pragma once #include <Jolt/Core/FPControlWord.h> JPH_NAMESPACE_BEGIN #ifdef JPH_FLOATING_POINT_EXCEPTIONS_ENABLED #if defined(JPH_USE_SSE) /// Enable floating point divide by zero exception and exceptions on invalid numbers class FPExceptionsEnable : public FPControlWord<0, _MM_MASK_DIV_ZERO | _MM_MASK_INVALID> { }; /// Disable invalid floating point value exceptions class FPExceptionDisableInvalid : public FPControlWord<_MM_MASK_INVALID, _MM_MASK_INVALID> { }; /// Disable division by zero floating point exceptions class FPExceptionDisableDivByZero : public FPControlWord<_MM_MASK_DIV_ZERO, _MM_MASK_DIV_ZERO> { }; #elif defined(JPH_CPU_ARM) && defined(JPH_COMPILER_MSVC) /// Enable floating point divide by zero exception and exceptions on invalid numbers class FPExceptionsEnable : public FPControlWord<0, _EM_INVALID | _EM_ZERODIVIDE> { }; /// Disable invalid floating point value exceptions class FPExceptionDisableInvalid : public FPControlWord<_EM_INVALID, _EM_INVALID> { }; /// Disable division by zero floating point exceptions class FPExceptionDisableDivByZero : public FPControlWord<_EM_ZERODIVIDE, _EM_ZERODIVIDE> { }; #elif defined(JPH_CPU_ARM) /// Invalid operation exception bit static constexpr uint64 FP_IOE = 1 << 8; /// Enable divide by zero exception bit static constexpr uint64 FP_DZE = 1 << 9; /// Enable floating point divide by zero exception and exceptions on invalid numbers class FPExceptionsEnable : public FPControlWord<FP_IOE | FP_DZE, FP_IOE | FP_DZE> { }; /// Disable invalid floating point value exceptions class FPExceptionDisableInvalid : public FPControlWord<0, FP_IOE> { }; /// Disable division by zero floating point exceptions class FPExceptionDisableDivByZero : public FPControlWord<0, FP_DZE> { }; #elif defined(JPH_CPU_WASM) // Not supported class FPExceptionsEnable { }; class FPExceptionDisableInvalid { }; class FPExceptionDisableDivByZero { }; #else #error Unsupported CPU architecture #endif #else /// Dummy implementations class FPExceptionsEnable { }; class FPExceptionDisableInvalid { }; class FPExceptionDisableDivByZero { }; #endif JPH_NAMESPACE_END
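These classes are RAII scope guards: the constructor flips the relevant control word bits and the destructor restores them. A hypothetical sketch, assuming a build with JPH_FLOATING_POINT_EXCEPTIONS_ENABLED on a supported CPU:

#include <Jolt/Jolt.h>
#include <Jolt/Core/FPException.h>

using namespace JPH;

float GuardedMath(float inValue)
{
    FPExceptionsEnable enable;                 // invalid / div-by-zero now trap in this scope

    {
        FPExceptionDisableDivByZero allow_inf; // temporarily permit 1/0 -> inf
        volatile float inf = 1.0f / (inValue * 0.0f);
        (void)inf;
    }                                          // trapping behavior restored here

    return inValue * 2.0f;
}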
0
repos/c2z/use_cases/JoltPhysics/include
repos/c2z/use_cases/JoltPhysics/include/Core/UnorderedSet.h
// Jolt Physics Library (https://github.com/jrouwe/JoltPhysics) // SPDX-FileCopyrightText: 2021 Jorrit Rouwe // SPDX-License-Identifier: MIT #pragma once JPH_SUPPRESS_WARNINGS_STD_BEGIN #include <unordered_set> JPH_SUPPRESS_WARNINGS_STD_END JPH_NAMESPACE_BEGIN template <class Key, class Hash = std::hash<Key>, class KeyEqual = std::equal_to<Key>> using UnorderedSet = std::unordered_set<Key, Hash, KeyEqual, STLAllocator<Key>>; JPH_NAMESPACE_END
0
repos/c2z/use_cases/JoltPhysics/include
repos/c2z/use_cases/JoltPhysics/include/Core/LockFreeHashMap.h
// Jolt Physics Library (https://github.com/jrouwe/JoltPhysics) // SPDX-FileCopyrightText: 2021 Jorrit Rouwe // SPDX-License-Identifier: MIT #pragma once #include <Jolt/Core/NonCopyable.h> #include <Jolt/Core/Atomics.h> JPH_NAMESPACE_BEGIN /// Allocator for a lock free hash map class LFHMAllocator : public NonCopyable { public: /// Destructor inline ~LFHMAllocator(); /// Initialize the allocator /// @param inObjectStoreSizeBytes Number of bytes to reserve for all key value pairs inline void Init(uint inObjectStoreSizeBytes); /// Clear all allocations inline void Clear(); /// Allocate a new block of data /// @param inBlockSize Size of block to allocate (will potentially return a smaller block if memory is full). /// @param ioBegin Should be the start of the first free byte in current memory block on input, will contain the start of the first free byte in allocated block on return. /// @param ioEnd Should be the byte beyond the current memory block on input, will contain the byte beyond the allocated block on return. inline void Allocate(uint32 inBlockSize, uint32 &ioBegin, uint32 &ioEnd); /// Convert a pointer to an offset template <class T> inline uint32 ToOffset(const T *inData) const; /// Convert an offset to a pointer template <class T> inline T * FromOffset(uint32 inOffset) const; private: uint8 * mObjectStore = nullptr; ///< This contains a contiguous list of objects (possibly of varying size) uint32 mObjectStoreSizeBytes = 0; ///< The size of mObjectStore in bytes atomic<uint32> mWriteOffset { 0 }; ///< Next offset to write to in mObjectStore }; /// Allocator context object for a lock free hash map that allocates a larger memory block at once and hands it out in smaller portions. /// This avoids contention on the atomic LFHMAllocator::mWriteOffset. class LFHMAllocatorContext : public NonCopyable { public: /// Construct a new allocator context inline LFHMAllocatorContext(LFHMAllocator &inAllocator, uint32 inBlockSize); /// @brief Allocate data block /// @param inSize Size of block to allocate. /// @param inAlignment Alignment of block to allocate. /// @param outWriteOffset Offset in buffer where block is located /// @return True if allocation succeeded inline bool Allocate(uint32 inSize, uint32 inAlignment, uint32 &outWriteOffset); private: LFHMAllocator & mAllocator; uint32 mBlockSize; uint32 mBegin = 0; uint32 mEnd = 0; }; /// Very simple lock free hash map that only allows insertion and retrieval, and provides a fixed amount of buckets and fixed storage. /// Note: This class currently assumes key and value are simple types that need no calls to the destructor. template <class Key, class Value> class LockFreeHashMap : public NonCopyable { public: using MapType = LockFreeHashMap<Key, Value>; /// Constructor / destructor explicit LockFreeHashMap(LFHMAllocator &inAllocator) : mAllocator(inAllocator) { } ~LockFreeHashMap(); /// Initialization /// @param inMaxBuckets Max amount of buckets to use in the hashmap. Must be power of 2. void Init(uint32 inMaxBuckets); /// Remove all elements. /// Note that this cannot happen simultaneously with adding new elements. void Clear(); /// Get the current amount of buckets that the map is using uint32 GetNumBuckets() const { return mNumBuckets; } /// Get the maximum amount of buckets that this map supports uint32 GetMaxBuckets() const { return mMaxBuckets; } /// Update the number of buckets. This must be done after clearing the map and cannot be done concurrently with any other operations on the map. 
/// Note that the number of buckets can never become bigger than the specified max buckets during initialization and that it must be a power of 2. void SetNumBuckets(uint32 inNumBuckets); /// A key / value pair that is inserted in the map class KeyValue { public: const Key & GetKey() const { return mKey; } Value & GetValue() { return mValue; } const Value & GetValue() const { return mValue; } private: template <class K, class V> friend class LockFreeHashMap; Key mKey; ///< Key for this entry uint32 mNextOffset; ///< Offset in mObjectStore of next KeyValue entry with same hash Value mValue; ///< Value for this entry + optionally extra bytes }; /// Insert a new element; returns null if the map is full. /// Multiple threads can be inserting in the map at the same time. template <class... Params> inline KeyValue * Create(LFHMAllocatorContext &ioContext, const Key &inKey, uint64 inKeyHash, int inExtraBytes, Params &&... inConstructorParams); /// Find an element; returns null if not found inline const KeyValue * Find(const Key &inKey, uint64 inKeyHash) const; /// Value of an invalid handle const static uint32 cInvalidHandle = uint32(-1); /// Convert a key value pair to a uint32 handle inline uint32 ToHandle(const KeyValue *inKeyValue) const; /// Convert uint32 handle back to key and value inline const KeyValue * FromHandle(uint32 inHandle) const; #ifdef JPH_ENABLE_ASSERTS /// Get the number of key value pairs that this map currently contains. /// Available only when asserts are enabled because adding elements creates contention on this atomic and negatively affects performance. inline uint32 GetNumKeyValues() const { return mNumKeyValues; } #endif // JPH_ENABLE_ASSERTS /// Get all key/value pairs inline void GetAllKeyValues(Array<const KeyValue *> &outAll) const; /// Non-const iterator struct Iterator { /// Comparison bool operator == (const Iterator &inRHS) const { return mMap == inRHS.mMap && mBucket == inRHS.mBucket && mOffset == inRHS.mOffset; } bool operator != (const Iterator &inRHS) const { return !(*this == inRHS); } /// Convert to key value pair KeyValue & operator * (); /// Next item Iterator & operator ++ (); MapType * mMap; uint32 mBucket; uint32 mOffset; }; /// Iterate over the map, note that it is not safe to do this in parallel to Clear(). /// It is safe to do this while adding elements to the map, but newly added elements may or may not be returned by the iterator. Iterator begin(); Iterator end(); #ifdef _DEBUG /// Output stats about this map to the log void TraceStats() const; #endif private: LFHMAllocator & mAllocator; ///< Allocator used to allocate key value pairs #ifdef JPH_ENABLE_ASSERTS atomic<uint32> mNumKeyValues = 0; ///< Number of key value pairs in the store #endif // JPH_ENABLE_ASSERTS atomic<uint32> * mBuckets = nullptr; ///< This contains the offset in mObjectStore of the first object with a particular hash uint32 mNumBuckets = 0; ///< Current number of buckets uint32 mMaxBuckets = 0; ///< Maximum number of buckets }; JPH_NAMESPACE_END #include "LockFreeHashMap.inl"
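A sketch of the insertion and lookup flow with simple POD key/value types; the store and block sizes are illustrative, and the hash can come from any 64-bit hash of the key:

#include <Jolt/Jolt.h>
#include <Jolt/Core/LockFreeHashMap.h>

using namespace JPH;

void LockFreeHashMapExample()
{
    LFHMAllocator allocator;
    allocator.Init(1024 * 1024);                 // 1 MB object store for all key value pairs

    LockFreeHashMap<uint32, float> map(allocator);
    map.Init(1024);                              // max buckets, must be a power of 2

    // Each inserting thread should use its own context to reduce contention
    LFHMAllocatorContext ctx(allocator, 4096);

    uint32 key = 42;
    uint64 hash = std::hash<uint32>{}(key);
    map.Create(ctx, key, hash, 0 /* extra bytes */, 3.14f); // returns null if full

    const LockFreeHashMap<uint32, float>::KeyValue *kv = map.Find(key, hash);
    if (kv != nullptr)
        JPH_ASSERT(kv->GetValue() == 3.14f);
}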
0
repos/c2z/use_cases/JoltPhysics/include
repos/c2z/use_cases/JoltPhysics/include/Core/Factory.h
// Jolt Physics Library (https://github.com/jrouwe/JoltPhysics) // SPDX-FileCopyrightText: 2021 Jorrit Rouwe // SPDX-License-Identifier: MIT #pragma once #include <Jolt/Core/RTTI.h> #include <Jolt/Core/UnorderedMap.h> JPH_NAMESPACE_BEGIN /// Factory, to create RTTI objects class Factory { public: JPH_OVERRIDE_NEW_DELETE /// Create an object void * CreateObject(const char *inName); /// Find type info for a specific class by name const RTTI * Find(const char *inName); /// Find type info for a specific class by hash const RTTI * Find(uint32 inHash); /// Register an object with the factory. Returns false on failure. bool Register(const RTTI *inRTTI); /// Register a list of objects with the factory. Returns false on failure. bool Register(const RTTI **inRTTIs, uint inNumber); /// Unregisters all types void Clear(); /// Get all registered classes Array<const RTTI *> GetAllClasses() const; /// Singleton factory instance static Factory * sInstance; private: using ClassNameMap = UnorderedMap<string_view, const RTTI *>; using ClassHashMap = UnorderedMap<uint32, const RTTI *>; /// Map of class names to type info ClassNameMap mClassNameMap; // Map of class hash to type info ClassHashMap mClassHashMap; }; JPH_NAMESPACE_END
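Typical use, as a sketch based on the library's usual startup sequence (RegisterTypes()/UnregisterTypes() and the class name used here come from outside this header and are assumptions): create the singleton, register types, then instantiate by name.

#include <Jolt/Jolt.h>
#include <Jolt/Core/Factory.h>
#include <Jolt/RegisterTypes.h>

void FactoryExample()
{
    // The singleton must exist before type registration
    JPH::Factory::sInstance = new JPH::Factory;
    JPH::RegisterTypes();                       // registers the library's RTTI classes

    // Instantiate a registered class by name (name assumed to be registered)
    void *object = JPH::Factory::sInstance->CreateObject("BoxShapeSettings");
    (void)object;

    JPH::UnregisterTypes();
    delete JPH::Factory::sInstance;
    JPH::Factory::sInstance = nullptr;
}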
0
repos/c2z/use_cases/JoltPhysics/include
repos/c2z/use_cases/JoltPhysics/include/Core/StreamOut.h
// Jolt Physics Library (https://github.com/jrouwe/JoltPhysics) // SPDX-FileCopyrightText: 2021 Jorrit Rouwe // SPDX-License-Identifier: MIT #pragma once JPH_NAMESPACE_BEGIN /// Simple binary output stream class StreamOut { public: /// Virtual destructor virtual ~StreamOut() = default; /// Write a string of bytes to the binary stream virtual void WriteBytes(const void *inData, size_t inNumBytes) = 0; /// Returns true if there was an IO failure virtual bool IsFailed() const = 0; /// Write a primitive (e.g. float, int, etc.) to the binary stream template <class T> void Write(const T &inT) { WriteBytes(&inT, sizeof(inT)); } /// Write a vector of primitives to the binary stream template <class T, class A> void Write(const std::vector<T, A> &inT) { typename Array<T>::size_type len = inT.size(); Write(len); if (!IsFailed()) for (typename Array<T>::size_type i = 0; i < len; ++i) Write(inT[i]); } /// Write a string to the binary stream (writes the number of characters and then the characters) template <class Type, class Traits, class Allocator> void Write(const std::basic_string<Type, Traits, Allocator> &inString) { typename std::basic_string<Type, Traits, Allocator>::size_type len = inString.size(); Write(len); if (!IsFailed()) WriteBytes(inString.data(), len * sizeof(Type)); } /// Write a Vec3 (don't write W) void Write(const Vec3 &inVec) { WriteBytes(&inVec, 3 * sizeof(float)); } /// Write a DVec3 (don't write W) void Write(const DVec3 &inVec) { WriteBytes(&inVec, 3 * sizeof(double)); } /// Write a DMat44 (don't write W component of translation) void Write(const DMat44 &inVec) { Write(inVec.GetColumn4(0)); Write(inVec.GetColumn4(1)); Write(inVec.GetColumn4(2)); Write(inVec.GetTranslation()); } }; JPH_NAMESPACE_END
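Only the two pure virtual functions need implementing; everything else is built on WriteBytes(). A sketch of an in-memory sink (hypothetical class name and design):

#include <Jolt/Jolt.h>
#include <Jolt/Core/StreamOut.h>
#include <cstdint>
#include <vector>

class MemoryStreamOut : public JPH::StreamOut
{
public:
    void WriteBytes(const void *inData, size_t inNumBytes) override
    {
        const uint8_t *bytes = static_cast<const uint8_t *>(inData);
        mData.insert(mData.end(), bytes, bytes + inNumBytes);
    }

    bool IsFailed() const override { return false; } // in-memory writes cannot fail here

    std::vector<uint8_t> mData;
};

void StreamOutExample()
{
    MemoryStreamOut stream;
    stream.Write(42);                            // primitive overload: sizeof(int) bytes
    stream.Write(JPH::Vec3(1.0f, 2.0f, 3.0f));   // Vec3 overload: 3 floats, W omitted
}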
0
repos/c2z/use_cases/JoltPhysics/include
repos/c2z/use_cases/JoltPhysics/include/Core/Profiler.h
// Jolt Physics Library (https://github.com/jrouwe/JoltPhysics) // SPDX-FileCopyrightText: 2021 Jorrit Rouwe // SPDX-License-Identifier: MIT #pragma once JPH_SUPPRESS_WARNINGS_STD_BEGIN #include <mutex> JPH_SUPPRESS_WARNINGS_STD_END #include <Jolt/Core/NonCopyable.h> #include <Jolt/Core/TickCounter.h> #include <Jolt/Core/UnorderedMap.h> #if defined(JPH_EXTERNAL_PROFILE) JPH_NAMESPACE_BEGIN /// Create this class on the stack to start sampling timing information of a particular scope. /// /// Left unimplemented intentionally. Needs to be implemented by the user of the library. /// On construction a measurement should start, on destruction it should be stopped. class alignas(16) ExternalProfileMeasurement : public NonCopyable { public: /// Constructor ExternalProfileMeasurement(const char *inName, uint32 inColor = 0); ~ExternalProfileMeasurement(); private: uint8 mUserData[64]; }; JPH_NAMESPACE_END ////////////////////////////////////////////////////////////////////////////////////////// // Macros to do the actual profiling ////////////////////////////////////////////////////////////////////////////////////////// JPH_SUPPRESS_WARNING_PUSH JPH_CLANG_SUPPRESS_WARNING("-Wc++98-compat-pedantic") // Dummy implementations #define JPH_PROFILE_THREAD_START(name) #define JPH_PROFILE_THREAD_END() #define JPH_PROFILE_NEXTFRAME() #define JPH_PROFILE_DUMP(...) // Scope profiling measurement #define JPH_PROFILE_TAG2(line) profile##line #define JPH_PROFILE_TAG(line) JPH_PROFILE_TAG2(line) /// Macro to collect profiling information. /// /// Usage: /// /// { /// JPH_PROFILE("Operation"); /// do operation; /// } /// #define JPH_PROFILE(...) ExternalProfileMeasurement JPH_PROFILE_TAG(__LINE__)(__VA_ARGS__) // Scope profiling for function #define JPH_PROFILE_FUNCTION() JPH_PROFILE(JPH_FUNCTION_NAME) JPH_SUPPRESS_WARNING_POP #elif defined(JPH_PROFILE_ENABLED) JPH_NAMESPACE_BEGIN class ProfileSample; class ProfileThread; /// Singleton class for managing profiling information class Profiler : public NonCopyable { public: JPH_OVERRIDE_NEW_DELETE /// Increments the frame counter to provide statistics per frame void NextFrame(); /// Dump profiling statistics at the start of the next frame /// @param inTag If not empty, this overrides the auto incrementing number in the filename of the dump file void Dump(const string_view &inTag = string_view()); /// Add a thread to be instrumented void AddThread(ProfileThread *inThread); /// Remove a thread from being instrumented void RemoveThread(ProfileThread *inThread); /// Singleton instance static Profiler * sInstance; private: /// Helper class to freeze ProfileSamples per thread while processing them struct ThreadSamples { String mThreadName; ProfileSample * mSamplesBegin; ProfileSample * mSamplesEnd; }; /// Helper class to aggregate ProfileSamples class Aggregator { public: /// Constructor Aggregator(const char *inName) : mName(inName) { } /// Accumulate results for a measurement void AccumulateMeasurement(uint64 inCyclesInCallWithChildren, uint64 inCyclesInChildren) { mCallCounter++; mTotalCyclesInCallWithChildren += inCyclesInCallWithChildren; mTotalCyclesInChildren += inCyclesInChildren; mMinCyclesInCallWithChildren = min(inCyclesInCallWithChildren, mMinCyclesInCallWithChildren); mMaxCyclesInCallWithChildren = max(inCyclesInCallWithChildren, mMaxCyclesInCallWithChildren); } /// Sort descending by total cycles bool operator < (const Aggregator &inRHS) const { return mTotalCyclesInCallWithChildren > inRHS.mTotalCyclesInCallWithChildren; } /// Identification const char * 
mName; ///< User defined name of this item /// Statistics uint32 mCallCounter = 0; ///< Number of times AccumulateMeasurement was called uint64 mTotalCyclesInCallWithChildren = 0; ///< Total amount of cycles spent in this scope uint64 mTotalCyclesInChildren = 0; ///< Total amount of cycles spent in children of this scope uint64 mMinCyclesInCallWithChildren = 0xffffffffffffffffUL; ///< Minimum amount of cycles spent per call uint64 mMaxCyclesInCallWithChildren = 0; ///< Maximum amount of cycles spent per call }; using Threads = Array<ThreadSamples>; using Aggregators = Array<Aggregator>; using KeyToAggregator = UnorderedMap<const char *, size_t>; /// Helper function to aggregate profile sample data static void sAggregate(int inDepth, uint32 inColor, ProfileSample *&ioSample, const ProfileSample *inEnd, Aggregators &ioAggregators, KeyToAggregator &ioKeyToAggregator); /// Dump profiling statistics void DumpInternal(); void DumpList(const char *inTag, const Aggregators &inAggregators); void DumpChart(const char *inTag, const Threads &inThreads, const KeyToAggregator &inKeyToAggregators, const Aggregators &inAggregators); std::mutex mLock; ///< Lock that protects mThreads Array<ProfileThread *> mThreads; ///< List of all active threads bool mDump = false; ///< When true, the samples are dumped next frame String mDumpTag; ///< When not empty, this overrides the auto incrementing number of the dump filename }; // Class that contains the information of a single scoped measurement class alignas(16) ProfileSample : public NonCopyable { public: JPH_OVERRIDE_NEW_DELETE const char * mName; ///< User defined name of this item uint32 mColor; ///< Color to use for this sample uint8 mDepth; ///< Calculated depth uint8 mUnused[3]; uint64 mStartCycle; ///< Cycle counter at start of measurement uint64 mEndCycle; ///< Cycle counter at end of measurement }; /// Collects all samples of a single thread class ProfileThread : public NonCopyable { public: JPH_OVERRIDE_NEW_DELETE /// Constructor inline ProfileThread(const string_view &inThreadName); inline ~ProfileThread(); static const uint cMaxSamples = 65536; String mThreadName; ///< Name of the thread that we're collecting information for ProfileSample mSamples[cMaxSamples]; ///< Buffer of samples uint mCurrentSample = 0; ///< Next position to write a sample to static thread_local ProfileThread *sInstance; }; /// Create this class on the stack to start sampling timing information of a particular scope class ProfileMeasurement : public NonCopyable { public: /// Constructor inline ProfileMeasurement(const char *inName, uint32 inColor = 0); inline ~ProfileMeasurement(); private: ProfileSample * mSample; ProfileSample mTemp; static bool sOutOfSamplesReported; }; JPH_NAMESPACE_END #include "Profiler.inl" ////////////////////////////////////////////////////////////////////////////////////////// // Macros to do the actual profiling ////////////////////////////////////////////////////////////////////////////////////////// JPH_SUPPRESS_WARNING_PUSH JPH_CLANG_SUPPRESS_WARNING("-Wc++98-compat-pedantic") /// Start instrumenting program #define JPH_PROFILE_START(name) do { Profiler::sInstance = new Profiler; JPH_PROFILE_THREAD_START(name); } while (false) /// End instrumenting program #define JPH_PROFILE_END() do { JPH_PROFILE_THREAD_END(); delete Profiler::sInstance; Profiler::sInstance = nullptr; } while (false) /// Start instrumenting a thread #define JPH_PROFILE_THREAD_START(name) do { if (Profiler::sInstance) ProfileThread::sInstance = new ProfileThread(name); } while 
(false) /// End instrumenting a thread #define JPH_PROFILE_THREAD_END() do { delete ProfileThread::sInstance; ProfileThread::sInstance = nullptr; } while (false) /// Scope profiling measurement #define JPH_PROFILE_TAG2(line) profile##line #define JPH_PROFILE_TAG(line) JPH_PROFILE_TAG2(line) #define JPH_PROFILE(...) ProfileMeasurement JPH_PROFILE_TAG(__LINE__)(__VA_ARGS__) /// Scope profiling for function #define JPH_PROFILE_FUNCTION() JPH_PROFILE(JPH_FUNCTION_NAME) /// Update frame counter #define JPH_PROFILE_NEXTFRAME() Profiler::sInstance->NextFrame() /// Dump profiling info #define JPH_PROFILE_DUMP(...) Profiler::sInstance->Dump(__VA_ARGS__) JPH_SUPPRESS_WARNING_POP #else ////////////////////////////////////////////////////////////////////////////////////////// // Dummy profiling instructions ////////////////////////////////////////////////////////////////////////////////////////// JPH_SUPPRESS_WARNING_PUSH JPH_CLANG_SUPPRESS_WARNING("-Wc++98-compat-pedantic") #define JPH_PROFILE_START(name) #define JPH_PROFILE_END() #define JPH_PROFILE_THREAD_START(name) #define JPH_PROFILE_THREAD_END() #define JPH_PROFILE(...) #define JPH_PROFILE_FUNCTION() #define JPH_PROFILE_NEXTFRAME() #define JPH_PROFILE_DUMP(...) JPH_SUPPRESS_WARNING_POP #endif
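The macros are the intended interface in all three configurations; a sketch of instrumenting a loop, assuming the internal profiler build (JPH_PROFILE_ENABLED). With profiling disabled the macros compile away entirely.

#include <Jolt/Jolt.h>
#include <Jolt/Core/Profiler.h>

using namespace JPH;

void Simulate()
{
    JPH_PROFILE_FUNCTION();            // samples this whole function under its name
    {
        JPH_PROFILE("Collision");      // named nested scope
        // ... work ...
    }
}

void RunInstrumented()
{
    JPH_PROFILE_START("Main");         // creates the profiler and instruments this thread
    for (int frame = 0; frame < 3; ++frame)
    {
        Simulate();
        if (frame == 1)
            JPH_PROFILE_DUMP();        // requests stats, written during the next frame
        JPH_PROFILE_NEXTFRAME();       // per-frame statistics boundary
    }
    JPH_PROFILE_END();
}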
0
repos/c2z/use_cases/JoltPhysics/include
repos/c2z/use_cases/JoltPhysics/include/Core/FPControlWord.h
// Jolt Physics Library (https://github.com/jrouwe/JoltPhysics) // SPDX-FileCopyrightText: 2021 Jorrit Rouwe // SPDX-License-Identifier: MIT #pragma once #include <Jolt/Core/NonCopyable.h> JPH_NAMESPACE_BEGIN #ifdef JPH_USE_SSE /// Helper class that needs to be put on the stack to update the state of the floating point control word. /// This state is kept per thread. template <uint Value, uint Mask> class FPControlWord : public NonCopyable { public: FPControlWord() { mPrevState = _mm_getcsr(); _mm_setcsr((mPrevState & ~Mask) | Value); } ~FPControlWord() { _mm_setcsr((_mm_getcsr() & ~Mask) | (mPrevState & Mask)); } private: uint mPrevState; }; #elif defined(JPH_CPU_ARM) && defined(JPH_COMPILER_MSVC) /// Helper class that needs to be put on the stack to update the state of the floating point control word. /// This state is kept per thread. template <unsigned int Value, unsigned int Mask> class FPControlWord : public NonCopyable { public: FPControlWord() { // Read state before change _controlfp_s(&mPrevState, 0, 0); // Update the state unsigned int dummy; _controlfp_s(&dummy, Value, Mask); } ~FPControlWord() { // Restore state unsigned int dummy; _controlfp_s(&dummy, mPrevState, Mask); } private: unsigned int mPrevState; }; #elif defined(JPH_CPU_ARM) && defined(JPH_USE_NEON) /// Helper class that needs to be put on the stack to update the state of the floating point control word. /// This state is kept per thread. template <uint64 Value, uint64 Mask> class FPControlWord : public NonCopyable { public: FPControlWord() { uint64 val; asm volatile("mrs %0, fpcr" : "=r" (val)); mPrevState = val; val &= ~Mask; val |= Value; asm volatile("msr fpcr, %0" : /* no output */ : "r" (val)); } ~FPControlWord() { uint64 val; asm volatile("mrs %0, fpcr" : "=r" (val)); val &= ~Mask; val |= mPrevState & Mask; asm volatile("msr fpcr, %0" : /* no output */ : "r" (val)); } private: uint64 mPrevState; }; #elif defined(JPH_CPU_ARM) /// Helper class that needs to be put on the stack to update the state of the floating point control word. /// This state is kept per thread. template <uint32 Value, uint32 Mask> class FPControlWord : public NonCopyable { public: FPControlWord() { uint32 val; asm volatile("vmrs %0, fpscr" : "=r" (val)); mPrevState = val; val &= ~Mask; val |= Value; asm volatile("vmsr fpscr, %0" : /* no output */ : "r" (val)); } ~FPControlWord() { uint32 val; asm volatile("vmrs %0, fpscr" : "=r" (val)); val &= ~Mask; val |= mPrevState & Mask; asm volatile("vmsr fpscr, %0" : /* no output */ : "r" (val)); } private: uint32 mPrevState; }; #elif defined(JPH_CPU_WASM) // Not supported #else #error Unsupported CPU architecture #endif JPH_NAMESPACE_END
0
repos/c2z/use_cases/JoltPhysics/include
repos/c2z/use_cases/JoltPhysics/include/Core/TickCounter.h
// Jolt Physics Library (https://github.com/jrouwe/JoltPhysics) // SPDX-FileCopyrightText: 2021 Jorrit Rouwe // SPDX-License-Identifier: MIT #pragma once // Include for __rdtsc #if defined(JPH_PLATFORM_WINDOWS) #include <intrin.h> #elif defined(JPH_CPU_X86) && defined(JPH_COMPILER_GCC) #include <x86intrin.h> #endif JPH_NAMESPACE_BEGIN #if defined(JPH_PLATFORM_WINDOWS_UWP) || (defined(JPH_PLATFORM_WINDOWS) && defined(JPH_CPU_ARM)) /// Functionality to get the processor's cycle counter uint64 GetProcessorTickCount(); // Not inline to avoid having to include Windows.h #else /// Functionality to get the processor's cycle counter JPH_INLINE uint64 GetProcessorTickCount() { #if defined(JPH_PLATFORM_BLUE) return JPH_PLATFORM_BLUE_GET_TICKS(); #elif defined(JPH_CPU_X86) return __rdtsc(); #elif defined(JPH_CPU_ARM) && defined(JPH_USE_NEON) uint64 val; asm volatile("mrs %0, cntvct_el0" : "=r" (val)); return val; #elif defined(JPH_CPU_ARM) return 0; // Not supported #elif defined(JPH_CPU_WASM) return 0; // Not supported #else #error Undefined #endif } #endif // JPH_PLATFORM_WINDOWS_UWP || (JPH_PLATFORM_WINDOWS && JPH_CPU_ARM) /// Get the amount of ticks per second; note that this number will never be fully accurate as the amount of ticks per second may vary with CPU load, so it is only to be used to give an indication of time for profiling purposes uint64 GetProcessorTicksPerSecond(); JPH_NAMESPACE_END
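A sketch of the intended profiling-only use, converting a cycle delta to an approximate duration:

#include <Jolt/Jolt.h>
#include <Jolt/Core/TickCounter.h>

using namespace JPH;

double MeasureSeconds()
{
    uint64 start = GetProcessorTickCount();
    // ... code under measurement ...
    uint64 stop = GetProcessorTickCount();

    // Indicative only: the tick rate may vary with CPU load
    return double(stop - start) / double(GetProcessorTicksPerSecond());
}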
0
repos/c2z/use_cases/JoltPhysics/include
repos/c2z/use_cases/JoltPhysics/include/Core/Color.h
// Jolt Physics Library (https://github.com/jrouwe/JoltPhysics)
// SPDX-FileCopyrightText: 2021 Jorrit Rouwe
// SPDX-License-Identifier: MIT

#pragma once

JPH_NAMESPACE_BEGIN

class Color;

/// Type to use for passing arguments to a function
using ColorArg = Color;

/// Class that holds an RGBA color with 8-bits per component
class [[nodiscard]] Color
{
public:
    /// Constructors
    Color() = default; ///< Intentionally not initialized for performance reasons
    Color(const Color &inRHS) = default;
    explicit constexpr Color(uint32 inColor) : mU32(inColor) { }
    constexpr Color(uint8 inRed, uint8 inGreen, uint8 inBlue, uint8 inAlpha = 255) : r(inRed), g(inGreen), b(inBlue), a(inAlpha) { }
    constexpr Color(ColorArg inRHS, uint8 inAlpha) : r(inRHS.r), g(inRHS.g), b(inRHS.b), a(inAlpha) { }

    /// Comparison
    inline bool operator == (ColorArg inRHS) const { return mU32 == inRHS.mU32; }
    inline bool operator != (ColorArg inRHS) const { return mU32 != inRHS.mU32; }

    /// Convert to uint32
    uint32 GetUInt32() const { return mU32; }

    /// Element access, 0 = red, 1 = green, 2 = blue, 3 = alpha
    inline uint8 operator () (uint inIdx) const { JPH_ASSERT(inIdx < 4); return (&r)[inIdx]; }
    inline uint8 & operator () (uint inIdx) { JPH_ASSERT(inIdx < 4); return (&r)[inIdx]; }

    /// Convert to Vec4 with range [0, 1]
    inline Vec4 ToVec4() const { return Vec4(r, g, b, a) / 255.0f; }

    /// Get grayscale intensity of color
    inline uint8 GetIntensity() const { return uint8((uint32(r) * 54 + g * 183 + b * 19) >> 8); }

    /// Get a visually distinct color
    static Color sGetDistinctColor(int inIndex);

    /// Predefined colors
    static const Color sBlack;
    static const Color sDarkRed;
    static const Color sRed;
    static const Color sDarkGreen;
    static const Color sGreen;
    static const Color sDarkBlue;
    static const Color sBlue;
    static const Color sYellow;
    static const Color sPurple;
    static const Color sCyan;
    static const Color sOrange;
    static const Color sDarkOrange;
    static const Color sGrey;
    static const Color sLightGrey;
    static const Color sWhite;

    union
    {
        uint32 mU32; ///< Combined value for red, green, blue and alpha
        struct
        {
            uint8 r; ///< Red channel
            uint8 g; ///< Green channel
            uint8 b; ///< Blue channel
            uint8 a; ///< Alpha channel
        };
    };
};

static_assert(is_trivial<Color>(), "Is supposed to be a trivial type!");

JPH_NAMESPACE_END
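// Usage sketch (illustrative, not part of the original header; values chosen
// arbitrarily):
//
//     Color c(255, 128, 0);               // opaque orange, alpha defaults to 255
//     uint8 i = c.GetIntensity();         // (255 * 54 + 128 * 183 + 0 * 19) >> 8 = 145
//     Vec4 v = c.ToVec4();                // components scaled to [0, 1]
//     JPH_ASSERT(c != Color::sBlack);     // colors compare by their packed uint32 value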
0
repos/c2z/use_cases/JoltPhysics/include
repos/c2z/use_cases/JoltPhysics/include/Core/LockFreeHashMap.inl
// Jolt Physics Library (https://github.com/jrouwe/JoltPhysics)
// SPDX-FileCopyrightText: 2021 Jorrit Rouwe
// SPDX-License-Identifier: MIT

#pragma once

JPH_NAMESPACE_BEGIN

///////////////////////////////////////////////////////////////////////////////////
// LFHMAllocator
///////////////////////////////////////////////////////////////////////////////////

inline LFHMAllocator::~LFHMAllocator()
{
    AlignedFree(mObjectStore);
}

inline void LFHMAllocator::Init(uint inObjectStoreSizeBytes)
{
    JPH_ASSERT(mObjectStore == nullptr);

    mObjectStoreSizeBytes = inObjectStoreSizeBytes;
    mObjectStore = reinterpret_cast<uint8 *>(JPH::AlignedAllocate(inObjectStoreSizeBytes, 16));
}

inline void LFHMAllocator::Clear()
{
    mWriteOffset = 0;
}

inline void LFHMAllocator::Allocate(uint32 inBlockSize, uint32 &ioBegin, uint32 &ioEnd)
{
    // If we're already beyond the end of our buffer then don't do an atomic add.
    // It's possible that many keys are inserted after the allocator is full, making it possible
    // for mWriteOffset (uint32) to wrap around to zero. When this happens, memory would be corrupted.
    // This way, we will be able to progress the write offset beyond the size of the buffer
    // worst case by max <CPU count> * inBlockSize.
    if (mWriteOffset.load(memory_order_relaxed) >= mObjectStoreSizeBytes)
        return;

    // Atomically fetch a block from the pool
    uint32 begin = mWriteOffset.fetch_add(inBlockSize, memory_order_relaxed);
    uint32 end = min(begin + inBlockSize, mObjectStoreSizeBytes);

    if (ioEnd == begin)
    {
        // Block is allocated straight after our previous block
        begin = ioBegin;
    }
    else
    {
        // Block is a new block
        begin = min(begin, mObjectStoreSizeBytes);
    }

    // Store the begin and end of the resulting block
    ioBegin = begin;
    ioEnd = end;
}

template <class T>
inline uint32 LFHMAllocator::ToOffset(const T *inData) const
{
    const uint8 *data = reinterpret_cast<const uint8 *>(inData);
    JPH_ASSERT(data >= mObjectStore && data < mObjectStore + mObjectStoreSizeBytes);
    return uint32(data - mObjectStore);
}

template <class T>
inline T *LFHMAllocator::FromOffset(uint32 inOffset) const
{
    JPH_ASSERT(inOffset < mObjectStoreSizeBytes);
    return reinterpret_cast<T *>(mObjectStore + inOffset);
}

///////////////////////////////////////////////////////////////////////////////////
// LFHMAllocatorContext
///////////////////////////////////////////////////////////////////////////////////

inline LFHMAllocatorContext::LFHMAllocatorContext(LFHMAllocator &inAllocator, uint32 inBlockSize) :
    mAllocator(inAllocator),
    mBlockSize(inBlockSize)
{
}

inline bool LFHMAllocatorContext::Allocate(uint32 inSize, uint32 inAlignment, uint32 &outWriteOffset)
{
    // Calculate needed bytes for alignment
    JPH_ASSERT(IsPowerOf2(inAlignment));
    uint32 alignment_mask = inAlignment - 1;
    uint32 alignment = (inAlignment - (mBegin & alignment_mask)) & alignment_mask;

    // Check if we have space
    if (mEnd - mBegin < inSize + alignment)
    {
        // Allocate a new block
        mAllocator.Allocate(mBlockSize, mBegin, mEnd);

        // Update alignment
        alignment = (inAlignment - (mBegin & alignment_mask)) & alignment_mask;

        // Check if we have space again
        if (mEnd - mBegin < inSize + alignment)
            return false;
    }

    // Make the allocation
    mBegin += alignment;
    outWriteOffset = mBegin;
    mBegin += inSize;
    return true;
}

///////////////////////////////////////////////////////////////////////////////////
// LockFreeHashMap
///////////////////////////////////////////////////////////////////////////////////

template <class Key, class Value>
void LockFreeHashMap<Key, Value>::Init(uint32 inMaxBuckets)
{
    JPH_ASSERT(inMaxBuckets >= 4 && IsPowerOf2(inMaxBuckets));
    JPH_ASSERT(mBuckets == nullptr);

    mNumBuckets = inMaxBuckets;
    mMaxBuckets = inMaxBuckets;

    mBuckets = reinterpret_cast<atomic<uint32> *>(AlignedAllocate(inMaxBuckets * sizeof(atomic<uint32>), 16));

    Clear();
}

template <class Key, class Value>
LockFreeHashMap<Key, Value>::~LockFreeHashMap()
{
    AlignedFree(mBuckets);
}

template <class Key, class Value>
void LockFreeHashMap<Key, Value>::Clear()
{
#ifdef JPH_ENABLE_ASSERTS
    // Reset number of key value pairs
    mNumKeyValues = 0;
#endif // JPH_ENABLE_ASSERTS

    // Reset buckets 4 at a time
    static_assert(sizeof(atomic<uint32>) == sizeof(uint32));
    UVec4 invalid_handle = UVec4::sReplicate(cInvalidHandle);
    uint32 *start = reinterpret_cast<uint32 *>(mBuckets);
    const uint32 *end = start + mNumBuckets;
    JPH_ASSERT(IsAligned(start, 16));
    while (start < end)
    {
        invalid_handle.StoreInt4Aligned(start);
        start += 4;
    }
}

template <class Key, class Value>
void LockFreeHashMap<Key, Value>::SetNumBuckets(uint32 inNumBuckets)
{
    JPH_ASSERT(mNumKeyValues == 0);
    JPH_ASSERT(inNumBuckets <= mMaxBuckets);
    JPH_ASSERT(inNumBuckets >= 4 && IsPowerOf2(inNumBuckets));

    mNumBuckets = inNumBuckets;
}

template <class Key, class Value>
template <class... Params>
inline typename LockFreeHashMap<Key, Value>::KeyValue *LockFreeHashMap<Key, Value>::Create(LFHMAllocatorContext &ioContext, const Key &inKey, uint64 inKeyHash, int inExtraBytes, Params &&... inConstructorParams)
{
    // This is not a multi map, check that the key hasn't been inserted yet
    JPH_ASSERT(Find(inKey, inKeyHash) == nullptr);

    // Calculate total size
    uint size = sizeof(KeyValue) + inExtraBytes;

    // Get the write offset for this key value pair
    uint32 write_offset;
    if (!ioContext.Allocate(size, alignof(KeyValue), write_offset))
        return nullptr;

#ifdef JPH_ENABLE_ASSERTS
    // Increment the number of entries in the map
    mNumKeyValues.fetch_add(1, memory_order_relaxed);
#endif // JPH_ENABLE_ASSERTS

    // Construct the key/value pair
    KeyValue *kv = mAllocator.template FromOffset<KeyValue>(write_offset);
    JPH_ASSERT(intptr_t(kv) % alignof(KeyValue) == 0);
#ifdef _DEBUG
    memset(kv, 0xcd, size);
#endif
    kv->mKey = inKey;
    new (&kv->mValue) Value(std::forward<Params>(inConstructorParams)...);

    // Get the offset to the first object from the bucket with corresponding hash
    atomic<uint32> &offset = mBuckets[inKeyHash & (mNumBuckets - 1)];

    // Add this entry as the first element in the linked list
    uint32 old_offset = offset.load(memory_order_relaxed);
    for (;;)
    {
        kv->mNextOffset = old_offset;
        if (offset.compare_exchange_weak(old_offset, write_offset, memory_order_release))
            break;
    }

    return kv;
}

template <class Key, class Value>
inline const typename LockFreeHashMap<Key, Value>::KeyValue *LockFreeHashMap<Key, Value>::Find(const Key &inKey, uint64 inKeyHash) const
{
    // Get the offset to the keyvalue object from the bucket with corresponding hash
    uint32 offset = mBuckets[inKeyHash & (mNumBuckets - 1)].load(memory_order_acquire);
    while (offset != cInvalidHandle)
    {
        // Loop through linked list of values until the right one is found
        const KeyValue *kv = mAllocator.template FromOffset<const KeyValue>(offset);
        if (kv->mKey == inKey)
            return kv;
        offset = kv->mNextOffset;
    }

    // Not found
    return nullptr;
}

template <class Key, class Value>
inline uint32 LockFreeHashMap<Key, Value>::ToHandle(const KeyValue *inKeyValue) const
{
    return mAllocator.ToOffset(inKeyValue);
}

template <class Key, class Value>
inline const typename LockFreeHashMap<Key, Value>::KeyValue *LockFreeHashMap<Key, Value>::FromHandle(uint32 inHandle) const
{
    return mAllocator.template FromOffset<const KeyValue>(inHandle);
}

template <class Key, class Value>
inline void LockFreeHashMap<Key, Value>::GetAllKeyValues(Array<const KeyValue *> &outAll) const
{
    for (const atomic<uint32> *bucket = mBuckets; bucket < mBuckets + mNumBuckets; ++bucket)
    {
        uint32 offset = *bucket;
        while (offset != cInvalidHandle)
        {
            const KeyValue *kv = mAllocator.template FromOffset<const KeyValue>(offset);
            outAll.push_back(kv);
            offset = kv->mNextOffset;
        }
    }
}

template <class Key, class Value>
typename LockFreeHashMap<Key, Value>::Iterator LockFreeHashMap<Key, Value>::begin()
{
    // Start with the first bucket
    Iterator it { this, 0, mBuckets[0] };

    // If it doesn't contain a valid entry, use the ++ operator to find the first valid entry
    if (it.mOffset == cInvalidHandle)
        ++it;

    return it;
}

template <class Key, class Value>
typename LockFreeHashMap<Key, Value>::Iterator LockFreeHashMap<Key, Value>::end()
{
    return { this, mNumBuckets, cInvalidHandle };
}

template <class Key, class Value>
typename LockFreeHashMap<Key, Value>::KeyValue &LockFreeHashMap<Key, Value>::Iterator::operator* ()
{
    JPH_ASSERT(mOffset != cInvalidHandle);

    return *mMap->mAllocator.template FromOffset<KeyValue>(mOffset);
}

template <class Key, class Value>
typename LockFreeHashMap<Key, Value>::Iterator &LockFreeHashMap<Key, Value>::Iterator::operator++ ()
{
    JPH_ASSERT(mBucket < mMap->mNumBuckets);

    // Find the next key value in this bucket
    if (mOffset != cInvalidHandle)
    {
        const KeyValue *kv = mMap->mAllocator.template FromOffset<const KeyValue>(mOffset);
        mOffset = kv->mNextOffset;
        if (mOffset != cInvalidHandle)
            return *this;
    }

    // Loop over next buckets
    for (;;)
    {
        // Next bucket
        ++mBucket;
        if (mBucket >= mMap->mNumBuckets)
            return *this;

        // Fetch the first entry in the bucket
        mOffset = mMap->mBuckets[mBucket];
        if (mOffset != cInvalidHandle)
            return *this;
    }
}

#ifdef _DEBUG

template <class Key, class Value>
void LockFreeHashMap<Key, Value>::TraceStats() const
{
    const int cMaxPerBucket = 256;

    int max_objects_per_bucket = 0;
    int num_objects = 0;
    int histogram[cMaxPerBucket];
    for (int i = 0; i < cMaxPerBucket; ++i)
        histogram[i] = 0;

    for (atomic<uint32> *bucket = mBuckets, *bucket_end = mBuckets + mNumBuckets; bucket < bucket_end; ++bucket)
    {
        int objects_in_bucket = 0;
        uint32 offset = *bucket;
        while (offset != cInvalidHandle)
        {
            const KeyValue *kv = mAllocator.template FromOffset<const KeyValue>(offset);
            offset = kv->mNextOffset;
            ++objects_in_bucket;
            ++num_objects;
        }
        max_objects_per_bucket = max(objects_in_bucket, max_objects_per_bucket);
        histogram[min(objects_in_bucket, cMaxPerBucket - 1)]++;
    }

    Trace("max_objects_per_bucket = %d, num_buckets = %d, num_objects = %d", max_objects_per_bucket, mNumBuckets, num_objects);

    for (int i = 0; i < cMaxPerBucket; ++i)
        if (histogram[i] != 0)
            Trace("%d: %d", i, histogram[i]);
}

#endif

JPH_NAMESPACE_END
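// Usage sketch (single-threaded and illustrative, not part of the original file;
// it assumes LockFreeHashMap is constructed from an LFHMAllocator and that the
// caller supplies the 64-bit key hash -- HashMyKey() below is hypothetical; in
// multi-threaded use each thread would own its own LFHMAllocatorContext):
//
//     LFHMAllocator allocator;
//     allocator.Init(64 * 1024);                     // backing store for key/value pairs
//     LockFreeHashMap<uint32, float> map(allocator);
//     map.Init(256);                                 // power-of-2 bucket count
//
//     LFHMAllocatorContext ctx(allocator, 4096);     // per-thread block context
//     uint64 hash = HashMyKey(42u);
//     map.Create(ctx, 42u, hash, 0, 3.0f);           // insert 42 -> 3.0f, no extra bytes
//     const auto *kv = map.Find(42u, hash);          // lock-free lookup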
0
repos/c2z/use_cases/JoltPhysics/include
repos/c2z/use_cases/JoltPhysics/include/Core/FPFlushDenormals.h
// Jolt Physics Library (https://github.com/jrouwe/JoltPhysics)
// SPDX-FileCopyrightText: 2021 Jorrit Rouwe
// SPDX-License-Identifier: MIT

#pragma once

#include <Jolt/Core/FPControlWord.h>

JPH_NAMESPACE_BEGIN

#if defined(JPH_USE_SSE)

/// Helper class that needs to be put on the stack to enable flushing denormals to zero
/// This can make floating point operations much faster when working with very small numbers
class FPFlushDenormals : public FPControlWord<_MM_FLUSH_ZERO_ON, _MM_FLUSH_ZERO_MASK> { };

#elif defined(JPH_CPU_ARM) && defined(JPH_COMPILER_MSVC)

class FPFlushDenormals : public FPControlWord<_DN_FLUSH, _MCW_DN> { };

#elif defined(JPH_CPU_ARM)

/// Flush denormals to zero bit
static constexpr uint64 FP_FZ = 1 << 24;

/// Helper class that needs to be put on the stack to enable flushing denormals to zero
/// This can make floating point operations much faster when working with very small numbers
class FPFlushDenormals : public FPControlWord<FP_FZ, FP_FZ> { };

#elif defined(JPH_CPU_WASM)

// Not supported
class FPFlushDenormals { };

#else

#error Unsupported CPU architecture

#endif

JPH_NAMESPACE_END
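// Usage sketch (illustrative, not part of the original header;
// SimulateSmallForces() is a hypothetical workload):
//
//     {
//         FPFlushDenormals flush_denormals;
//         SimulateSmallForces(); // tiny values flush to zero instead of stalling the FPU
//     } // previous floating point state is restored here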
0
repos/c2z/use_cases/JoltPhysics/include
repos/c2z/use_cases/JoltPhysics/include/Core/JobSystem.h
// Jolt Physics Library (https://github.com/jrouwe/JoltPhysics)
// SPDX-FileCopyrightText: 2021 Jorrit Rouwe
// SPDX-License-Identifier: MIT

#pragma once

#include <Jolt/Core/Reference.h>
#include <Jolt/Core/Color.h>
#include <Jolt/Core/Profiler.h>
#include <Jolt/Core/NonCopyable.h>
#include <Jolt/Core/StaticArray.h>
#include <Jolt/Core/Atomics.h>

JPH_NAMESPACE_BEGIN

/// A class that allows units of work (Jobs) to be scheduled across multiple threads.
/// It allows dependencies between the jobs so that the jobs form a graph.
///
/// The pattern for using this class is:
///
///     // Create job system
///     JobSystem *job_system = new JobSystemThreadPool(...);
///
///     // Create some jobs
///     JobHandle second_job = job_system->CreateJob("SecondJob", Color::sRed, []() { ... }, 1); // Create a job with 1 dependency
///     JobHandle first_job = job_system->CreateJob("FirstJob", Color::sGreen, [second_job]() { ....; second_job.RemoveDependency(); }, 0); // Job can start immediately, will start second job when it's done
///     JobHandle third_job = job_system->CreateJob("ThirdJob", Color::sBlue, []() { ... }, 0); // This job can run immediately as well and can run in parallel to job 1 and 2
///
///     // Add the jobs to the barrier so that we can execute them while we're waiting
///     Barrier *barrier = job_system->CreateBarrier();
///     barrier->AddJob(first_job);
///     barrier->AddJob(second_job);
///     barrier->AddJob(third_job);
///     job_system->WaitForJobs(barrier);
///
///     // Clean up
///     job_system->DestroyBarrier(barrier);
///     delete job_system;
///
/// Jobs are guaranteed to be started in the order that their dependency counter becomes zero (in case they're scheduled on a background thread)
/// or in the order they're added to the barrier (when dependency count is zero and when executing on the thread that calls WaitForJobs).
///
/// If you want to implement your own job system, inherit from JobSystem and implement:
///
/// * JobSystem::GetMaxConcurrency - This should return the maximum number of jobs that can run in parallel.
/// * JobSystem::CreateJob - This should create a Job object and return it to the caller.
/// * JobSystem::FreeJob - This should free the memory associated with the job object. It is called by the Job destructor when it is Release()-ed for the last time.
/// * JobSystem::QueueJob/QueueJobs - These should store the job pointer in an internal queue to run immediately (dependencies are tracked internally, this function is called when the job can run).
/// The Job objects are reference counted and are guaranteed to stay alive during the QueueJob(s) call. If you store the job in your own data structure you need to call AddRef() to take a reference.
/// After the job has been executed you need to call Release() to release the reference. Make sure you no longer dereference the job pointer after calling Release().
///
/// JobSystem::Barrier is used to track the completion of a set of jobs. Jobs will be created by other jobs and added to the barrier while it is being waited on. This means that you cannot
/// create a dependency graph beforehand as the graph changes while jobs are running. Implement the following functions:
///
/// * Barrier::AddJob/AddJobs - Add a job to the barrier, any call to WaitForJobs will now also wait for this job to complete.
/// If you store the job in a data structure in the Barrier you need to call AddRef() on the job to keep it alive and Release() after you're done with it.
/// * Barrier::OnJobFinished - This function is called when a job has finished executing, you can use this to track completion and remove the job from the list of jobs to wait on.
///
/// The functions on JobSystem that need to be implemented to support barriers are:
///
/// * JobSystem::CreateBarrier - Create a new barrier.
/// * JobSystem::DestroyBarrier - Destroy a barrier.
/// * JobSystem::WaitForJobs - This is the main function that is used to wait for all jobs that have been added to a Barrier. WaitForJobs can execute jobs that have
/// been added to the barrier while waiting. It is not wise to execute other jobs that touch physics structures as this can cause race conditions and deadlocks. Please keep in mind that the barrier is
/// only intended to wait on the completion of the Jolt jobs added to it, if you scheduled any jobs in your engine's job system to execute the Jolt jobs as part of QueueJob/QueueJobs, you might still need
/// to wait for these in this function after the barrier is finished waiting.
///
/// An example implementation is JobSystemThreadPool. If you don't want to write the Barrier class you can also inherit from JobSystemWithBarrier.
class JobSystem : public NonCopyable
{
protected:
    class Job;

public:
    JPH_OVERRIDE_NEW_DELETE

    /// A job handle contains a reference to a job. The job will be deleted as soon as there are no JobHandles
    /// referring to the job and when it is not in the job queue / being processed.
    class JobHandle : private Ref<Job>
    {
    public:
        /// Constructor
        inline JobHandle() = default;
        inline JobHandle(const JobHandle &inHandle) = default;
        inline JobHandle(JobHandle &&inHandle) noexcept : Ref<Job>(std::move(inHandle)) { }

        /// Constructor, only to be used by JobSystem
        inline explicit JobHandle(Job *inJob) : Ref<Job>(inJob) { }

        /// Assignment
        inline JobHandle & operator = (const JobHandle &inHandle) = default;
        inline JobHandle & operator = (JobHandle &&inHandle) noexcept = default;

        /// Check if this handle contains a job
        inline bool IsValid() const { return GetPtr() != nullptr; }

        /// Check if this job has finished executing
        inline bool IsDone() const { return GetPtr() != nullptr && GetPtr()->IsDone(); }

        /// Add to the dependency counter.
        inline void AddDependency(int inCount = 1) const { GetPtr()->AddDependency(inCount); }

        /// Remove from the dependency counter. Job will start whenever the dependency counter reaches zero
        /// and if it does it is no longer valid to call the AddDependency/RemoveDependency functions.
        inline void RemoveDependency(int inCount = 1) const { GetPtr()->RemoveDependencyAndQueue(inCount); }

        /// Remove a dependency from a batch of jobs at once, this can be more efficient than removing them one by one as it requires less locking
        static inline void sRemoveDependencies(JobHandle *inHandles, uint inNumHandles, int inCount = 1);

        /// Helper function to remove dependencies on a static array of job handles
        template <uint N>
        static inline void sRemoveDependencies(StaticArray<JobHandle, N> &inHandles, int inCount = 1)
        {
            sRemoveDependencies(inHandles.data(), inHandles.size(), inCount);
        }

        /// Inherit the GetPtr function, only to be used by the JobSystem
        using Ref<Job>::GetPtr;
    };

    /// A job barrier keeps track of a number of jobs and allows waiting until they are all completed.
    class Barrier : public NonCopyable
    {
    public:
        JPH_OVERRIDE_NEW_DELETE

        /// Add a job to this barrier
        /// Note that jobs can keep being added to the barrier while waiting for the barrier
        virtual void AddJob(const JobHandle &inJob) = 0;

        /// Add multiple jobs to this barrier
        /// Note that jobs can keep being added to the barrier while waiting for the barrier
        virtual void AddJobs(const JobHandle *inHandles, uint inNumHandles) = 0;

    protected:
        /// Job needs to be able to call OnJobFinished
        friend class Job;

        /// Destructor, you should call JobSystem::DestroyBarrier instead of destructing this object directly
        virtual ~Barrier() = default;

        /// Called by a Job to mark that it is finished
        virtual void OnJobFinished(Job *inJob) = 0;
    };

    /// Main function of the job
    using JobFunction = function<void()>;

    /// Destructor
    virtual ~JobSystem() = default;

    /// Get maximum number of concurrently executing jobs
    virtual int GetMaxConcurrency() const = 0;

    /// Create a new job, the job is started immediately if inNumDependencies == 0 otherwise it starts when
    /// RemoveDependency causes the dependency counter to reach 0.
    virtual JobHandle CreateJob(const char *inName, ColorArg inColor, const JobFunction &inJobFunction, uint32 inNumDependencies = 0) = 0;

    /// Create a new barrier, used to wait on jobs
    virtual Barrier * CreateBarrier() = 0;

    /// Destroy a barrier when it is no longer used. The barrier should be empty at this point.
    virtual void DestroyBarrier(Barrier *inBarrier) = 0;

    /// Wait for a set of jobs to be finished, note that only 1 thread can be waiting on a barrier at a time
    virtual void WaitForJobs(Barrier *inBarrier) = 0;

protected:
    /// A class that contains information for a single unit of work
    class Job
    {
    public:
        JPH_OVERRIDE_NEW_DELETE

        /// Constructor
        Job([[maybe_unused]] const char *inJobName, [[maybe_unused]] ColorArg inColor, JobSystem *inJobSystem, const JobFunction &inJobFunction, uint32 inNumDependencies) :
#if defined(JPH_EXTERNAL_PROFILE) || defined(JPH_PROFILE_ENABLED)
            mJobName(inJobName),
            mColor(inColor),
#endif // defined(JPH_EXTERNAL_PROFILE) || defined(JPH_PROFILE_ENABLED)
            mJobSystem(inJobSystem),
            mJobFunction(inJobFunction),
            mNumDependencies(inNumDependencies)
        {
        }

        /// Get the job system to which this job belongs
        inline JobSystem * GetJobSystem() { return mJobSystem; }

        /// Add or release a reference to this object
        inline void AddRef()
        {
            // Adding a reference can use relaxed memory ordering
            mReferenceCount.fetch_add(1, memory_order_relaxed);
        }
        inline void Release()
        {
            // Releasing a reference must use release semantics...
            if (mReferenceCount.fetch_sub(1, memory_order_release) == 1)
            {
                // ... so that we can use acquire to ensure that we see any updates from other threads that released a ref before freeing the job
                atomic_thread_fence(memory_order_acquire);
                mJobSystem->FreeJob(this);
            }
        }

        /// Add to the dependency counter.
        inline void AddDependency(int inCount);

        /// Remove from the dependency counter. Returns true whenever the dependency counter reaches zero
        /// and if it does it is no longer valid to call the AddDependency/RemoveDependency functions.
        inline bool RemoveDependency(int inCount);

        /// Remove from the dependency counter. Job will be queued whenever the dependency counter reaches zero
        /// and if it does it is no longer valid to call the AddDependency/RemoveDependency functions.
        inline void RemoveDependencyAndQueue(int inCount);

        /// Set the job barrier that this job belongs to and returns false if this was not possible because the job already finished
        inline bool SetBarrier(Barrier *inBarrier)
        {
            intptr_t barrier = 0;
            if (mBarrier.compare_exchange_strong(barrier, reinterpret_cast<intptr_t>(inBarrier), memory_order_relaxed))
                return true;
            JPH_ASSERT(barrier == cBarrierDoneState, "A job can only belong to 1 barrier");
            return false;
        }

        /// Run the job function, returns the number of dependencies that this job still has or cExecutingState or cDoneState
        inline uint32 Execute()
        {
            // Transition job to executing state
            uint32 state = 0; // We can only start running with a dependency counter of 0
            if (!mNumDependencies.compare_exchange_strong(state, cExecutingState, memory_order_acquire))
                return state; // state is updated by compare_exchange_strong to the current value

            // Run the job function
            {
                JPH_PROFILE(mJobName, mColor.GetUInt32());
                mJobFunction();
            }

            // Fetch the barrier pointer and exchange it for the done state, so we're sure that no barrier gets set after we want to call the callback
            intptr_t barrier = mBarrier.load(memory_order_relaxed);
            for (;;)
            {
                if (mBarrier.compare_exchange_weak(barrier, cBarrierDoneState, memory_order_relaxed))
                    break;
            }
            JPH_ASSERT(barrier != cBarrierDoneState);

            // Mark job as done
            state = cExecutingState;
            mNumDependencies.compare_exchange_strong(state, cDoneState, memory_order_relaxed);
            JPH_ASSERT(state == cExecutingState);

            // Notify the barrier after we've changed the job to the done state so that any thread reading the state after receiving the callback will see that the job has finished
            if (barrier != 0)
                reinterpret_cast<Barrier *>(barrier)->OnJobFinished(this);

            return cDoneState;
        }

        /// Test if the job can be executed
        inline bool CanBeExecuted() const { return mNumDependencies.load(memory_order_relaxed) == 0; }

        /// Test if the job finished executing
        inline bool IsDone() const { return mNumDependencies.load(memory_order_relaxed) == cDoneState; }

        static constexpr uint32 cExecutingState = 0xe0e0e0e0; ///< Value of mNumDependencies when job is executing
        static constexpr uint32 cDoneState = 0xd0d0d0d0; ///< Value of mNumDependencies when job is done executing

        static constexpr intptr_t cBarrierDoneState = ~intptr_t(0); ///< Value to use when the barrier has been triggered

    private:
#if defined(JPH_EXTERNAL_PROFILE) || defined(JPH_PROFILE_ENABLED)
        const char * mJobName; ///< Name of the job
        Color mColor; ///< Color of the job in the profiler
#endif // defined(JPH_EXTERNAL_PROFILE) || defined(JPH_PROFILE_ENABLED)
        JobSystem * mJobSystem; ///< The job system we belong to
        atomic<intptr_t> mBarrier = 0; ///< Barrier that this job is associated with (is a Barrier pointer)
        JobFunction mJobFunction; ///< Main job function
        atomic<uint32> mReferenceCount = 0; ///< Number of JobHandles pointing to this job
        atomic<uint32> mNumDependencies; ///< Number of jobs that need to complete before this job can run
    };

    /// Adds a job to the job queue
    virtual void QueueJob(Job *inJob) = 0;

    /// Adds a number of jobs at once to the job queue
    virtual void QueueJobs(Job **inJobs, uint inNumJobs) = 0;

    /// Frees a job
    virtual void FreeJob(Job *inJob) = 0;
};

using JobHandle = JobSystem::JobHandle;

JPH_NAMESPACE_END

#include "JobSystem.inl"
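// Usage sketch (illustrative, not part of the original header; builds on the
// pattern in the class comment above and assumes job_system points at a live
// JobSystem implementation):
//
//     StaticArray<JobHandle, 4> workers;
//     for (uint i = 0; i < 4; ++i)
//         workers.push_back(job_system->CreateJob("Worker", Color::sGrey, []() { /* work */ }, 1));
//
//     // Releasing the whole batch in one call can be cheaper than one by one
//     JobHandle::sRemoveDependencies(workers);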