file_path
stringlengths
20
202
content
stringlengths
9
3.85M
size
int64
9
3.85M
lang
stringclasses
9 values
avg_line_length
float64
3.33
100
max_line_length
int64
8
993
alphanum_fraction
float64
0.26
0.93
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/NvBlastExtAuthoring.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. 
#include "NvBlastExtAuthoring.h" #include "NvBlastTypes.h" #include "NvBlastIndexFns.h" #include "NvBlast.h" #include "NvBlastAssert.h" #include "NvBlastGlobals.h" #include "NvBlastExtAssetUtils.h" #include "NvBlastExtAuthoringPatternGeneratorImpl.h" #include "NvBlastExtAuthoringBooleanToolImpl.h" #include "NvBlastExtAuthoringAcceleratorImpl.h" #include "NvBlastExtAuthoringMeshImpl.h" #include "NvBlastExtAuthoringMeshCleanerImpl.h" #include "NvBlastExtAuthoringFractureToolImpl.h" #include "NvBlastExtAuthoringBondGeneratorImpl.h" #include "NvBlastExtAuthoringCollisionBuilderImpl.h" #include "NvBlastExtAuthoringCutoutImpl.h" #include "NvBlastExtAuthoringInternalCommon.h" #include "NvBlastNvSharedHelpers.h" #include <algorithm> #include <memory> using namespace Nv::Blast; using namespace nvidia; #define SAFE_ARRAY_NEW(T, x) ((x) > 0) ? reinterpret_cast<T*>(NVBLAST_ALLOC(sizeof(T) * (x))) : nullptr; #define SAFE_ARRAY_DELETE(x) if (x != nullptr) {NVBLAST_FREE(x); x = nullptr;} Mesh* NvBlastExtAuthoringCreateMesh(const NvcVec3* position, const NvcVec3* normals, const NvcVec2* uv, uint32_t verticesCount, const uint32_t* indices, uint32_t indicesCount) { return new MeshImpl(position, normals, uv, verticesCount, indices, indicesCount); } Mesh* NvBlastExtAuthoringCreateMeshOnlyTriangles(const void* Vertices, uint32_t vcount, uint32_t* indices, uint32_t indexCount, void* materials, uint32_t materialStride) { return new MeshImpl((Vertex*)Vertices, vcount, indices, indexCount, materials, materialStride); } Mesh* NvBlastExtAuthoringCreateMeshFromFacets(const void* vertices, const void* edges, const void* facets, uint32_t verticesCount, uint32_t edgesCount, uint32_t facetsCount) { return new MeshImpl((Vertex*)vertices, (Edge*)edges, (Facet*)facets, verticesCount, edgesCount, facetsCount); } MeshCleaner* NvBlastExtAuthoringCreateMeshCleaner() { return new MeshCleanerImpl; } VoronoiSitesGenerator* NvBlastExtAuthoringCreateVoronoiSitesGenerator(Mesh* mesh, RandomGeneratorBase* rng) 
{ return new VoronoiSitesGeneratorImpl(mesh, rng); } CutoutSet* NvBlastExtAuthoringCreateCutoutSet() { return new CutoutSetImpl(); } void NvBlastExtAuthoringBuildCutoutSet(CutoutSet& cutoutSet, const uint8_t* pixelBuffer, uint32_t bufferWidth, uint32_t bufferHeight, float segmentationErrorThreshold, float snapThreshold, bool periodic, bool expandGaps) { ::createCutoutSet(*(CutoutSetImpl*)&cutoutSet, pixelBuffer, bufferWidth, bufferHeight, segmentationErrorThreshold, snapThreshold, periodic, expandGaps); } FractureTool* NvBlastExtAuthoringCreateFractureTool() { return new FractureToolImpl; } BlastBondGenerator* NvBlastExtAuthoringCreateBondGenerator(Nv::Blast::ConvexMeshBuilder* builder) { return new BlastBondGeneratorImpl(builder); } int32_t NvBlastExtAuthoringBuildMeshConvexDecomposition(ConvexMeshBuilder* cmb, const Nv::Blast::Triangle* mesh, uint32_t triangleCount, const ConvexDecompositionParams& params, CollisionHull**& convexes) { NVBLAST_ASSERT(cmb != nullptr); return buildMeshConvexDecomposition(*cmb, mesh, triangleCount, params, convexes); } void NvBlastExtAuthoringTrimCollisionGeometry(ConvexMeshBuilder* cmb, uint32_t chunksCount, Nv::Blast::CollisionHull** in, const uint32_t* chunkDepth) { return trimCollisionGeometry(*cmb, chunksCount, in, chunkDepth); } void NvBlastExtAuthoringTransformCollisionHullInPlace(CollisionHull* hull, const NvcVec3* scaling, const NvcQuat* rotation, const NvcVec3* translation) { // Local copies of scaling (S), rotation (R), and translation (T) nvidia::NvVec3 S = { 1, 1, 1 }; nvidia::NvQuat R = { 0, 0, 0, 1 }; nvidia::NvVec3 T = { 0, 0, 0 }; nvidia::NvVec3 cofS = { 1, 1, 1 }; float sgnDetS = 1; { if (rotation) { R = *toNvShared(rotation); } if (scaling) { S = *toNvShared(scaling); cofS.x = S.y * S.z; cofS.y = S.z * S.x; cofS.z = S.x * S.y; sgnDetS = (S.x * S.y * S.z < 0) ? 
-1 : 1; } if (translation) { T = *toNvShared(translation); } } const uint32_t pointCount = hull->pointsCount; for (uint32_t pi = 0; pi < pointCount; pi++) { nvidia::NvVec3& p = toNvShared(hull->points[pi]); p = (R.rotate(p.multiply(S)) + T); } const uint32_t planeCount = hull->polygonDataCount; for (uint32_t pi = 0; pi < planeCount; pi++) { float* plane = hull->polygonData[pi].plane; nvidia::NvPlane nvPlane(plane[0], plane[1], plane[2], plane[3]); NvVec3 transformedNormal = sgnDetS*R.rotate(nvPlane.n.multiply(cofS)).getNormalized(); NvVec3 transformedPt = R.rotate(nvPlane.pointInPlane().multiply(S)) + T; nvidia::NvPlane transformedPlane(transformedPt, transformedNormal); plane[0] = transformedPlane.n[0]; plane[1] = transformedPlane.n[1]; plane[2] = transformedPlane.n[2]; plane[3] = transformedPlane.d; } } CollisionHull* NvBlastExtAuthoringTransformCollisionHull(const CollisionHull* hull, const NvcVec3* scaling, const NvcQuat* rotation, const NvcVec3* translation) { CollisionHull* ret = new CollisionHull(*hull); ret->points = SAFE_ARRAY_NEW(NvcVec3, ret->pointsCount); ret->indices = SAFE_ARRAY_NEW(uint32_t, ret->indicesCount); ret->polygonData = SAFE_ARRAY_NEW(HullPolygon, ret->polygonDataCount); memcpy(ret->points, hull->points, sizeof(ret->points[0]) * ret->pointsCount); memcpy(ret->indices, hull->indices, sizeof(ret->indices[0]) * ret->indicesCount); memcpy(ret->polygonData, hull->polygonData, sizeof(ret->polygonData[0]) * ret->polygonDataCount); NvBlastExtAuthoringTransformCollisionHullInPlace(ret, scaling, rotation, translation); return ret; } void buildPhysicsChunks(ConvexMeshBuilder& collisionBuilder, AuthoringResult& result, const ConvexDecompositionParams& params, uint32_t chunksToProcessCount = 0, uint32_t* chunksToProcess = nullptr) { uint32_t chunkCount = (uint32_t)result.chunkCount; if (params.maximumNumberOfHulls == 1) { result.collisionHullOffset = SAFE_ARRAY_NEW(uint32_t, chunkCount + 1); result.collisionHullOffset[0] = 0; result.collisionHull = 
SAFE_ARRAY_NEW(CollisionHull*, chunkCount); for (uint32_t i = 0; i < chunkCount; ++i) { std::vector<NvcVec3> vertices; for (uint32_t p = result.geometryOffset[i]; p < result.geometryOffset[i + 1]; ++p) { Nv::Blast::Triangle& tri = result.geometry[p]; vertices.push_back(tri.a.p); vertices.push_back(tri.b.p); vertices.push_back(tri.c.p); } result.collisionHullOffset[i + 1] = result.collisionHullOffset[i] + 1; result.collisionHull[i] = collisionBuilder.buildCollisionGeometry((uint32_t)vertices.size(), vertices.data()); } } else { std::set<int32_t> chunkSet; for (uint32_t c = 0; c < chunksToProcessCount; c++) { chunkSet.insert(chunksToProcess[c]); } std::vector<std::vector<CollisionHull*> > hulls(chunkCount); int32_t totalHulls = 0; for (uint32_t i = 0; i < chunkCount; ++i) { if (chunkSet.size() > 0 && chunkSet.find(i) == chunkSet.end()) { int32_t newHulls = result.collisionHullOffset[i + 1] - result.collisionHullOffset[i]; int32_t off = result.collisionHullOffset[i]; for (int32_t subhull = 0; subhull < newHulls; ++subhull) { hulls[i].push_back(result.collisionHull[off + subhull]); } totalHulls += newHulls; continue; } CollisionHull** tempHull; int32_t newHulls = buildMeshConvexDecomposition(collisionBuilder, result.geometry + result.geometryOffset[i], result.geometryOffset[i + 1] - result.geometryOffset[i], params, tempHull); totalHulls += newHulls; for (int32_t h = 0; h < newHulls; ++h) { hulls[i].push_back(tempHull[h]); } SAFE_ARRAY_DELETE(tempHull); } result.collisionHullOffset = SAFE_ARRAY_NEW(uint32_t, chunkCount + 1); result.collisionHullOffset[0] = 0; result.collisionHull = SAFE_ARRAY_NEW(CollisionHull*, totalHulls); for (uint32_t i = 0; i < chunkCount; ++i) { result.collisionHullOffset[i + 1] = result.collisionHullOffset[i] + hulls[i].size(); int32_t off = result.collisionHullOffset[i]; for (uint32_t subhull = 0; subhull < hulls[i].size(); ++subhull) { result.collisionHull[off + subhull] = hulls[i][subhull]; } } } } void 
NvBlastExtAuthoringReleaseAuthoringResultCollision(Nv::Blast::ConvexMeshBuilder& collisionBuilder, Nv::Blast::AuthoringResult* ar) { if (ar->collisionHull != nullptr) { for (uint32_t ch = 0; ch < ar->collisionHullOffset[ar->chunkCount]; ch++) { collisionBuilder.releaseCollisionHull(ar->collisionHull[ch]); } SAFE_ARRAY_DELETE(ar->collisionHullOffset); SAFE_ARRAY_DELETE(ar->collisionHull); } } void NvBlastExtAuthoringReleaseAuthoringResult(Nv::Blast::ConvexMeshBuilder& collisionBuilder, Nv::Blast::AuthoringResult* ar) { NvBlastExtAuthoringReleaseAuthoringResultCollision(collisionBuilder, ar); if (ar->asset) { NVBLAST_FREE(ar->asset); ar->asset = nullptr; } SAFE_ARRAY_DELETE(ar->assetToFractureChunkIdMap); SAFE_ARRAY_DELETE(ar->geometryOffset); SAFE_ARRAY_DELETE(ar->geometry); SAFE_ARRAY_DELETE(ar->chunkDescs); SAFE_ARRAY_DELETE(ar->bondDescs); delete ar; } static float getGeometryVolumeAndCentroid(NvcVec3& centroid, const Nv::Blast::Triangle* tris, size_t triCount) { class GeometryQuery { public: GeometryQuery(const Nv::Blast::Triangle* tris, size_t triCount) : m_tris(tris), m_triCount(triCount) {} size_t faceCount() const { return m_triCount; } size_t vertexCount(size_t faceIndex) const { NV_UNUSED(faceIndex); return 3; } NvcVec3 vertex(size_t faceIndex, size_t vertexIndex) const { const Nv::Blast::Triangle& tri = m_tris[faceIndex]; switch (vertexIndex) { case 0: return tri.a.p; case 1: return tri.b.p; case 2: return tri.c.p; } return NvcVec3({0.0f, 0.0f, 0.0f}); } const Nv::Blast::Triangle* m_tris; size_t m_triCount; }; return calculateMeshVolumeAndCentroid<GeometryQuery>(centroid, {tris, triCount}); } AuthoringResult* NvBlastExtAuthoringProcessFracture(FractureTool& fTool, BlastBondGenerator& bondGenerator, ConvexMeshBuilder& collisionBuilder, const ConvexDecompositionParams& collisionParam, int32_t defaultSupportDepth) { fTool.finalizeFracturing(); const uint32_t chunkCount = fTool.getChunkCount(); if (chunkCount == 0) { return nullptr; } AuthoringResult* ret = 
new AuthoringResult; if (ret == nullptr) { return nullptr; } AuthoringResult& aResult = *ret; aResult.chunkCount = chunkCount; std::shared_ptr<bool> isSupport(new bool[chunkCount], [](bool* b) {delete[] b; }); memset(isSupport.get(), 0, sizeof(bool) * chunkCount); for (uint32_t i = 0; i < fTool.getChunkCount(); ++i) { if (defaultSupportDepth < 0 || fTool.getChunkDepth(fTool.getChunkId(i)) < defaultSupportDepth) { isSupport.get()[i] = fTool.getChunkInfo(i).isLeaf; } else if (fTool.getChunkDepth(fTool.getChunkId(i)) == defaultSupportDepth) { isSupport.get()[i] = true; } } const uint32_t bondCount = bondGenerator.buildDescFromInternalFracture(&fTool, isSupport.get(), aResult.bondDescs, aResult.chunkDescs); aResult.bondCount = bondCount; if (bondCount == 0) { aResult.bondDescs = nullptr; } // order chunks, build map std::vector<uint32_t> chunkReorderInvMap; { std::vector<uint32_t> chunkReorderMap(chunkCount); std::vector<char> scratch(chunkCount * sizeof(NvBlastChunkDesc)); NvBlastEnsureAssetExactSupportCoverage(aResult.chunkDescs, chunkCount, scratch.data(), logLL); NvBlastBuildAssetDescChunkReorderMap(chunkReorderMap.data(), aResult.chunkDescs, chunkCount, scratch.data(), logLL); NvBlastApplyAssetDescChunkReorderMapInPlace(aResult.chunkDescs, chunkCount, aResult.bondDescs, bondCount, chunkReorderMap.data(), true, scratch.data(), logLL); chunkReorderInvMap.resize(chunkReorderMap.size()); Nv::Blast::invertMap(chunkReorderInvMap.data(), chunkReorderMap.data(), static_cast<unsigned int>(chunkReorderMap.size())); } // get result geometry aResult.geometryOffset = SAFE_ARRAY_NEW(uint32_t, chunkCount + 1); aResult.assetToFractureChunkIdMap = SAFE_ARRAY_NEW(uint32_t, chunkCount + 1); aResult.geometryOffset[0] = 0; std::vector<Nv::Blast::Triangle*> chunkGeometry(chunkCount); for (uint32_t i = 0; i < chunkCount; ++i) { uint32_t chunkInfoIndex = chunkReorderInvMap[i]; aResult.geometryOffset[i+1] = aResult.geometryOffset[i] + fTool.getBaseMesh(chunkInfoIndex, chunkGeometry[i]); 
aResult.assetToFractureChunkIdMap[i] = fTool.getChunkId(chunkInfoIndex); } aResult.geometry = SAFE_ARRAY_NEW(Triangle, aResult.geometryOffset[chunkCount]); for (uint32_t i = 0; i < chunkCount; ++i) { uint32_t trianglesCount = aResult.geometryOffset[i + 1] - aResult.geometryOffset[i]; memcpy(aResult.geometry + aResult.geometryOffset[i], chunkGeometry[i], trianglesCount * sizeof(Nv::Blast::Triangle)); delete chunkGeometry[i]; chunkGeometry[i] = nullptr; } float maxX = FLT_MAX; float maxY = FLT_MAX; float maxZ = FLT_MAX; float minX = -FLT_MAX; float minY = -FLT_MAX; float minZ = -FLT_MAX; for (uint32_t i = 0; i < bondCount; i++) { NvBlastBondDesc& bondDesc = aResult.bondDescs[i]; minX = std::min(minX, bondDesc.bond.centroid[0]); maxX = std::max(maxX, bondDesc.bond.centroid[0]); minY = std::min(minY, bondDesc.bond.centroid[1]); maxY = std::max(maxY, bondDesc.bond.centroid[1]); minZ = std::min(minZ, bondDesc.bond.centroid[2]); maxZ = std::max(maxZ, bondDesc.bond.centroid[2]); } // prepare physics data (convexes) buildPhysicsChunks(collisionBuilder, aResult, collisionParam); // set NvBlastChunk volume and centroid from CollisionHull for (uint32_t i = 0; i < chunkCount; i++) { float totalVolume = 0.f; NvcVec3 totalCentroid = {0.0f, 0.0f, 0.0f}; for (uint32_t k = aResult.collisionHullOffset[i]; k < aResult.collisionHullOffset[i+1]; k++) { const CollisionHull* hull = aResult.collisionHull[k]; if (hull) { NvcVec3 centroid; const float volume = calculateCollisionHullVolumeAndCentroid(centroid, *hull); totalVolume += volume; totalCentroid = totalCentroid + volume*centroid; } else { totalVolume = 0.0f; // Found a null hull, signal this with zero volume break; } } if (totalVolume > 0.0f) { totalCentroid = totalCentroid / totalVolume; aResult.chunkDescs[i].volume = totalVolume; aResult.chunkDescs[i].centroid[0] = totalCentroid.x; aResult.chunkDescs[i].centroid[1] = totalCentroid.y; aResult.chunkDescs[i].centroid[2] = totalCentroid.z; } else { // Fallback to using mesh size_t 
triCount = aResult.geometryOffset[i+1] - aResult.geometryOffset[i]; const Nv::Blast::Triangle* tris = aResult.geometry + aResult.geometryOffset[i]; NvcVec3 centroid; aResult.chunkDescs[i].volume = getGeometryVolumeAndCentroid(centroid, tris, triCount); aResult.chunkDescs[i].centroid[0] = centroid.x; aResult.chunkDescs[i].centroid[1] = centroid.y; aResult.chunkDescs[i].centroid[2] = centroid.z; } } // build and serialize ExtPhysicsAsset NvBlastAssetDesc descriptor; descriptor.bondCount = bondCount; descriptor.bondDescs = aResult.bondDescs; descriptor.chunkCount = chunkCount; descriptor.chunkDescs = aResult.chunkDescs; std::vector<uint8_t> scratch(static_cast<unsigned int>(NvBlastGetRequiredScratchForCreateAsset(&descriptor, logLL))); void* mem = NVBLAST_ALLOC(NvBlastGetAssetMemorySize(&descriptor, logLL)); aResult.asset = NvBlastCreateAsset(mem, &descriptor, scratch.data(), logLL); //aResult.asset = std::shared_ptr<NvBlastAsset>(asset, [=](NvBlastAsset* asset) //{ // NVBLAST_FREE(asset); //}); //std::cout << "Done" << std::endl; ret->materialCount = 0; ret->materialNames = nullptr; return ret; } uint32_t NvBlastExtAuthoringFindAssetConnectingBonds ( const NvBlastAsset** components, const NvcVec3* scales, const NvcQuat* rotations, const NvcVec3* translations, const uint32_t** convexHullOffsets, const CollisionHull*** chunkHulls, uint32_t componentCount, NvBlastExtAssetUtilsBondDesc*& newBondDescs, float maxSeparation ) { //We don't need to use any of the cooking related parts of this BlastBondGeneratorImpl bondGenerator(nullptr); std::vector<uint32_t> componentChunkOffsets; componentChunkOffsets.reserve(componentCount + 1); componentChunkOffsets.push_back(0); std::vector<uint32_t> combinedConvexHullOffsets; std::vector<const CollisionHull*> combinedConvexHulls; std::vector<CollisionHull*> hullsToRelease; combinedConvexHullOffsets.push_back(0); std::vector<uint32_t> originalComponentIndex; const nvidia::NvVec3 identityScale(1); //Combine our hull lists into a single 
combined list for bondsFromPrefractured for (uint32_t c = 0; c < componentCount; c++) { const uint32_t chunkCount = NvBlastAssetGetChunkCount(components[c], &logLL); const NvcVec3* scale = scales ? scales + c : nullptr; const NvcQuat* rotation = rotations ? rotations + c : nullptr; const NvcVec3* translation = translations ? translations + c : nullptr; componentChunkOffsets.push_back(chunkCount + componentChunkOffsets.back()); for (uint32_t chunk = 0; chunk < chunkCount; chunk++) { const uint32_t hullsStart = convexHullOffsets[c][chunk]; const uint32_t hullsEnd = convexHullOffsets[c][chunk + 1]; for (uint32_t hull = hullsStart; hull < hullsEnd; hull++) { if ((scale != nullptr && *toNvShared(scale) != identityScale) || (rotation != nullptr && !toNvShared(rotation)->isIdentity()) || (translation != nullptr && !toNvShared(translation)->isZero())) { hullsToRelease.emplace_back(NvBlastExtAuthoringTransformCollisionHull(chunkHulls[c][hull], scale, rotation, translation)); combinedConvexHulls.emplace_back(hullsToRelease.back()); } else { //No need to transform combinedConvexHulls.emplace_back(chunkHulls[c][hull]); } } combinedConvexHullOffsets.push_back((hullsEnd - hullsStart) + combinedConvexHullOffsets.back()); originalComponentIndex.push_back(c); } } const uint32_t totalChunkCount = componentChunkOffsets.back(); //Can't use std::vector<bool> since we need a bool* later std::unique_ptr<bool[]> isSupportChunk(new bool[totalChunkCount]); for (uint32_t c = 0; c < componentCount; c++) { const uint32_t chunkCount = componentChunkOffsets[c + 1] - componentChunkOffsets[c]; NvBlastSupportGraph supportGraph = NvBlastAssetGetSupportGraph(components[c], &logLL); for (uint32_t chunk = 0; chunk < chunkCount; chunk++) { auto chunkIndiciesEnd = supportGraph.chunkIndices + supportGraph.nodeCount; isSupportChunk[chunk + componentChunkOffsets[c]] = (std::find(supportGraph.chunkIndices, chunkIndiciesEnd, chunk) != chunkIndiciesEnd); } } //Find the bonds NvBlastBondDesc* newBonds = 
nullptr; const int32_t newBoundCount = bondGenerator.bondsFromPrefractured(totalChunkCount, combinedConvexHullOffsets.data(), combinedConvexHulls.data(), isSupportChunk.get(), originalComponentIndex.data(), newBonds, maxSeparation); //Convert the bonds back to per-component chunks newBondDescs = SAFE_ARRAY_NEW(NvBlastExtAssetUtilsBondDesc, newBoundCount); for (int32_t nb = 0; nb < newBoundCount; ++nb) { newBondDescs[nb].bond = newBonds[nb].bond; for (uint32_t ci = 0; ci < 2; ++ci) { uint32_t absChunkIdx = newBonds[nb].chunkIndices[ci]; uint32_t componentIdx = originalComponentIndex[absChunkIdx]; newBondDescs[nb].componentIndices[ci] = componentIdx; newBondDescs[nb].chunkIndices[ci] = absChunkIdx - componentChunkOffsets[componentIdx]; } } //Don't need this anymore NVBLAST_FREE(newBonds); // These hulls were generated by NvBlastExtAuthoringTransformCollisionHull, which uses SAFE_ARRAY_NEW // to allocate the arrays referenced in each hull. Be sure to delete the array pointers here before // deleting the CollisionHull structs. 
for (CollisionHull* hull : hullsToRelease) { SAFE_ARRAY_DELETE(hull->indices); SAFE_ARRAY_DELETE(hull->points); SAFE_ARRAY_DELETE(hull->polygonData); delete hull; } return newBoundCount; } void NvBlastExtAuthoringUpdateGraphicsMesh(Nv::Blast::FractureTool& fTool, Nv::Blast::AuthoringResult& aResult) { uint32_t chunkCount = fTool.getChunkCount(); for (uint32_t i = 0; i < chunkCount; ++i) { fTool.updateBaseMesh(fTool.getChunkInfoIndex(aResult.assetToFractureChunkIdMap[i]), aResult.geometry + aResult.geometryOffset[i]); } } void NvBlastExtAuthoringBuildCollisionMeshes(Nv::Blast::AuthoringResult& ares, Nv::Blast::ConvexMeshBuilder& collisionBuilder, const Nv::Blast::ConvexDecompositionParams& collisionParam, uint32_t chunksToProcessCount, uint32_t* chunksToProcess) { buildPhysicsChunks(collisionBuilder, ares, collisionParam, chunksToProcessCount, chunksToProcess); } PatternGenerator* NvBlastExtAuthoringCreatePatternGenerator() { return NVBLAST_NEW(PatternGeneratorImpl); } SpatialGrid* NvBlastExtAuthoringCreateSpatialGrid(uint32_t resolution, const Mesh* m) { Grid* g = NVBLAST_NEW(Grid)(resolution); g->setMesh(m); return g; } SpatialAccelerator* NvBlastExtAuthoringCreateGridAccelerator(SpatialGrid* parentGrid) { return NVBLAST_NEW(GridAccelerator)((Grid*)parentGrid); } SpatialAccelerator* NvBlastExtAuthoringCreateSweepingAccelerator(const Mesh* m) { return NVBLAST_NEW(SweepingAccelerator)(m); } SpatialAccelerator* NvBlastExtAuthoringCreateBBoxBasedAccelerator(uint32_t resolution, const Mesh* m) { return NVBLAST_NEW(BBoxBasedAccelerator)(m, resolution); } BooleanTool* NvBlastExtAuthoringCreateBooleanTool() { return new BooleanToolImpl; }
25,671
C++
39.428346
227
0.661797
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/NvBlastExtTriangleProcessor.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. 
#include "NvBlastExtTriangleProcessor.h"
#include "NvBlastExtAuthoringInternalCommon.h"

// Tolerance below which two segment directions are treated as collinear
// (magnitude of the 2D cross product).
#define COLLIN_EPS 1e-4f
// Tolerance for component-wise vertex/coordinate comparisons.
#define V_COMP_EPS 1e-5f

using namespace nvidia;

namespace Nv
{
namespace Blast
{

/**
    Segments bounding box intersection test.
    Returns true when the axis-aligned bounding rectangles of segments
    (s1,e1) and (s2,e2) overlap on both axes. Used as a cheap reject before
    the exact segment-intersection computation.
*/
bool boundingRectangleIntersection(const NvVec2& s1, const NvVec2& e1, const NvVec2& s2, const NvVec2& e2)
{
    // sl1/sl2 is always left bottom end of rectangle
    // el1/el2 is always right top end of rectangle
    float sl1, sl2, el1, el2;
    if (s1.x < e1.x)
    {
        sl1 = s1.x;
        el1 = e1.x;
    }
    else
    {
        el1 = s1.x;
        sl1 = e1.x;
    }
    if (s2.x < e2.x)
    {
        sl2 = s2.x;
        el2 = e2.x;
    }
    else
    {
        el2 = s2.x;
        sl2 = e2.x;
    }
    // No overlap on the x axis -> rectangles cannot intersect.
    if (NvMax(sl1, sl2) > NvMin(el1, el2))
        return false;

    // Repeat the interval test for the y axis, reusing the same temporaries.
    if (s1.y < e1.y)
    {
        sl1 = s1.y;
        el1 = e1.y;
    }
    else
    {
        el1 = s1.y;
        sl1 = e1.y;
    }
    if (s2.y < e2.y)
    {
        sl2 = s2.y;
        el2 = e2.y;
    }
    else
    {
        el2 = s2.y;
        sl2 = e2.y;
    }
    if (NvMax(sl1, sl2) > NvMin(el1, el2))
        return false;
    return true;
}

// 2D cross product a x b; sign gives the turn direction from a to b.
inline float getRotation(NvVec2 a, NvVec2 b)
{
    return a.x * b.y - a.y * b.x;
}

// Parameter t in [0,1] of 'point' along segment (a,b), computed by length
// ratio. NOTE(review): divides by |b - a|; assumes a != b — callers only pass
// non-degenerate segments (collinear segments are rejected earlier).
inline float getParameter(const NvVec2& a, const NvVec2& b, const NvVec2& point)
{
    return (point - a).magnitude() / (b - a).magnitude();
}

// Linear interpolation between a and b at parameter t.
inline NvVec3 lerp3D(const NvVec3& a, const NvVec3& b, const float t)
{
    return (b - a) * t + a;
}

// Implicit 2D line: normal.dot(p) + c == 0, built from a direction vector and
// a point on the line.
struct Line2D
{
    NvVec2 normal;
    float c;
    Line2D(NvVec2 vec, NvVec2 point)
    {
        // Normal is the direction rotated by -90 degrees.
        normal.x = vec.y;
        normal.y = -vec.x;
        c = -normal.dot(point);
    }
};

// Intersects segments (s1,e1) and (s2,e2). Returns 1 and writes the parameter
// of the intersection point along (s1,e1) into t1, or returns 0 when the
// segments do not intersect (or are near-collinear).
uint32_t TriangleProcessor::getSegmentIntersection(const NvVec2& s1, const NvVec2& e1, const NvVec2& s2, const NvVec2& e2, float& t1)
{
    // Cheap bounding-rectangle reject first.
    if (!boundingRectangleIntersection(s1, e1, s2, e2))
        return 0;
    NvVec2 vec1 = e1 - s1;
    NvVec2 vec2 = e2 - s2;
    float det1 = getRotation(vec1, vec2);
    // Near-collinear segments are treated as non-intersecting.
    if (NvAbs(det1) < COLLIN_EPS)
    {
        return 0;
    }
    Line2D lineA(vec1, s1);
    Line2D lineB(vec2, s2);
    NvVec2 fInt;
    // Cramer's rule on the two implicit line equations.
    float detX = lineA.normal.y * lineB.c - lineA.c * lineB.normal.y;
    float detY = lineA.c * lineB.normal.x - lineB.c * lineA.normal.x;

    float x = detX / det1;
    float y = detY / det1;
    // Accept only if the line-line intersection lies (within tolerance) inside
    // both segments' coordinate ranges.
    if (x + V_COMP_EPS >= NvMax(NvMin(s1.x, e1.x), NvMin(s2.x, e2.x)) &&
        x - V_COMP_EPS <= NvMin(NvMax(s1.x, e1.x), NvMax(s2.x, e2.x)) &&
        y + V_COMP_EPS >= NvMax(NvMin(s1.y, e1.y), NvMin(s2.y, e2.y)) &&
        y - V_COMP_EPS <= NvMin(NvMax(s1.y, e1.y), NvMax(s2.y, e2.y)))
    {
        fInt.x = x;
        fInt.y = y;
        t1 = getParameter(s1, e1, fInt);
        return 1;
    }
    return 0;
}

// Comparator ordering points angularly around basePoint, using the sign of the
// triple product with 'normal' as the orientation reference.
struct cwComparer
{
    NvVec3 basePoint;
    NvVec3 normal;
    cwComparer(NvVec3 basePointIn, NvVec3 norm)
    {
        basePoint = basePointIn;
        normal = norm;
    };
    bool operator()(const NvVec3& a, const NvVec3& b)
    {
        NvVec3 norm = (a - basePoint).cross(b - basePoint);
        return normal.dot(norm) > 0;
    }
};

// Strict-weak lexicographic (x, y, z) ordering with V_COMP_EPS tolerance, used
// to sort points so near-duplicates become adjacent.
bool vec3Comparer(const NvVec3& a, const NvVec3& b)
{
    if (a.x + V_COMP_EPS < b.x)
        return true;
    if (a.x - V_COMP_EPS > b.x)
        return false;
    if (a.y + V_COMP_EPS < b.y)
        return true;
    if (a.y - V_COMP_EPS > b.y)
        return false;
    if (a.z + V_COMP_EPS < b.z)
        return true;
    return false;
}

// Deduplicates 'points' (within V_COMP_EPS per component) and reorders them
// counter-clockwise around points[0] with respect to 'normal'.
void TriangleProcessor::sortToCCW(std::vector<NvVec3>& points, NvVec3& normal)
{
    // Sort lexicographically so duplicates are adjacent, then compact in place.
    std::sort(points.begin(), points.end(), vec3Comparer);
    int lastUnique = 0;
    for (uint32_t i = 1; i < points.size(); ++i)
    {
        NvVec3 df = (points[i] - points[lastUnique]).abs();
        if (df.x > V_COMP_EPS || df.y > V_COMP_EPS || df.z > V_COMP_EPS)
        {
            points[++lastUnique] = points[i];
        }
    }
    points.resize(lastUnique + 1);
    if (points.size() > 2)
    {
        // Angular sort around the first point.
        cwComparer compr(points[0], normal);
        std::sort(points.begin() + 1, points.end(), compr);
    }
}

// Builds the convex hull of 'points' (coplanar, plane normal given) into
// 'convexHull'. First deduplicates and angularly sorts the points (same steps
// as sortToCCW), then runs a Graham-scan-like pass in the 2D projection chosen
// by getProjectionDirection(normal).
void TriangleProcessor::buildConvexHull(std::vector<NvVec3>& points, std::vector<NvVec3>& convexHull,const NvVec3& normal)
{
    // Deduplicate (within tolerance) — identical to the prologue of sortToCCW.
    std::sort(points.begin(), points.end(), vec3Comparer);
    int lastUnique = 0;
    for (uint32_t i = 1; i < points.size(); ++i)
    {
        NvVec3 df = (points[i] - points[lastUnique]).abs();
        if (df.x > V_COMP_EPS || df.y > V_COMP_EPS || df.z > V_COMP_EPS)
        {
            points[++lastUnique] = points[i];
        }
    }
    points.resize(lastUnique + 1);
    if (points.size() > 2)
    {
        cwComparer compr(points[0], normal);
        std::sort(points.begin() + 1, points.end(), compr);
    }
    // A hull needs at least three distinct points.
    if (points.size() < 3)
        return;
    convexHull.push_back(points[0]);
    convexHull.push_back(points[1]);
    ProjectionDirections projectionDirection = getProjectionDirection(normal);
    for (uint32_t i = 2; i < points.size(); ++i)
    {
        NvVec2 pnt = getProjectedPointWithWinding(points[i], projectionDirection);
        NvVec2 vec = pnt - getProjectedPointWithWinding(convexHull.back(), projectionDirection);
        // Skip points projecting onto the current hull tip.
        if (NvAbs(vec.x) < V_COMP_EPS && NvAbs(vec.y) < V_COMP_EPS)
        {
            continue;
        }
        if (getRotation(vec, getProjectedPointWithWinding(convexHull.back(), projectionDirection) -
                                 getProjectedPointWithWinding(convexHull[convexHull.size() - 2], projectionDirection)) < 0)
        {
            // Convex turn: extend the hull.
            convexHull.push_back(points[i]);
        }
        else
        {
            // Concave turn: pop hull points until convexity is restored,
            // then append the new point.
            while (convexHull.size() > 1 &&
                   getRotation(vec, getProjectedPointWithWinding(convexHull.back(), projectionDirection) -
                                        getProjectedPointWithWinding(convexHull[convexHull.size() - 2], projectionDirection)) > 0)
            {
                convexHull.pop_back();
                vec = pnt - getProjectedPointWithWinding(convexHull.back(), projectionDirection);
            }
            convexHull.push_back(points[i]);
        }
    }
}

// Computes the intersection polygon of triangles a and b (b is first
// translated by -centroid to match a's frame; results are translated back).
// Returns 1 and fills intersectionBuffer with the convex intersection polygon,
// or 0 when the triangles do not intersect. aProjected must be a's projection
// along getProjectionDirection(normal).
uint32_t TriangleProcessor::getTriangleIntersection(TrPrcTriangle& a, TrPrcTriangle2d& aProjected, TrPrcTriangle &b, NvVec3& centroid, std::vector<NvVec3>& intersectionBuffer, NvVec3 normal)
{
    // Move b into the same local frame as a.
    b.points[0] -= centroid;
    b.points[1] -= centroid;
    b.points[2] -= centroid;

    ProjectionDirections prjDir = getProjectionDirection(normal);
    TrPrcTriangle2d bProjected;
    bProjected.points[0] = getProjectedPointWithWinding(b.points[0], prjDir);
    bProjected.points[1] = getProjectedPointWithWinding(b.points[1], prjDir);
    bProjected.points[2] = getProjectedPointWithWinding(b.points[2], prjDir);

    if (!triangleBoundingBoxIntersection(aProjected, bProjected))
        return 0;

    // Check triangle A against points of B //
    for (int i = 0; i < 3; ++i)
    {
        if (isPointInside(bProjected.points[i], aProjected))
        {
            intersectionBuffer.push_back(b.points[i]);
        }
    }
    // Check triangle B against points of A //
    for (int i = 0; i < 3; ++i)
    {
        if (isPointInside(aProjected.points[i], bProjected))
        {
            intersectionBuffer.push_back(a.points[i]);
        }
    }
    // Check edges intersection //
    float param = 0;
    for (int i = 0; i < 3; ++i)
    {
        for (int j = 0; j < 3; ++j)
        {
            if (getSegmentIntersection(aProjected.points[i], aProjected.points[(i + 1) % 3],
                                       bProjected.points[j], bProjected.points[(j + 1) % 3], param))
            {
                // Lift the 2D intersection back to 3D along a's edge.
                intersectionBuffer.push_back(lerp3D(a.points[i], a.points[(i + 1) % 3], param));
            }
        }
    }
    if (intersectionBuffer.size() == 0)
        return 0;
    // Intersection between two triangles is convex, but points should be reordered to construct right polygon //
    std::vector<NvVec3> intrs;
    buildConvexHull(intersectionBuffer, intrs, normal);
    intersectionBuffer = intrs;
    // Return all points back from origin //
    for (uint32_t i = 0; i < intersectionBuffer.size(); ++i)
    {
        intersectionBuffer[i] += centroid;
    }
    return 1;
}

// AABB overlap test for two projected triangles; 1 if their 2D bounding boxes
// intersect (with V_COMP_EPS tolerance), 0 otherwise.
bool TriangleProcessor::triangleBoundingBoxIntersection(TrPrcTriangle2d& a, TrPrcTriangle2d& b)
{
    float fb = std::min(a.points[0].x, std::min(a.points[1].x, a.points[2].x));
    float fe = std::max(a.points[0].x, std::max(a.points[1].x, a.points[2].x));
    float sb = std::min(b.points[0].x, std::min(b.points[1].x, b.points[2].x));
    float se = std::max(b.points[0].x, std::max(b.points[1].x, b.points[2].x));
    if (std::min(fe, se) + V_COMP_EPS < std::max(fb, sb))
        return 0;
    fb = std::min(a.points[0].y, std::min(a.points[1].y, a.points[2].y));
    fe = std::max(a.points[0].y, std::max(a.points[1].y, a.points[2].y));
    sb = std::min(b.points[0].y, std::min(b.points[1].y, b.points[2].y));
    se = std::max(b.points[0].y, std::max(b.points[1].y, b.points[2].y));
    if (std::min(fe, se) + V_COMP_EPS < std::max(fb, sb))
        return 0;
    return 1;
}

// Point-in-triangle test via edge cross products. Returns 1 when strictly
// inside, 2 when on an edge (any cross product within COLLIN_EPS of zero),
// 0 when outside. Both windings are accepted (all-nonnegative or
// all-nonpositive signs).
uint32_t TriangleProcessor::isPointInside(const NvVec2& point, const TrPrcTriangle2d& triangle)
{
    float av = getRotation(point - triangle.points[0], triangle.points[1] - triangle.points[0]);
    float bv = getRotation(point - triangle.points[1], triangle.points[2] - triangle.points[1]);
    float cv = getRotation(point - triangle.points[2], triangle.points[0] - triangle.points[2]);
    // Snap near-zero cross products to exactly zero so the sign tests below
    // classify on-edge points deterministically.
    if (NvAbs(av) < COLLIN_EPS)
        av = 0;
    if (NvAbs(bv) < COLLIN_EPS)
        bv = 0;
    if (NvAbs(cv) < COLLIN_EPS)
        cv = 0;
    if (av >= 0 && bv >= 0 && cv >= 0)
    {
        if (av == 0 || bv == 0 || cv == 0)
            return 2;
        return 1;
    }
    if (av <= 0 && bv <= 0 && cv <= 0)
    {
        if (av == 0 || bv == 0 || cv == 0)
            return 2;
        return 1;
    }
    return 0;
}

}  // namespace Blast
}  // namespace Nv
11,307
C++
29.316354
222
0.600425
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/NvBlastExtAuthoringPatternGeneratorImpl.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. 
#define _CRT_SECURE_NO_WARNINGS #include "NvBlastGlobals.h" #include "NvBlastAssert.h" #include "NvBlastExtAuthoringTypes.h" #include "NvBlastExtAuthoringPatternGeneratorImpl.h" #include "NvBlastExtAuthoringMeshUtils.h" #include "NvBlastExtAuthoringMeshImpl.h" #include "NvBlastExtAuthoringFractureToolImpl.h" #include "NvBlastExtAuthoringBooleanToolImpl.h" #include "NvBlastExtAuthoringTriangulator.h" #include "NvBlastExtAuthoringPerlinNoise.h" #include <NvBlastNvSharedHelpers.h> #include <vector> using namespace Nv::Blast; using namespace nvidia; struct DamagePatternImpl : public DamagePattern { virtual void release() override; }; DamagePattern* PatternGeneratorImpl::generateUniformPattern(const UniformPatternDesc* desc) { std::vector<NvcVec3> points; float radiusDelta = desc->radiusMax - desc->radiusMin; for (uint32_t i = 0; i < desc->cellsCount; ++i) { float rd = desc->RNG() * radiusDelta + desc->radiusMin; if (desc->radiusDistr != 1.0f) { rd = std::pow(rd / desc->radiusMax, desc->radiusDistr) * desc->radiusMax; } float phi = desc->RNG() * 6.28f; float theta = (desc->RNG()) * 6.28f; float x = rd * cos(phi) * sin(theta); float y = rd * sin(phi) * sin(theta); float z = rd * cos(theta); points.push_back({x, y, z}); } auto pattern = generateVoronoiPattern((uint32_t)points.size(), points.data(), desc->interiorMaterialId); pattern->activationRadius = desc->radiusMax * desc->debrisRadiusMult; return pattern; } DamagePattern* PatternGeneratorImpl::generateVoronoiPattern(uint32_t cellCount, const NvcVec3* inPoints, int32_t interiorMaterialId) { return generateVoronoiPatternInternal(cellCount, inPoints, interiorMaterialId); } DamagePattern* PatternGeneratorImpl::generateVoronoiPatternInternal(uint32_t cellCount, const NvcVec3* inPoints, int32_t interiorMaterialId, float angle) { DamagePatternImpl* pattern = NVBLAST_NEW(DamagePatternImpl); std::vector<NvcVec3> points(cellCount); NvcVec3 orig = {0, 0, 0}; for (uint32_t i = 0; i < cellCount; ++i) { points[i] = inPoints[i]; 
orig = orig + points[i]; } orig = orig / cellCount; std::vector<std::vector<std::pair<int32_t, int32_t>>> neighbors; findCellBasePlanes(points, neighbors); Mesh** patterns = (Mesh**)NVBLAST_ALLOC(sizeof(Mesh*) * cellCount); //PreparedMesh** prepMeshes = (PreparedMesh**)NVBLAST_ALLOC(sizeof(PreparedMesh*) * cellCount); BooleanEvaluator evl; for (uint32_t i = 0; i < cellCount; ++i) { patterns[i] = getCellMesh(evl, 0, i, points, neighbors, interiorMaterialId, orig); if (patterns[i] == nullptr) { continue; } if (angle != 0) { auto* vr = patterns[i]->getVerticesWritable(); for (uint32_t j = 0; j < patterns[i]->getVerticesCount(); ++j) { float& z = vr[j].p.z; z -= 3.8f; if (z < -2) // we presume that this vertex has infinite -z position (everything scaled to unit cube). { if (angle > 0) { float d = sqrt(vr[j].p.x * vr[j].p.x + vr[j].p.y * vr[j].p.y); vr[j].p.x *= (d + 4 * tan(angle * nvidia::NvPi / 180.f)) / d; vr[j].p.y *= (d + 4 * tan(angle * nvidia::NvPi / 180.f)) / d; } } } patterns[i]->recalculateBoundingBox(); } } for (int32_t i = cellCount - 1; i >= 0; i--) { if (patterns[i] == nullptr) { cellCount--; std::swap(patterns[i], patterns[cellCount]); //std::swap(prepMeshes[i], prepMeshes[cellCount]); } } pattern->cellsCount = cellCount; pattern->cellsMeshes = patterns; //pattern->preparedMeshes = prepMeshes; #ifdef USE_MERGED_MESH pattern->outputEdges = NVBLAST_ALLOC(sizeof(BooleanResultEdge) * (cellCount * BLASTRT_MAX_EDGES_PER_CHUNK)); pattern->outputEdgesCount = (uint32_t*)NVBLAST_ALLOC(sizeof(uint32_t) * cellCount); #endif return pattern; } DamagePattern* PatternGeneratorImpl::generateBeamPattern(const BeamPatternDesc* desc) { std::vector<NvcVec3> points; float radiusDelta = desc->radiusMax - desc->radiusMin; for (uint32_t i = 0; i < desc->cellsCount; ++i) { float rd = desc->RNG() * radiusDelta + desc->radiusMin; float phi = desc->RNG() * 6.28f; float x = rd * cos(phi); float y = rd * sin(phi); float z = desc->RNG() - 1; points.push_back({x, y, z}); } auto pattern 
= generateVoronoiPattern((uint32_t)points.size(), points.data(), desc->interiorMaterialId); pattern->activationType = DamagePattern::Line; return pattern; } DamagePattern* PatternGeneratorImpl::generateRegularRadialPattern(const RegularRadialPatternDesc* desc) { SimplexNoise noise(desc->radialNoiseAmplitude, desc->radialNoiseFrequency, 3, desc->RNG() * 999999); std::vector<NvVec3> points; float radialDelta = (desc->radiusMax - desc->radiusMin) / desc->radialSteps; float angularDelta = 2 * acos(-1.0f) / desc->angularSteps; for (uint32_t i = 0; i < desc->radialSteps; ++i) { for (uint32_t j = 0; j < desc->angularSteps; ++j) { float angle = j * angularDelta + desc->RNG() * desc->angularNoiseAmplitude; float rd = ((i + noise.sample(NvVec3(angle, 0, 0))) * radialDelta + desc->radiusMin); float x = rd * cos(angle); float y = rd * sin(angle); float z = 0; points.push_back(NvVec3(x, y, z)); } } float mrd = 0.0; for (uint32_t i = 0; i < points.size(); ++i) { mrd = std::max(mrd, points[i].magnitude()); } for (uint32_t i = 0; i < points.size(); ++i) { points[i] *= desc->radiusMax / mrd; } float ap = std::max(0.0f, desc->aperture); auto pattern = generateVoronoiPatternInternal((uint32_t)points.size(), fromNvShared(points.data()), desc->interiorMaterialId, ap); pattern->activationRadius = desc->radiusMax * desc->debrisRadiusMult; pattern->activationType = (ap == 0) ? 
DamagePattern::Line : DamagePattern::Cone; pattern->angle = ap; return pattern; } void PatternGeneratorImpl::release() { NVBLAST_DELETE(this, PatternGeneratorImpl); } void DamagePatternImpl::release() { if (cellsMeshes) { for (uint32_t i = 0; i < cellsCount; i++) { cellsMeshes[i]->release(); } NVBLAST_FREE(cellsMeshes); } #ifdef USE_MERGED_MESH if (outputEdges) { NVBLAST_FREE(outputEdges); } if (outputEdgesCount) { NVBLAST_FREE(outputEdgesCount); } if (mergedMesh) { mergedMesh->release(); } if (preparedMergedMesh) { preparedMergedMesh->release(); } if (validFacetsForChunk) { for (uint32_t i = 0; i < cellsCount; i++) { if (validFacetsForChunk[i]) { NVBLAST_FREE(validFacetsForChunk[i]); } } NVBLAST_FREE(validFacetsForChunk); } #endif NVBLAST_DELETE(this, DamagePatternImpl); } namespace Nv { namespace Blast { void savePatternToObj(DamagePattern* pattern) { FILE* fl = fopen("Pattern.obj", "w"); std::vector<uint32_t> trc; for (uint32_t mesh = 0; mesh < pattern->cellsCount; ++mesh) { Mesh* m = pattern->cellsMeshes[mesh]; Triangulator trgl; trgl.triangulate(m); auto& t = trgl.getBaseMesh(); for (uint32_t v = 0; v < t.size(); ++v) { fprintf(fl, "v %f %f %f\n", t[v].a.p.x, t[v].a.p.y, t[v].a.p.z); fprintf(fl, "v %f %f %f\n", t[v].b.p.x, t[v].b.p.y, t[v].b.p.z); fprintf(fl, "v %f %f %f\n", t[v].c.p.x, t[v].c.p.y, t[v].c.p.z); } trc.push_back(t.size()); } uint32_t cv = 1; for (uint32_t m = 0; m < trc.size(); ++m) { fprintf(fl, "g %d\n", m); for (uint32_t k = 0; k < trc[m]; ++k) { fprintf(fl, "f %d %d %d \n", cv, cv + 1, cv + 2); cv += 3; } } fclose(fl); } } }
10,186
C++
31.650641
153
0.595327
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/NvBlastExtAuthoringBooleanToolImpl.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. 
#ifndef NVBLASTEXTAUTHORINGBOOLEANTOOLIMPL_H
#define NVBLASTEXTAUTHORINGBOOLEANTOOLIMPL_H

#include "NvBlastExtAuthoringTypes.h"
#include "NvBlastExtAuthoringInternalCommon.h"
#include "NvBlastExtAuthoringBooleanTool.h"
#include <vector>
#include "NvBlastTypes.h"

namespace Nv
{
namespace Blast
{

class Mesh;

/**
    Boolean tool config, used to perform different operations: UNION, INTERSECTION, DIFFERENCE.
    ca/cb/ci are the winding-number coefficients applied to mesh A, mesh B, and
    their intersection respectively; the factory functions below supply the
    combinations for each operation.
*/
struct BooleanConf
{
    int32_t ca, cb, ci;
    BooleanConf(int32_t a, int32_t b, int32_t c) : ca(a), cb(b), ci(c)
    {
    }
};

namespace BooleanConfigurations
{
/**
    Creates boolean tool configuration to perform intersection of meshes A and B.
*/
inline BooleanConf BOOLEAN_INTERSECTION()
{
    return BooleanConf(0, 0, 1);
}

/**
    Creates boolean tool configuration to perform union of meshes A and B.
*/
inline BooleanConf BOOLEAN_UNION()
{
    return BooleanConf(1, 1, -1);
}

/**
    Creates boolean tool configuration to perform difference of meshes (A - B).
*/
inline BooleanConf BOOLEAN_DIFFERENCE()
{
    return BooleanConf(1, 0, -1);
}
}

/**
    Structure which holds information about intersection facet with edge.
    Ordered by edge id so per-facet intersection lists can be binary-searched/merged.
*/
struct EdgeFacetIntersectionData
{
    int32_t edId;               // id of the intersected edge
    int32_t intersectionType;   // classification of the intersection (set by the evaluator)
    Vertex intersectionPoint;   // interpolated vertex at the intersection
    EdgeFacetIntersectionData(int32_t edId, int32_t intersType, Vertex& inters)
        : edId(edId), intersectionType(intersType), intersectionPoint(inters)
    {
    }
    // Lookup-key constructor: only the edge id is set.
    EdgeFacetIntersectionData(int32_t edId) : edId(edId)
    {
    }
    bool operator<(const EdgeFacetIntersectionData& b) const
    {
        return edId < b.edId;
    }
};

class SpatialAccelerator;

/**
    Tool for performing boolean operations on polygonal meshes.
    Tool supports only closed meshes. Performing boolean on meshes with holes can lead to unexpected behavior,
    e.g. holes in result geometry.
*/
class BooleanEvaluator
{
public:
    BooleanEvaluator();
    ~BooleanEvaluator();

    /**
        Perform boolean operation on two polygonal meshes (A and B).
        \param[in] meshA    Mesh A
        \param[in] meshB    Mesh B
        \param[in] spAccelA Acceleration structure for mesh A
        \param[in] spAccelB Acceleration structure for mesh B
        \param[in] mode     Boolean operation type
    */
    void performBoolean(const Mesh* meshA, const Mesh* meshB, SpatialAccelerator* spAccelA, SpatialAccelerator* spAccelB, const BooleanConf& mode);

    /**
        Perform boolean operation on two polygonal meshes (A and B).
        \param[in] meshA    Mesh A
        \param[in] meshB    Mesh B
        \param[in] mode     Boolean operation type
    */
    void performBoolean(const Mesh* meshA, const Mesh* meshB, const BooleanConf& mode);

    /**
        Perform cutting of mesh with some large box, which represents cutting plane.
        This method skips part of intersetion computations, so should be used ONLY with
        cutting box, received from getBigBox(...) method from NvBlastExtAuthoringMesh.h.
        For cutting use only BOOLEAN_INTERSECTION or BOOLEAN_DIFFERENCE mode.
        \param[in] meshA    Mesh A
        \param[in] meshB    Cutting box
        \param[in] spAccelA Acceleration structure for mesh A
        \param[in] spAccelB Acceleration structure for cutting box
        \param[in] mode     Boolean operation type
    */
    void performFastCutting(const Mesh* meshA, const Mesh* meshB, SpatialAccelerator* spAccelA, SpatialAccelerator* spAccelB, const BooleanConf& mode);

    /**
        Perform cutting of mesh with some large box, which represents cutting plane.
        This method skips part of intersetion computations, so should be used ONLY with
        cutting box, received from getBigBox(...) method from NvBlastExtAuthoringMesh.h.
        For cutting use only BOOLEAN_INTERSECTION or BOOLEAN_DIFFERENCE mode.
        \param[in] meshA    Mesh A
        \param[in] meshB    Cutting box
        \param[in] mode     Boolean operation type
    */
    void performFastCutting(const Mesh* meshA, const Mesh* meshB, const BooleanConf& mode);

    /**
        Test whether point contained in mesh.
        \param[in] mesh     Mesh geometry
        \param[in] point    Point which should be tested
        \return not 0 if point is inside of mesh
    */
    int32_t isPointContainedInMesh(const Mesh* mesh, const NvcVec3& point);

    /**
        Test whether point contained in mesh.
        \param[in] mesh     Mesh geometry
        \param[in] spAccel  Acceleration structure for mesh
        \param[in] point    Point which should be tested
        \return not 0 if point is inside of mesh
    */
    int32_t isPointContainedInMesh(const Mesh* mesh, SpatialAccelerator* spAccel, const NvcVec3& point);

    /**
        Generates result polygon mesh after performing boolean operation.
        \return If not nullptr - result mesh geometry.
    */
    Mesh* createNewMesh();

    /**
        Reset tool state.
    */
    void reset();

private:
    // Phase helpers invoked by performBoolean/performFastCutting.
    void buildFaceFaceIntersections(const BooleanConf& mode);
    void buildFastFaceFaceIntersection(const BooleanConf& mode);
    void collectRetainedPartsFromA(const BooleanConf& mode);
    void collectRetainedPartsFromB(const BooleanConf& mode);

    // Deduplicating vertex insertion; returns the vertex index.
    int32_t addIfNotExist(const Vertex& p);
    void addEdgeIfValid(const EdgeWithParent& ed);

private:
    // Point-vs-mesh winding classification in both traversal directions.
    int32_t vertexMeshStatus03(const NvcVec3& p, const Mesh* mesh);
    int32_t vertexMeshStatus30(const NvcVec3& p, const Mesh* mesh);

    const Mesh* mMeshA;                 // operand A (not owned)
    const Mesh* mMeshB;                 // operand B (not owned)
    SpatialAccelerator* mAcceleratorA;  // accelerator for A (not owned)
    SpatialAccelerator* mAcceleratorB;  // accelerator for B (not owned)

    // Accumulated result geometry and per-mesh edge/facet intersection records.
    std::vector<EdgeWithParent> mEdgeAggregate;
    std::vector<Vertex> mVerticesAggregate;
    std::vector<std::vector<EdgeFacetIntersectionData> > mEdgeFacetIntersectionData12;
    std::vector<std::vector<EdgeFacetIntersectionData> > mEdgeFacetIntersectionData21;
};

/// BooleanTool: public-facing wrapper around BooleanEvaluator.
class BooleanToolImpl : public BooleanTool
{
public:
    /**
     * Release BooleanTool memory
     */
    virtual void release() override;

    virtual Mesh* performBoolean(const Mesh* meshA, SpatialAccelerator* accelA, const Mesh* meshB, SpatialAccelerator* accelB, BooleanTool::Op op) override;

    virtual bool pointInMesh(const Mesh* mesh, SpatialAccelerator* accel, const NvcVec3& point) override;

private:
    BooleanEvaluator m_evaluator;
};

}  // namespace Blast
}  // namespace Nv

#endif  // ifndef NVBLASTEXTAUTHORINGBOOLEANTOOLIMPL_H
8,200
C
34.349138
183
0.693659
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/NvBlastExtAuthoringMeshCleanerImpl.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #include "NvVec3.h" #include "NvVec2.h" #include "NvBounds3.h" #include <vector> #include <queue> #include <map> #include <NvBlastExtAuthoringMeshCleanerImpl.h> #include <NvBlastExtAuthoringMeshImpl.h> #include <NvBlastExtAuthoringInternalCommon.h> #include <NvBlastNvSharedHelpers.h> #include <boost/multiprecision/cpp_int.hpp> using namespace nvidia; using namespace Nv::Blast; using namespace boost::multiprecision; /** Exact rational vector types. 
*/

/**
    3-component vector over exact rationals (boost cpp_rational).
    Used so all geometric predicates below are computed without floating-point error.
*/
struct RVec3
{
    cpp_rational x, y, z;
    RVec3() {}
    // True iff all three components are exactly zero.
    bool isZero() { return x.is_zero() && y.is_zero() && z.is_zero(); }
    RVec3(cpp_rational _x, cpp_rational _y, cpp_rational _z)
    {
        x = _x;
        y = _y;
        z = _z;
    }
    // Exact lift of a float vector: each float is representable as a rational.
    RVec3(const NvcVec3& p)
    {
        x = cpp_rational(p.x);
        y = cpp_rational(p.y);
        z = cpp_rational(p.z);
    }
    // Lossy conversion back to float space.
    NvVec3 toVec3()
    {
        return { x.convert_to<float>(), y.convert_to<float>(), z.convert_to<float>() };
    }
    RVec3 operator-(const RVec3& b) const { return RVec3(x - b.x, y - b.y, z - b.z); }
    RVec3 operator+(const RVec3& b) const { return RVec3(x + b.x, y + b.y, z + b.z); }
    // Exact cross product.
    RVec3 cross(const RVec3& in) const
    {
        return RVec3(y * in.z - in.y * z, in.x * z - x * in.z, x * in.y - in.x * y);
    }
    // Exact dot product.
    cpp_rational dot(const RVec3& in) const { return x * in.x + y * in.y + z * in.z; }
    // Uniform scale.
    RVec3 operator*(const cpp_rational& in) const { return RVec3(x * in, y * in, z * in); }
};

/**
    2-component exact-rational vector; the 2D projection counterpart of RVec3.
*/
struct RVec2
{
    cpp_rational x, y;
    RVec2() {}
    RVec2(cpp_rational _x, cpp_rational _y)
    {
        x = _x;
        y = _y;
    }
    RVec2(const NvcVec2& p)
    {
        x = cpp_rational(p.x);
        y = cpp_rational(p.y);
    }
    NvVec2 toVec2()
    {
        return { x.convert_to<float>(), y.convert_to<float>() };
    }
    RVec2 operator-(const RVec2& b) const { return RVec2(x - b.x, y - b.y); }
    RVec2 operator+(const RVec2& b) const { return RVec2(x + b.x, y + b.y); }
    // 2D cross product (z-component of the 3D cross); sign gives orientation.
    cpp_rational cross(const RVec2& in) const { return x * in.y - y * in.x; }
    cpp_rational dot(const RVec2& in) const { return x * in.x + y * in.y; }
    RVec2 operator*(const cpp_rational& in) const { return RVec2(x * in, y * in); }
};

/**
    Exact plane through three points: n = (b-a) x (c-a), d = -n.dot(a).
    n is NOT normalized (normalization is impossible in rationals), so distance()
    returns a value scaled by |n| — only its sign is meaningful.
*/
struct RatPlane
{
    RVec3 n;
    cpp_rational d;
    RatPlane(const RVec3& a, const RVec3& b, const RVec3& c)
    {
        n = (b - a).cross(c - a);
        d = -n.dot(a);
    };
    // Signed (scaled) distance from 'in' to the plane.
    cpp_rational distance(RVec3& in) { return n.dot(in) + d; }
};

// Exact component-wise plane equality (not geometric coincidence: scaled
// representations of the same plane compare unequal).
bool isSame(const RatPlane& a, const RatPlane& b)
{
    if (a.d != b.d)
        return false;
    if (a.n.x != b.n.x || a.n.y != b.n.y || a.n.z != b.n.z)
        return false;
    return true;
}

// Exact intersection point of segment (a,b) with plane 'pl'.
// NOTE(review): no guard against pl.n.dot(b - a) == 0 (segment parallel to
// plane) — division by zero rational; callers must ensure a proper crossing.
RVec3 planeSegmInters(RVec3& a, RVec3& b, RatPlane& pl)
{
    cpp_rational t = -(a.dot(pl.n) + pl.d) / pl.n.dot(b - a);
    RVec3 on = a + (b - a) * t;
    return on;
}

// Classification of a point against triangle (a,b,c); ON_AB/ON_BC/ON_AC mark
// lying on the interior of the corresponding edge.
enum POINT_CLASS
{
    ON_AB = 0,
    ON_BC = 1,
    ON_AC = 2,
    INSIDE_TR,
    OUTSIDE_TR,
    ON_VERTEX
};

/**
    Exact point-in-triangle test using the three edge orientation signs.
    Returns a POINT_CLASS value. A point coinciding with a vertex (two zero
    signs) is reported as OUTSIDE_TR, not ON_VERTEX.
*/
int32_t isPointInside(const RVec2& a, const RVec2& b, const RVec2& c, const RVec2& p)
{
    cpp_rational v1 = (b - a).cross(p - a);
    cpp_rational v2 = (c - b).cross(p - b);
    cpp_rational v3 = (a - c).cross(p - c);
    int32_t v1s = v1.sign();
    int32_t v2s = v2.sign();
    int32_t v3s = v3.sign();
    // Mixed signs -> strictly outside.
    if (v1s * v2s < 0 || v1s * v3s < 0 || v2s * v3s < 0)
        return OUTSIDE_TR;
    // Two zero signs -> the point is at a vertex; treated as outside here.
    if (v1s == 0 && v2s == 0)
        return OUTSIDE_TR;
    if (v1s == 0 && v3s == 0)
        return OUTSIDE_TR;
    if (v2s == 0 && v3s == 0)
        return OUTSIDE_TR;
    // Exactly one zero sign -> on the interior of that edge.
    if (v1s == 0)
        return ON_AB;
    if (v2s == 0)
        return ON_BC;
    if (v3s == 0)
        return ON_AC;
    return INSIDE_TR;
}

/**
    Project a 3D point to 2D along the dominant axis encoded in 'dir',
    swapping the two kept coordinates when OPPOSITE_WINDING is set so the
    projected triangle keeps positive orientation.
*/
RVec2 getProjectedPointWithWinding(const RVec3& point, ProjectionDirections dir)
{
    if (dir & YZ_PLANE)
    {
        if (dir & OPPOSITE_WINDING)
        {
            return RVec2(point.z, point.y);
        }
        else
            return RVec2(point.y, point.z);
    }
    if (dir & ZX_PLANE)
    {
        if (dir & OPPOSITE_WINDING)
        {
            return RVec2(point.z, point.x);
        }
        return RVec2(point.x, point.z);
    }
    if (dir & OPPOSITE_WINDING)
    {
        return RVec2(point.y, point.x);
    }
    return RVec2(point.x, point.y);
}

/**
    Triangle record for the incremental Delaunay triangulation.
    p[3]   - vertex indices; p[0] == -1 marks a deleted triangle.
    n[3]   - neighbor triangle index across edge (p[k], p[k+1]); -1 if none.
             (Later, in the exterior-surface pass, n[] is reused as edge-map ids.)
    parentTriangle - index of the source facet this triangle was carved from.
*/
struct DelTriangle
{
    int32_t p[3];
    int32_t n[3];
    int32_t parentTriangle;
    // Index of the edge that FOLLOWS vertex 'vrt' in winding order; -1 if absent.
    int32_t getEdWP(int32_t vrt)
    {
        if (p[0] == vrt)
            return 1;
        if (p[1] == vrt)
            return 2;
        if (p[2] == vrt)
            return 0;
        return -1;
    }
    // Index of directed edge (v1 -> v2); -1 if the triangle has no such edge.
    int32_t getEdId(int32_t v1, int32_t v2)
    {
        if (p[0] == v1 && p[1] == v2)
            return 0;
        if (p[1] == v1 && p[2] == v2)
            return 1;
        if (p[2] == v1 && p[0] == v2)
            return 2;
        return -1;
    }
    // Corner slot (0..2) of the vertex opposite to directed edge (v1 -> v2).
    int32_t getOppP(int32_t v1, int32_t v2)
    {
        if (p[0] == v1 && p[1] == v2)
            return 2;
        if (p[1] == v1 && p[2] == v2)
            return 0;
        if (p[2] == v1 && p[0] == v2)
            return 1;
        return -1;
    }
    // Vertex INDEX of the corner that is neither v1 nor v2 (order-insensitive).
    int32_t getOppPoint(int32_t v1, int32_t v2)
    {
        if (p[0] != v1 && p[0] != v2)
            return p[0];
        if (p[1] != v1 && p[1] != v2)
            return p[1];
        if (p[2] != v1 && p[2] != v2)
            return p[2];
        return -1;
    }
    // True if 't' has the same vertices in the same cyclic order (any rotation).
    bool compare(const DelTriangle& t) const
    {
        if (p[0] == t.p[0] && p[1] == t.p[1] && p[2] == t.p[2])
            return true;
        if (p[1] == t.p[0] && p[2] == t.p[1] && p[0] == t.p[2])
            return true;
        if (p[2] == t.p[0] && p[0] == t.p[1] && p[1] == t.p[2])
            return true;
        return false;
    }
};

// Edge record: s/e vertex indices, nr/nl neighbor slots.
struct DelEdge
{
    int32_t s, e;
    int32_t nr, nl;
};

/**
    Tests whether segment (s,e) crosses any edge of triangle (a,b,c).
    NOTE(review): the second factor of each inner test uses 'vec' where 'vec2'
    would be expected ((e - a).cross(vec) instead of .cross(vec2)) — looks
    suspicious; confirm intent before relying on this helper.
*/
bool isIntersectsTriangle(RVec2& a, RVec2& b, RVec2& c, RVec2& s, RVec2& e)
{
    RVec2 vec = e - s;
    if ((a - s).cross(vec) * (b - s).cross(vec) < 0)
    {
        RVec2 vec2 = b - a;
        if ((s - a).cross(vec2) * (e - a).cross(vec) < 0)
            return true;
    }
    if ((b - s).cross(vec) * (c - s).cross(vec) < 0)
    {
        RVec2 vec2 = c - b;
        if ((s - b).cross(vec2) * (e - b).cross(vec) < 0)
            return true;
    }
    if ((a - s).cross(vec) * (c - s).cross(vec) < 0)
    {
        RVec2 vec2 = a - c;
        if ((s - c).cross(vec2) * (e - c).cross(vec) < 0)
            return true;
    }
    return false;
}

/**
    Exact in-circumcircle predicate (3x3 determinant form).
    Returns  1 if p is strictly inside the circumcircle of (a,b,c),
            -1 if strictly outside, 0 if exactly on it.
*/
inline int32_t inCircumcircle(RVec2& a, RVec2& b, RVec2& c, RVec2& p)
{
    RVec2 ta = a - p;
    RVec2 tb = b - p;
    RVec2 tc = c - p;
    cpp_rational ad = ta.dot(ta);
    cpp_rational bd = tb.dot(tb);
    cpp_rational cd = tc.dot(tc);
    cpp_rational pred =
        ta.x * (tb.y * cd - tc.y * bd) - ta.y * (tb.x * cd - tc.x * bd) + ad * (tb.x * tc.y - tc.x * tb.y);
    if (pred > 0)
        return 1;
    if (pred < 0)
        return -1;
    return 0;
}

// Finds directed edge (s, e) in 'edges', appending it if missing.
// Returns its index. Linear scan — O(n) per call.
int32_t getEdge(std::vector<DelEdge>& edges, int32_t s, int32_t e)
{
    for (uint32_t i = 0; i < edges.size(); ++i)
    {
        if (edges[i].s == s && edges[i].e == e)
            return i;
    }
    edges.push_back(DelEdge());
    edges.back().s = s;
    edges.back().e = e;
    return edges.size() - 1;
}

// Rebuilds all triangle neighbor links from scratch by matching opposite
// directed edges. O(n^2) in triangle count; skips deleted (p[0] == -1) entries.
// (Name "reubild" is a historical typo kept for ABI/reference stability.)
void reubildAdjacency(std::vector<DelTriangle>& state)
{
    for (uint32_t i = 0; i < state.size(); ++i)
    {
        state[i].n[0] = state[i].n[1] = state[i].n[2] = -1;
    }
    for (uint32_t i = 0; i < state.size(); ++i)
    {
        if (state[i].p[0] == -1)
            continue;
        for (uint32_t j = i + 1; j < state.size(); ++j)
        {
            if (state[j].p[0] == -1)
                continue;
            for (uint32_t k = 0; k < 3; ++k)
            {
                for (uint32_t c = 0; c < 3; ++c)
                {
                    // Shared edge appears with opposite direction in the two triangles.
                    if (state[i].p[k] == state[j].p[(c + 1) % 3] && state[i].p[(k + 1) % 3] == state[j].p[c])
                    {
                        state[i].n[k] = j;
                        state[j].n[c] = i;
                    }
                }
            }
        }
    }
}

/**
    Inserts vertex 'p' into the triangulation 'state' (incremental Delaunay
    insertion). The containing triangle is split 1->3 (interior hit) or the
    two triangles sharing the hit edge are split 2->4 (edge hit); afterwards a
    flip queue restores the Delaunay property via the exact inCircumcircle
    predicate. Flips are suppressed across constrained edges listed in 'edges'.
    \param vertices 2D (projected) positions indexed by the triangle records
    \param state    triangulation, updated in place (deleted tris: p[0] == -1)
    \param p        index of the vertex to insert
    \param edges    constraint edges that must never be flipped away
*/
void insertPoint(std::vector<RVec2>& vertices, std::vector<DelTriangle>& state, int32_t p,
                 const std::vector<Edge>& edges)
{
    std::queue<int32_t> triangleToCheck;
    // Locate the triangle containing p and split it.
    for (uint32_t i = 0; i < state.size(); ++i)
    {
        if (state[i].p[0] == -1)
            continue;
        DelTriangle ctr = state[i];  // copy: state may reallocate below
        int32_t cv = isPointInside(vertices[ctr.p[0]], vertices[ctr.p[1]], vertices[ctr.p[2]], vertices[p]);
        if (cv == OUTSIDE_TR)
            continue;
        if (cv == INSIDE_TR)
        {
            // Interior hit: replace triangle i with three fans around p.
            uint32_t taInd = state.size();
            uint32_t tbInd = state.size() + 1;
            uint32_t tcInd = state.size() + 2;
            state.resize(state.size() + 3);
            state[taInd].p[0] = ctr.p[2];
            state[taInd].p[1] = ctr.p[0];
            state[taInd].p[2] = p;
            state[taInd].n[0] = ctr.n[2];
            state[taInd].n[1] = tbInd;
            state[taInd].n[2] = tcInd;
            state[tbInd].p[0] = ctr.p[0];
            state[tbInd].p[1] = ctr.p[1];
            state[tbInd].p[2] = p;
            state[tbInd].n[0] = ctr.n[0];
            state[tbInd].n[1] = tcInd;
            state[tbInd].n[2] = taInd;
            state[tcInd].p[0] = ctr.p[1];
            state[tcInd].p[1] = ctr.p[2];
            state[tcInd].p[2] = p;
            state[tcInd].n[0] = ctr.n[1];
            state[tcInd].n[1] = taInd;
            state[tcInd].n[2] = tbInd;
            triangleToCheck.push(taInd);
            triangleToCheck.push(tbInd);
            triangleToCheck.push(tcInd);
            /** Redirect the old neighbors to the new fan triangles */
            int32_t nb = state[i].n[0];
            if (nb != -1)
                state[nb].n[state[nb].getEdId(state[i].p[1], state[i].p[0])] = tbInd;
            nb = state[i].n[1];
            if (nb != -1)
                state[nb].n[state[nb].getEdId(state[i].p[2], state[i].p[1])] = tcInd;
            nb = state[i].n[2];
            if (nb != -1)
                state[nb].n[state[nb].getEdId(state[i].p[0], state[i].p[2])] = taInd;
            state[i].p[0] = -1;  // mark original as deleted
        }
        else
        {
            // Edge hit (cv is the edge slot): split triangle i into two, and the
            // neighbor across that edge (if any) into two as well.
            uint32_t taInd = state.size();
            uint32_t tbInd = state.size() + 1;
            state.resize(state.size() + 2);
            int32_t bPoint = state[i].p[(cv + 2) % 3];  // vertex opposite the hit edge
            state[taInd].p[0] = bPoint;
            state[taInd].p[1] = state[i].p[cv];
            state[taInd].p[2] = p;
            state[tbInd].p[0] = bPoint;
            state[tbInd].p[1] = p;
            state[tbInd].p[2] = state[i].p[(cv + 1) % 3];
            state[taInd].n[0] = state[i].n[(cv + 2) % 3];
            state[taInd].n[1] = -1;  // filled in below if an opposite triangle exists
            state[taInd].n[2] = tbInd;
            state[tbInd].n[0] = taInd;
            state[tbInd].n[1] = -1;
            state[tbInd].n[2] = state[i].n[(cv + 1) % 3];
            // Redirect external neighbors of triangle i to the new halves.
            if (state[i].n[(cv + 1) % 3] != -1)
                for (int32_t k = 0; k < 3; ++k)
                    if (state[state[i].n[(cv + 1) % 3]].n[k] == (int32_t)i)
                    {
                        state[state[i].n[(cv + 1) % 3]].n[k] = tbInd;
                        break;
                    }
            if (state[i].n[(cv + 2) % 3] != -1)
                for (int32_t k = 0; k < 3; ++k)
                    if (state[state[i].n[(cv + 2) % 3]].n[k] == (int32_t)i)
                    {
                        state[state[i].n[(cv + 2) % 3]].n[k] = taInd;
                        break;
                    }
            triangleToCheck.push(taInd);
            triangleToCheck.push(tbInd);
            int32_t total = 2;  // NOTE(review): written but never read afterwards
            int32_t oppositeTr = 0;
            if (state[i].n[cv] != -1)
            {
                // Split the triangle on the other side of the hit edge too.
                oppositeTr = state[i].n[cv];
                total += 2;
                uint32_t tcInd = state.size();
                uint32_t tdInd = state.size() + 1;
                state.resize(state.size() + 2);
                int32_t oped = state[oppositeTr].getEdId(state[i].p[(cv + 1) % 3], state[i].p[cv]);
                state[tcInd].n[0] = state[oppositeTr].n[(oped + 2) % 3];
                state[tcInd].n[1] = tbInd;
                state[tbInd].n[1] = tcInd;
                state[tcInd].n[2] = tdInd;
                state[tdInd].n[0] = tcInd;
                state[tdInd].n[1] = taInd;
                state[taInd].n[1] = tdInd;
                state[tdInd].n[2] = state[oppositeTr].n[(oped + 1) % 3];
                if (state[oppositeTr].n[(oped + 2) % 3] != -1)
                    for (int32_t k = 0; k < 3; ++k)
                        if (state[state[oppositeTr].n[(oped + 2) % 3]].n[k] == oppositeTr)
                        {
                            state[state[oppositeTr].n[(oped + 2) % 3]].n[k] = tcInd;
                            break;
                        }
                if (state[oppositeTr].n[(oped + 1) % 3] != -1)
                    for (int32_t k = 0; k < 3; ++k)
                        if (state[state[oppositeTr].n[(oped + 1) % 3]].n[k] == oppositeTr)
                        {
                            state[state[oppositeTr].n[(oped + 1) % 3]].n[k] = tdInd;
                            break;
                        }
                int32_t pop = state[oppositeTr].p[(oped + 2) % 3];
                state[tcInd].p[0] = pop;
                state[tcInd].p[1] = state[i].p[(cv + 1) % 3];
                state[tcInd].p[2] = p;
                state[tdInd].p[0] = pop;
                state[tdInd].p[1] = p;
                state[tdInd].p[2] = state[i].p[cv];
                state[oppositeTr].p[0] = -1;
                triangleToCheck.push(tcInd);
                triangleToCheck.push(tdInd);
            }
            state[i].p[0] = -1;
        }
        break;  // p inserted; stop searching
    }
    // Flip pass: restore the Delaunay condition around p.
    while (!triangleToCheck.empty())
    {
        int32_t ctrid = triangleToCheck.front();
        triangleToCheck.pop();
        DelTriangle& ctr = state[ctrid];
        int32_t oppTr = -5;
        int32_t ced = 0;
        // The edge not touching p is the candidate for flipping.
        for (uint32_t i = 0; i < 3; ++i)
        {
            if (ctr.p[i] != p && ctr.p[(i + 1) % 3] != p)
            {
                ced = i;
                oppTr = ctr.n[i];
                break;
            }
        }
        if (oppTr == -1)
            continue;  // boundary edge, nothing to flip
        // Constrained edges must be kept.
        bool toCont = false;
        for (size_t i = 0; i < edges.size(); ++i)
        {
            if ((int32_t)edges[i].s == ctr.p[ced] && ctr.p[(ced + 1) % 3] == (int32_t)edges[i].e)
            {
                toCont = true;
                break;
            }
            if ((int32_t)edges[i].e == ctr.p[ced] && ctr.p[(ced + 1) % 3] == (int32_t)edges[i].s)
            {
                toCont = true;
                break;
            }
        }
        if (toCont)
            continue;
        DelTriangle& otr = state[oppTr];
        if (inCircumcircle(vertices[state[oppTr].p[0]], vertices[state[oppTr].p[1]], vertices[state[oppTr].p[2]],
                           vertices[p]) > 0)
        {
            // Delaunay violation: flip the shared edge. Build the two replacement
            // triangles (nt1, nt2) and rewire all four outer neighbor links.
            int32_t notPIndx = 0;
            for (; notPIndx < 3; ++notPIndx)
            {
                if (otr.p[notPIndx] != ctr.p[0] && otr.p[notPIndx] != ctr.p[1] && otr.p[notPIndx] != ctr.p[2])
                    break;
            }
            int32_t oppCed = state[oppTr].getEdId(ctr.p[(ced + 1) % 3], ctr.p[ced]);
            int32_t ntr1 = ctrid, ntr2 = oppTr;  // reuse the two slots
            DelTriangle nt1, nt2;
            nt1.p[0] = state[oppTr].p[notPIndx];
            nt1.p[1] = p;
            nt1.n[0] = ntr2;
            nt1.p[2] = ctr.p[ced];
            nt1.n[1] = ctr.n[(ced + 2) % 3];
            nt1.n[2] = otr.n[(oppCed + 1) % 3];
            if (nt1.n[2] != -1)
                for (uint32_t k = 0; k < 3; ++k)
                    if (state[nt1.n[2]].n[k] == oppTr)
                        state[nt1.n[2]].n[k] = ntr1;
            nt2.p[0] = p;
            nt2.p[1] = state[oppTr].p[notPIndx];
            nt2.n[0] = ntr1;
            nt2.p[2] = ctr.p[(ced + 1) % 3];
            nt2.n[1] = otr.n[(oppCed + 2) % 3];
            nt2.n[2] = ctr.n[(ced + 1) % 3];
            if (nt2.n[2] != -1)
                for (uint32_t k = 0; k < 3; ++k)
                    if (state[nt2.n[2]].n[k] == ctrid)
                        state[nt2.n[2]].n[k] = ntr2;
            state[ntr1] = nt1;
            state[ntr2] = nt2;
            // Re-check both new triangles.
            triangleToCheck.push(ntr1);
            triangleToCheck.push(ntr2);
        }
    }
}

/**
    Tests whether segment (a,b) properly crosses segment (es,ee).
    Asymmetric on purpose: strict test against line (a,b), non-strict (<=)
    against line (es,ee) — touching at an endpoint of (a,b) counts as a hit.
*/
bool edgeIsIntersected(const RVec2& a, const RVec2& b, const RVec2& es, const RVec2& ee)
{
    RVec2 t = b - a;
    cpp_rational temp = (es - a).cross(t) * (ee - a).cross(t);
    if (temp < 0)
    {
        t = es - ee;
        if ((a - ee).cross(t) * (b - ee).cross(t) <= 0)
            return true;
    }
    return false;
}

/**
    Re-triangulates the pseudo-polygon left after removing triangles crossed
    by constraint edge (ba, bb). Recursively picks the vertex 'c' whose
    circumcircle with (ba, bb) contains no other candidate, splits the rest
    into left/right sub-polygons, and emits triangles into 'output'.
    Neighbor links are NOT set here (caller runs reubildAdjacency).
*/
void triangulatePseudoPolygon(std::vector<RVec2>& vertices, int32_t ba, int32_t bb, std::vector<int32_t>& pseudo,
                              std::vector<DelTriangle>& output)
{
    if (pseudo.empty())
        return;
    int32_t c = 0;
    if (pseudo.size() > 1)
    {
        // Pick the Delaunay apex for base edge (ba, bb).
        for (uint32_t i = 1; i < pseudo.size(); ++i)
        {
            if (inCircumcircle(vertices[ba], vertices[bb], vertices[pseudo[c]], vertices[pseudo[i]]) > 0)
            {
                c = i;
            }
        }
        std::vector<int32_t> toLeft;
        std::vector<int32_t> toRight;
        for (int32_t t = 0; t < c; ++t)
        {
            toLeft.push_back(pseudo[t]);
        }
        for (size_t t = c + 1; t < pseudo.size(); ++t)
        {
            toRight.push_back(pseudo[t]);
        }
        if (toLeft.size() > 0)
            triangulatePseudoPolygon(vertices, ba, pseudo[c], toLeft, output);
        if (toRight.size() > 0)
            triangulatePseudoPolygon(vertices, pseudo[c], bb, toRight, output);
    }
    output.push_back(DelTriangle());
    output.back().p[0] = ba;
    output.back().p[1] = bb;
    output.back().p[2] = pseudo[c];
}

/**
    Forces constraint edge (edBeg, edEnd) into the triangulation: walks the
    corridor of triangles crossed by the edge, deletes them, collects the
    vertices above/below the edge, and re-triangulates both pseudo-polygons.
    No-op if the edge is already present or no crossed triangle is found.
    NOTE(review): inside the corridor walk, ctr.n[oed] is indexed without a
    -1 check (output[nextTriangle]); presumably the corridor can never exit
    the hull here — confirm.
*/
void insertEdge(std::vector<RVec2>& vertices, std::vector<DelTriangle>& output, int32_t edBeg, int32_t edEnd)
{
    // Already an edge of some triangle? Then nothing to do.
    bool hasEdge = false;
    for (auto& it : output)
    {
        for (uint32_t i = 0; i < 3; ++i)
            if ((it.p[i] == edBeg || it.p[i] == edEnd) && (it.p[(i + 1) % 3] == edBeg || it.p[(i + 1) % 3] == edEnd))
            {
                hasEdge = true;
            }
    }
    if (hasEdge)
        return;
    // Find the triangle at edBeg whose opposite edge is crossed by the constraint.
    int32_t startTriangle = -1;
    int32_t edg = -1;
    for (uint32_t i = 0; i < output.size(); ++i)
    {
        if (output[i].p[0] == -1)
            continue;
        if (output[i].p[0] == edBeg || output[i].p[1] == edBeg || output[i].p[2] == edBeg)
        {
            edg = output[i].getEdWP(edBeg);
            if (edgeIsIntersected(vertices[edBeg], vertices[edEnd], vertices[output[i].p[edg]],
                                  vertices[output[i].p[(edg + 1) % 3]]))
            {
                startTriangle = i;
                break;
            }
        }
    }
    if (startTriangle == -1)
    {
        return;
    }
    int32_t cvertex = edBeg;
    std::vector<int32_t> pointsAboveEdge;
    std::vector<int32_t> pointsBelowEdge;
    RVec2 vec = vertices[edEnd] - vertices[edBeg];
    // Seed the two side lists with the crossed edge's endpoints.
    if (vec.cross(vertices[output[startTriangle].p[edg]] - vertices[edBeg]) > 0)
    {
        pointsAboveEdge.push_back(output[startTriangle].p[edg]);
        pointsBelowEdge.push_back(output[startTriangle].p[(edg + 1) % 3]);
    }
    else
    {
        pointsBelowEdge.push_back(output[startTriangle].p[edg]);
        pointsAboveEdge.push_back(output[startTriangle].p[(edg + 1) % 3]);
    }
    // Walk the corridor of crossed triangles, deleting them as we go.
    while (1)
    {
        DelTriangle& ctr = output[startTriangle];
        int32_t oed = ctr.getEdWP(cvertex);
        int32_t nextTriangle = ctr.n[oed];
        if (output[nextTriangle].p[0] == edEnd || output[nextTriangle].p[1] == edEnd ||
            output[nextTriangle].p[2] == edEnd)
        {
            // Reached the far endpoint; delete the last two corridor triangles.
            ctr.p[0] = -1;
            output[nextTriangle].p[0] = -1;
            break;
        }
        DelTriangle& otr = output[nextTriangle];
        int32_t opp = otr.p[otr.getOppP(ctr.p[(oed + 1) % 3], ctr.p[oed % 3])];
        int32_t nextPoint = 0;
        // Classify the new apex and choose which side of the constraint to continue on.
        if (vec.cross((vertices[opp] - vertices[edBeg])) > 0)
        {
            pointsAboveEdge.push_back(opp);
            if (vec.cross(vertices[ctr.p[(oed + 1) % 3]] - vertices[edBeg]) > 0)
            {
                nextPoint = ctr.p[(oed + 1) % 3];
            }
            else
            {
                nextPoint = ctr.p[oed];
            }
        }
        else
        {
            pointsBelowEdge.push_back(opp);
            if (vec.cross(vertices[ctr.p[(oed + 1) % 3]] - vertices[edBeg]) < 0)
            {
                nextPoint = ctr.p[(oed + 1) % 3];
            }
            else
            {
                nextPoint = ctr.p[oed];
            }
        }
        startTriangle = nextTriangle;
        cvertex = nextPoint;
        ctr.p[0] = -1;
    }
    // Refill both sides of the constraint and rebuild adjacency from scratch.
    triangulatePseudoPolygon(vertices, edBeg, edEnd, pointsAboveEdge, output);
    std::reverse(pointsBelowEdge.begin(), pointsBelowEdge.end());
    triangulatePseudoPolygon(vertices, edEnd, edBeg, pointsBelowEdge, output);
    reubildAdjacency(output);
}

/**
    Builds a constrained Delaunay triangulation of one facet.
    Assumes the first three entries of 'edges' are the facet's own triangle
    (they seed the initial triangle); remaining edges insert their endpoints
    and are then forced in as constraints. Surviving triangles are appended
    to 'output'.
    \param vertices 3D vertex pool (projected internally via 'dr')
    \param edges    constraint edges, first three forming the base triangle
    \param output   receives the resulting triangles
    \param dr       projection direction/winding for the facet's plane
*/
void buildCDT(std::vector<RVec3>& vertices, std::vector<Edge>& edges, std::vector<DelTriangle>& output,
              ProjectionDirections dr)
{
    std::vector<DelTriangle> state;
    DelTriangle crt;
    std::vector<bool> added(vertices.size(), false);
    for (uint32_t i = 0; i < 3; ++i)
    {
        crt.p[i] = edges[i].s;
        added[edges[i].s] = true;
        crt.n[i] = -1;  // no neighbors yet
    }
    state.push_back(crt);
    // Project all vertices once, up front.
    std::vector<RVec2> p2d(vertices.size());
    for (uint32_t i = 0; i < vertices.size(); ++i)
    {
        p2d[i] = getProjectedPointWithWinding(vertices[i], dr);
    }
    for (size_t i = 0; i < edges.size(); ++i)
    {
        if (!added[edges[i].s])
        {
            insertPoint(p2d, state, edges[i].s, edges);
            added[edges[i].s] = true;
        }
        if (!added[edges[i].e])
        {
            insertPoint(p2d, state, edges[i].e, edges);
            added[edges[i].e] = true;
        }
        if (edges[i].s != edges[i].e)
        {
            insertEdge(p2d, state, edges[i].s, edges[i].e);
        }
    }
    // Keep only live triangles.
    for (uint32_t t = 0; t < state.size(); ++t)
    {
        if (state[t].p[0] != -1)
        {
            output.push_back(state[t]);
        }
    }
}
// Forward declaration; definition below. Collects intersection parameters of
// two (projected) segments into t1v/t2v.
int32_t intersectSegments(RVec3& s1, RVec3& e1, RVec3& s2, RVec3& e2, ProjectionDirections dir,
                          std::vector<cpp_rational>& t1v, std::vector<cpp_rational>& t2v);

/**
    Handles the coplanar case of triangle-triangle intersection: clips each
    triangle's edges against the other triangle in the common projection
    plane and appends the resulting segment endpoints (in pairs) to both
    triangles' stencils.
    Stencil layout (set up by cleanMesh): entries [0],[1],[3] are the three
    distinct corners of the facet triangle; entries beyond [5] are appended
    intersection-segment endpoint pairs.
*/
void getTriangleIntersectionCoplanar(uint32_t tr1, uint32_t tr2, std::vector<std::vector<RVec3> >& stencil,
                                     ProjectionDirections dr)
{
    // Per-edge intersection parameters: intr1[i] on tr1's edge i, intr2[j] on tr2's edge j.
    std::vector<cpp_rational> intr1[3];
    std::vector<cpp_rational> intr2[3];
    RVec3 p1[3];
    p1[0] = stencil[tr1][0];
    p1[1] = stencil[tr1][1];
    p1[2] = stencil[tr1][3];
    RVec3 p2[3];
    p2[0] = stencil[tr2][0];
    p2[1] = stencil[tr2][1];
    p2[2] = stencil[tr2][3];
    // All 9 edge-edge tests.
    for (uint32_t i = 0; i < 3; ++i)
    {
        for (uint32_t j = 0; j < 3; ++j)
        {
            intersectSegments(p1[i], p1[(i + 1) % 3], p2[j], p2[(j + 1) % 3], dr, intr1[i], intr2[j]);
        }
    }
    // Containment of each tr1 vertex in tr2, and vice versa.
    int32_t inRel1[3];
    for (uint32_t i = 0; i < 3; ++i)
    {
        inRel1[i] = isPointInside(getProjectedPointWithWinding(p2[0], dr), getProjectedPointWithWinding(p2[1], dr),
                                  getProjectedPointWithWinding(p2[2], dr), getProjectedPointWithWinding(p1[i], dr));
    }
    int32_t inRel2[3];
    for (uint32_t i = 0; i < 3; ++i)
    {
        inRel2[i] = isPointInside(getProjectedPointWithWinding(p1[0], dr), getProjectedPointWithWinding(p1[1], dr),
                                  getProjectedPointWithWinding(p1[2], dr), getProjectedPointWithWinding(p2[i], dr));
    }
    // Clip each tr1 edge against tr2; push the clipped sub-segment into tr2's stencil.
    for (uint32_t i = 0; i < 3; ++i)
    {
        if (inRel1[i] == INSIDE_TR && inRel1[(i + 1) % 3] == INSIDE_TR)
        {
            // Edge fully inside.
            stencil[tr2].push_back(p1[i]);
            stencil[tr2].push_back(p1[(i + 1) % 3]);
        }
        else
        {
            if (inRel1[i] == INSIDE_TR && intr1[i].size() == 1)
            {
                // Starts inside, exits once.
                stencil[tr2].push_back(p1[i]);
                stencil[tr2].push_back((p1[(i + 1) % 3] - p1[i]) * intr1[i][0] + p1[i]);
            }
            if (inRel1[(i + 1) % 3] == INSIDE_TR && intr1[i].size() == 1)
            {
                // Ends inside, enters once.
                stencil[tr2].push_back(p1[(i + 1) % 3]);
                stencil[tr2].push_back((p1[(i + 1) % 3] - p1[i]) * intr1[i][0] + p1[i]);
            }
            if (intr1[i].size() == 2)
            {
                // Crosses through: keep the interior chord.
                stencil[tr2].push_back((p1[(i + 1) % 3] - p1[i]) * intr1[i][0] + p1[i]);
                stencil[tr2].push_back((p1[(i + 1) % 3] - p1[i]) * intr1[i][1] + p1[i]);
            }
        }
    }
    // Symmetric pass: clip each tr2 edge against tr1.
    for (uint32_t i = 0; i < 3; ++i)
    {
        if (inRel2[i] == INSIDE_TR && inRel2[(i + 1) % 3] == INSIDE_TR)
        {
            stencil[tr1].push_back(p2[i]);
            stencil[tr1].push_back(p2[(i + 1) % 3]);
        }
        else
        {
            if (inRel2[i] == INSIDE_TR && intr2[i].size() == 1)
            {
                stencil[tr1].push_back(p2[i]);
                stencil[tr1].push_back((p2[(i + 1) % 3] - p2[i]) * intr2[i][0] + p2[i]);
            }
            if (inRel2[(i + 1) % 3] == INSIDE_TR && intr2[i].size() == 1)
            {
                stencil[tr1].push_back(p2[(i + 1) % 3]);
                stencil[tr1].push_back((p2[(i + 1) % 3] - p2[i]) * intr2[i][0] + p2[i]);
            }
            if (intr2[i].size() == 2)
            {
                stencil[tr1].push_back((p2[(i + 1) % 3] - p2[i]) * intr2[i][0] + p2[i]);
                stencil[tr1].push_back((p2[(i + 1) % 3] - p2[i]) * intr2[i][1] + p2[i]);
            }
        }
    }
}

/**
    Exact 3D triangle-triangle intersection (interval-overlap scheme in the
    style of Moller's triangle-triangle test, computed over rationals).
    Triangle corners are read from stencil[tr][0], [1], [3] (see stencil
    layout note above). Degenerate (zero-normal) triangles are skipped;
    coplanar pairs are delegated to getTriangleIntersectionCoplanar.
    On a proper crossing, the shared segment's endpoints are appended to
    BOTH stencils and 1 is returned; otherwise returns 0.
*/
int32_t getTriangleIntersection3d(uint32_t tr1, uint32_t tr2, std::vector<std::vector<RVec3> >& stencil,
                                  ProjectionDirections dr)
{
    RatPlane pl1(stencil[tr1][0], stencil[tr1][1], stencil[tr1][3]);
    if (pl1.n.isZero())
    {
        // tr1 is degenerate: try the other triangle as the reference plane.
        std::swap(tr1, tr2);
        pl1 = RatPlane(stencil[tr1][0], stencil[tr1][1], stencil[tr1][3]);
        if (pl1.n.isZero())
            return 0;
    }
    // Signed distances of tr2's corners to tr1's plane.
    cpp_rational d1 = pl1.distance(stencil[tr2][0]);
    cpp_rational d2 = pl1.distance(stencil[tr2][1]);
    cpp_rational d3 = pl1.distance(stencil[tr2][3]);
    int32_t sd1 = d1.sign();
    int32_t sd2 = d2.sign();
    int32_t sd3 = d3.sign();
    if (sd1 == 0 && sd2 == 0 && sd3 == 0)
    {
        // Exactly coplanar.
        getTriangleIntersectionCoplanar(tr1, tr2, stencil, dr);
        return 0;
    }
    /** All of tr2 strictly on one side: never intersected */
    if (sd1 < 0 && sd2 < 0 && sd3 < 0)
        return 0;
    if (sd1 > 0 && sd2 > 0 && sd3 > 0)
        return 0;
    // Reorder tr2's corners so tb2 is the lone vertex on the opposite side.
    RVec3 tb0 = stencil[tr2][0];
    RVec3 tb1 = stencil[tr2][1];
    RVec3 tb2 = stencil[tr2][3];
    if (sd1 * sd3 > 0)
    {
        std::swap(tb1, tb2);
        std::swap(d2, d3);
    }
    else
    {
        if (sd2 * sd3 > 0)
        {
            std::swap(tb0, tb2);
            std::swap(d1, d3);
        }
        else
        {
            if (sd3 == 0 && sd1 * sd2 < 0)
            {
                std::swap(tb0, tb2);
                std::swap(d1, d3);
            }
        }
    }
    // Same test with roles swapped: tr1's corners against tr2's plane.
    RatPlane pl2(stencil[tr2][0], stencil[tr2][1], stencil[tr2][3]);
    cpp_rational d21 = pl2.distance(stencil[tr1][0]);
    cpp_rational d22 = pl2.distance(stencil[tr1][1]);
    cpp_rational d23 = pl2.distance(stencil[tr1][3]);
    int32_t sd21 = d21.sign();
    int32_t sd22 = d22.sign();
    int32_t sd23 = d23.sign();
    if (sd21 < 0 && sd22 < 0 && sd23 < 0)
        return 0;
    if (sd21 > 0 && sd22 > 0 && sd23 > 0)
        return 0;
    RVec3 ta0 = stencil[tr1][0];
    RVec3 ta1 = stencil[tr1][1];
    RVec3 ta2 = stencil[tr1][3];
    if (sd21 * sd23 > 0)
    {
        std::swap(ta1, ta2);
        std::swap(d22, d23);
    }
    else
    {
        if (sd22 * sd23 > 0)
        {
            std::swap(ta0, ta2);
            std::swap(d21, d23);
        }
        else
        {
            if (sd23 == 0 && sd21 * sd22 < 0)
            {
                std::swap(ta0, ta2);
                std::swap(d21, d23);
            }
        }
    }
    //////////////////////////////////////////////////
    // Parameterize the plane-plane intersection line and compute each
    // triangle's overlap interval on it.
    RVec3 dir = ta2 - ta0;
    cpp_rational dirPlaneDot = dir.dot(pl2.n);
    RVec3 pointOnIntersectionLine;
    if (dirPlaneDot != 0)
    {
        pointOnIntersectionLine = ta0 - dir * (d21 / dirPlaneDot);
    }
    else
    {
        pointOnIntersectionLine = ta0;
    }
    RVec3 interLineDir = pl1.n.cross(pl2.n);
    cpp_rational sqd = interLineDir.dot(interLineDir);
    if (sqd.is_zero())
        return 0;  // parallel planes (shouldn't happen past the sign tests)
    cpp_rational t1p2 = (ta1 - pointOnIntersectionLine).dot(interLineDir) / sqd;
    cpp_rational t1p3 = (ta2 - pointOnIntersectionLine).dot(interLineDir) / sqd;
    cpp_rational t1p2param = t1p2;
    if (d22 != d23)
    {
        t1p2param = t1p2 + (t1p3 - t1p2) * (d22 / (d22 - d23));
    }
    // tr2's interval endpoints on the same line.
    t1p2 = (tb0 - pointOnIntersectionLine).dot(interLineDir) / sqd;
    t1p3 = (tb2 - pointOnIntersectionLine).dot(interLineDir) / sqd;
    cpp_rational t2p1param = t1p2;
    if (d1 != d3)
    {
        t2p1param = t1p2 + (t1p3 - t1p2) * d1 / (d1 - d3);
    }
    t1p2 = (tb1 - pointOnIntersectionLine).dot(interLineDir) / sqd;
    cpp_rational t2p2param = t1p2;
    if (d2 != d3)
    {
        t2p2param = t1p2 + (t1p3 - t1p2) * d2 / (d2 - d3);
    }
    // Normalize interval ordering, then intersect the two intervals.
    cpp_rational beg1 = 0;
    if (t1p2param < 0)
    {
        std::swap(beg1, t1p2param);
    }
    if (t2p2param < t2p1param)
    {
        std::swap(t2p2param, t2p1param);
    }
    cpp_rational minEnd = std::min(t1p2param, t2p2param);
    cpp_rational maxBeg = std::max(beg1, t2p1param);
    if (minEnd > maxBeg)
    {
        // Proper overlap: record the shared segment in both stencils.
        RVec3 p1 = pointOnIntersectionLine + interLineDir * maxBeg;
        RVec3 p2 = pointOnIntersectionLine + interLineDir * minEnd;
        stencil[tr1].push_back(p1);
        stencil[tr1].push_back(p2);
        stencil[tr2].push_back(p1);
        stencil[tr2].push_back(p2);
        return 1;
    }
    return 0;
}
int32_t intersectSegments(RVec3& s1, RVec3& e1, RVec3& s2, RVec3& e2, ProjectionDirections dir, std::vector<cpp_rational>& t1v, std::vector<cpp_rational>& t2v) { RVec2 s1p = getProjectedPointWithWinding(s1, dir); RVec2 e1p = getProjectedPointWithWinding(e1, dir); RVec2 s2p = getProjectedPointWithWinding(s2, dir); RVec2 e2p = getProjectedPointWithWinding(e2, dir); RVec2 dir1 = e1p - s1p; RVec2 dir2 = s2p - e2p; cpp_rational crs = dir1.cross(dir2); if (crs != 0) { cpp_rational c1 = s2p.x - s1p.x; cpp_rational c2 = s2p.y - s1p.y; cpp_rational det1 = c1 * dir2.y - c2 * dir2.x; cpp_rational det2 = dir1.x * c2 - dir1.y * c1; cpp_rational t1 = det1 / crs; cpp_rational t2 = det2 / crs; if (t1 > 0 && t1 < 1 && (t2 >= 0 && t2 <= 1)) { t1v.push_back(t1); } if (t2 > 0 && t2 < 1 && (t1 >= 0 && t1 <= 1)) { t2v.push_back(t2); } } else { if (dir1.cross(s2p - s1p) == 0) { if (dir1.x != 0) { cpp_rational t1 = (s2p.x - s1p.x) / dir1.x; cpp_rational t2 = (e2p.x - s1p.x) / dir1.x; if (t1 > 0 && t1 < 1) t1v.push_back(t1); if (t2 > 0 && t2 < 1) t1v.push_back(t2); } else { if (dir1.y != 0) { cpp_rational t1 = (s2p.y - s1p.y) / dir1.y; cpp_rational t2 = (e2p.y - s1p.y) / dir1.y; if (t1 > 0 && t1 < 1) t1v.push_back(t1); if (t2 > 0 && t2 < 1) t1v.push_back(t2); } } } if (dir2.cross(s1p - s2p) == 0) { dir2 = e2p - s2p; if (dir2.x != 0) { cpp_rational t1 = (s1p.x - s2p.x) / dir2.x; cpp_rational t2 = (e1p.x - s2p.x) / dir2.x; if (t1 > 0 && t1 < 1) t2v.push_back(t1); if (t2 > 0 && t2 < 1) t2v.push_back(t2); } else { if (dir2.y != 0) { cpp_rational t1 = (s1p.y - s2p.y) / dir2.y; cpp_rational t2 = (e1p.y - s2p.y) / dir2.y; if (t1 > 0 && t1 < 1) t2v.push_back(t1); if (t2 > 0 && t2 < 1) t2v.push_back(t2); } } } } return 1; } struct RVec3Comparer { bool operator()(const RVec3& a, const RVec3& b) const { if (a.x < b.x) return true; if (a.x > b.x) return false; if (a.y < b.y) return true; if (a.y > b.y) return false; if (a.z < b.z) return true; return false; } }; void getBarycentricCoords(NvVec2& a, 
NvVec2& b, NvVec2& c, NvVec2& p, float& u, float& v) { NvVec3 v1(b.x - a.x, c.x - a.x, a.x - p.x); NvVec3 v2(b.y - a.y, c.y - a.y, a.y - p.y); NvVec3 resl = v1.cross(v2); u = resl.x / resl.z; v = resl.y / resl.z; } Mesh* MeshCleanerImpl::cleanMesh(const Mesh* mesh) { /** ======= Get mesh data =========== */ std::vector<Vertex> vertices; std::vector<Edge> edges; std::vector<Facet> facets; vertices.resize(mesh->getVerticesCount()); edges.resize(mesh->getEdgesCount()); facets.resize(mesh->getFacetCount()); nvidia::NvBounds3 bnd; bnd.setEmpty(); for (uint32_t i = 0; i < mesh->getVerticesCount(); ++i) { vertices[i] = mesh->getVertices()[i]; bnd.include(toNvShared(vertices[i].p)); } for (uint32_t i = 0; i < mesh->getEdgesCount(); ++i) { edges[i] = mesh->getEdges()[i]; } for (uint32_t i = 0; i < mesh->getFacetCount(); ++i) { facets[i] = mesh->getFacetsBuffer()[i]; } //====================================== /** Transform vertices to fit unit cube and snap them to grid. **/ float scale = 1.0f / bnd.getExtents().abs().maxElement(); int32_t gridSize = 10000; // Grid resolution to which vertices position will be snapped. 
for (uint32_t i = 0; i < mesh->getVerticesCount(); ++i) { vertices[i].p = (vertices[i].p - fromNvShared(bnd.minimum)) * scale; vertices[i].p.x = std::floor(vertices[i].p.x * gridSize) / gridSize; vertices[i].p.y = std::floor(vertices[i].p.y * gridSize) / gridSize; vertices[i].p.z = std::floor(vertices[i].p.z * gridSize) / gridSize; } std::vector<std::vector<RVec3> > triangleStencil(facets.size()); std::vector<NvVec3> facetsNormals(facets.size()); std::vector<NvBounds3> facetBound(facets.size()); for (uint32_t tr1 = 0; tr1 < facets.size(); ++tr1) { if (facets[tr1].edgesCount != 3) { return nullptr; } int32_t fed = facets[tr1].firstEdgeNumber; triangleStencil[tr1].push_back(vertices[edges[fed].s].p); triangleStencil[tr1].push_back(vertices[edges[fed].e].p); triangleStencil[tr1].push_back(vertices[edges[fed + 1].s].p); triangleStencil[tr1].push_back(vertices[edges[fed + 1].e].p); triangleStencil[tr1].push_back(vertices[edges[fed + 2].s].p); triangleStencil[tr1].push_back(vertices[edges[fed + 2].e].p); facetBound[tr1].setEmpty(); facetBound[tr1].include(toNvShared(vertices[edges[fed].s].p)); facetBound[tr1].include(toNvShared(vertices[edges[fed].e].p)); facetBound[tr1].include(toNvShared(vertices[edges[fed + 2].s].p)); facetBound[tr1].fattenFast(0.001f); facetsNormals[tr1] = toNvShared(vertices[edges[fed + 1].s].p - vertices[edges[fed].s].p) .cross(toNvShared(vertices[edges[fed + 2].s].p - vertices[edges[fed].s].p)); } /** Build intersections between all pairs of triangles. 
*/ for (uint32_t tr1 = 0; tr1 < facets.size(); ++tr1) { if (triangleStencil[tr1].empty()) continue; for (uint32_t tr2 = tr1 + 1; tr2 < facets.size(); ++tr2) { if (triangleStencil[tr2].empty()) continue; if (facetBound[tr1].intersects(facetBound[tr2]) == false) continue; getTriangleIntersection3d(tr1, tr2, triangleStencil, getProjectionDirection(facetsNormals[tr1])); } } /** Reintersect all segments */ for (uint32_t tr = 0; tr < triangleStencil.size(); ++tr) { std::vector<RVec3>& ctr = triangleStencil[tr]; std::vector<std::vector<cpp_rational> > perSegmentInters(ctr.size() / 2); for (uint32_t sg1 = 6; sg1 < ctr.size(); sg1 += 2) { for (uint32_t sg2 = sg1 + 2; sg2 < ctr.size(); sg2 += 2) { intersectSegments(ctr[sg1], ctr[sg1 + 1], ctr[sg2], ctr[sg2 + 1], getProjectionDirection(facetsNormals[tr]), perSegmentInters[sg1 / 2], perSegmentInters[sg2 / 2]); } } std::vector<RVec3> newStencil; newStencil.reserve(ctr.size()); for (uint32_t i = 0; i < ctr.size(); i += 2) { int32_t csm = i / 2; if (perSegmentInters[csm].size() == 0) { newStencil.push_back(ctr[i]); newStencil.push_back(ctr[i + 1]); } else { cpp_rational current = 0; newStencil.push_back(ctr[i]); std::sort(perSegmentInters[csm].begin(), perSegmentInters[csm].end()); for (size_t j = 0; j < perSegmentInters[csm].size(); ++j) { if (perSegmentInters[csm][j] > current) { current = perSegmentInters[csm][j]; RVec3 pnt = (ctr[i + 1] - ctr[i]) * current + ctr[i]; newStencil.push_back(pnt); newStencil.push_back(pnt); } } newStencil.push_back(ctr[i + 1]); } } ctr = newStencil; } std::vector<RVec3> finalPoints; std::vector<std::vector<Edge> > tsten(facets.size()); { std::map<RVec3, uint32_t, RVec3Comparer> mapping; for (uint32_t tr1 = 0; tr1 < triangleStencil.size(); ++tr1) { for (uint32_t j = 0; j < triangleStencil[tr1].size(); j += 2) { auto it = mapping.find(triangleStencil[tr1][j]); int32_t pt = 0; if (it == mapping.end()) { mapping[triangleStencil[tr1][j]] = finalPoints.size(); pt = finalPoints.size(); 
finalPoints.push_back(triangleStencil[tr1][j]); } else { pt = it->second; } Edge newed; newed.s = pt; it = mapping.find(triangleStencil[tr1][j + 1]); if (it == mapping.end()) { mapping[triangleStencil[tr1][j + 1]] = finalPoints.size(); pt = finalPoints.size(); finalPoints.push_back(triangleStencil[tr1][j + 1]); } else { pt = it->second; } newed.e = pt; bool hasNewEdge = false; for (uint32_t e = 0; e < tsten[tr1].size(); ++e) { if (tsten[tr1][e].s == newed.s && tsten[tr1][e].e == newed.e) { hasNewEdge = true; break; } if (tsten[tr1][e].e == newed.s && tsten[tr1][e].s == newed.e) { hasNewEdge = true; break; } } if (!hasNewEdge) tsten[tr1].push_back(newed); } } } /** Build constrained DT */ std::vector<DelTriangle> trs; for (uint32_t i = 0; i < tsten.size(); ++i) { if (tsten[i].size() < 3) continue; if (tsten[i].size() > 3) { int32_t oldSize = trs.size(); buildCDT(finalPoints, tsten[i], trs, getProjectionDirection(facetsNormals[i])); for (uint32_t k = oldSize; k < trs.size(); ++k) trs[k].parentTriangle = i; } else { trs.push_back(DelTriangle()); trs.back().parentTriangle = i; for (uint32_t v = 0; v < 3; ++v) trs.back().p[v] = tsten[i][v].s; } } /** Remove 'deleted' triangles from array. */ { std::vector<DelTriangle> trstemp; trstemp.reserve(trs.size()); for (uint32_t i = 0; i < trs.size(); ++i) { if (trs[i].p[0] != -1) trstemp.push_back(trs[i]); } trs = trstemp; } /** Filter exterior surface */ std::vector<bool> fillingMask(trs.size(), false); std::map<std::pair<int32_t, int32_t>, int32_t> edgeMap; std::vector<std::vector<int32_t> > edgeToTriangleMapping; for (uint32_t i = 0; i < trs.size(); ++i) { if (trs[i].p[0] == -1) continue; if (trs[i].p[0] == trs[i].p[1] || trs[i].p[2] == trs[i].p[1] || trs[i].p[2] == trs[i].p[0]) { trs[i].p[0] = -1; continue; } #if 0 // Filter null-area triangles. 
if ((finalPoints[trs[i].p[1]] - finalPoints[trs[i].p[0]]).cross(finalPoints[trs[i].p[2]] - finalPoints[trs[i].p[0]]).isZero()) { trs[i].p[0] = -1; continue; } #endif for (uint32_t k = 0; k < 3; ++k) { int32_t es = trs[i].p[k]; int32_t ee = trs[i].p[(k + 1) % 3]; if (es > ee) { std::swap(es, ee); } auto pr = std::make_pair(es, ee); auto iter = edgeMap.find(pr); if (iter == edgeMap.end()) { edgeMap[pr] = edgeToTriangleMapping.size(); trs[i].n[k] = edgeToTriangleMapping.size(); edgeToTriangleMapping.resize(edgeToTriangleMapping.size() + 1); edgeToTriangleMapping.back().push_back(i); } else { for (uint32_t j = 0; j < edgeToTriangleMapping[iter->second].size(); ++j) { if (trs[edgeToTriangleMapping[iter->second][j]].compare(trs[i])) { trs[i].p[0] = -1; break; } } if (trs[i].p[0] != -1) { trs[i].n[k] = iter->second; edgeToTriangleMapping[iter->second].push_back(i); } } } } std::queue<int32_t> trque; float maxx = -1000; int32_t best = 0; for (uint32_t i = 0; i < trs.size(); ++i) { if (trs[i].p[0] == -1) continue; float m = std::max( finalPoints[trs[i].p[0]].x.convert_to<float>(), std::max(finalPoints[trs[i].p[1]].x.convert_to<float>(), finalPoints[trs[i].p[2]].x.convert_to<float>())); if (m > maxx && facetsNormals[trs[i].parentTriangle].x > 0) { maxx = m; best = i; } } if (!trs.empty()) { trque.push(best); } while (!trque.empty()) { int32_t trid = trque.front(); fillingMask[trid] = true; DelTriangle& tr = trs[trque.front()]; trque.pop(); for (uint32_t ed = 0; ed < 3; ++ed) { auto& tlist = edgeToTriangleMapping[tr.n[ed]]; if (tlist.size() == 2) { for (uint32_t k = 0; k < tlist.size(); ++k) { int32_t to = tlist[k]; if (to != trid && !fillingMask[to] && edgeToTriangleMapping[trs[to].n[0]].size() > 0 && edgeToTriangleMapping[trs[to].n[1]].size() > 0 && edgeToTriangleMapping[trs[to].n[2]].size() > 0) { trque.push(tlist[k]); fillingMask[tlist[k]] = true; } } } if (tlist.size() > 2) { int32_t bestPath = (tlist[0] == trid) ? 
tlist[1] : tlist[0]; RVec3 start = finalPoints[trs[trid].p[ed]]; RVec3 axis = finalPoints[trs[trid].p[(ed + 1) % 3]] - start; RVec3 nAxis = finalPoints[trs[trid].p[(ed + 2) % 3]] - start; RVec3 normal = axis.cross(nAxis); uint32_t op = trs[bestPath].getOppPoint(trs[trid].p[ed], trs[trid].p[(ed + 1) % 3]); RVec3 dir2 = (finalPoints[op] - start); RVec3 normal2 = dir2.cross(axis); cpp_rational bestDir = normal.cross(normal2).dot(axis); cpp_rational oldDist = normal2.dot(normal2); for (uint32_t k = 0; k < tlist.size(); ++k) { if (tlist[k] == trid) continue; op = trs[tlist[k]].getOppPoint(trs[trid].p[ed], trs[trid].p[(ed + 1) % 3]); dir2 = (finalPoints[op] - start); normal2 = dir2.cross(axis); cpp_rational newOne = normal.cross(normal2).dot(axis); if (newOne * oldDist < bestDir * normal2.dot(normal2)) { oldDist = normal2.dot(normal2); bestPath = tlist[k]; bestDir = newOne; } } if (!fillingMask[bestPath] && edgeToTriangleMapping[trs[bestPath].n[0]].size() > 0 && edgeToTriangleMapping[trs[bestPath].n[1]].size() > 0 && edgeToTriangleMapping[trs[bestPath].n[2]].size() > 0) { trque.push(bestPath); fillingMask[bestPath] = true; } } edgeToTriangleMapping[tr.n[ed]].clear(); } } for (uint32_t id = 0; id < trs.size(); ++id) { if (!fillingMask[id]) { trs[id].p[0] = -1; // Remove triangle } } ///////////////////////////////////////////////////////////////////////////////////////////// std::vector<NvVec3> newVertices; newVertices.resize(finalPoints.size()); for (uint32_t i = 0; i < finalPoints.size(); ++i) { newVertices[i].x = finalPoints[i].x.convert_to<float>(); newVertices[i].y = finalPoints[i].y.convert_to<float>(); newVertices[i].z = finalPoints[i].z.convert_to<float>(); } /** Rescale mesh to initial coordinates. 
*/ for (uint32_t i = 0; i < finalPoints.size(); ++i) { newVertices[i] = newVertices[i] * (1.0f / scale) + bnd.minimum; } for (uint32_t i = 0; i < vertices.size(); ++i) { vertices[i].p = vertices[i].p * (1.0f / scale) + fromNvShared(bnd.minimum); } std::vector<Triangle> result; result.reserve(trs.size()); { std::vector<NvVec2> projectedTriangles(facets.size() * 3); std::vector<Vertex> normalTriangles(facets.size() * 3); for (uint32_t i = 0; i < facets.size(); ++i) { for (uint32_t k = 0; k < 3; ++k) { normalTriangles[i * 3 + k] = vertices[edges[facets[i].firstEdgeNumber + k].s]; projectedTriangles[i * 3 + k] = getProjectedPointWithWinding( vertices[edges[facets[i].firstEdgeNumber + k].s].p, getProjectionDirection(facetsNormals[i])).toVec2(); } } for (uint32_t i = 0; i < trs.size(); ++i) { if (trs[i].p[0] == -1) continue; int32_t id = 0; int32_t parentTriangle = trs[i].parentTriangle; float u = 0, v = 0; result.resize(result.size() + 1); result.back().materialId = facets[parentTriangle].materialId; result.back().smoothingGroup = facets[parentTriangle].smoothingGroup; for (auto vert : { &result.back().a, &result.back().b, &result.back().c }) { toNvShared(vert->p) = newVertices[trs[i].p[id]]; NvVec2 p = getProjectedPointWithWinding(vert->p, getProjectionDirection(facetsNormals[parentTriangle])).toVec2(); getBarycentricCoords(projectedTriangles[parentTriangle * 3], projectedTriangles[parentTriangle * 3 + 1], projectedTriangles[parentTriangle * 3 + 2], p, u, v); vert->uv[0] = (1 - u - v) * normalTriangles[parentTriangle * 3].uv[0] + u * normalTriangles[parentTriangle * 3 + 1].uv[0] + v * normalTriangles[parentTriangle * 3 + 2].uv[0]; vert->n = (1 - u - v) * normalTriangles[parentTriangle * 3].n + u * normalTriangles[parentTriangle * 3 + 1].n + v * normalTriangles[parentTriangle * 3 + 2].n; ++id; } } } /** Reuse old buffers to create Mesh */ std::vector<NvcVec3> newMeshVertices(result.size() * 3); std::vector<NvcVec3> newMeshNormals(result.size() * 3); std::vector<NvcVec2> 
newMeshUvs(result.size() * 3); std::vector<int32_t> newMaterialIds(result.size()); std::vector<int32_t> newSmoothingGroups(result.size()); for (uint32_t i = 0; i < result.size(); ++i) { Vertex* arr[3] = { &result[i].a, &result[i].b, &result[i].c }; for (uint32_t k = 0; k < 3; ++k) { newMeshVertices[i * 3 + k] = arr[k]->p; newMeshNormals[i * 3 + k] = arr[k]->n; newMeshUvs[i * 3 + k] = arr[k]->uv[0]; } } std::vector<uint32_t> serializedIndices; serializedIndices.reserve(result.size() * 3); int32_t cindex = 0; for (uint32_t i = 0; i < result.size(); ++i) { newMaterialIds[i] = result[i].materialId; newSmoothingGroups[i] = result[i].smoothingGroup; for (uint32_t pi = 0; pi < 3; ++pi) serializedIndices.push_back(cindex++); } MeshImpl* rMesh = new MeshImpl(newMeshVertices.data(), newMeshNormals.data(), newMeshUvs.data(), static_cast<uint32_t>(newMeshVertices.size()), serializedIndices.data(), static_cast<uint32_t>(serializedIndices.size())); rMesh->setMaterialId(newMaterialIds.data()); rMesh->setSmoothingGroup(newSmoothingGroups.data()); return rMesh; } void MeshCleanerImpl::release() { delete this; }
53,514
C++
29.650057
134
0.461038
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/NvBlastExtAuthoringMeshNoiser.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #ifndef NVBLASTEXTAUTHORINGMESHNOISER_H #define NVBLASTEXTAUTHORINGMESHNOISER_H #include <vector> #include <map> #include "NvBlastExtAuthoringInternalCommon.h" namespace Nv { namespace Blast { class SimplexNoise; /** Structure used on tesselation stage. Maps edge to two neighbor triangles */ struct EdgeToTriangles { int32_t tr[2]; int32_t c; EdgeToTriangles() { c = 0; } /** Add triangle to edge. Should not be called more than twice for one edge!!!!. 
*/ void add(int32_t t) { tr[c] = t; ++c; } /** Replaces mapping from one triangle to another. */ void replace(int32_t from, int32_t to) { if (tr[0] == from) { tr[0] = to; } else { if (c == 2 && tr[1] == from) { tr[1] = to; } } } /** Get triangle which is mapped by this edge and which index is different than provided. */ int32_t getNot(int32_t id) { if (tr[0] != id) { return tr[0]; } if (c == 2 && tr[1] != id) { return tr[1]; } return -1; } }; /** Tool for graphic mesh tesselation and adding noise to internal surface. Each triangle must have initialized Triangle::userInfo field (0 for external surface triangles and != 0 for internal) */ class MeshNoiser { public: MeshNoiser() { reset(); } void reset(); /** Edge flags */ enum EdgeFlag { INTERNAL_EDGE, EXTERNAL_BORDER_EDGE, INTERNAL_BORDER_EDGE, EXTERNAL_EDGE, NONE }; /** Set mesh to tesselate and apply noise */ void setMesh(const std::vector<Triangle>& mesh); /** Tesselate internal surface. \param[in] maxLen - maximal length of edge on internal surface. */ void tesselateInternalSurface(float maxLen); /** Apply noise to internal surface. Must be called only after tesselation!!! \param[in] noise - noise generator \param[in] falloff - damping of noise around of external surface \param[in] relaxIterations - number of smoothing iterations before applying noise \param[in] relaxFactor - amount of smooting before applying noise. */ void applyNoise(SimplexNoise& noise, float falloff, int32_t relaxIterations, float relaxFactor); std::vector<Triangle> getMesh(); private: nvidia::NvVec3 mOffset; float mScale; bool isTesselated; /** Mesh data */ std::vector<Vertex> mVertices; std::vector<TriangleIndexed> mTriangles; std::vector<Edge> mEdges; std::map<Vertex, int32_t, VrtComp> mVertMap; std::map<Edge, int32_t> mEdgeMap; /** Final triangles. 
*/ std::vector<Triangle> mResultTriangles; int32_t addVerticeIfNotExist(const Vertex& p); int32_t addEdge(const Edge& e); int32_t findEdge(const Edge& e); void collapseEdge(int32_t id); void divideEdge(int32_t id); void updateVertEdgeInfo(); void updateEdgeTriangleInfo(); void relax(int32_t iterations, float factor, std::vector<Vertex>& vertices); void recalcNoiseDirs(); std::vector<bool> mRestrictionFlag; std::vector<EdgeFlag> mEdgeFlag; std::vector<EdgeToTriangles> mTrMeshEdToTr; std::vector<int32_t> mVertexValence; std::vector<std::vector<int32_t> > mVertexToTriangleMap; std::vector<float> mVerticesDistances; std::vector<nvidia::NvVec3> mVerticesNormalsSmoothed; std::vector<uint32_t> mPositionMappedVrt; std::vector<std::vector<int32_t> > mGeometryGraph; void prebuildEdgeFlagArray(); void computePositionedMapping(); void computeFalloffAndNormals(); void prebuildTesselatedTriangles(); }; } // namespace Blast } // namespace Nv #endif // ! NVBLASTEXTAUTHORINGMESHNOISER_H
7,224
C
36.435233
135
0.500415
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/NvBlastExtAuthoringTriangulator.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. 
// This warning arises when using some stl containers with older versions of VC // c:\program files (x86)\microsoft visual studio 12.0\vc\include\xtree(1826): warning C4702: unreachable code #include "NvPreprocessor.h" #if NV_VC && NV_VC < 14 #pragma warning(disable : 4702) #endif #include "NvBlastExtAuthoringTriangulator.h" #include "NvBlastExtAuthoringMesh.h" #include "NvBlastExtAuthoringTypes.h" #include "NvPreprocessor.h" #include "NvBlastExtAuthoringBooleanToolImpl.h" #include <NvBlastAssert.h> #include <NvBlastNvSharedHelpers.h> #include <math.h> #include <algorithm> #include <list> #include <queue> #include <set> #include <vector> using nvidia::NvVec2; using nvidia::NvVec3; namespace Nv { namespace Blast { // used with ear clipping algorithm to deal with floating point precision artifacts for nearly co-linear points #define MIN_ANGLE (0.0001f) // helper for ear clipping algorithm // holds the vertex indices for the previous and next vertex in the facet // along with the scaled area of the triangle defined by the 3 vertices struct AdjVertInfo { uint32_t prev; uint32_t next; float scaledArea; }; NV_FORCE_INLINE bool compareTwoFloats(float a, float b) { return std::abs(b - a) <= FLT_EPSILON * std::abs(b + a); } NV_FORCE_INLINE bool compareTwoVertices(const NvVec3& a, const NvVec3& b) { return compareTwoFloats(a.x, b.x) && compareTwoFloats(a.y, b.y) && compareTwoFloats(a.z, b.z); } NV_FORCE_INLINE bool compareTwoVertices(const NvVec2& a, const NvVec2& b) { return compareTwoFloats(a.x, b.x) && compareTwoFloats(a.y, b.y); } NV_FORCE_INLINE float getRotation(const NvVec2& a, const NvVec2& b) { return a.x * b.y - a.y * b.x; } NV_FORCE_INLINE bool pointInside( const NvVec2& ba, const NvVec2& cb, const NvVec2& ac, const NvVec2& a, const NvVec2& b, const NvVec2& c, const NvVec2& pnt ) { // Co-positional verts are not considered inside because that would break the exterior of the facet if (compareTwoVertices(a, pnt) || compareTwoVertices(b, pnt) || compareTwoVertices(c, 
pnt)) { return false; } const float v1 = getRotation(ba, (pnt - a).getNormalized()); const float v2 = getRotation(cb, (pnt - b).getNormalized()); const float v3 = getRotation(ac, (pnt - c).getNormalized()); // If the sign of all angles match, then the point is inside // A 0 angle is considered inside because otherwise verts would get dropped during triangulation return (v1 >= -MIN_ANGLE && v2 >= -MIN_ANGLE && v3 >= -MIN_ANGLE) || (v1 <= MIN_ANGLE && v2 <= MIN_ANGLE && v3 <= MIN_ANGLE); } static void updatePotentialEar( uint32_t curr, const Vertex* vert, const ProjectionDirections& dir, const std::map<uint32_t, AdjVertInfo>& adjVertInfoMap, const std::list<uint32_t>& reflexVerts, std::list<uint32_t>& potentialEars ) { // remove from potential list if it exists already // it will be added back if it is still a valid potential ear const auto itr = std::find(potentialEars.begin(), potentialEars.end(), curr); if (itr != potentialEars.end()) { potentialEars.erase(itr); } // doing it this way so the map can be passed as a const reference, but it should always be fully populated const auto mapItr = adjVertInfoMap.find(curr); if (mapItr == adjVertInfoMap.end()) { NVBLAST_ASSERT_WITH_MESSAGE(false, "this should never happen"); return; } // only convex verts need to be considered for potential ears const AdjVertInfo& adjVertInfo = mapItr->second; if (adjVertInfo.scaledArea <= 0.0f) { return; } // only need to check against reflex verts to see if they are inside potential ears // convex verts can't be inside potential ears if (reflexVerts.size()) { const Vertex cV = vert[curr]; const Vertex pV = vert[adjVertInfo.prev]; const Vertex nV = vert[adjVertInfo.next]; const NvVec2 cVp = getProjectedPoint(cV.p, dir); const NvVec2 pVp = getProjectedPoint(pV.p, dir); const NvVec2 nVp = getProjectedPoint(nV.p, dir); // if there are no other verts inside, then it is a potential ear const NvVec2 ba = (nVp - cVp).getNormalized(); const NvVec2 cb = (pVp - nVp).getNormalized(); const NvVec2 ac 
= (cVp - pVp).getNormalized(); for (uint32_t vrt : reflexVerts) { // ignore reflex verts that are part of the tri being tested if (vrt == adjVertInfo.prev || vrt == adjVertInfo.next) { continue; } const NvVec2 pnt = getProjectedPoint(vert[vrt].p, dir); if (pointInside(ba, cb, ac, cVp, nVp, pVp, pnt)) { return; } } } potentialEars.push_back(curr); } static void updateVertData( uint32_t curr, uint32_t prev, uint32_t next, const Vertex* vert, const ProjectionDirections& dir, std::map<uint32_t, AdjVertInfo>& adjVertInfoMap, std::list<uint32_t>& reflexVerts ) { // remove the index from the reflex list if there is already an entry for it // it will be added back if it is still a reflex vertex const auto reflexItr = std::find(reflexVerts.begin(), reflexVerts.end(), curr); if (reflexItr != reflexVerts.end()) { reflexVerts.erase(reflexItr); } // if next == prev it isn't a valid triangle // this will happen when the facet has less than 3 verts in it // no need to add them as reflex verts at that point, the algorithm is finishing up the final pass float scaledArea = 0.0f; if (prev != next) { const Vertex cV = vert[curr]; const Vertex pV = vert[prev]; const Vertex nV = vert[next]; const NvVec2 cVp = getProjectedPoint(cV.p, dir); const NvVec2 pVp = getProjectedPoint(pV.p, dir); const NvVec2 nVp = getProjectedPoint(nV.p, dir); const NvVec2 prevEdge = (cVp - pVp); const NvVec2 nextEdge = (nVp - cVp); // use normalized vectors to get a better calc for the angle between them float rot = getRotation(prevEdge.getNormalized(), nextEdge.getNormalized()); if (dir & OPPOSITE_WINDING) rot = -rot; if (rot > MIN_ANGLE) { // this is a valid convex vertex, calculate 2 * area (used for sorting later) // actual area isn't needed because it is only used to compare with other ears, so relative numbers are fine scaledArea = getRotation(prevEdge, nextEdge); if (dir & OPPOSITE_WINDING) scaledArea = -scaledArea; } else { // the angle is roughly 180 or greater, consider it a reflex vertex 
reflexVerts.push_back(curr); } } // the scaled area will be used to sort potential ears later adjVertInfoMap[curr] = {prev, next, scaledArea}; } void Triangulator::triangulatePolygonWithEarClipping(const std::vector<uint32_t>& inputPolygon, const Vertex* vert, const ProjectionDirections& dir) { uint32_t vCount = static_cast<uint32_t>(inputPolygon.size()); if (vCount < 3) { return; } // High level of ear clipping algorithm: // // - find potential ears (3 consecutive verts that form a triangle fully inside the facet with no other points from the facet inside or on an edge) // while (potential ears) // - sort the potential ears by area // - add tri formed by largest ear to output and remove vert from the tip of the ear from the facet // - update potential ears for remaining 2 verts in the tri // // This will ensure that no sliver triangles are created // start by building up vertex data and a list of reflex (interior angle >= 180) verts std::list<uint32_t> reflexVerts; std::list<uint32_t> potentialEars; std::map<uint32_t, AdjVertInfo> adjVertInfoMap; for (uint32_t curr = 0; curr < vCount; curr++) { const uint32_t prev = (curr == 0) ? vCount - 1 : curr - 1; const uint32_t next = (curr == vCount - 1) ? 
0 : curr + 1; const uint32_t currIdx = inputPolygon[curr]; const uint32_t prevIdx = inputPolygon[prev]; const uint32_t nextIdx = inputPolygon[next]; updateVertData(currIdx, prevIdx, nextIdx, vert, dir, adjVertInfoMap, reflexVerts); } // build the list of potential ears defined by convex verts by checking any reflex vert is inside for (auto pair : adjVertInfoMap) { // if a vert is not a reflex, it must be convex and should be considered as an ear const uint32_t currIdx = pair.first; if (std::find(reflexVerts.begin(), reflexVerts.end(), currIdx) == reflexVerts.end()) { updatePotentialEar(currIdx, vert, dir, adjVertInfoMap, reflexVerts, potentialEars); } } // descending sort by scaled area auto compArea = [&adjVertInfoMap](const uint32_t& a, const uint32_t& b) -> bool { return (adjVertInfoMap[a].scaledArea > adjVertInfoMap[b].scaledArea); }; while (potentialEars.size()) { // sort the potential ear list based on the area of the triangles they form potentialEars.sort(compArea); // add the largest triangle to the output const uint32_t curr = potentialEars.front(); const AdjVertInfo& adjVertInfo = adjVertInfoMap[curr]; mBaseMeshTriangles.push_back(TriangleIndexed(curr, adjVertInfo.prev, adjVertInfo.next)); // remove the ear tip from the potential ear list potentialEars.pop_front(); // update data for the other 2 verts involved const uint32_t prevPrev = adjVertInfoMap[adjVertInfo.prev].prev; const uint32_t nextNext = adjVertInfoMap[adjVertInfo.next].next; // vert data must be updated first for both updateVertData(adjVertInfo.prev, prevPrev, adjVertInfo.next, vert, dir, adjVertInfoMap, reflexVerts); updateVertData(adjVertInfo.next, adjVertInfo.prev, nextNext, vert, dir, adjVertInfoMap, reflexVerts); // then potential ear list updatePotentialEar(adjVertInfo.prev, vert, dir, adjVertInfoMap, reflexVerts, potentialEars); updatePotentialEar(adjVertInfo.next, vert, dir, adjVertInfoMap, reflexVerts, potentialEars); } } struct LoopInfo { LoopInfo() { used = false; } NvVec3 normal; 
float area; int32_t index; bool used; bool operator<(const LoopInfo& b) const { return area < b.area; } }; int32_t unitePolygons(std::vector<uint32_t>& externalLoop, std::vector<uint32_t>& internalLoop, Vertex* vrx, const ProjectionDirections& dir) { if (externalLoop.size() < 3 || internalLoop.size() < 3) return 1; /** Find point with maximum x-coordinate */ float x_max = -MAXIMUM_EXTENT; int32_t mIndex = -1; for (uint32_t i = 0; i < internalLoop.size(); ++i) { float nx = getProjectedPoint(vrx[internalLoop[i]].p, dir).x; if (nx > x_max) { mIndex = i; x_max = nx; } } if (mIndex == -1) { return 1; } /** Search for base point on external loop */ float minX = MAXIMUM_EXTENT; int32_t vrtIndex = -1; bool isFromBuffer = 0; NvVec2 holePoint = getProjectedPoint(vrx[internalLoop[mIndex]].p, dir); NvVec2 computedPoint; for (uint32_t i = 0; i < externalLoop.size(); ++i) { int32_t nx = (i + 1) % externalLoop.size(); NvVec2 pnt1 = getProjectedPoint(vrx[externalLoop[i]].p, dir); NvVec2 pnt2 = getProjectedPoint(vrx[externalLoop[nx]].p, dir); if (pnt1.x < x_max && pnt2.x < x_max) { continue; } NvVec2 vc = pnt2 - pnt1; if (vc.y == 0 && pnt1.y == holePoint.y) { if (pnt1.x < minX && pnt1.x < pnt2.x && pnt1.x > x_max) { minX = pnt1.x; vrtIndex = i; isFromBuffer = true; } if (pnt2.x < minX && pnt2.x < pnt1.x && pnt2.x > x_max) { minX = pnt2.x; vrtIndex = nx; isFromBuffer = true; } } else { float t = (holePoint.y - pnt1.y) / vc.y; if (t <= 1 && t >= 0) { NvVec2 tempPoint = vc * t + pnt1; if (tempPoint.x < minX && tempPoint.x > x_max) { minX = tempPoint.x; vrtIndex = i; isFromBuffer = false; computedPoint = tempPoint; } } } } if (vrtIndex == -1) { // std::cout << "Triangulation: base vertex for inner loop is not found..." 
<< std::endl; return 1; } int32_t bridgePoint = -1; float bestAngle = 100; if (!isFromBuffer) { NvVec2 ex1 = getProjectedPoint(vrx[externalLoop[vrtIndex]].p, dir); NvVec2 ex2 = getProjectedPoint(vrx[externalLoop[(vrtIndex + 1) % externalLoop.size()]].p, dir); if (ex1.x > ex2.x) { vrtIndex = (vrtIndex + 1) % externalLoop.size(); ex1 = ex2; } /* Check if some point is inside triangle */ bool notFound = true; const NvVec2 ba = (ex1 - holePoint).getNormalized(); const NvVec2 cb = (computedPoint - ex1).getNormalized(); const NvVec2 ac = (holePoint - computedPoint).getNormalized(); for (int32_t i = 0; i < (int32_t)externalLoop.size(); ++i) { const NvVec2 tempPoint = getProjectedPoint(vrx[externalLoop[i]].p, dir); if (pointInside(ba, cb, ac, holePoint, ex1, computedPoint, tempPoint)) { notFound = false; const NvVec2 cVp = getProjectedPoint(vrx[externalLoop[i]].p, dir); const NvVec2 pVp = getProjectedPoint(vrx[externalLoop[(i - 1 + externalLoop.size()) % externalLoop.size()]].p, dir); const NvVec2 nVp = getProjectedPoint(vrx[externalLoop[(i + 1) % externalLoop.size()]].p, dir); float rt = getRotation((cVp - pVp).getNormalized(), (nVp - pVp).getNormalized()); if (dir & OPPOSITE_WINDING) rt = -rt; if (rt < MIN_ANGLE) continue; const float tempAngle = NvVec2(1, 0).dot((tempPoint - holePoint).getNormalized()); if (bestAngle < tempAngle) { bestAngle = tempAngle; bridgePoint = i; } } } if (notFound) { bridgePoint = vrtIndex; } if (bridgePoint == -1) { // std::cout << "Triangulation: bridge vertex for inner loop is not found..." 
<< std::endl; return 1; } } else { bridgePoint = vrtIndex; } std::vector<uint32_t> temporal; for (int32_t i = 0; i <= bridgePoint; ++i) { temporal.push_back(externalLoop[i]); } temporal.push_back(internalLoop[mIndex]); for (int32_t i = (mIndex + 1) % internalLoop.size(); i != mIndex; i = (i + 1) % internalLoop.size()) { temporal.push_back(internalLoop[i]); } temporal.push_back(internalLoop[mIndex]); for (uint32_t i = bridgePoint; i < externalLoop.size(); ++i) { temporal.push_back(externalLoop[i]); } externalLoop = temporal; return 0; } void Triangulator::buildPolygonAndTriangulate(std::vector<Edge>& edges, Vertex* vertices, int32_t userData, int32_t materialId, int32_t smoothingGroup) { std::vector<std::vector<uint32_t> > serializedLoops; std::set<int> visitedVertices; std::vector<int> used(edges.size(), 0); uint32_t collected = 0; std::vector<int> edgesIds; /** Add first edge to polygon */ edgesIds.push_back(0); visitedVertices.insert(edges[0].s); visitedVertices.insert(edges[0].e); used[0] = true; collected = 1; uint32_t lastEdge = 0; bool successfullPass = false; for (; collected < edges.size();) { successfullPass = false; for (uint32_t p = 0; p < edges.size(); ++p) { if (used[p] == 0 && edges[p].s == edges[lastEdge].e) { successfullPass = true; collected++; used[p] = true; edgesIds.push_back(p); lastEdge = p; if (visitedVertices.find(edges[p].e) != visitedVertices.end()) // if we formed loop, detach it and // triangulate { serializedLoops.push_back(std::vector<uint32_t>()); std::vector<uint32_t>& serializedPositions = serializedLoops.back(); while (edgesIds.size() > 0) { serializedPositions.push_back(edges[edgesIds.back()].s); visitedVertices.erase(edges[edgesIds.back()].s); if (edges[edgesIds.back()].s == edges[p].e) { edgesIds.pop_back(); break; } edgesIds.pop_back(); } if (edgesIds.size() > 0) { lastEdge = edgesIds.back(); } else { for (uint32_t t = 0; t < edges.size(); ++t) { if (used[t] == 0) { edgesIds.push_back(t); visitedVertices.insert(edges[t].s); 
visitedVertices.insert(edges[t].e); used[t] = true; collected++; lastEdge = t; break; } } } } else { visitedVertices.insert(edges[p].e); } } } if (!successfullPass) { break; } } std::vector<LoopInfo> loopsInfo(serializedLoops.size()); // Compute normal to whole polygon, and areas of loops NvVec3 wholeFacetNormal(0, 0, 0); for (uint32_t loop = 0; loop < serializedLoops.size(); ++loop) { NvVec3 loopNormal(0, 0, 0); const std::vector<uint32_t>& pos = serializedLoops[loop]; for (uint32_t vrt = 1; vrt + 1 < serializedLoops[loop].size(); ++vrt) { loopNormal += toNvShared(vertices[pos[vrt]].p - vertices[pos[0]].p) .cross(toNvShared(vertices[pos[vrt + 1]].p - vertices[pos[0]].p)); } loopsInfo[loop].area = loopNormal.magnitude(); loopsInfo[loop].normal = loopNormal; loopsInfo[loop].index = loop; wholeFacetNormal += loopNormal; } // Change areas signs according to winding direction for (uint32_t loop = 0; loop < serializedLoops.size(); ++loop) { if (wholeFacetNormal.dot(loopsInfo[loop].normal) < 0) { loopsInfo[loop].area = -loopsInfo[loop].area; } } const ProjectionDirections dir = getProjectionDirection(wholeFacetNormal); std::sort(loopsInfo.begin(), loopsInfo.end()); std::vector<NvVec3> tempPositions; int32_t oldSize = static_cast<int32_t>(mBaseMeshTriangles.size()); for (uint32_t extPoly = 0; extPoly < loopsInfo.size(); ++extPoly) { if (loopsInfo[extPoly].area < 0) { continue; // Polygon with negative area is hole } int32_t baseLoop = loopsInfo[extPoly].index; for (uint32_t intPoly = 0; intPoly < loopsInfo.size(); ++intPoly) { if (loopsInfo[intPoly].area > 0 || loopsInfo[intPoly].used || std::abs(loopsInfo[intPoly].area) > loopsInfo[extPoly].area) { continue; } int32_t holeLoop = loopsInfo[intPoly].index; if (!unitePolygons(serializedLoops[baseLoop], serializedLoops[holeLoop], vertices, dir)) { loopsInfo[intPoly].used = true; }; } triangulatePolygonWithEarClipping(serializedLoops[baseLoop], vertices, dir); } for (uint32_t i = oldSize; i < mBaseMeshTriangles.size(); ++i) { 
mBaseMeshTriangles[i].userData = userData; mBaseMeshTriangles[i].materialId = materialId; mBaseMeshTriangles[i].smoothingGroup = smoothingGroup; } } NV_FORCE_INLINE int32_t Triangulator::addVerticeIfNotExist(const Vertex& p) { auto it = mVertMap.find(p); if (it == mVertMap.end()) { mVertMap[p] = static_cast<int32_t>(mVertices.size()); mVertices.push_back(p); return static_cast<int32_t>(mVertices.size()) - 1; } else { return it->second; } } NV_FORCE_INLINE void Triangulator::addEdgeIfValid(EdgeWithParent& ed) { if (ed.s == ed.e) return; EdgeWithParent opposite(ed.e, ed.s, ed.parent); auto it = mEdgeMap.find(opposite); if (it == mEdgeMap.end()) { mEdgeMap[ed] = static_cast<int32_t>(mBaseMeshEdges.size()); mBaseMeshEdges.push_back(ed); } else { if (mBaseMeshEdges[it->second].s == kNotValidVertexIndex) { mBaseMeshEdges[it->second].s = ed.s; mBaseMeshEdges[it->second].e = ed.e; } else { mBaseMeshEdges[it->second].s = kNotValidVertexIndex; } } } void Triangulator::prepare(const Mesh* mesh) { const Edge* ed = mesh->getEdges(); const Vertex* vr = mesh->getVertices(); mBaseMapping.resize(mesh->getVerticesCount()); for (uint32_t i = 0; i < mesh->getFacetCount(); ++i) { const Facet* fc = mesh->getFacet(i); for (uint32_t j = fc->firstEdgeNumber; j < fc->firstEdgeNumber + fc->edgesCount; ++j) { int32_t a = addVerticeIfNotExist(vr[ed[j].s]); int32_t b = addVerticeIfNotExist(vr[ed[j].e]); mBaseMapping[ed[j].s] = a; mBaseMapping[ed[j].e] = b; EdgeWithParent e(a, b, i); addEdgeIfValid(e); } } std::vector<EdgeWithParent> temp; temp.reserve(mBaseMeshEdges.size()); for (uint32_t i = 0; i < mBaseMeshEdges.size(); ++i) { if (mBaseMeshEdges[i].s != kNotValidVertexIndex) { temp.push_back(mBaseMeshEdges[i]); } } mBaseMeshEdges = temp; } void Triangulator::reset() { mVertices.clear(); mBaseMeshEdges.clear(); mVertMap.clear(); mEdgeMap.clear(); mBaseMeshTriangles.clear(); mBaseMeshResultTriangles.clear(); } void Triangulator::triangulate(const Mesh* mesh) { reset(); if (mesh == nullptr || 
!mesh->isValid()) { return; } prepare(mesh); if (mBaseMeshEdges.empty()) { return; } std::vector<Edge> temp; uint32_t fP = mBaseMeshEdges[0].parent; for (uint32_t i = 0; i < mBaseMeshEdges.size(); ++i) { if (fP != mBaseMeshEdges[i].parent) { if (temp.empty() == false) { buildPolygonAndTriangulate(temp, mVertices.data(), mesh->getFacet(fP)->userData, mesh->getFacet(fP)->materialId, mesh->getFacet(fP)->smoothingGroup); } temp.clear(); fP = mBaseMeshEdges[i].parent; } temp.push_back({ mBaseMeshEdges[i].s, mBaseMeshEdges[i].e }); } buildPolygonAndTriangulate(temp, mVertices.data(), mesh->getFacet(fP)->userData, mesh->getFacet(fP)->materialId, mesh->getFacet(fP)->smoothingGroup); /* Build final triangles */ mBaseMeshResultTriangles.clear(); for (uint32_t i = 0; i < mBaseMeshTriangles.size(); ++i) { if (mBaseMeshTriangles[i].ea == kNotValidVertexIndex) { continue; } mBaseMeshResultTriangles.push_back({ mVertices[mBaseMeshTriangles[i].ea], mVertices[mBaseMeshTriangles[i].eb], mVertices[mBaseMeshTriangles[i].ec], mBaseMeshTriangles[i].userData, mBaseMeshTriangles[i].materialId, mBaseMeshTriangles[i].smoothingGroup }); } mBaseMeshUVFittedTriangles = mBaseMeshResultTriangles; // Uvs will be fitted later, in FractureTool. computePositionedMapping(); } void Triangulator::computePositionedMapping() { std::map<NvcVec3, int32_t, VrtPositionComparator> mPosMap; mPositionMappedVrt.clear(); mPositionMappedVrt.resize(mVertices.size()); for (uint32_t i = 0; i < mVertices.size(); ++i) { auto it = mPosMap.find(mVertices[i].p); if (it == mPosMap.end()) { mPosMap[mVertices[i].p] = i; mPositionMappedVrt[i] = i; } else { mPositionMappedVrt[i] = it->second; } } } } // namespace Blast } // namespace Nv
27,321
C++
34.391192
151
0.576882
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/NvBlastExtAuthoringBooleanToolImpl.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. 
#include "NvBlastGlobals.h" #include "NvBlastExtAuthoringBooleanToolImpl.h" #include "NvBlastExtAuthoringBooleanTool.h" #include "NvBlastExtAuthoringMeshImpl.h" #include "NvBlastExtAuthoringAcceleratorImpl.h" #include <NvBlastNvSharedHelpers.h> #include <math.h> #include <set> #include <algorithm> using nvidia::NvBounds3; namespace Nv { namespace Blast { /* Linear interpolation of vectors */ NV_FORCE_INLINE void vec3Lerp(const NvcVec3& a, const NvcVec3& b, NvcVec3& out, float t) { out.x = (b.x - a.x) * t + a.x; out.y = (b.y - a.y) * t + a.y; out.z = (b.z - a.z) * t + a.z; } NV_FORCE_INLINE void vec2Lerp(const NvcVec2& a, const NvcVec2& b, NvcVec2& out, float t) { out.x = (b.x - a.x) * t + a.x; out.y = (b.y - a.y) * t + a.y; } NV_FORCE_INLINE int32_t BooleanEvaluator::addIfNotExist(const Vertex& p) { mVerticesAggregate.push_back(p); return static_cast<int32_t>(mVerticesAggregate.size()) - 1; } NV_FORCE_INLINE void BooleanEvaluator::addEdgeIfValid(const EdgeWithParent& ed) { mEdgeAggregate.push_back(ed); } /** Vertex level shadowing functions */ NV_FORCE_INLINE int32_t vertexShadowing(const NvcVec3& a, const NvcVec3& b) { return (b.x >= a.x) ? 
1 : 0; } /** Vertex-edge status functions */ NV_FORCE_INLINE int32_t veStatus01(const NvcVec3& sEdge, const NvcVec3& eEdge, const NvcVec3& p) { return vertexShadowing(p, eEdge) - vertexShadowing(p, sEdge); } NV_FORCE_INLINE int32_t veStatus10(const NvcVec3& sEdge, const NvcVec3& eEdge, const NvcVec3& p) { return -vertexShadowing(eEdge, p) + vertexShadowing(sEdge, p); } bool shouldSwap(const NvcVec3& a, const NvcVec3& b) { if (a.x < b.x) return false; if (a.x > b.x) return true; if (a.y < b.y) return false; if (a.y > b.y) return true; if (a.z < b.z) return false; if (a.z > b.z) return true; return false; } /** Vertex-edge shadowing functions */ int32_t shadowing01(Vertex sEdge, Vertex eEdge, const NvcVec3& p, Vertex& onEdgePoint, bool& hasOnEdge) { int32_t winding = veStatus01(sEdge.p, eEdge.p, p); if (sEdge.p.x > eEdge.p.x) { std::swap(sEdge, eEdge); } if (winding != 0) { float t = (p.x - sEdge.p.x) / (eEdge.p.x - sEdge.p.x); if (t >= 1) { onEdgePoint = eEdge; } else if (t <= 0) { onEdgePoint = sEdge; } else { vec3Lerp(sEdge.p, eEdge.p, onEdgePoint.p, t); vec3Lerp(sEdge.n, eEdge.n, onEdgePoint.n, t); vec2Lerp(sEdge.uv[0], eEdge.uv[0], onEdgePoint.uv[0], t); } hasOnEdge = true; if (onEdgePoint.p.y >= p.y) { return winding; } } else { hasOnEdge = false; } return 0; } int32_t shadowing10(Vertex sEdge, Vertex eEdge, const NvcVec3& p, Vertex& onEdgePoint, bool& hasOnEdge) { int32_t winding = veStatus10(sEdge.p, eEdge.p, p); if (sEdge.p.x > eEdge.p.x) { std::swap(sEdge, eEdge); } if (winding != 0) { float t = (p.x - sEdge.p.x) / (eEdge.p.x - sEdge.p.x); if (t >= 1) { onEdgePoint = eEdge; } else if (t <= 0) { onEdgePoint = sEdge; } else { vec3Lerp(sEdge.p, eEdge.p, onEdgePoint.p, t); vec3Lerp(sEdge.n, eEdge.n, onEdgePoint.n, t); vec2Lerp(sEdge.uv[0], eEdge.uv[0], onEdgePoint.uv[0], t); } hasOnEdge = true; if (onEdgePoint.p.y < p.y) { return winding; } } else { hasOnEdge = false; } return 0; } int32_t shadowing01(NvcVec3 sEdge, NvcVec3 eEdge, const NvcVec3& p) { int32_t 
winding = veStatus01(sEdge, eEdge, p); if (winding != 0) { if (sEdge.x > eEdge.x) { std::swap(sEdge, eEdge); } float t = ((p.x - sEdge.x) / (eEdge.x - sEdge.x)); NvcVec3 onEdgePoint; if (t >= 1) onEdgePoint = eEdge; else if (t <= 0) onEdgePoint = sEdge; else vec3Lerp(sEdge, eEdge, onEdgePoint, t); if (onEdgePoint.y >= p.y) { return winding; } } return 0; } int32_t shadowing10(NvcVec3 sEdge, NvcVec3 eEdge, const NvcVec3& p) { int32_t winding = veStatus10(sEdge, eEdge, p); if (winding != 0) { if (sEdge.x > eEdge.x) { std::swap(sEdge, eEdge); } float t = ((p.x - sEdge.x) / (eEdge.x - sEdge.x)); NvcVec3 onEdgePoint; if (t >= 1) onEdgePoint = eEdge; else if (t <= 0) onEdgePoint = sEdge; else vec3Lerp(sEdge, eEdge, onEdgePoint, t); if (onEdgePoint.y < p.y) { return winding; } } return 0; } /** Vertex-facet shadowing functions */ int32_t vfStatus02(const NvcVec3& p, const Vertex* points, const Edge* edges, int32_t edgesCount, Vertex* out) { int32_t val = 0; Vertex pnt; bool hasOnEdge = false; out[0].p.y = -MAXIMUM_EXTENT; out[1].p.y = MAXIMUM_EXTENT; for (int32_t i = 0; i < edgesCount; ++i) { val -= shadowing01(points[edges->s], points[edges->e], p, pnt, hasOnEdge); if (hasOnEdge != 0) { if (p.y > pnt.p.y && pnt.p.y > out[0].p.y) { out[0] = pnt; } if (p.y <= pnt.p.y && pnt.p.y < out[1].p.y) { out[1] = pnt; } } ++edges; } return val; } int32_t shadowing02(const NvcVec3& p, const Vertex* points, const Edge* edges, int edgesCount, bool& hasOnFacetPoint, Vertex& onFacetPoint) { Vertex outp[2]; int32_t stat = vfStatus02(p, points, edges, edgesCount, outp); float z = 0; hasOnFacetPoint = false; if (stat != 0) { Vertex& p1 = outp[0]; Vertex& p2 = outp[1]; NvcVec3 vc = p2.p - p1.p; float t = 0; t = (std::abs(vc.x) > std::abs(vc.y)) ? 
(p.x - p1.p.x) / vc.x : (p.y - p1.p.y) / vc.y; t = nvidia::NvClamp(t, 0.0f, 1.0f); z = t * vc.z + p1.p.z; hasOnFacetPoint = true; onFacetPoint.p.x = p.x; onFacetPoint.p.y = p.y; onFacetPoint.p.z = z; vec2Lerp(p1.uv[0], p2.uv[0], onFacetPoint.uv[0], t); vec3Lerp(p1.n, p2.n, onFacetPoint.n, t); if (z >= p.z) { return stat; } } return 0; } int32_t vfStatus20(const NvcVec3& p, const Vertex* points, const Edge* edges, int32_t edgesCount, Vertex* out) { int32_t val = 0; Vertex pnt; bool hasOnEdge = false; out[0].p.y = -MAXIMUM_EXTENT; out[1].p.y = MAXIMUM_EXTENT; for (int32_t i = 0; i < edgesCount; ++i) { val += shadowing10(points[edges->s], points[edges->e], p, pnt, hasOnEdge); if (hasOnEdge != 0) { if (p.y > pnt.p.y && pnt.p.y > out[0].p.y) { out[0] = pnt; } if (p.y <= pnt.p.y && pnt.p.y < out[1].p.y) { out[1] = pnt; } } ++edges; } return val; } int32_t shadowing20(const NvcVec3& p, const Vertex* points, const Edge* edges, int edgesCount, bool& hasOnFacetPoint, Vertex& onFacetPoint) { Vertex outp[2]; int32_t stat = vfStatus20(p, points, edges, edgesCount, outp); hasOnFacetPoint = false; if (stat != 0) { Vertex& p1 = outp[0]; Vertex& p2 = outp[1]; NvcVec3 vc = p2.p - p1.p; float t = 0; t = (std::abs(vc.x) > std::abs(vc.y)) ? 
(p.x - p1.p.x) / vc.x : (p.y - p1.p.y) / vc.y; t = nvidia::NvClamp(t, 0.0f, 1.0f); hasOnFacetPoint = true; onFacetPoint.p.x = p.x; onFacetPoint.p.y = p.y; onFacetPoint.p.z = t * vc.z + p1.p.z; vec2Lerp(p1.uv[0], p2.uv[0], onFacetPoint.uv[0], t); vec3Lerp(p1.n, p2.n, onFacetPoint.n, t); if (onFacetPoint.p.z < p.z) { return stat; } } return 0; } NV_FORCE_INLINE int32_t edgesCrossCheck(const NvcVec3& eAs, const NvcVec3& eAe, const NvcVec3& eBs, const NvcVec3& eBe) { return shadowing01(eBs, eBe, eAe) - shadowing01(eBs, eBe, eAs) + shadowing10(eAs, eAe, eBe) - shadowing10(eAs, eAe, eBs); } int32_t edgesIntersection(const Vertex& eAs, const Vertex& eAe, const Vertex& eBs, const Vertex& eBe, Vertex& intersectionA, Vertex& intersectionB, bool& hasPoints) { int32_t status = edgesCrossCheck(eAs.p, eAe.p, eBs.p, eBe.p); hasPoints = false; if (status == 0) { return 0; } Vertex tempPoint; Vertex bShadowingPair[2]; Vertex aShadowingPair[2]; bool hasOnEdge = false; bool aShadowing = false; bool bShadowing = false; /** Search for two pairs where parts of A shadows B, and where B shadows A. Needed for search intersection point. 
*/ for (auto p : { &eBs, &eBe }) { int32_t shadowingType = shadowing10(eAs, eAe, p->p, tempPoint, hasOnEdge); if (shadowingType == 0 && !aShadowing && hasOnEdge) { aShadowing = true; aShadowingPair[0] = *p; aShadowingPair[1] = tempPoint; } else { if ((shadowingType == 1 || shadowingType == -1) && !bShadowing) { bShadowing = true; bShadowingPair[0] = *p; bShadowingPair[1] = tempPoint; } } } if (!aShadowing || !bShadowing) { for (auto p : { &eAs, &eAe }) { int32_t shadowingType = shadowing01(eBs, eBe, p->p, tempPoint, hasOnEdge); if (shadowingType == 0 && !aShadowing && hasOnEdge) { aShadowing = true; aShadowingPair[1] = *p; aShadowingPair[0] = tempPoint; } else { if ((shadowingType == 1 || shadowingType == -1) && !bShadowing) { bShadowing = true; bShadowingPair[1] = *p; bShadowingPair[0] = tempPoint; } } } } float deltaPlus = bShadowingPair[0].p.y - bShadowingPair[1].p.y; float deltaMinus = aShadowingPair[0].p.y - aShadowingPair[1].p.y; float div = 0; if (deltaPlus > 0) div = deltaPlus / (deltaPlus - deltaMinus); else div = 0; intersectionA.p = bShadowingPair[1].p - div * (bShadowingPair[1].p - aShadowingPair[1].p); intersectionA.n = bShadowingPair[1].n - div * (bShadowingPair[1].n - aShadowingPair[1].n); intersectionA.uv[0] = bShadowingPair[1].uv[0] - (bShadowingPair[1].uv[0] - aShadowingPair[1].uv[0]) * div; intersectionB.p = intersectionA.p; intersectionB.p.z = bShadowingPair[0].p.z - div * (bShadowingPair[0].p.z - aShadowingPair[0].p.z); intersectionB.n = bShadowingPair[0].n - div * (bShadowingPair[0].n - aShadowingPair[0].n); intersectionB.uv[0] = bShadowingPair[0].uv[0] - (bShadowingPair[0].uv[0] - aShadowingPair[0].uv[0]) * div; hasPoints = true; return status; } NV_FORCE_INLINE int32_t edgeEdgeShadowing(const Vertex& eAs, const Vertex& eAe, const Vertex& eBs, const Vertex& eBe, Vertex& intersectionA, Vertex& intersectionB, bool& hasPoints) { int32_t status = edgesIntersection(eAs, eAe, eBs, eBe, intersectionA, intersectionB, hasPoints); if (intersectionB.p.z 
>= intersectionA.p.z) { return status; } return 0; } int32_t edgeFacetIntersection12(const Vertex& edSt, const Vertex& edEnd, const Vertex* points, const Edge* edges, int edgesCount, Vertex& intersectionA, Vertex& intersectionB) { int32_t status = 0; Vertex p1, p2; Vertex bShadowingPair[2]; Vertex aShadowingPair[2]; bool hasPoint = false; bool aShadowing = false; bool bShadowing = false; int32_t mlt = -1; int32_t shadowingType; for (auto p : { &edEnd, &edSt }) { shadowingType = shadowing02(p->p, points, edges, edgesCount, hasPoint, p1); status += mlt * shadowingType; if (shadowingType == 0 && !aShadowing && hasPoint) { aShadowing = true; aShadowingPair[0] = p1; aShadowingPair[1] = *p; } else if ((shadowingType == 1 || shadowingType == -1) && !bShadowing) { bShadowing = true; bShadowingPair[0] = p1; bShadowingPair[1] = *p; } mlt = 1; } for (int32_t ed = 0; ed < edgesCount; ++ed) { if (shouldSwap(points[edges[ed].s].p, points[edges[ed].e].p)) { shadowingType = -edgeEdgeShadowing(edSt, edEnd, points[edges[ed].e], points[edges[ed].s], p1, p2, hasPoint); } else { shadowingType = edgeEdgeShadowing(edSt, edEnd, points[edges[ed].s], points[edges[ed].e], p1, p2, hasPoint); } status -= shadowingType; if (shadowingType == 0 && !aShadowing && hasPoint) { aShadowing = true; aShadowingPair[0] = p2; aShadowingPair[1] = p1; } else if ((shadowingType == 1 || shadowingType == -1) && !bShadowing) { bShadowing = true; bShadowingPair[0] = p2; bShadowingPair[1] = p1; } } if (!status || !bShadowing || !aShadowing) { return 0; } float deltaPlus = bShadowingPair[0].p.z - bShadowingPair[1].p.z; float div = 0; if (deltaPlus != 0) { float deltaMinus = aShadowingPair[0].p.z - aShadowingPair[1].p.z; div = deltaPlus / (deltaPlus - deltaMinus); } intersectionA.p = bShadowingPair[1].p - div * (bShadowingPair[1].p - aShadowingPair[1].p); intersectionA.n = bShadowingPair[1].n - div * (bShadowingPair[1].n - aShadowingPair[1].n); intersectionA.uv[0] = bShadowingPair[1].uv[0] - (bShadowingPair[1].uv[0] 
- aShadowingPair[1].uv[0]) * div; intersectionB.p = intersectionA.p; intersectionB.n = bShadowingPair[0].n - div * (bShadowingPair[0].n - aShadowingPair[0].n); intersectionB.uv[0] = bShadowingPair[0].uv[0] - (bShadowingPair[0].uv[0] - aShadowingPair[0].uv[0]) * div; return status; } int32_t edgeFacetIntersection21(const Vertex& edSt, const Vertex& edEnd, const Vertex* points, const Edge* edges, int edgesCount, Vertex& intersectionA, Vertex& intersectionB) { int32_t status = 0; Vertex p1, p2; Vertex bShadowingPair[2]; Vertex aShadowingPair[2]; bool hasPoint = false; bool aShadowing = false; bool bShadowing = false; int32_t shadowingType; int32_t mlt = 1; for (auto p : { &edEnd, &edSt }) { shadowingType = shadowing20(p->p, points, edges, edgesCount, hasPoint, p1); status += mlt * shadowingType; if (shadowingType == 0 && !aShadowing && hasPoint) { aShadowing = true; aShadowingPair[0] = *p; aShadowingPair[1] = p1; } else if ((shadowingType == 1 || shadowingType == -1) && !bShadowing) { bShadowing = true; bShadowingPair[0] = *p; bShadowingPair[1] = p1; } mlt = -1; } for (int32_t ed = 0; ed < edgesCount; ++ed) { if (shouldSwap(points[edges[ed].s].p, points[edges[ed].e].p)) { shadowingType = -edgeEdgeShadowing(points[edges[ed].e], points[edges[ed].s], edSt, edEnd, p1, p2, hasPoint); } else { shadowingType = edgeEdgeShadowing(points[edges[ed].s], points[edges[ed].e], edSt, edEnd, p1, p2, hasPoint); } status -= shadowingType; if (shadowingType == 0) { if (!aShadowing && hasPoint) { aShadowing = true; aShadowingPair[0] = p2; aShadowingPair[1] = p1; } } else { if ((shadowingType == 1 || shadowingType == -1) && !bShadowing) { bShadowing = true; bShadowingPair[0] = p2; bShadowingPair[1] = p1; } } } if (!status || !bShadowing || !aShadowing) { return 0; } float deltaPlus = bShadowingPair[0].p.z - bShadowingPair[1].p.z; float div = 0; if (deltaPlus != 0) { float deltaMinus = aShadowingPair[0].p.z - aShadowingPair[1].p.z; div = deltaPlus / (deltaPlus - deltaMinus); } 
intersectionA.p = bShadowingPair[1].p - div * (bShadowingPair[1].p - aShadowingPair[1].p); intersectionA.n = bShadowingPair[1].n - div * (bShadowingPair[1].n - aShadowingPair[1].n); intersectionA.uv[0] = bShadowingPair[1].uv[0] - (bShadowingPair[1].uv[0] - aShadowingPair[1].uv[0]) * div; intersectionB.p = intersectionA.p; intersectionB.n = bShadowingPair[0].n - div * (bShadowingPair[0].n - aShadowingPair[0].n); intersectionB.uv[0] = bShadowingPair[0].uv[0] - (bShadowingPair[0].uv[0] - aShadowingPair[0].uv[0]) * div; return status; } int32_t BooleanEvaluator::vertexMeshStatus03(const NvcVec3& p, const Mesh* mesh) { int32_t status = 0; Vertex pnt; bool hasPoint = false; mAcceleratorB->setState(p); int32_t facet = mAcceleratorB->getNextFacet(); while (facet != -1) { const Edge* ed = mesh->getEdges() + mesh->getFacet(facet)->firstEdgeNumber; status += shadowing02(p, mesh->getVertices(), ed, mesh->getFacet(facet)->edgesCount, hasPoint, pnt); facet = mAcceleratorB->getNextFacet(); } return status; } int32_t BooleanEvaluator::vertexMeshStatus30(const NvcVec3& p, const Mesh* mesh) { int32_t status = 0; bool hasPoints = false; Vertex point; mAcceleratorA->setState(p); int32_t facet = mAcceleratorA->getNextFacet(); while ( facet != -1) { const Edge* ed = mesh->getEdges() + mesh->getFacet(facet)->firstEdgeNumber; status -= shadowing20(p, mesh->getVertices(), ed, mesh->getFacet(facet)->edgesCount, hasPoints, point); facet = mAcceleratorA->getNextFacet(); } return status; } NV_FORCE_INLINE int32_t inclusionValue03(const BooleanConf& conf, int32_t xValue) { return conf.ca + conf.ci * xValue; } NV_FORCE_INLINE int32_t inclusionValueEdgeFace(const BooleanConf& conf, int32_t xValue) { return conf.ci * xValue; } NV_FORCE_INLINE int32_t inclusionValue30(const BooleanConf& conf, int32_t xValue) { return conf.cb + conf.ci * xValue; } struct VertexComparator { VertexComparator(NvcVec3 base = NvcVec3()) : basePoint(base) {}; NvcVec3 basePoint; bool operator()(const Vertex& a, const 
Vertex& b) { return ((b.p - a.p) | basePoint) > 0.0; } }; struct VertexPairComparator { VertexPairComparator(NvcVec3 base = NvcVec3()) : basePoint(base) {}; NvcVec3 basePoint; bool operator()(const std::pair<Vertex, Vertex>& a, const std::pair<Vertex, Vertex>& b) { return ((b.first.p - a.first.p) | basePoint) > 0.0; } }; int32_t BooleanEvaluator::isPointContainedInMesh(const Mesh* msh, const NvcVec3& point) { if (msh == nullptr) { return 0; } DummyAccelerator dmAccel(msh->getFacetCount()); mAcceleratorA = &dmAccel; return vertexMeshStatus30(point, msh); } int32_t BooleanEvaluator::isPointContainedInMesh(const Mesh* msh, SpatialAccelerator* spAccel, const NvcVec3& point) { if (msh == nullptr) { return 0; } mAcceleratorA = spAccel; return vertexMeshStatus30(point, msh); } void BooleanEvaluator::buildFaceFaceIntersections(const BooleanConf& mode) { int32_t statusValue = 0; int32_t inclusionValue = 0; std::vector<std::pair<Vertex, Vertex> > retainedStarts; std::vector<std::pair<Vertex, Vertex>> retainedEnds; VertexPairComparator comp; Vertex newPointA; Vertex newPointB; const Vertex* meshAPoints = mMeshA->getVertices(); const Vertex* meshBPoints = mMeshB->getVertices(); EdgeWithParent newEdge; mEdgeFacetIntersectionData12.clear(); mEdgeFacetIntersectionData21.clear(); mEdgeFacetIntersectionData12.resize(mMeshA->getFacetCount()); mEdgeFacetIntersectionData21.resize(mMeshB->getFacetCount()); for (uint32_t facetB = 0; facetB < mMeshB->getFacetCount(); ++facetB) { mAcceleratorA->setState(meshBPoints, mMeshB->getEdges(), *mMeshB->getFacet(facetB)); int32_t facetA = mAcceleratorA->getNextFacet(); while (facetA != -1) { const Edge* facetBEdges = mMeshB->getEdges() + mMeshB->getFacet(facetB)->firstEdgeNumber; const Edge* facetAEdges = mMeshA->getEdges() + mMeshA->getFacet(facetA)->firstEdgeNumber; const Edge* fbe = facetBEdges; const Edge* fae = facetAEdges; retainedStarts.clear(); retainedEnds.clear(); NvcVec3 compositeEndPoint = {0, 0, 0}; NvcVec3 compositeStartPoint = {0, 
0, 0}; uint32_t facetAEdgeCount = mMeshA->getFacet(facetA)->edgesCount; uint32_t facetBEdgeCount = mMeshB->getFacet(facetB)->edgesCount; int32_t ic = 0; for (uint32_t i = 0; i < facetAEdgeCount; ++i) { if (shouldSwap(meshAPoints[fae->e].p, meshAPoints[fae->s].p)) { statusValue = -edgeFacetIntersection12(meshAPoints[fae->e], meshAPoints[fae->s], meshBPoints, facetBEdges, facetBEdgeCount, newPointA, newPointB); } else { statusValue = edgeFacetIntersection12(meshAPoints[fae->s], meshAPoints[fae->e], meshBPoints, facetBEdges, facetBEdgeCount, newPointA, newPointB); } inclusionValue = -inclusionValueEdgeFace(mode, statusValue); if (inclusionValue > 0) { for (ic = 0; ic < inclusionValue; ++ic) { retainedEnds.push_back(std::make_pair(newPointA, newPointB)); compositeEndPoint = compositeEndPoint + newPointA.p; } mEdgeFacetIntersectionData12[facetA].push_back(EdgeFacetIntersectionData(i, statusValue, newPointA)); } if (inclusionValue < 0) { for (ic = 0; ic < -inclusionValue; ++ic) { retainedStarts.push_back(std::make_pair(newPointA, newPointB)); compositeStartPoint = compositeStartPoint + newPointA.p; } mEdgeFacetIntersectionData12[facetA].push_back(EdgeFacetIntersectionData(i, statusValue, newPointA)); } fae++; } for (uint32_t i = 0; i < facetBEdgeCount; ++i) { if (shouldSwap(meshBPoints[fbe->e].p, meshBPoints[fbe->s].p)) { statusValue = -edgeFacetIntersection21(meshBPoints[fbe->e], meshBPoints[fbe->s], meshAPoints, facetAEdges, facetAEdgeCount, newPointA, newPointB); } else { statusValue = edgeFacetIntersection21(meshBPoints[fbe->s], meshBPoints[fbe->e], meshAPoints, facetAEdges, facetAEdgeCount, newPointA, newPointB); } inclusionValue = inclusionValueEdgeFace(mode, statusValue); if (inclusionValue > 0) { for (ic = 0; ic < inclusionValue; ++ic) { retainedEnds.push_back(std::make_pair(newPointA, newPointB)); compositeEndPoint = compositeEndPoint + newPointB.p; } mEdgeFacetIntersectionData21[facetB].push_back(EdgeFacetIntersectionData( i, statusValue, newPointB)); } if 
(inclusionValue < 0) { for (ic = 0; ic < -inclusionValue; ++ic) { retainedStarts.push_back(std::make_pair(newPointA, newPointB)); compositeStartPoint = compositeStartPoint + newPointB.p; } mEdgeFacetIntersectionData21[facetB].push_back(EdgeFacetIntersectionData(i, statusValue, newPointB)); } fbe++; } if (retainedStarts.size() != retainedEnds.size()) { NVBLAST_LOG_ERROR("Not equal number of starting and ending vertices! Probably input mesh has open edges."); return; } for (uint32_t rv = 0; rv < retainedStarts.size(); ++rv) { newEdge.s = addIfNotExist(retainedStarts[rv].first); newEdge.e = addIfNotExist(retainedEnds[rv].first); newEdge.parent = facetA; addEdgeIfValid(newEdge); newEdge.parent = facetB + mMeshA->getFacetCount(); newEdge.e = addIfNotExist(retainedStarts[rv].second); newEdge.s = addIfNotExist(retainedEnds[rv].second); addEdgeIfValid(newEdge); } facetA = mAcceleratorA->getNextFacet(); } // while (*iter != -1) } // for (uint32_t facetB = 0; facetB < mMeshB->getFacetCount(); ++facetB) } void BooleanEvaluator::buildFastFaceFaceIntersection(const BooleanConf& mode) { int32_t statusValue = 0; int32_t inclusionValue = 0; std::vector<std::pair<Vertex, Vertex> > retainedStarts; std::vector<std::pair<Vertex, Vertex>> retainedEnds; VertexPairComparator comp; Vertex newPointA; Vertex newPointB; const Vertex* meshAPoints = mMeshA->getVertices(); const Vertex* meshBPoints = mMeshB->getVertices(); EdgeWithParent newEdge; mEdgeFacetIntersectionData12.clear(); mEdgeFacetIntersectionData21.clear(); mEdgeFacetIntersectionData12.resize(mMeshA->getFacetCount()); mEdgeFacetIntersectionData21.resize(mMeshB->getFacetCount()); for (uint32_t facetA = 0; facetA < mMeshA->getFacetCount(); ++facetA) { const Edge* facetAEdges = mMeshA->getEdges() + mMeshA->getFacet(facetA)->firstEdgeNumber; int32_t facetB = 0; const Edge* facetBEdges = mMeshB->getEdges() + mMeshB->getFacet(facetB)->firstEdgeNumber; const Edge* fae = facetAEdges; retainedStarts.clear(); retainedEnds.clear(); NvcVec3 
compositeEndPoint = {0, 0, 0}; NvcVec3 compositeStartPoint = {0, 0, 0}; uint32_t facetAEdgeCount = mMeshA->getFacet(facetA)->edgesCount; uint32_t facetBEdgeCount = mMeshB->getFacet(facetB)->edgesCount; int32_t ic = 0; for (uint32_t i = 0; i < facetAEdgeCount; ++i) { if (shouldSwap(meshAPoints[fae->e].p, meshAPoints[fae->s].p)) { statusValue = -edgeFacetIntersection12(meshAPoints[fae->e], meshAPoints[fae->s], meshBPoints, facetBEdges, facetBEdgeCount, newPointA, newPointB); } else { statusValue = edgeFacetIntersection12(meshAPoints[fae->s], meshAPoints[fae->e], meshBPoints, facetBEdges, facetBEdgeCount, newPointA, newPointB); } inclusionValue = -inclusionValueEdgeFace(mode, statusValue); if (inclusionValue > 0) { for (ic = 0; ic < inclusionValue; ++ic) { retainedEnds.push_back(std::make_pair(newPointA, newPointB)); compositeEndPoint = compositeEndPoint + newPointA.p; } mEdgeFacetIntersectionData12[facetA].push_back(EdgeFacetIntersectionData(i, statusValue, newPointA)); } if (inclusionValue < 0) { for (ic = 0; ic < -inclusionValue; ++ic) { retainedStarts.push_back(std::make_pair(newPointA, newPointB)); compositeStartPoint = compositeStartPoint + newPointA.p; } mEdgeFacetIntersectionData12[facetA].push_back(EdgeFacetIntersectionData(i, statusValue, newPointA)); } fae++; } if (retainedStarts.size() != retainedEnds.size()) { NVBLAST_LOG_ERROR("Not equal number of starting and ending vertices! 
Probably input mesh has open edges."); return; } if (retainedStarts.size() > 1) { comp.basePoint = compositeEndPoint - compositeStartPoint; std::sort(retainedStarts.begin(), retainedStarts.end(), comp); std::sort(retainedEnds.begin(), retainedEnds.end(), comp); } for (uint32_t rv = 0; rv < retainedStarts.size(); ++rv) { newEdge.s = addIfNotExist(retainedStarts[rv].first); newEdge.e = addIfNotExist(retainedEnds[rv].first); newEdge.parent = facetA; addEdgeIfValid(newEdge); newEdge.parent = facetB + mMeshA->getFacetCount(); newEdge.e = addIfNotExist(retainedStarts[rv].second); newEdge.s = addIfNotExist(retainedEnds[rv].second); addEdgeIfValid(newEdge); } } } void BooleanEvaluator::collectRetainedPartsFromA(const BooleanConf& mode) { int32_t statusValue = 0; int32_t inclusionValue = 0; const Vertex* vertices = mMeshA->getVertices(); VertexComparator comp; const NvBounds3& bMeshBoudning = toNvShared(mMeshB->getBoundingBox()); const Edge* facetEdges = mMeshA->getEdges(); std::vector<Vertex> retainedStartVertices; std::vector<Vertex> retainedEndVertices; retainedStartVertices.reserve(255); retainedEndVertices.reserve(255); int32_t ic = 0; for (uint32_t facetId = 0; facetId < mMeshA->getFacetCount(); ++facetId) { retainedStartVertices.clear(); retainedEndVertices.clear(); for (uint32_t i = 0; i < mMeshA->getFacet(facetId)->edgesCount; ++i) { NvcVec3 compositeEndPoint = {0, 0, 0}; NvcVec3 compositeStartPoint = {0, 0, 0}; int32_t lastPos = static_cast<int32_t>(retainedEndVertices.size()); /* Test start and end point of edge against mesh */ if (bMeshBoudning.contains(toNvShared(vertices[facetEdges->s].p))) { statusValue = vertexMeshStatus03(vertices[facetEdges->s].p, mMeshB); } else { statusValue = 0; } inclusionValue = -inclusionValue03(mode, statusValue); if (inclusionValue > 0) { for (ic = 0; ic < inclusionValue; ++ic) { retainedEndVertices.push_back(vertices[facetEdges->s]); compositeEndPoint = compositeEndPoint + vertices[facetEdges->s].p; } } else if (inclusionValue < 
0) { for (ic = 0; ic < -inclusionValue; ++ic) { retainedStartVertices.push_back(vertices[facetEdges->s]); compositeStartPoint = compositeStartPoint + vertices[facetEdges->s].p; } } if (bMeshBoudning.contains(toNvShared(vertices[facetEdges->e].p))) { statusValue = vertexMeshStatus03(vertices[facetEdges->e].p, mMeshB); } else { statusValue = 0; } inclusionValue = inclusionValue03(mode, statusValue); if (inclusionValue > 0) { for (ic = 0; ic < inclusionValue; ++ic) { retainedEndVertices.push_back(vertices[facetEdges->e]); compositeEndPoint = compositeEndPoint + vertices[facetEdges->e].p; } } else if (inclusionValue < 0) { for (ic = 0; ic < -inclusionValue; ++ic) { retainedStartVertices.push_back(vertices[facetEdges->e]); compositeStartPoint = compositeStartPoint + vertices[facetEdges->e].p; } } /* Test edge intersection with mesh*/ for (uint32_t intrs = 0; intrs < mEdgeFacetIntersectionData12[facetId].size(); ++intrs) { const EdgeFacetIntersectionData& intr = mEdgeFacetIntersectionData12[facetId][intrs]; if (intr.edId != (int32_t)i) continue; inclusionValue = inclusionValueEdgeFace(mode, intr.intersectionType); if (inclusionValue > 0) { for (ic = 0; ic < inclusionValue; ++ic) { retainedEndVertices.push_back(intr.intersectionPoint); compositeEndPoint = compositeEndPoint + intr.intersectionPoint.p; } } else if (inclusionValue < 0) { for (ic = 0; ic < -inclusionValue; ++ic) { retainedStartVertices.push_back(intr.intersectionPoint); compositeStartPoint = compositeStartPoint + intr.intersectionPoint.p; } } } facetEdges++; if (retainedStartVertices.size() != retainedEndVertices.size()) { NVBLAST_LOG_ERROR("Not equal number of starting and ending vertices! 
Probably input mesh has open edges."); return; } if (retainedEndVertices.size() - lastPos > 1) { comp.basePoint = compositeEndPoint - compositeStartPoint; std::sort(retainedStartVertices.begin() + lastPos, retainedStartVertices.end(), comp); std::sort(retainedEndVertices.begin() + lastPos, retainedEndVertices.end(), comp); } } EdgeWithParent newEdge; for (uint32_t rv = 0; rv < retainedStartVertices.size(); ++rv) { newEdge.s = addIfNotExist(retainedStartVertices[rv]); newEdge.e = addIfNotExist(retainedEndVertices[rv]); newEdge.parent = facetId; addEdgeIfValid(newEdge); } } return; } void BooleanEvaluator::collectRetainedPartsFromB(const BooleanConf& mode) { int32_t statusValue = 0; int32_t inclusionValue = 0; const Vertex* vertices = mMeshB->getVertices(); VertexComparator comp; const NvBounds3& aMeshBoudning = toNvShared(mMeshA->getBoundingBox()); const Edge* facetEdges = mMeshB->getEdges(); std::vector<Vertex> retainedStartVertices; std::vector<Vertex> retainedEndVertices; retainedStartVertices.reserve(255); retainedEndVertices.reserve(255); int32_t ic = 0; for (uint32_t facetId = 0; facetId < mMeshB->getFacetCount(); ++facetId) { retainedStartVertices.clear(); retainedEndVertices.clear(); for (uint32_t i = 0; i < mMeshB->getFacet(facetId)->edgesCount; ++i) { NvcVec3 compositeEndPoint = {0, 0, 0}; NvcVec3 compositeStartPoint = {0, 0, 0}; int32_t lastPos = static_cast<int32_t>(retainedEndVertices.size()); if (aMeshBoudning.contains(toNvShared(vertices[facetEdges->s].p))) { statusValue = vertexMeshStatus30(vertices[facetEdges->s].p, mMeshA); } else { statusValue = 0; } inclusionValue = -inclusionValue30(mode, statusValue); if (inclusionValue > 0) { for (ic = 0; ic < inclusionValue; ++ic) { retainedEndVertices.push_back(vertices[facetEdges->s]); compositeEndPoint = compositeEndPoint + vertices[facetEdges->s].p; } } else if (inclusionValue < 0) { for (ic = 0; ic < -inclusionValue; ++ic) { retainedStartVertices.push_back(vertices[facetEdges->s]); compositeStartPoint = 
compositeStartPoint + vertices[facetEdges->s].p; } } if (aMeshBoudning.contains(toNvShared(vertices[facetEdges->e].p))) { statusValue = vertexMeshStatus30(vertices[facetEdges->e].p, mMeshA); } else { statusValue = 0; } inclusionValue = inclusionValue30(mode, statusValue); if (inclusionValue > 0) { for (ic = 0; ic < inclusionValue; ++ic) { retainedEndVertices.push_back(vertices[facetEdges->e]); compositeEndPoint = compositeEndPoint + vertices[facetEdges->e].p; } } else if (inclusionValue < 0) { for (ic = 0; ic < -inclusionValue; ++ic) { retainedStartVertices.push_back(vertices[facetEdges->e]); compositeStartPoint = compositeStartPoint + vertices[facetEdges->e].p; } } for (uint32_t intrs = 0; intrs < mEdgeFacetIntersectionData21[facetId].size(); ++intrs) { const EdgeFacetIntersectionData& intr = mEdgeFacetIntersectionData21[facetId][intrs]; if (intr.edId != (int32_t)i) continue; inclusionValue = inclusionValueEdgeFace(mode, intr.intersectionType); if (inclusionValue > 0) { for (ic = 0; ic < inclusionValue; ++ic) { retainedEndVertices.push_back(intr.intersectionPoint); compositeEndPoint = compositeEndPoint + intr.intersectionPoint.p; } } else if (inclusionValue < 0) { for (ic = 0; ic < -inclusionValue; ++ic) { retainedStartVertices.push_back(intr.intersectionPoint); compositeStartPoint = compositeStartPoint + intr.intersectionPoint.p; } } } facetEdges++; if (retainedStartVertices.size() != retainedEndVertices.size()) { NVBLAST_LOG_ERROR("Not equal number of starting and ending vertices! 
Probably input mesh has open edges."); return; } if (retainedEndVertices.size() - lastPos > 1) { comp.basePoint = compositeEndPoint - compositeStartPoint; std::sort(retainedStartVertices.begin() + lastPos, retainedStartVertices.end(), comp); std::sort(retainedEndVertices.begin() + lastPos, retainedEndVertices.end(), comp); } } EdgeWithParent newEdge; for (uint32_t rv = 0; rv < retainedStartVertices.size(); ++rv) { newEdge.s = addIfNotExist(retainedStartVertices[rv]); newEdge.e = addIfNotExist(retainedEndVertices[rv]); newEdge.parent = facetId + mMeshA->getFacetCount(); addEdgeIfValid(newEdge); } } return; } bool EdgeWithParentSortComp(const EdgeWithParent& a, const EdgeWithParent& b) { return a.parent < b.parent; } void BooleanEvaluator::performBoolean(const Mesh* meshA, const Mesh* meshB, SpatialAccelerator* spAccelA, SpatialAccelerator* spAccelB, const BooleanConf& mode) { reset(); mMeshA = meshA; mMeshB = meshB; mAcceleratorA = spAccelA; mAcceleratorB = spAccelB; buildFaceFaceIntersections(mode); collectRetainedPartsFromA(mode); collectRetainedPartsFromB(mode); mAcceleratorA = nullptr; mAcceleratorB = nullptr; } void BooleanEvaluator::performBoolean(const Mesh* meshA, const Mesh* meshB, const BooleanConf& mode) { reset(); mMeshA = meshA; mMeshB = meshB; DummyAccelerator ac = DummyAccelerator(mMeshA->getFacetCount()); DummyAccelerator bc = DummyAccelerator(mMeshB->getFacetCount()); performBoolean(meshA, meshB, &ac, &bc, mode); } void BooleanEvaluator::performFastCutting(const Mesh* meshA, const Mesh* meshB, SpatialAccelerator* spAccelA, SpatialAccelerator* spAccelB, const BooleanConf& mode) { reset(); mMeshA = meshA; mMeshB = meshB; mAcceleratorA = spAccelA; mAcceleratorB = spAccelB; buildFastFaceFaceIntersection(mode); collectRetainedPartsFromA(mode); mAcceleratorA = nullptr; mAcceleratorB = nullptr; } void BooleanEvaluator::performFastCutting(const Mesh* meshA, const Mesh* meshB, const BooleanConf& mode) { reset(); mMeshA = meshA; mMeshB = meshB; 
DummyAccelerator ac = DummyAccelerator(mMeshA->getFacetCount()); DummyAccelerator bc = DummyAccelerator(mMeshB->getFacetCount()); performFastCutting(meshA, meshB, &ac, &bc, mode); } BooleanEvaluator::BooleanEvaluator() { mMeshA = nullptr; mMeshB = nullptr; mAcceleratorA = nullptr; mAcceleratorB = nullptr; } BooleanEvaluator::~BooleanEvaluator() { reset(); } Mesh* BooleanEvaluator::createNewMesh() { if (mEdgeAggregate.size() == 0) { return nullptr; } std::sort(mEdgeAggregate.begin(), mEdgeAggregate.end(), EdgeWithParentSortComp); std::vector<Facet> newFacets; std::vector<Edge> newEdges(mEdgeAggregate.size()); int32_t lastPos = 0; uint32_t lastParent = mEdgeAggregate[0].parent; uint32_t collected = 0; int64_t userData = 0; int32_t materialId = 0; int32_t smoothingGroup = 0; for (uint32_t i = 0; i < mEdgeAggregate.size(); ++i) { if (mEdgeAggregate[i].parent != lastParent) { if (lastParent < mMeshA->getFacetCount()) { userData = mMeshA->getFacet(lastParent)->userData; materialId = mMeshA->getFacet(lastParent)->materialId; smoothingGroup = mMeshA->getFacet(lastParent)->smoothingGroup; } else { userData = mMeshB->getFacet(lastParent - mMeshA->getFacetCount())->userData; materialId = mMeshB->getFacet(lastParent - mMeshA->getFacetCount())->materialId; smoothingGroup = mMeshB->getFacet(lastParent - mMeshA->getFacetCount())->smoothingGroup; } newFacets.push_back({ lastPos, collected, userData, materialId, smoothingGroup }); lastPos = i; lastParent = mEdgeAggregate[i].parent; collected = 0; } collected++; newEdges[i].s = mEdgeAggregate[i].s; newEdges[i].e = mEdgeAggregate[i].e; } if (lastParent < mMeshA->getFacetCount()) { userData = mMeshA->getFacet(lastParent)->userData; materialId = mMeshA->getFacet(lastParent)->materialId; smoothingGroup = mMeshA->getFacet(lastParent)->smoothingGroup; } else { uint32_t pr = lastParent - mMeshA->getFacetCount(); userData = mMeshB->getFacet(pr)->userData; materialId = mMeshB->getFacet(pr)->materialId; smoothingGroup = 
mMeshB->getFacet(pr)->smoothingGroup; } newFacets.push_back({ lastPos, collected, userData, materialId, smoothingGroup }); return new MeshImpl(mVerticesAggregate.data(), newEdges.data(), newFacets.data(), static_cast<uint32_t>(mVerticesAggregate.size()), static_cast<uint32_t>(mEdgeAggregate.size()), static_cast<uint32_t>(newFacets.size())); } void BooleanEvaluator::reset() { mMeshA = nullptr; mMeshB = nullptr; mAcceleratorA = nullptr; mAcceleratorB = nullptr; mEdgeAggregate.clear(); mVerticesAggregate.clear(); mEdgeFacetIntersectionData12.clear(); mEdgeFacetIntersectionData21.clear(); } /// BooleanTool void BooleanToolImpl::release() { delete this; } Mesh* BooleanToolImpl::performBoolean(const Mesh* meshA, SpatialAccelerator* accelA, const Mesh* meshB, SpatialAccelerator* accelB, BooleanTool::Op op) { const BooleanConf modes[] = { BooleanConfigurations::BOOLEAN_INTERSECTION(), BooleanConfigurations::BOOLEAN_UNION(), BooleanConfigurations::BOOLEAN_DIFFERENCE(), }; constexpr size_t modeCount = sizeof(modes)/sizeof(modes[0]); if (op < 0 || op >= modeCount) { NVBLAST_LOG_ERROR("Illegal mode passed into BooleanToolImpl::performBoolean."); return nullptr; } if (!meshA || !meshB) { NVBLAST_LOG_ERROR("Null mesh pointer passed into BooleanToolImpl::performBoolean."); return nullptr; } DummyAccelerator dmAccelA(meshA->getFacetCount()); DummyAccelerator dmAccelB(meshA->getFacetCount()); m_evaluator.performBoolean(meshA, meshB, accelA ? accelA : &dmAccelA, accelB ? accelB : &dmAccelB, modes[op]); return m_evaluator.createNewMesh(); } bool BooleanToolImpl::pointInMesh(const Mesh* mesh, SpatialAccelerator* accel, const NvcVec3& point) { if (!mesh) { NVBLAST_LOG_ERROR("Null mesh pointer passed into BooleanToolImpl::pointInMesh."); return false; } DummyAccelerator dmAccel(mesh->getFacetCount()); return m_evaluator.isPointContainedInMesh(mesh, accel ? accel : &dmAccel, point); } } // namespace Blast } // namespace Nv
47,338
C++
33.105908
223
0.575647
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/NvBlastExtAuthoringVSA.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. 
#ifndef NVBLASTEXTAUTHORINGVSA_H
#define NVBLASTEXTAUTHORINGVSA_H

namespace Nv
{
namespace Blast
{

/* This code copied from APEX GSA */

namespace VSA
{

// Scalar type used throughout the VSA routines (single precision).
typedef float real;

// Abstract client interface for a set of halfspaces.
// farthest_halfspace writes the plane equation of the halfspace farthest from
// 'point' into plane[4] and returns the signed distance from 'point' to it.
struct VS3D_Halfspace_Set
{
    virtual real farthest_halfspace(real plane[4], const real point[4]) = 0;
};

// Simple types and operations for internal calculations
struct Vec3 { real x, y, z; };  // 3-vector
inline Vec3 vec3(real x, real y, real z) { Vec3 r; r.x = x; r.y = y; r.z = z; return r; }  // vector builder
inline Vec3 operator + (const Vec3& a, const Vec3& b) { return vec3(a.x + b.x, a.y + b.y, a.z + b.z); }  // vector addition
inline Vec3 operator * (real s, const Vec3& v) { return vec3(s*v.x, s*v.y, s*v.z); }  // scalar multiplication
inline real operator | (const Vec3& a, const Vec3& b) { return a.x*b.x + a.y*b.y + a.z*b.z; }  // dot product
inline Vec3 operator ^ (const Vec3& a, const Vec3& b) { return vec3(a.y*b.z - b.y*a.z, a.z*b.x - b.z*a.x, a.x*b.y - b.x*a.y); }  // cross product

struct Vec4 { Vec3 v; real w; };  // 4-vector split into 3-vector and scalar parts
inline Vec4 vec4(const Vec3& v, real w) { Vec4 r; r.v = v; r.w = w; return r; }  // vector builder
inline real operator | (const Vec4& a, const Vec4& b) { return (a.v | b.v) + a.w*b.w; }  // dot product

// More accurate perpendicular: starts from the cross product and (in high-accuracy
// builds) applies one correction step to reduce the residual dot products a|c and b|c.
inline Vec3 perp(const Vec3& a, const Vec3& b)
{
    Vec3 c = a^b;  // Cross-product gives perpendicular
#if VS3D_HIGH_ACCURACY || REAL_DOUBLE
    const real c2 = c | c;
    if (c2 != 0) c = c + (1 / c2)*((a | c)*(c^b) + (b | c)*(a^c));  // Improvement to (a b)^T(c) = (0)
#endif
    return c;
}

// Square
inline real sq(real x) { return x*x; }

// Returns index of the extremal element in a three-element set {e0, e1, e2} based upon comparisons c_ij.
// The extremal index m is such that c_mn is true, or e_m == e_n, for all n.
inline int ext_index(int c_10, int c_21, int c_20) { return c_10 << c_21 | (c_21&c_20) << 1; } // Returns index (0, 1, or 2) of minimum argument inline int index_of_min(real x0, real x1, real x2) { return ext_index((int)(x1 < x0), (int)(x2 < x1), (int)(x2 < x0)); } // Compare fractions with positive deominators. Returns a_num*sqrt(a_rden2) > b_num*sqrt(b_rden2) inline bool frac_gt(real a_num, real a_rden2, real b_num, real b_rden2) { const bool a_num_neg = a_num < 0; const bool b_num_neg = b_num < 0; return a_num_neg != b_num_neg ? b_num_neg : ((a_num*a_num*a_rden2 > b_num*b_num*b_rden2) != a_num_neg); } // Returns index (0, 1, or 2) of maximum fraction with positive deominators inline int index_of_max_frac(real x0_num, real x0_rden2, real x1_num, real x1_rden2, real x2_num, real x2_rden2) { return ext_index((int)frac_gt(x1_num, x1_rden2, x0_num, x0_rden2), (int)frac_gt(x2_num, x2_rden2, x1_num, x1_rden2), (int)frac_gt(x2_num, x2_rden2, x0_num, x0_rden2)); } // Compare values given their signs and squares. Returns a > b. a2 and b2 may have any constant offset applied to them. inline bool sgn_sq_gt(real sgn_a, real a2, real sgn_b, real b2) { return sgn_a*sgn_b < 0 ? (sgn_b < 0) : ((a2 > b2) != (sgn_a < 0)); } // Returns index (0, 1, or 2) of maximum value given their signs and squares. sq_x0, sq_x1, and sq_x2 may have any constant offset applied to them. 
inline int index_of_max_sgn_sq(real sgn_x0, real sq_x0, real sgn_x1, real sq_x1, real sgn_x2, real sq_x2)
{
    return ext_index((int)sgn_sq_gt(sgn_x1, sq_x1, sgn_x0, sq_x0),
                     (int)sgn_sq_gt(sgn_x2, sq_x2, sgn_x1, sq_x1),
                     (int)sgn_sq_gt(sgn_x2, sq_x2, sgn_x0, sq_x0));
}

// Project 2D (homogeneous) vector onto 2D half-space boundary.
// delta is the signed distance of r to the plane, recip_n2 the reciprocal squared
// normal length, eps2 the squared tolerance. If the projection collapses to ~0,
// r is replaced by a representative point on the plane (with r.z = 1).
inline void project2D(Vec3& r, const Vec3& plane, real delta, real recip_n2, real eps2)
{
    r = r + (-delta*recip_n2)*vec3(plane.x, plane.y, 0);
    r = r + (-(r | plane)*recip_n2)*vec3(plane.x, plane.y, 0); // Second projection for increased accuracy
    if ((r | r) > eps2) return;
    r = (-plane.z*recip_n2)*vec3(plane.x, plane.y, 0);
    r.z = 1;
}

// Update function for vs3d_test.
// Given the current support plane set S (the newest plane is S[plane_count-1]),
// recomputes the test point p (projection of the objective q onto the kept planes)
// and prunes redundant planes from S. Returns false when a void is detected
// (the halfspaces cannot all be satisfied), true otherwise.
static bool vs3d_update(Vec4& p, Vec4 S[4], int& plane_count, const Vec4& q, real eps2)
{
    // h plane is the last plane
    const Vec4& h = S[plane_count - 1];

    // Handle plane_count == 1 specially (optimization; this could be commented out)
    if (plane_count == 1)
    {
        // Solution is objective projected onto h plane
        p = q;
        p.v = p.v + -(p | h)*h.v;
        if ((p | p) <= eps2) p = vec4(-h.w*h.v, 1); // If p == 0 then q is a direction vector, any point in h is a support point
        return true;
    }

    // Create basis in the h plane
    const int min_i = index_of_min(h.v.x*h.v.x, h.v.y*h.v.y, h.v.z*h.v.z);
    const Vec3 y = h.v^vec3((real)(min_i == 0), (real)(min_i == 1), (real)(min_i == 2));
    const Vec3 x = y^h.v;

    // Use reduced vector r instead of p
    Vec3 r = { x | q.v, y | q.v, q.w*(y | y) }; // (x|x) = (y|y) = square of plane basis scale

    // If r == 0 (within epsilon), then it is a direction vector, and we have a bounded solution
    if ((r | r) <= eps2) r.z = 1;

    // Create plane equations in the h plane. These will not be normalized in general.
    int N = 0;          // Plane count in h subspace
    Vec3 R[3];          // Planes in h subspace
    real recip_n2[3];   // Plane normal vector reciprocal lengths squared
    real delta[3];      // Signed distance of objective to the planes
    int index[3];       // Keep track of original plane indices
    for (int i = 0; i < plane_count - 1; ++i)
    {
        const Vec3& vi = S[i].v;
        const real cos_theta = h.v | vi;
        R[N] = vec3(x | vi, y | vi, S[i].w - h.w*cos_theta);
        index[N] = i;
        const real n2 = R[N].x*R[N].x + R[N].y*R[N].y;
        if (n2 >= eps2)
        {
            const real lin_norm = (real)1.5 - (real)0.5*n2; // 1st-order approximation to 1/sqrt(n2) expanded about n2 = 1
            R[N] = lin_norm*R[N]; // We don't need normalized plane equations, but rescaling (even with an approximate normalization) gives better numerical behavior
            recip_n2[N] = 1 / (R[N].x*R[N].x + R[N].y*R[N].y);
            delta[N] = r | R[N];
            ++N; // Keep this plane
        }
        else if (cos_theta < 0) return false; // Parallel cases are redundant and rejected, anti-parallel cases are 1D voids
    }

    // Now work with the N-sized R array of half-spaces in the h plane
    switch (N)
    {
    case 1:
    one_plane:
        if (delta[0] < 0) N = 0; // S[0] is redundant, eliminate it
        else project2D(r, R[0], delta[0], recip_n2[0], eps2);
        break;
    case 2:
    two_planes:
        if (delta[0] < 0 && delta[1] < 0) N = 0; // S[0] and S[1] are redundant, eliminate them
        else
        {
            const int max_d_index = (int)frac_gt(delta[1], recip_n2[1], delta[0], recip_n2[0]);
            project2D(r, R[max_d_index], delta[max_d_index], recip_n2[max_d_index], eps2);
            const int min_d_index = max_d_index ^ 1;
            const real new_delta_min = r | R[min_d_index];
            if (new_delta_min < 0)
            {
                index[0] = index[max_d_index];
                N = 1; // S[min_d_index] is redundant, eliminate it
            }
            else
            {
                // Set r to the intersection of R[0] and R[1] and keep both
                r = perp(R[0], R[1]);
                if (r.z*r.z*recip_n2[0] * recip_n2[1] < eps2)
                {
                    if (R[0].x*R[1].x + R[0].y*R[1].y < 0) return false; // 2D void found
                    goto one_plane;
                }
                r = (1 / r.z)*r; // We could just as well multiply r by sgn(r.z); we just need to ensure r.z > 0
            }
        }
        break;
    case 3:
        if (delta[0] < 0 && delta[1] < 0 && delta[2] < 0) N = 0; // S[0], S[1], and S[2] are redundant, eliminate them
        else
        {
            const Vec3 row_x = { R[0].x, R[1].x, R[2].x };
            const Vec3 row_y = { R[0].y, R[1].y, R[2].y };
            const Vec3 row_w = { R[0].z, R[1].z, R[2].z };
            const Vec3 cof_w = perp(row_x, row_y);
            const bool detR_pos = (row_w | cof_w) > 0;
            // nrw_sgn* hold -1, 0, or +1: the sign of each strip's "normalized width",
            // or 0 when the width is below tolerance.
            const int nrw_sgn0 = cof_w.x*cof_w.x*recip_n2[1] * recip_n2[2] < eps2 ? 0 : (((int)((cof_w.x > 0) == detR_pos) << 1) - 1);
            const int nrw_sgn1 = cof_w.y*cof_w.y*recip_n2[2] * recip_n2[0] < eps2 ? 0 : (((int)((cof_w.y > 0) == detR_pos) << 1) - 1);
            const int nrw_sgn2 = cof_w.z*cof_w.z*recip_n2[0] * recip_n2[1] < eps2 ? 0 : (((int)((cof_w.z > 0) == detR_pos) << 1) - 1);
            if ((nrw_sgn0 | nrw_sgn1 | nrw_sgn2) >= 0) return false; // 3D void found
            const int positive_width_count = ((nrw_sgn0 >> 1) & 1) + ((nrw_sgn1 >> 1) & 1) + ((nrw_sgn2 >> 1) & 1);
            if (positive_width_count == 1)
            {
                // A single positive width results from a redundant plane. Eliminate it and perform N = 2 calculation.
                const int pos_width_index = ((nrw_sgn1 >> 1) & 1) | (nrw_sgn2 & 2); // Calculates which index corresponds to the positive-width side
                R[pos_width_index] = R[2];
                recip_n2[pos_width_index] = recip_n2[2];
                delta[pos_width_index] = delta[2];
                index[pos_width_index] = index[2];
                N = 2;
                goto two_planes;
            }
            // Find the max dot product of r and R[i]/|R_normal[i]|. For numerical accuracy when the angle between r and the i^{th} plane normal is small, we take some care below:
            const int max_d_index = r.z != 0
                ? index_of_max_frac(delta[0], recip_n2[0], delta[1], recip_n2[1], delta[2], recip_n2[2]) // displacement term resolves small-angle ambiguity, just use dot product
                : index_of_max_sgn_sq(delta[0], -sq(r.x*R[0].y - r.y*R[0].x)*recip_n2[0],
                                      delta[1], -sq(r.x*R[1].y - r.y*R[1].x)*recip_n2[1],
                                      delta[2], -sq(r.x*R[2].y - r.y*R[2].x)*recip_n2[2]); // No displacement term. Use wedge product to find the sine of the angle.
            // Project r onto max-d plane
            project2D(r, R[max_d_index], delta[max_d_index], recip_n2[max_d_index], eps2);
            N = 1; // Unless we use a vertex in the loop below
            const int index_max = index[max_d_index];
            // The number of finite widths should be >= 2. If not, it should be 0, but in any case it implies three parallel lines in the plane, which we should not have here.
            // If we do have three parallel lines (# of finite widths < 2), we've picked the line corresponding to the half-plane farthest from r, which is correct.
            const int finite_width_count = (nrw_sgn0 & 1) + (nrw_sgn1 & 1) + (nrw_sgn2 & 1);
            if (finite_width_count >= 2)
            {
                const int i_remaining[2] = { (1 << max_d_index) & 3, (3 >> max_d_index) ^ 1 }; // = {(max_d_index+1)%3, (max_d_index+2)%3}
                const int i_select = (int)frac_gt(delta[i_remaining[1]], recip_n2[i_remaining[1]], delta[i_remaining[0]], recip_n2[i_remaining[0]]); // Select the greater of the remaining dot products
                for (int i = 0; i < 2; ++i)
                {
                    const int j = i_remaining[i_select^i]; // i = 0 => the next-greatest, i = 1 => the least
                    if ((r | R[j]) >= 0)
                    {
                        r = perp(R[max_d_index], R[j]);
                        r = (1 / r.z)*r; // We could just as well multiply r by sgn(r.z); we just need to ensure r.z > 0
                        index[1] = index[j];
                        N = 2;
                        break;
                    }
                }
            }
            index[0] = index_max;
        }
        break;
    }

    // Transform r back to 3D space
    p = vec4(r.x*x + r.y*y + (-r.z*h.w)*h.v, r.z);

    // Pack S array with kept planes
    if (N < 2 || index[1] != 0) { for (int i = 0; i < N; ++i) S[i] = S[index[i]]; } // Safe to copy columns in order
    else { const Vec4 temp = S[0]; S[0] = S[index[0]]; S[1] = temp; } // Otherwise use temp storage to avoid overwrite
    S[N] = h;
    plane_count = N + 1;

    return true;
}

// Performs the VS algorithm for D = 3.
// Returns 1 when an intersection of all halfspaces is found, 0 when a void simplex
// is found (no intersection), and -1 when the iteration limit is exceeded.
// If q is non-null it supplies the objective point (homogeneous, q[3] may be 0 for
// a direction) and receives the solution point on return.
inline int vs3d_test(VS3D_Halfspace_Set& halfspace_set, real* q = nullptr)
{
    // Objective = q if it is not NULL, otherwise it is the origin represented in homogeneous coordinates
    const Vec4 objective = q ? (q[3] != 0 ? vec4((1 / q[3])*vec3(q[0], q[1], q[2]), 1) : *(Vec4*)q) : vec4(vec3(0, 0, 0), 1);

    // Tolerance for 3D void simplex algorithm
    const real eps_f = (real)1 / (sizeof(real) == 4 ? (1L << 23) : (1LL << 52)); // Floating-point epsilon
#if VS3D_HIGH_ACCURACY || REAL_DOUBLE
    const real eps = 8 * eps_f;
#else
    const real eps = 80 * eps_f;
#endif
    const real eps2 = eps*eps; // Using epsilon squared

    // Maximum allowed iterations of main loop. If exceeded, error code is returned
    const int max_iteration_count = 50;

    // State
    Vec4 S[4];           // Up to 4 planes
    int plane_count = 0; // Number of valid planes
    Vec4 p = objective;  // Test point, initialized to objective

    // Default result, changed to valid result if found in loop below
    int result = -1;

    // Iterate until a stopping condition is met or the maximum number of iterations is reached
    for (int i = 0; result < 0 && i < max_iteration_count; ++i)
    {
        Vec4& plane = S[plane_count++];
        real delta = halfspace_set.farthest_halfspace(&plane.v.x, &p.v.x);
#if VS3D_UNNORMALIZED_PLANE_HANDLING != 0
        const real recip_norm = vs3d_recip_sqrt(plane.v | plane.v);
        plane = vec4(recip_norm*plane.v, recip_norm*plane.w);
        delta *= recip_norm;
#endif
        if (delta <= 0 || delta*delta <= eps2*(p | p)) result = 1; // Intersection found
        else if (!vs3d_update(p, S, plane_count, objective, eps2)) result = 0; // Void simplex found
    }

    // If q is given, fill it with the solution (normalize p.w if it is not zero)
    if (q) *(Vec4*)q = (p.w != 0) ? vec4((1 / p.w)*p.v, 1) : p;

    return result;
}

} // namespace VSA
} // namespace Blast
} // namespace Nv

#endif // ifndef NVBLASTEXTAUTHORINGVSA_H
15,829
C
46.969697
270
0.5766
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/NvBlastExtApexSharedParts.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. 
#include "NvBlastExtApexSharedParts.h" #include "NvBlastGlobals.h" #include "NvBlastMemory.h" #include "NvBlastAssert.h" #include "NsVecMath.h" #include "NvMat44.h" #include "NvBounds3.h" #include "NsVecMath.h" #include <vector> using namespace nvidia; using namespace nvidia::shdfnd::aos; namespace Nv { namespace Blast { NV_NOALIAS NV_FORCE_INLINE BoolV PointOutsideOfPlane4(const Vec3VArg _a, const Vec3VArg _b, const Vec3VArg _c, const Vec3VArg _d) { // this is not 0 because of the following scenario: // All the points lie on the same plane and the plane goes through the origin (0,0,0). // On the Wii U, the math below has the problem that when point A gets projected on the // plane cumputed by A, B, C, the distance to the plane might not be 0 for the mentioned // scenario but a small positive or negative value. This can lead to the wrong boolean // results. Using a small negative value as threshold is more conservative but safer. const Vec4V zero = V4Load(-1e-6f); const Vec3V ab = V3Sub(_b, _a); const Vec3V ac = V3Sub(_c, _a); const Vec3V ad = V3Sub(_d, _a); const Vec3V bd = V3Sub(_d, _b); const Vec3V bc = V3Sub(_c, _b); const Vec3V v0 = V3Cross(ab, ac); const Vec3V v1 = V3Cross(ac, ad); const Vec3V v2 = V3Cross(ad, ab); const Vec3V v3 = V3Cross(bd, bc); const FloatV signa0 = V3Dot(v0, _a); const FloatV signa1 = V3Dot(v1, _a); const FloatV signa2 = V3Dot(v2, _a); const FloatV signd3 = V3Dot(v3, _a); const FloatV signd0 = V3Dot(v0, _d); const FloatV signd1 = V3Dot(v1, _b); const FloatV signd2 = V3Dot(v2, _c); const FloatV signa3 = V3Dot(v3, _b); const Vec4V signa = V4Merge(signa0, signa1, signa2, signa3); const Vec4V signd = V4Merge(signd0, signd1, signd2, signd3); return V4IsGrtrOrEq(V4Mul(signa, signd), zero);//same side, outside of the plane } NV_NOALIAS NV_FORCE_INLINE Vec3V closestPtPointSegment(const Vec3VArg a, const Vec3VArg b) { const FloatV zero = FZero(); const FloatV one = FOne(); //Test degenerated case const Vec3V ab = V3Sub(b, a); const FloatV denom = 
V3Dot(ab, ab); const Vec3V ap = V3Neg(a);//V3Sub(origin, a); const FloatV nom = V3Dot(ap, ab); const BoolV con = FIsEq(denom, zero); const FloatV tValue = FClamp(FDiv(nom, denom), zero, one); const FloatV t = FSel(con, zero, tValue); return V3Sel(con, a, V3ScaleAdd(ab, t, a)); } NV_NOALIAS NV_FORCE_INLINE Vec3V closestPtPointSegment(const Vec3VArg Q0, const Vec3VArg Q1, const Vec3VArg A0, const Vec3VArg A1, const Vec3VArg B0, const Vec3VArg B1, uint32_t& size, Vec3V& closestA, Vec3V& closestB) { const Vec3V a = Q0; const Vec3V b = Q1; const BoolV bTrue = BTTTT(); const FloatV zero = FZero(); const FloatV one = FOne(); //Test degenerated case const Vec3V ab = V3Sub(b, a); const FloatV denom = V3Dot(ab, ab); const Vec3V ap = V3Neg(a);//V3Sub(origin, a); const FloatV nom = V3Dot(ap, ab); const BoolV con = FIsEq(denom, zero); if (BAllEq(con, bTrue)) { size = 1; closestA = A0; closestB = B0; return Q0; } const Vec3V v = V3Sub(A1, A0); const Vec3V w = V3Sub(B1, B0); const FloatV tValue = FClamp(FDiv(nom, denom), zero, one); const FloatV t = FSel(con, zero, tValue); const Vec3V tempClosestA = V3ScaleAdd(v, t, A0); const Vec3V tempClosestB = V3ScaleAdd(w, t, B0); closestA = tempClosestA; closestB = tempClosestB; return V3Sub(tempClosestA, tempClosestB); } NV_NOALIAS Vec3V closestPtPointSegmentTesselation(const Vec3VArg Q0, const Vec3VArg Q1, const Vec3VArg A0, const Vec3VArg A1, const Vec3VArg B0, const Vec3VArg B1, uint32_t& size, Vec3V& closestA, Vec3V& closestB) { const FloatV half = FHalf(); const FloatV targetSegmentLengthSq = FLoad(10000.f);//100 unit Vec3V q0 = Q0; Vec3V q1 = Q1; Vec3V a0 = A0; Vec3V a1 = A1; Vec3V b0 = B0; Vec3V b1 = B1; for (;;) { const Vec3V midPoint = V3Scale(V3Add(q0, q1), half); const Vec3V midA = V3Scale(V3Add(a0, a1), half); const Vec3V midB = V3Scale(V3Add(b0, b1), half); const Vec3V v = V3Sub(midPoint, q0); const FloatV sqV = V3Dot(v, v); if (FAllGrtr(targetSegmentLengthSq, sqV)) break; //split the segment into half const Vec3V tClos0 = 
closestPtPointSegment(q0, midPoint); const FloatV sqDist0 = V3Dot(tClos0, tClos0); const Vec3V tClos1 = closestPtPointSegment(q1, midPoint); const FloatV sqDist1 = V3Dot(tClos1, tClos1); //const BoolV con = FIsGrtr(sqDist0, sqDist1); if (FAllGrtr(sqDist0, sqDist1)) { //segment [m, q1] q0 = midPoint; a0 = midA; b0 = midB; } else { //segment [q0, m] q1 = midPoint; a1 = midA; b1 = midB; } } return closestPtPointSegment(q0, q1, a0, a1, b0, b1, size, closestA, closestB); } NV_NOALIAS Vec3V closestPtPointTriangleTesselation(const Vec3V* NV_RESTRICT Q, const Vec3V* NV_RESTRICT A, const Vec3V* NV_RESTRICT B, const uint32_t* NV_RESTRICT indices, uint32_t& size, Vec3V& closestA, Vec3V& closestB) { size = 3; const FloatV zero = FZero(); const FloatV eps = FEps(); const FloatV half = FHalf(); const BoolV bTrue = BTTTT(); const FloatV four = FLoad(4.f); const FloatV sixty = FLoad(100.f); const uint32_t ind0 = indices[0]; const uint32_t ind1 = indices[1]; const uint32_t ind2 = indices[2]; const Vec3V a = Q[ind0]; const Vec3V b = Q[ind1]; const Vec3V c = Q[ind2]; Vec3V ab_ = V3Sub(b, a); Vec3V ac_ = V3Sub(c, a); Vec3V bc_ = V3Sub(b, c); const FloatV dac_ = V3Dot(ac_, ac_); const FloatV dbc_ = V3Dot(bc_, bc_); if (FAllGrtrOrEq(eps, FMin(dac_, dbc_))) { //degenerate size = 2; return closestPtPointSegment(Q[ind0], Q[ind1], A[ind0], A[ind1], B[ind0], B[ind1], size, closestA, closestB); } Vec3V ap = V3Neg(a); Vec3V bp = V3Neg(b); Vec3V cp = V3Neg(c); FloatV d1 = V3Dot(ab_, ap); // snom FloatV d2 = V3Dot(ac_, ap); // tnom FloatV d3 = V3Dot(ab_, bp); // -sdenom FloatV d4 = V3Dot(ac_, bp); // unom = d4 - d3 FloatV d5 = V3Dot(ab_, cp); // udenom = d5 - d6 FloatV d6 = V3Dot(ac_, cp); // -tdenom /* FloatV unom = FSub(d4, d3); FloatV udenom = FSub(d5, d6);*/ FloatV va = FNegScaleSub(d5, d4, FMul(d3, d6));//edge region of BC FloatV vb = FNegScaleSub(d1, d6, FMul(d5, d2));//edge region of AC FloatV vc = FNegScaleSub(d3, d2, FMul(d1, d4));//edge region of AB //check if p in vertex region 
outside a const BoolV con00 = FIsGrtrOrEq(zero, d1); // snom <= 0 const BoolV con01 = FIsGrtrOrEq(zero, d2); // tnom <= 0 const BoolV con0 = BAnd(con00, con01); // vertex region a if (BAllEq(con0, bTrue)) { //size = 1; closestA = A[ind0]; closestB = B[ind0]; return Q[ind0]; } //check if p in vertex region outside b const BoolV con10 = FIsGrtrOrEq(d3, zero); const BoolV con11 = FIsGrtrOrEq(d3, d4); const BoolV con1 = BAnd(con10, con11); // vertex region b if (BAllEq(con1, bTrue)) { /*size = 1; indices[0] = ind1;*/ closestA = A[ind1]; closestB = B[ind1]; return Q[ind1]; } //check if p in vertex region outside of c const BoolV con20 = FIsGrtrOrEq(d6, zero); const BoolV con21 = FIsGrtrOrEq(d6, d5); const BoolV con2 = BAnd(con20, con21); // vertex region c if (BAllEq(con2, bTrue)) { closestA = A[ind2]; closestB = B[ind2]; return Q[ind2]; } //check if p in edge region of AB const BoolV con30 = FIsGrtrOrEq(zero, vc); const BoolV con31 = FIsGrtrOrEq(d1, zero); const BoolV con32 = FIsGrtrOrEq(zero, d3); const BoolV con3 = BAnd(con30, BAnd(con31, con32)); if (BAllEq(con3, bTrue)) { //size = 2; //p in edge region of AB, split AB return closestPtPointSegmentTesselation(Q[ind0], Q[ind1], A[ind0], A[ind1], B[ind0], B[ind1], size, closestA, closestB); } //check if p in edge region of BC const BoolV con40 = FIsGrtrOrEq(zero, va); const BoolV con41 = FIsGrtrOrEq(d4, d3); const BoolV con42 = FIsGrtrOrEq(d5, d6); const BoolV con4 = BAnd(con40, BAnd(con41, con42)); if (BAllEq(con4, bTrue)) { //p in edge region of BC, split BC return closestPtPointSegmentTesselation(Q[ind1], Q[ind2], A[ind1], A[ind2], B[ind1], B[ind2], size, closestA, closestB); } //check if p in edge region of AC const BoolV con50 = FIsGrtrOrEq(zero, vb); const BoolV con51 = FIsGrtrOrEq(d2, zero); const BoolV con52 = FIsGrtrOrEq(zero, d6); const BoolV con5 = BAnd(con50, BAnd(con51, con52)); if (BAllEq(con5, bTrue)) { //p in edge region of AC, split AC return closestPtPointSegmentTesselation(Q[ind0], Q[ind2], A[ind0], 
A[ind2], B[ind0], B[ind2], size, closestA, closestB); } size = 3; Vec3V q0 = Q[ind0]; Vec3V q1 = Q[ind1]; Vec3V q2 = Q[ind2]; Vec3V a0 = A[ind0]; Vec3V a1 = A[ind1]; Vec3V a2 = A[ind2]; Vec3V b0 = B[ind0]; Vec3V b1 = B[ind1]; Vec3V b2 = B[ind2]; for (;;) { const Vec3V ab = V3Sub(q1, q0); const Vec3V ac = V3Sub(q2, q0); const Vec3V bc = V3Sub(q2, q1); const FloatV dab = V3Dot(ab, ab); const FloatV dac = V3Dot(ac, ac); const FloatV dbc = V3Dot(bc, bc); const FloatV fMax = FMax(dab, FMax(dac, dbc)); const FloatV fMin = FMin(dab, FMin(dac, dbc)); const Vec3V w = V3Cross(ab, ac); const FloatV area = V3Length(w); const FloatV ratio = FDiv(FSqrt(fMax), FSqrt(fMin)); if (FAllGrtr(four, ratio) && FAllGrtr(sixty, area)) break; //calculate the triangle normal const Vec3V triNormal = V3Normalize(w); NVBLAST_ASSERT(V3AllEq(triNormal, V3Zero()) == 0); //split the longest edge if (FAllGrtrOrEq(dab, dac) && FAllGrtrOrEq(dab, dbc)) { //split edge q0q1 const Vec3V midPoint = V3Scale(V3Add(q0, q1), half); const Vec3V midA = V3Scale(V3Add(a0, a1), half); const Vec3V midB = V3Scale(V3Add(b0, b1), half); const Vec3V v = V3Sub(midPoint, q2); const Vec3V n = V3Normalize(V3Cross(v, triNormal)); const FloatV d = FNeg(V3Dot(n, midPoint)); const FloatV dp = FAdd(V3Dot(n, q0), d); const FloatV sum = FMul(d, dp); if (FAllGrtr(sum, zero)) { //q0 and origin at the same side, split triangle[q0, m, q2] q1 = midPoint; a1 = midA; b1 = midB; } else { //q1 and origin at the same side, split triangle[m, q1, q2] q0 = midPoint; a0 = midA; b0 = midB; } } else if (FAllGrtrOrEq(dac, dbc)) { //split edge q0q2 const Vec3V midPoint = V3Scale(V3Add(q0, q2), half); const Vec3V midA = V3Scale(V3Add(a0, a2), half); const Vec3V midB = V3Scale(V3Add(b0, b2), half); const Vec3V v = V3Sub(midPoint, q1); const Vec3V n = V3Normalize(V3Cross(v, triNormal)); const FloatV d = FNeg(V3Dot(n, midPoint)); const FloatV dp = FAdd(V3Dot(n, q0), d); const FloatV sum = FMul(d, dp); if (FAllGrtr(sum, zero)) { //q0 and origin at the 
same side, split triangle[q0, q1, m] q2 = midPoint; a2 = midA; b2 = midB; } else { //q2 and origin at the same side, split triangle[m, q1, q2] q0 = midPoint; a0 = midA; b0 = midB; } } else { //split edge q1q2 const Vec3V midPoint = V3Scale(V3Add(q1, q2), half); const Vec3V midA = V3Scale(V3Add(a1, a2), half); const Vec3V midB = V3Scale(V3Add(b1, b2), half); const Vec3V v = V3Sub(midPoint, q0); const Vec3V n = V3Normalize(V3Cross(v, triNormal)); const FloatV d = FNeg(V3Dot(n, midPoint)); const FloatV dp = FAdd(V3Dot(n, q1), d); const FloatV sum = FMul(d, dp); if (FAllGrtr(sum, zero)) { //q1 and origin at the same side, split triangle[q0, q1, m] q2 = midPoint; a2 = midA; b2 = midB; } else { //q2 and origin at the same side, split triangle[q0, m, q2] q1 = midPoint; a1 = midA; b1 = midB; } } } //P must project inside face region. Compute Q using Barycentric coordinates ab_ = V3Sub(q1, q0); ac_ = V3Sub(q2, q0); ap = V3Neg(q0); bp = V3Neg(q1); cp = V3Neg(q2); d1 = V3Dot(ab_, ap); // snom d2 = V3Dot(ac_, ap); // tnom d3 = V3Dot(ab_, bp); // -sdenom d4 = V3Dot(ac_, bp); // unom = d4 - d3 d5 = V3Dot(ab_, cp); // udenom = d5 - d6 d6 = V3Dot(ac_, cp); // -tdenom va = FNegScaleSub(d5, d4, FMul(d3, d6));//edge region of BC vb = FNegScaleSub(d1, d6, FMul(d5, d2));//edge region of AC vc = FNegScaleSub(d3, d2, FMul(d1, d4));//edge region of AB const FloatV toRecipD = FAdd(va, FAdd(vb, vc)); const FloatV denom = FRecip(toRecipD);//V4GetW(recipTmp); const Vec3V v0 = V3Sub(a1, a0); const Vec3V v1 = V3Sub(a2, a0); const Vec3V w0 = V3Sub(b1, b0); const Vec3V w1 = V3Sub(b2, b0); const FloatV t = FMul(vb, denom); const FloatV w = FMul(vc, denom); const Vec3V vA1 = V3Scale(v1, w); const Vec3V vB1 = V3Scale(w1, w); const Vec3V tempClosestA = V3Add(a0, V3ScaleAdd(v0, t, vA1)); const Vec3V tempClosestB = V3Add(b0, V3ScaleAdd(w0, t, vB1)); closestA = tempClosestA; closestB = tempClosestB; return V3Sub(tempClosestA, tempClosestB); } NV_NOALIAS Vec3V closestPtPointTetrahedronTesselation(Vec3V* 
NV_RESTRICT Q, Vec3V* NV_RESTRICT A, Vec3V* NV_RESTRICT B, uint32_t& size, Vec3V& closestA, Vec3V& closestB) { const FloatV eps = FEps(); const Vec3V zeroV = V3Zero(); uint32_t tempSize = size; FloatV bestSqDist = FLoad(NV_MAX_F32); const Vec3V a = Q[0]; const Vec3V b = Q[1]; const Vec3V c = Q[2]; const Vec3V d = Q[3]; const BoolV bTrue = BTTTT(); const BoolV bFalse = BFFFF(); //degenerated const Vec3V ad = V3Sub(d, a); const Vec3V bd = V3Sub(d, b); const Vec3V cd = V3Sub(d, c); const FloatV dad = V3Dot(ad, ad); const FloatV dbd = V3Dot(bd, bd); const FloatV dcd = V3Dot(cd, cd); const FloatV fMin = FMin(dad, FMin(dbd, dcd)); if (FAllGrtr(eps, fMin)) { size = 3; uint32_t tempIndices[] = { 0, 1, 2 }; return closestPtPointTriangleTesselation(Q, A, B, tempIndices, size, closestA, closestB); } Vec3V _Q[] = { Q[0], Q[1], Q[2], Q[3] }; Vec3V _A[] = { A[0], A[1], A[2], A[3] }; Vec3V _B[] = { B[0], B[1], B[2], B[3] }; uint32_t indices[3] = { 0, 1, 2 }; const BoolV bIsOutside4 = PointOutsideOfPlane4(a, b, c, d); if (BAllEq(bIsOutside4, bFalse)) { //origin is inside the tetrahedron, we are done return zeroV; } Vec3V result = zeroV; Vec3V tempClosestA, tempClosestB; if (BAllEq(BGetX(bIsOutside4), bTrue)) { uint32_t tempIndices[] = { 0, 1, 2 }; uint32_t _size = 3; result = closestPtPointTriangleTesselation(_Q, _A, _B, tempIndices, _size, tempClosestA, tempClosestB); const FloatV sqDist = V3Dot(result, result); bestSqDist = sqDist; indices[0] = tempIndices[0]; indices[1] = tempIndices[1]; indices[2] = tempIndices[2]; tempSize = _size; closestA = tempClosestA; closestB = tempClosestB; } if (BAllEq(BGetY(bIsOutside4), bTrue)) { uint32_t tempIndices[] = { 0, 2, 3 }; uint32_t _size = 3; const Vec3V q = closestPtPointTriangleTesselation(_Q, _A, _B, tempIndices, _size, tempClosestA, tempClosestB); const FloatV sqDist = V3Dot(q, q); const BoolV con = FIsGrtr(bestSqDist, sqDist); if (BAllEq(con, bTrue)) { result = q; bestSqDist = sqDist; indices[0] = tempIndices[0]; indices[1] = 
tempIndices[1]; indices[2] = tempIndices[2]; tempSize = _size; closestA = tempClosestA; closestB = tempClosestB; } } if (BAllEq(BGetZ(bIsOutside4), bTrue)) { uint32_t tempIndices[] = { 0, 3, 1 }; uint32_t _size = 3; const Vec3V q = closestPtPointTriangleTesselation(_Q, _A, _B, tempIndices, _size, tempClosestA, tempClosestB); const FloatV sqDist = V3Dot(q, q); const BoolV con = FIsGrtr(bestSqDist, sqDist); if (BAllEq(con, bTrue)) { result = q; bestSqDist = sqDist; indices[0] = tempIndices[0]; indices[1] = tempIndices[1]; indices[2] = tempIndices[2]; tempSize = _size; closestA = tempClosestA; closestB = tempClosestB; } } if (BAllEq(BGetW(bIsOutside4), bTrue)) { uint32_t tempIndices[] = { 1, 3, 2 }; uint32_t _size = 3; const Vec3V q = closestPtPointTriangleTesselation(_Q, _A, _B, tempIndices, _size, tempClosestA, tempClosestB); const FloatV sqDist = V3Dot(q, q); const BoolV con = FIsGrtr(bestSqDist, sqDist); if (BAllEq(con, bTrue)) { result = q; bestSqDist = sqDist; indices[0] = tempIndices[0]; indices[1] = tempIndices[1]; indices[2] = tempIndices[2]; tempSize = _size; closestA = tempClosestA; closestB = tempClosestB; } } A[0] = _A[indices[0]]; A[1] = _A[indices[1]]; A[2] = _A[indices[2]]; B[0] = _B[indices[0]]; B[1] = _B[indices[1]]; B[2] = _B[indices[2]]; Q[0] = _Q[indices[0]]; Q[1] = _Q[indices[1]]; Q[2] = _Q[indices[2]]; size = tempSize; return result; } NV_NOALIAS NV_FORCE_INLINE Vec3V doTesselation(Vec3V* NV_RESTRICT Q, Vec3V* NV_RESTRICT A, Vec3V* NV_RESTRICT B, const Vec3VArg support, const Vec3VArg supportA, const Vec3VArg supportB, uint32_t& size, Vec3V& closestA, Vec3V& closestB) { switch (size) { case 1: { closestA = supportA; closestB = supportB; return support; } case 2: { return closestPtPointSegmentTesselation(Q[0], support, A[0], supportA, B[0], supportB, size, closestA, closestB); } case 3: { uint32_t tempIndices[3] = { 0, 1, 2 }; return closestPtPointTriangleTesselation(Q, A, B, tempIndices, size, closestA, closestB); } case 4: { return 
closestPtPointTetrahedronTesselation(Q, A, B, size, closestA, closestB); } default: NVBLAST_ASSERT(0); } return support; } enum Status { STATUS_NON_INTERSECT, STATUS_CONTACT, STATUS_DEGENERATE, }; struct Output { /// Get the normal to push apart in direction from A to B NV_FORCE_INLINE Vec3V getNormal() const { return V3Normalize(V3Sub(mClosestB, mClosestA)); } Vec3V mClosestA; ///< Closest point on A Vec3V mClosestB; ///< Closest point on B FloatV mDistSq; }; struct ConvexV { void calcExtent(const Vec3V& dir, float& minOut, float& maxOut) const { // Expand const Vec4V x = Vec4V_From_FloatV(V3GetX(dir)); const Vec4V y = Vec4V_From_FloatV(V3GetY(dir)); const Vec4V z = Vec4V_From_FloatV(V3GetZ(dir)); const Vec4V* src = mAovVertices; const Vec4V* end = src + mNumAovVertices * 3; // Do first step Vec4V max = V4MulAdd(x, src[0], V4MulAdd(y, src[1], V4Mul(z, src[2]))); Vec4V min = max; src += 3; // Do the rest for (; src < end; src += 3) { const Vec4V dot = V4MulAdd(x, src[0], V4MulAdd(y, src[1], V4Mul(z, src[2]))); max = V4Max(dot, max); min = V4Min(dot, min); } FStore(V4ExtractMax(max), &maxOut); FStore(V4ExtractMin(min), &minOut); } Vec3V calcSupport(const Vec3V& dir) const { // Expand const Vec4V x = Vec4V_From_FloatV(V3GetX(dir)); const Vec4V y = Vec4V_From_FloatV(V3GetY(dir)); const Vec4V z = Vec4V_From_FloatV(V3GetZ(dir)); NV_ALIGN(16, static const float index4const[]) = { 0.0f, 1.0f, 2.0f, 3.0f }; Vec4V index4 = *(const Vec4V*)index4const; NV_ALIGN(16, static const float delta4const[]) = { 4.0f, 4.0f, 4.0f, 4.0f }; const Vec4V delta4 = *(const Vec4V*)delta4const; const Vec4V* src = mAovVertices; const Vec4V* end = src + mNumAovVertices * 3; // Do first step Vec4V max = V4MulAdd(x, src[0], V4MulAdd(y, src[1], V4Mul(z, src[2]))); Vec4V maxIndex = index4; index4 = V4Add(index4, delta4); src += 3; // Do the rest for (; src < end; src += 3) { const Vec4V dot = V4MulAdd(x, src[0], V4MulAdd(y, src[1], V4Mul(z, src[2]))); const BoolV cmp = V4IsGrtr(dot, max); max = 
V4Max(dot, max); maxIndex = V4Sel(cmp, index4, maxIndex); index4 = V4Add(index4, delta4); } Vec4V horiMax = Vec4V_From_FloatV(V4ExtractMax(max)); uint32_t mask = BGetBitMask(V4IsEq(horiMax, max)); const uint32_t simdIndex = (0x12131210 >> (mask + mask)) & uint32_t(3); /// NOTE! Could be load hit store /// Would be better to have all simd. NV_ALIGN(16, float f[4]); V4StoreA(maxIndex, f); uint32_t index = uint32_t(uint32_t(f[simdIndex])); const Vec4V* aovIndex = (mAovVertices + (index >> 2) * 3); const float* aovOffset = ((const float*)aovIndex) + (index & 3); return Vec3V_From_Vec4V(V4LoadXYZW(aovOffset[0], aovOffset[4], aovOffset[8], 1.0f)); } const Vec4V* mAovVertices; ///< Vertices storex x,x,x,x, y,y,y,y, z,z,z,z uint32_t mNumAovVertices; ///< Number of groups of 4 of vertices }; Status Collide(const Vec3V& initialDir, const ConvexV& convexA, const Mat34V& bToA, const ConvexV& convexB, Output& out) { Vec3V Q[4]; Vec3V A[4]; Vec3V B[4]; Mat33V aToB = M34Trnsps33(bToA); uint32_t size = 0; const Vec3V zeroV = V3Zero(); const BoolV bTrue = BTTTT(); //Vec3V v = V3UnitX(); Vec3V v = V3Sel(FIsGrtr(V3Dot(initialDir, initialDir), FZero()), initialDir, V3UnitX()); //const FloatV minMargin = zero; //const FloatV eps2 = FMul(minMargin, FLoad(0.01f)); //FloatV eps2 = zero; FloatV eps2 = FLoad(1e-6f); const FloatV epsRel = FLoad(0.000225f); Vec3V closA(zeroV), closB(zeroV); FloatV sDist = FMax(); FloatV minDist = sDist; Vec3V closAA = zeroV; Vec3V closBB = zeroV; BoolV bNotTerminated = bTrue; BoolV bCon = bTrue; do { minDist = sDist; closAA = closA; closBB = closB; uint32_t index = size++; NVBLAST_ASSERT(index < 4); const Vec3V supportA = convexA.calcSupport(V3Neg(v)); const Vec3V supportB = M34MulV3(bToA, convexB.calcSupport(M33MulV3(aToB, v))); const Vec3V support = Vec3V_From_Vec4V(Vec4V_From_Vec3V(V3Sub(supportA, supportB))); A[index] = supportA; B[index] = supportB; Q[index] = support; const FloatV signDist = V3Dot(v, support); const FloatV tmp0 = FSub(sDist, signDist); 
if (FAllGrtr(FMul(epsRel, sDist), tmp0)) { out.mClosestA = closA; out.mClosestB = closB; out.mDistSq = sDist; return STATUS_NON_INTERSECT; } //calculate the closest point between two convex hull v = doTesselation(Q, A, B, support, supportA, supportB, size, closA, closB); sDist = V3Dot(v, v); bCon = FIsGrtr(minDist, sDist); bNotTerminated = BAnd(FIsGrtr(sDist, eps2), bCon); } while (BAllEq(bNotTerminated, bTrue)); out.mClosestA = V3Sel(bCon, closA, closAA); out.mClosestB = V3Sel(bCon, closB, closBB); out.mDistSq = FSel(bCon, sDist, minDist); return Status(BAllEq(bCon, bTrue) == 1 ? STATUS_CONTACT : STATUS_DEGENERATE); } static void _calcSeparation(const ConvexV& convexA, const nvidia::NvTransform& aToWorldIn, const Mat34V& bToA, ConvexV& convexB, const Vec3V& centroidAToB, Output& out, Separation& sep) { Mat33V aToB = M34Trnsps33(bToA); Vec3V normalA = out.getNormal(); FloatV vEpsilon = FLoad(1e-6f); if (BAllEqFFFF(FIsGrtr(out.mDistSq, vEpsilon))) { if (BAllEqTTTT(FIsGrtr(V3Dot(centroidAToB, centroidAToB), vEpsilon))) { normalA = V3Normalize(centroidAToB); } else { normalA = V3UnitX(); } } convexA.calcExtent(normalA, sep.min0, sep.max0); Vec3V normalB = M33MulV3(aToB, normalA); convexB.calcExtent(normalB, sep.min1, sep.max1); { // Offset the min max taking into account transform // Distance of origin from B's space in As space in direction of the normal in As space should fix it... 
float fix; FStore(V3Dot(bToA.col3, normalA), &fix); sep.min1 += fix; sep.max1 += fix; } // Looks like it's the plane at the midpoint Vec3V center = V3Scale(V3Add(out.mClosestA, out.mClosestB), FLoad(0.5f)); // Transform to world space Mat34V aToWorld; *(NvMat44*)&aToWorld = aToWorldIn; // Put the normal in world space Vec3V worldCenter = M34MulV3(aToWorld, center); Vec3V worldNormal = M34Mul33V3(aToWorld, normalA); FloatV dist = V3Dot(worldNormal, worldCenter); V3StoreU(worldNormal, sep.plane.n); FStore(dist, &sep.plane.d); sep.plane.d = -sep.plane.d; } static void _arrayVec3ToVec4(const NvVec3* src, Vec4V* dst, uint32_t num) { const uint32_t num4 = num >> 2; for (uint32_t i = 0; i < num4; i++, dst += 3, src += 4) { Vec3V v0 = V3LoadU(&src[0].x); Vec3V v1 = V3LoadU(&src[1].x); Vec3V v2 = V3LoadU(&src[2].x); Vec3V v3 = V3LoadU(&src[3].x); // Transpose V4Transpose(v0, v1, v2, v3); // Save dst[0] = v0; dst[1] = v1; dst[2] = v2; } const uint32_t remain = num & 3; if (remain) { Vec3V work[4]; uint32_t i = 0; for (; i < remain; i++) work[i] = V3LoadU(&src[i].x); for (; i < 4; i++) work[i] = work[remain - 1]; V4Transpose(work[0], work[1], work[2], work[3]); dst[0] = work[0]; dst[1] = work[1]; dst[2] = work[2]; } } static void _arrayVec3ToVec4(const NvVec3* src, const Vec3V& scale, Vec4V* dst, uint32_t num) { // If no scale - use the faster version if (V3AllEq(scale, V3One())) { return _arrayVec3ToVec4(src, dst, num); } const uint32_t num4 = num >> 2; for (uint32_t i = 0; i < num4; i++, dst += 3, src += 4) { Vec3V v0 = V3Mul(scale, V3LoadU(&src[0].x)); Vec3V v1 = V3Mul(scale, V3LoadU(&src[1].x)); Vec3V v2 = V3Mul(scale, V3LoadU(&src[2].x)); Vec3V v3 = V3Mul(scale, V3LoadU(&src[3].x)); // Transpose V4Transpose(v0, v1, v2, v3); // Save dst[0] = v0; dst[1] = v1; dst[2] = v2; } const uint32_t remain = num & 3; if (remain) { Vec3V work[4]; uint32_t i = 0; for (; i < remain; i++) work[i] = V3Mul(scale, V3LoadU(&src[i].x)); for (; i < 4; i++) work[i] = work[remain - 1]; 
V4Transpose(work[0], work[1], work[2], work[3]); dst[0] = work[0]; dst[1] = work[1]; dst[2] = work[2]; } } // TODO: move this to a better long term home // scope based helper struct to pick between stack and heap alloc based on the size of the request struct ScopeMemoryAllocator { public: ScopeMemoryAllocator() : mAlloc(nullptr) {}; ~ScopeMemoryAllocator() { this->free(); } void* alloc(size_t buffSize) { if (mAlloc == nullptr) { mAlloc = NVBLAST_ALLOC(buffSize); return mAlloc; } return nullptr; } void free() { if (mAlloc != nullptr) { NVBLAST_FREE(mAlloc); mAlloc = nullptr; } } private: void* mAlloc; }; #define STACK_ALLOC_LIMIT (100 * 1024) #define ALLOCATE_TEMP_MEMORY(_out, buffSize) \ ScopeMemoryAllocator _out##Allocator; \ _out = (buffSize < STACK_ALLOC_LIMIT ? NvBlastAlloca(buffSize) : _out##Allocator.alloc(buffSize)) bool importerHullsInProximityApexFree(uint32_t hull0Count, const NvVec3* hull0, NvBounds3& hull0Bounds, const nvidia::NvTransform& localToWorldRT0In, const nvidia::NvVec3& scale0In, uint32_t hull1Count, const NvVec3* hull1, NvBounds3& hull1Bounds, const nvidia::NvTransform& localToWorldRT1In, const nvidia::NvVec3& scale1In, float maxDistance, Separation* separation) { const uint32_t numVerts0 = static_cast<uint32_t>(hull0Count); const uint32_t numVerts1 = static_cast<uint32_t>(hull1Count); const uint32_t numAov0 = (numVerts0 + 3) >> 2; const uint32_t numAov1 = (numVerts1 + 3) >> 2; const uint32_t buffSize = (numAov0 + numAov1) * sizeof(Vec4V) * 3; void* buff = nullptr; ALLOCATE_TEMP_MEMORY(buff, buffSize); Vec4V* verts0 = (Vec4V*)buff; // Make sure it's aligned NVBLAST_ASSERT((size_t(verts0) & 0xf) == 0); Vec4V* verts1 = verts0 + (numAov0 * 3); const Vec3V scale0 = V3LoadU(&scale0In.x); const Vec3V scale1 = V3LoadU(&scale1In.x); std::vector<NvVec3> vert0(numVerts0); for (uint32_t i = 0; i < numVerts0; ++i) { vert0[i] = hull0[i]; } std::vector<NvVec3> vert1(numVerts1); for (uint32_t i = 0; i < numVerts1; ++i) { vert1[i] = hull1[i]; } 
_arrayVec3ToVec4(vert0.data(), scale0, verts0, numVerts0); _arrayVec3ToVec4(vert1.data(), scale1, verts1, numVerts1); const NvTransform trans1To0 = localToWorldRT0In.transformInv(localToWorldRT1In); // Load into simd mat Mat34V bToA; *(NvMat44*)&bToA = trans1To0; (*(NvMat44*)&bToA).column3.w = 0.0f; // AOS wants the 4th component of Vec3V to be 0 to work properly ConvexV convexA; ConvexV convexB; convexA.mNumAovVertices = numAov0; convexA.mAovVertices = verts0; convexB.mNumAovVertices = numAov1; convexB.mAovVertices = verts1; const nvidia::NvVec3 hullACenter = hull0Bounds.getCenter(); const nvidia::NvVec3 hullBCenter = hull1Bounds.getCenter(); const Vec3V centroidA = V3LoadU(&hullACenter.x); const Vec3V centroidB = M34MulV3(bToA, V3LoadU(&hullBCenter.x)); // Take the origin of B in As space as the inital direction as it is 'the difference in transform origins B-A in A's space' // Should be a good first guess // Use centroid information const Vec3V initialDir = V3Sub(centroidB, centroidA); Output output; Status status = Collide(initialDir, convexA, bToA, convexB, output); if (status == STATUS_DEGENERATE) { // Calculate the tolerance from the extents const NvVec3 extents0 = hull0Bounds.getExtents(); const NvVec3 extents1 = hull1Bounds.getExtents(); const FloatV tolerance0 = V3ExtractMin(V3Mul(V3LoadU(&extents0.x), scale0)); const FloatV tolerance1 = V3ExtractMin(V3Mul(V3LoadU(&extents1.x), scale1)); const FloatV tolerance = FMul(FAdd(tolerance0, tolerance1), FLoad(0.01f)); const FloatV sqTolerance = FMul(tolerance, tolerance); status = FAllGrtr(sqTolerance, output.mDistSq) ? 
STATUS_CONTACT : STATUS_NON_INTERSECT; } switch (status) { case STATUS_CONTACT: { if (separation) { _calcSeparation(convexA, localToWorldRT0In, bToA, convexB, initialDir, output, *separation); } return true; } default: case STATUS_NON_INTERSECT: { if (separation) { _calcSeparation(convexA, localToWorldRT0In, bToA, convexB, initialDir, output, *separation); } float val; FStore(output.mDistSq, &val); return val < (maxDistance * maxDistance); } } } } // namespace Blast } // namespace Nv
34,605
C++
30.806985
221
0.588412
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/NvBlastExtTriangleProcessor.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #ifndef NVBLASTEXTTRIANGLEPROCESSOR_H #define NVBLASTEXTTRIANGLEPROCESSOR_H #include "NvVec2.h" #include "NvVec3.h" #include <vector> #include <algorithm> using namespace nvidia; namespace Nv { namespace Blast { /** Triangle processor internal triangle representation. Contains only vertex positions. 
*/ struct TrPrcTriangle { NvVec3 points[3]; TrPrcTriangle(NvVec3 a = NvVec3(0.0f), NvVec3 b = NvVec3(0.0f), NvVec3 c = NvVec3(0.0f)) { points[0] = a; points[1] = b; points[2] = c; } TrPrcTriangle& operator=(const TrPrcTriangle& b) { points[0] = b.points[0]; points[1] = b.points[1]; points[2] = b.points[2]; return *this; } TrPrcTriangle(const TrPrcTriangle& b) { points[0] = b.points[0]; points[1] = b.points[1]; points[2] = b.points[2]; } NvVec3 getNormal() const { return (points[1] - points[0]).cross(points[2] - points[0]); } }; /** Triangle processor internal 2D triangle representation. Contains only vertex positions. */ struct TrPrcTriangle2d { NvVec2 points[3]; TrPrcTriangle2d(NvVec2 a = NvVec2(0.0f), NvVec2 b = NvVec2(0.0f), NvVec2 c = NvVec2(0.0f)) { points[0] = a; points[1] = b; points[2] = c; } TrPrcTriangle2d operator=(const TrPrcTriangle2d& b) { points[0] = b.points[0]; points[1] = b.points[1]; points[2] = b.points[2]; return *this; } TrPrcTriangle2d(const TrPrcTriangle2d& b) { points[0] = b.points[0]; points[1] = b.points[1]; points[2] = b.points[2]; } }; class TriangleProcessor { public: TriangleProcessor(){}; ~TriangleProcessor() {} /** Build intersection between two triangles \param[in] a First triangle (A) \param[in] aProjected Projected triangle A \param[in] b Second triangle (B) \param[in] centroid Centroid of first triangle (A) \param[out] intersectionBuffer Result intersection polygon \param[in] normal Normal vector to triangle (Common for both A and B). \return 1 - if if intersection is found. */ uint32_t getTriangleIntersection(TrPrcTriangle& a, TrPrcTriangle2d& aProjected, TrPrcTriangle& b, NvVec3& centroid, std::vector<NvVec3>& intersectionBuffer, NvVec3 normal); /** Test whether BB of triangles intersect. \param[in] a First triangle (A) \param[in] b Second triangle (B) \return true - if intersect */ bool triangleBoundingBoxIntersection(TrPrcTriangle2d& a, TrPrcTriangle2d& b); /** Test whether point is inside of triangle. 
\param[in] point Point coordinates in 2d space. \param[in] triangle Triangle in 2d space. \return 1 - if inside, 2 if on edge, 0 if neither inside nor edge. */ uint32_t isPointInside(const NvVec2& point, const TrPrcTriangle2d& triangle); /** Segment intersection point \param[in] s1 Segment-1 start point \param[in] e1 Segment-1 end point \param[in] s2 Segment-2 start point \param[in] e2 Segment-2 end point \param[out] t1 Intersection point parameter relatively to Segment-1, lies in [0.0, 1.0] range. \return 0 if there is no intersections, 1 - if intersection is found. */ uint32_t getSegmentIntersection(const NvVec2& s1, const NvVec2& e1, const NvVec2& s2, const NvVec2& e2, float& t1); /** Sort vertices of polygon in CCW-order */ void sortToCCW(std::vector<NvVec3>& points, NvVec3& normal); /** Builds convex polygon for given set of points. Points should be coplanar. \param[in] points Input array of points \param[out] convexHull Output polygon \param[in] normal Normal vector to polygon. */ void buildConvexHull(std::vector<NvVec3>& points, std::vector<NvVec3>& convexHull, const NvVec3& normal); }; } // namespace Blast } // namespace Nv #endif // NVBLASTEXTTRIANGLEPROCESSOR_H
5,735
C
32.156069
119
0.6551
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/NvBlastExtAuthoringTriangulator.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #ifndef NVBLASTEXTAUTHORINGTRIANGULATOR_H #define NVBLASTEXTAUTHORINGTRIANGULATOR_H #include <vector> #include <map> #include "NvBlastExtAuthoringTypes.h" #include "NvBlastExtAuthoringMesh.h" #include "NvBlastExtAuthoringInternalCommon.h" namespace Nv { namespace Blast { /** Tool for doing all post processing steps of authoring. */ class Triangulator { public: /** Triangulates provided mesh and saves result internally. Uses Ear-clipping algorithm. 
\param[in] mesh Mesh for triangulation
    */
    void triangulate(const Mesh* mesh);

    /**
        \return Return array of triangles of base mesh (the UV-fitted set; see getBaseMeshNotFitted
        for the triangles before UV processing).
    */
    std::vector<Triangle>& getBaseMesh()
    {
        return mBaseMeshUVFittedTriangles;
    }

    /**
        \return Return array of triangles of base mesh without the UV fitting applied.
    */
    std::vector<Triangle>& getBaseMeshNotFitted()
    {
        return mBaseMeshResultTriangles;
    }

    /**
        \return Return array of TriangleIndexed of base mesh. Each TriangleIndexed contains index of
        corresponding vertex in internal vertex buffer.
    */
    std::vector<TriangleIndexed>& getBaseMeshIndexed()
    {
        return mBaseMeshTriangles;
    }

    /**
        \return Return mapping from vertices of input Mesh to internal vertices buffer.
        Used for island detection.
    */
    std::vector<uint32_t>& getBaseMapping()
    {
        return mBaseMapping;
    };

    /**
        \return Return mapping from vertices of input Mesh to internal vertices buffer, where only
        positions are accounted (normals/UVs ignored). Used for island detection.
    */
    std::vector<int32_t>& getPositionedMapping()
    {
        return mPositionMappedVrt;
    };

    /**
        \return Return internal vertex buffer size. Vertices internally are welded with some threshold.
    */
    uint32_t getWeldedVerticesCount()
    {
        return static_cast<uint32_t>(mVertices.size());
    }

    /**
        Removes all information about mesh triangulation.
    */
    void reset();

    /**
        \return Mutable reference to the id of the parent chunk this triangulation belongs to.
    */
    int32_t& getParentChunkId()
    {
        return parentChunkId;
    };

  private:
    // Id of the chunk whose mesh this triangulator processed.
    int32_t parentChunkId;

    // Adds vertex p to the internal buffer unless an equal vertex already exists;
    // returns the index of the (new or existing) vertex.
    int32_t addVerticeIfNotExist(const Vertex& p);

    // Registers edge ed in the edge map (validity rules are in the implementation).
    void addEdgeIfValid(EdgeWithParent& ed);

    /* Data used before triangulation to build polygon loops*/
    std::vector<Vertex> mVertices;                            // welded internal vertex buffer
    std::vector<EdgeWithParent> mBaseMeshEdges;               // edges tagged with parent facet
    std::map<Vertex, int32_t, VrtComp> mVertMap;              // vertex -> index in mVertices
    std::map<EdgeWithParent, int32_t, EdgeComparator> mEdgeMap; // deduplication map for edges
    std::vector<uint32_t> mBaseMapping;                       // input vertex -> internal vertex
    std::vector<int32_t> mPositionMappedVrt;                  // position-only vertex mapping
    /* ------------------------------------------------------------ */

    /**
        Unite all almost similar vertices, update edges according to these changes
    */
    void prepare(const Mesh* mesh);

    // Triangulates one polygon loop with the ear-clipping algorithm, projecting along dir.
    void triangulatePolygonWithEarClipping(const std::vector<uint32_t>& inputPolygon, const Vertex* vert,
                                           const ProjectionDirections& dir);
    // Assembles edges into closed loops and triangulates them, propagating facet attributes.
    void buildPolygonAndTriangulate(std::vector<Edge>& edges, Vertex* vertices, int32_t userData, int32_t materialId,
                                    int32_t smoothingGroup);
    // Fills mPositionMappedVrt (position-only welding).
    void computePositionedMapping();

    std::vector<TriangleIndexed> mBaseMeshTriangles;

    /** Final triangles */
    std::vector<Triangle> mBaseMeshResultTriangles;
    std::vector<Triangle> mBaseMeshUVFittedTriangles;
};

}  // namespace Blast
}  // namespace Nv

#endif  // ifndef NVBLASTEXTAUTHORINGTRIANGULATOR_H
5,401
C
35.5
169
0.633031
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/NvBlastExtAuthoringFractureToolImpl.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. 
#include "NvBlastExtAuthoringFractureToolImpl.h" #include "NvBlastExtAuthoringMeshImpl.h" #include "NvBlastExtAuthoringMeshUtils.h" // This warning arises when using some stl containers with older versions of VC // c:\program files (x86)\microsoft visual studio 12.0\vc\include\xtree(1826): warning C4702: unreachable code #if NV_VC && NV_VC < 14 #pragma warning(disable : 4702) #endif #include <queue> #include <vector> #include <map> #include <stack> #include <functional> #include "NvBlastExtAuthoringVSA.h" #include <float.h> #include "NvBlastExtAuthoring.h" #include "NvBlastExtAuthoringTriangulator.h" #include "NvBlastExtAuthoringBooleanToolImpl.h" #include "NvBlastExtAuthoringAcceleratorImpl.h" #include "NvBlastExtAuthoringCutout.h" #include "NvBlast.h" #include "NvBlastGlobals.h" #include "NvBlastExtAuthoringPerlinNoise.h" #include <NvBlastAssert.h> #include <NvBlastNvSharedHelpers.h> #ifndef SAFE_DELETE #define SAFE_DELETE(p) \ { \ if (p) \ { \ delete (p); \ (p) = NULL; \ } \ } #endif namespace Nv { namespace Blast { /* Vector operations using TransformST */ inline TransformST createCubeTMFromBounds(const NvcBounds3& bounds) { // scale = max extent, translation = center const NvcVec3 center = 0.5f*(bounds.maximum + bounds.minimum); const NvcVec3 extent = 0.5f*(bounds.maximum - bounds.minimum); const float maxExtent = std::max(extent.x, std::max(extent.y, extent.z)); return {center, maxExtent > 0.0f ? 
maxExtent : 1.0f}; // Keep the transformation from being singular } ////////////////////////////////////////// struct Halfspace_partitioning : public VSA::VS3D_Halfspace_Set { std::vector<NvcPlane> planes; VSA::real farthest_halfspace(VSA::real plane[4], const VSA::real point[4]) { float biggest_d = -FLT_MAX; for (uint32_t i = 0; i < planes.size(); ++i) { float d = planes[i].n.x * point[0] + planes[i].n.y * point[1] + planes[i].n.z * point[2] + planes[i].d * point[3]; if (d > biggest_d) { biggest_d = d; plane[0] = planes[i].n.x; plane[1] = planes[i].n.y; plane[2] = planes[i].n.z; plane[3] = planes[i].d; } } return biggest_d; }; }; int32_t findCellBasePlanes(const std::vector<NvcVec3>& sites, std::vector<std::vector<std::pair<int32_t, int32_t>>>& neighbors) { Halfspace_partitioning prt; std::vector<NvcPlane>& planes = prt.planes; int32_t neighborGlobalIndex = 0; neighbors.resize(sites.size()); for (uint32_t cellId = 0; cellId + 1 < sites.size(); ++cellId) { planes.clear(); planes.resize(sites.size() - 1 - cellId); std::vector<NvcVec3> midpoints(sites.size() - 1); int32_t collected = 0; for (uint32_t i = cellId + 1; i < sites.size(); ++i) { NvcVec3 midpoint = 0.5 * (sites[i] + sites[cellId]); NvcVec3 direction = fromNvShared(toNvShared(sites[i] - sites[cellId]).getNormalized()); planes[collected].n = direction; planes[collected].d = -(direction | midpoint); midpoints[collected] = midpoint; ++collected; } for (uint32_t i = 0; i < planes.size(); ++i) { planes[i].n = -planes[i].n; planes[i].d = -planes[i].d; if (VSA::vs3d_test(prt)) { const uint32_t nId = i + cellId + 1; neighbors[cellId].push_back(std::pair<int32_t, int32_t>(nId, neighborGlobalIndex)); neighbors[nId].push_back(std::pair<int32_t, int32_t>(cellId, neighborGlobalIndex)); ++neighborGlobalIndex; }; planes[i].n = -planes[i].n; planes[i].d = -planes[i].d; } } return neighborGlobalIndex; } #define SITE_BOX_SIZE 4 #define CUTTING_BOX_SIZE 40 Mesh* getCellMesh(BooleanEvaluator& eval, int32_t planeIndexerOffset, 
int32_t cellId, const std::vector<NvcVec3>& sites, const std::vector<std::vector<std::pair<int32_t, int32_t>>>& neighbors, int32_t interiorMaterialId, NvcVec3 origin) { Mesh* cell = getBigBox(toNvShared(origin), SITE_BOX_SIZE, interiorMaterialId); Mesh* cuttingMesh = getCuttingBox(NvVec3(0, 0, 0), NvVec3(1, 1, 1), CUTTING_BOX_SIZE, 0, interiorMaterialId); for (uint32_t i = 0; i < neighbors[cellId].size(); ++i) { std::pair<int32_t, int32_t> neighbor = neighbors[cellId][i]; int32_t nCell = neighbor.first; NvVec3 midpoint = 0.5 * toNvShared(sites[nCell] + sites[cellId]); NvVec3 direction = toNvShared(sites[nCell] - sites[cellId]).getNormalized(); int32_t planeIndex = neighbor.second + planeIndexerOffset; if (nCell < cellId) planeIndex = -planeIndex; setCuttingBox(midpoint, -direction, cuttingMesh, CUTTING_BOX_SIZE, planeIndex); eval.performFastCutting(cell, cuttingMesh, BooleanConfigurations::BOOLEAN_INTERSECTION()); Mesh* newCell = eval.createNewMesh(); delete cell; cell = newCell; if (cell == nullptr) break; } delete cuttingMesh; return cell; } #define MAX_VORONOI_ATTEMPT_NUMBER 450 VoronoiSitesGeneratorImpl::VoronoiSitesGeneratorImpl(const Mesh* mesh, RandomGeneratorBase* rnd) { mMesh = mesh; mRnd = rnd; mAccelerator = new BBoxBasedAccelerator(mMesh, kBBoxBasedAcceleratorDefaultResolution); mStencil = nullptr; } void VoronoiSitesGeneratorImpl::setBaseMesh(const Mesh* m) { mGeneratedSites.clear(); delete mAccelerator; mMesh = m; mAccelerator = new BBoxBasedAccelerator(mMesh, kBBoxBasedAcceleratorDefaultResolution); } VoronoiSitesGeneratorImpl::~VoronoiSitesGeneratorImpl() { delete mAccelerator; mAccelerator = nullptr; } void VoronoiSitesGeneratorImpl::release() { delete this; } void VoronoiSitesGeneratorImpl::setStencil(const Mesh* stencil) { mStencil = stencil; } void VoronoiSitesGeneratorImpl::clearStencil() { mStencil = nullptr; } void VoronoiSitesGeneratorImpl::uniformlyGenerateSitesInMesh(const uint32_t sitesCount) { BooleanEvaluator voronoiMeshEval; NvcVec3 mn 
= mMesh->getBoundingBox().minimum; NvcVec3 mx = mMesh->getBoundingBox().maximum; NvcVec3 vc = mx - mn; uint32_t attemptNumber = 0; uint32_t generatedSites = 0; while (generatedSites < sitesCount && attemptNumber < MAX_VORONOI_ATTEMPT_NUMBER) { float rn1 = mRnd->getRandomValue() * vc.x; float rn2 = mRnd->getRandomValue() * vc.y; float rn3 = mRnd->getRandomValue() * vc.z; if (voronoiMeshEval.isPointContainedInMesh(mMesh, NvcVec3{ rn1, rn2, rn3 } + mn) && (mStencil == nullptr || voronoiMeshEval.isPointContainedInMesh(mStencil, NvcVec3{ rn1, rn2, rn3 } + mn))) { generatedSites++; mGeneratedSites.push_back(NvcVec3{ rn1, rn2, rn3 } + mn); attemptNumber = 0; } else { attemptNumber++; if (attemptNumber > MAX_VORONOI_ATTEMPT_NUMBER) break; } } } void VoronoiSitesGeneratorImpl::clusteredSitesGeneration(const uint32_t numberOfClusters, const uint32_t sitesPerCluster, float clusterRadius) { BooleanEvaluator voronoiMeshEval; NvcVec3 mn = mMesh->getBoundingBox().minimum; NvcVec3 mx = mMesh->getBoundingBox().maximum; NvcVec3 middle = (mx + mn) * 0.5; NvcVec3 vc = (mx - mn) * 0.5; uint32_t attemptNumber = 0; uint32_t generatedSites = 0; std::vector<NvcVec3> tempPoints; while (generatedSites < numberOfClusters) { float rn1 = mRnd->getRandomValue() * 2 - 1; float rn2 = mRnd->getRandomValue() * 2 - 1; float rn3 = mRnd->getRandomValue() * 2 - 1; NvcVec3 p = { middle.x + rn1 * vc.x, middle.y + rn2 * vc.y, middle.z + rn3 * vc.z }; if (voronoiMeshEval.isPointContainedInMesh(mMesh, p) && (mStencil == nullptr || voronoiMeshEval.isPointContainedInMesh(mStencil, p))) { generatedSites++; tempPoints.push_back(p); attemptNumber = 0; } else { attemptNumber++; if (attemptNumber > MAX_VORONOI_ATTEMPT_NUMBER) break; } } int32_t totalCount = 0; for (; tempPoints.size() > 0; tempPoints.pop_back()) { uint32_t unif = sitesPerCluster; generatedSites = 0; while (generatedSites < unif) { NvcVec3 p = tempPoints.back() + fromNvShared(NvVec3(mRnd->getRandomValue() * 2 - 1, mRnd->getRandomValue() * 2 - 1, 
mRnd->getRandomValue() * 2 - 1) .getNormalized()) * (mRnd->getRandomValue() + 0.001f) * clusterRadius; if (voronoiMeshEval.isPointContainedInMesh(mMesh, p) && (mStencil == nullptr || voronoiMeshEval.isPointContainedInMesh(mStencil, p))) { totalCount++; generatedSites++; mGeneratedSites.push_back(p); attemptNumber = 0; } else { attemptNumber++; if (attemptNumber > MAX_VORONOI_ATTEMPT_NUMBER) break; } } } } #define IN_SPHERE_ATTEMPT_NUMBER 20 void VoronoiSitesGeneratorImpl::addSite(const NvcVec3& site) { mGeneratedSites.push_back(site); } void VoronoiSitesGeneratorImpl::generateInSphere(const uint32_t count, const float radius, const NvcVec3& center) { BooleanEvaluator voronoiMeshEval; uint32_t attemptNumber = 0; uint32_t generatedSites = 0; std::vector<NvcVec3> tempPoints; float radiusSquared = radius * radius; while (generatedSites < count && attemptNumber < MAX_VORONOI_ATTEMPT_NUMBER) { float rn1 = (mRnd->getRandomValue() - 0.5f) * 2.f * radius; float rn2 = (mRnd->getRandomValue() - 0.5f) * 2.f * radius; float rn3 = (mRnd->getRandomValue() - 0.5f) * 2.f * radius; NvcVec3 point = { rn1, rn2, rn3 }; if (toNvShared(point).magnitudeSquared() < radiusSquared && voronoiMeshEval.isPointContainedInMesh(mMesh, point + center) && (mStencil == nullptr || voronoiMeshEval.isPointContainedInMesh(mStencil, point + center))) { generatedSites++; mGeneratedSites.push_back(point + center); attemptNumber = 0; } else { attemptNumber++; if (attemptNumber > MAX_VORONOI_ATTEMPT_NUMBER) break; } } } void VoronoiSitesGeneratorImpl::deleteInSphere(const float radius, const NvcVec3& center, float deleteProbability) { float r2 = radius * radius; for (uint32_t i = 0; i < mGeneratedSites.size(); ++i) { if (toNvShared(mGeneratedSites[i] - center).magnitudeSquared() < r2 && mRnd->getRandomValue() <= deleteProbability) { std::swap(mGeneratedSites[i], mGeneratedSites.back()); mGeneratedSites.pop_back(); --i; } } } void VoronoiSitesGeneratorImpl::radialPattern(const NvcVec3& center, const NvcVec3& 
normal, float radius, int32_t angularSteps, int32_t radialSteps, float angleOffset, float variability) { // mGeneratedSites.push_back(center); NvVec3 t1, t2; if (std::abs(normal.z) < 0.9) { t1 = toNvShared(normal).cross(NvVec3(0, 0, 1)); } else { t1 = toNvShared(normal).cross(NvVec3(1, 0, 0)); } t2 = t1.cross(toNvShared(normal)); t1.normalize(); t2.normalize(); float radStep = radius / radialSteps; int32_t cCr = 0; float angleStep = nvidia::NvPi * 2 / angularSteps; for (float cRadius = radStep; cRadius < radius; cRadius += radStep) { float cAngle = angleOffset * cCr; for (int32_t i = 0; i < angularSteps; ++i) { float angVars = mRnd->getRandomValue() * variability + (1.0f - 0.5f * variability); float radVars = mRnd->getRandomValue() * variability + (1.0f - 0.5f * variability); NvcVec3 nPos = fromNvShared(std::cos(cAngle * angVars) * t1 + std::sin(cAngle * angVars) * t2) * cRadius * radVars + center; mGeneratedSites.push_back(nPos); cAngle += angleStep; } ++cCr; } } uint32_t VoronoiSitesGeneratorImpl::getVoronoiSites(const NvcVec3*& sites) { if (mGeneratedSites.size()) { sites = &mGeneratedSites[0]; } return (uint32_t)mGeneratedSites.size(); } int32_t FractureToolImpl::voronoiFracturing(uint32_t chunkId, uint32_t cellCount, const NvcVec3* cellPointsIn, bool replaceChunk) { if (chunkId == 0 && replaceChunk) { return 1; } int32_t chunkInfoIndex = getChunkInfoIndex(chunkId); if (chunkInfoIndex == -1 || cellCount < 2) { return 1; } if (!mChunkData[chunkInfoIndex].isLeaf) { deleteChunkSubhierarchy(chunkId); } chunkInfoIndex = getChunkInfoIndex(chunkId); Mesh* mesh = mChunkData[chunkInfoIndex].getMesh(); const TransformST& tm = mChunkData[chunkInfoIndex].getTmToWorld(); std::vector<NvcVec3> cellPoints(cellCount); for (uint32_t i = 0; i < cellCount; ++i) { cellPoints[i] = tm.invTransformPos(cellPointsIn[i]); } /** Prebuild accelerator structure */ BooleanEvaluator eval; BooleanEvaluator voronoiMeshEval; BBoxBasedAccelerator spAccel = BBoxBasedAccelerator(mesh, 
kBBoxBasedAcceleratorDefaultResolution); std::vector<std::vector<std::pair<int32_t, int32_t>>> neighbors; const int32_t neighborCount = findCellBasePlanes(cellPoints, neighbors); /** Fracture */ int32_t parentChunkId = replaceChunk ? mChunkData[chunkInfoIndex].parentChunkId : chunkId; std::vector<uint32_t> newlyCreatedChunksIds; for (uint32_t i = 0; i < cellPoints.size(); ++i) { Mesh* cell = getCellMesh(eval, mPlaneIndexerOffset, i, cellPoints, neighbors, mInteriorMaterialId, cellPoints[i]); if (cell == nullptr) { continue; } DummyAccelerator dmAccel(cell->getFacetCount()); voronoiMeshEval.performBoolean(mesh, cell, &spAccel, &dmAccel, BooleanConfigurations::BOOLEAN_INTERSECTION()); Mesh* resultMesh = voronoiMeshEval.createNewMesh(); if (resultMesh) { uint32_t ncidx = createNewChunk(parentChunkId); mChunkData[ncidx].isLeaf = true; setChunkInfoMesh(mChunkData[ncidx], resultMesh); newlyCreatedChunksIds.push_back(mChunkData[ncidx].chunkId); } eval.reset(); delete cell; } mChunkData[chunkInfoIndex].isLeaf = false; if (replaceChunk) { deleteChunkSubhierarchy(chunkId, true); } mPlaneIndexerOffset += neighborCount; if (mRemoveIslands) { for (auto chunkToCheck : newlyCreatedChunksIds) { islandDetectionAndRemoving(chunkToCheck); } } return 0; } template<typename Cmp> static void compactifyAndTransformVertexBuffer ( std::vector<Nv::Blast::Vertex>& vertexBuffer, Edge* edges, const Nv::Blast::Vertex* sourceVertices, uint32_t numSourceVerts, uint32_t numEdges, const TransformST& tm ) { std::vector<uint32_t> indexMap; indexMap.reserve(numSourceVerts); std::map<Vertex, uint32_t, Cmp> vertexMapping; for (uint32_t i = 0; i < numSourceVerts; i++) { const auto& vert = sourceVertices[i]; auto it = vertexMapping.find(vert); if (it == vertexMapping.end()) { const uint32_t size = static_cast<uint32_t>(vertexBuffer.size()); vertexMapping[vert] = size; // transform the position and normalZ back to world space before storing it Nv::Blast::Vertex transformedVert = vert; transformedVert.p = 
tm.transformPos(vert.p); vertexBuffer.push_back(transformedVert); indexMap.push_back(size); } else { indexMap.push_back(it->second); } } // now we need convert the list of edges to be based on the compacted vertex buffer for (uint32_t i = 0; i < numEdges; i++) { Edge &edge = edges[i]; edge.s = indexMap[edges[i].s]; edge.e = indexMap[edges[i].e]; } } Mesh* FractureToolImpl::createChunkMesh(int32_t chunkInfoIndex, bool splitUVs /* = true */) { // make sure the chunk is valid if (chunkInfoIndex < 0 || uint32_t(chunkInfoIndex) >= this->getChunkCount()) { return nullptr; } // grab the original source mesh const auto sourceMesh = this->getChunkInfo(chunkInfoIndex).getMesh(); if (!sourceMesh) { return nullptr; } const Nv::Blast::Vertex* sourceVertices = sourceMesh->getVertices(); const uint32_t numSourceVerts = sourceMesh->getVerticesCount(); const auto sourceEdges = sourceMesh->getEdges(); const auto numEdges = sourceMesh->getEdgesCount(); const auto edgeBufferSize = numEdges * sizeof(Edge); Edge* edges = reinterpret_cast<Edge*>(NVBLAST_ALLOC(edgeBufferSize)); memcpy(edges, sourceEdges, edgeBufferSize); const TransformST& tm = this->getChunkInfo(chunkInfoIndex).getTmToWorld(); std::vector<Vertex> _vertexBuffer; if (splitUVs) compactifyAndTransformVertexBuffer<VrtComp>(_vertexBuffer, edges, sourceVertices, numSourceVerts, numEdges, tm); else compactifyAndTransformVertexBuffer<VrtCompNoUV>(_vertexBuffer, edges, sourceVertices, numSourceVerts, numEdges, tm); // now fix the order of the edges // compacting the vertex buffer can put them out of order // the end of one edge needs to be the start of the next const auto facets = sourceMesh->getFacetsBuffer(); const auto facetsCount = sourceMesh->getFacetCount(); Vertex* vertices = reinterpret_cast<Vertex*>(_vertexBuffer.data()); const auto numVerts = static_cast<uint32_t>(_vertexBuffer.size()); nvidia::NvBounds3 bnd; bnd.setEmpty(); std::set<int32_t> vertUVsToFix; for (uint32_t f = 0; f < facetsCount; f++) { const Facet& facet = 
facets[f]; uint32_t nextIndex = edges[facet.firstEdgeNumber].e; for (uint32_t edge = 1; edge < facet.edgesCount; edge++) { for (uint32_t test = edge; test < facet.edgesCount; test++) { if (nextIndex == edges[facet.firstEdgeNumber + test].s) { if (test != edge) { std::swap(edges[facet.firstEdgeNumber + edge], edges[facet.firstEdgeNumber + test]); } nextIndex = edges[facet.firstEdgeNumber + edge].e; break; } } // make sure the last edge wraps around and points back at the first edge NVBLAST_ASSERT(edges[facet.firstEdgeNumber + edge - 1].e == edges[facet.firstEdgeNumber + edge].s); } // we need to de-normalize the UVs for interior faces // build a set of interior vertex indices as we inflate the bounds to include all the UVs if (facet.userData != 0) { for (uint32_t edge = 0; edge < facet.edgesCount; edge++) { const int32_t v1 = edges[facet.firstEdgeNumber + edge].s; if (vertUVsToFix.insert(v1).second) { bnd.include(NvVec3(vertices[v1].uv[0].x, vertices[v1].uv[0].y, 0.0f)); } const int32_t v2 = edges[facet.firstEdgeNumber + edge].e; if (vertUVsToFix.insert(v2).second) { bnd.include(NvVec3(vertices[v2].uv[0].x, vertices[v2].uv[0].y, 0.0f)); } } } } const float xscale = (bnd.maximum.x - bnd.minimum.x); const float yscale = (bnd.maximum.y - bnd.minimum.y); const float scale = 1.0f / std::min(xscale, yscale); // To have uniform scaling for (auto vertIdx: vertUVsToFix) { NVBLAST_ASSERT(uint32_t(vertIdx) < numVerts); auto& vert = vertices[vertIdx]; vert.uv[0].x = (vert.uv[0].x - bnd.minimum.x) * scale; vert.uv[0].y = (vert.uv[0].y - bnd.minimum.y) * scale; } // build a new mesh from the converted data Mesh* chunkMesh = new MeshImpl(vertices, edges, facets, numVerts, numEdges, facetsCount); NVBLAST_FREE(edges); return chunkMesh; } bool FractureToolImpl::isMeshContainOpenEdges(const Mesh* input) { std::map<NvcVec3, int32_t, VrtPositionComparator> vertexMapping; std::vector<int32_t> vertexRemappingArray(input->getVerticesCount()); std::vector<Edge> 
remappedEdges(input->getEdgesCount()); /** Remap vertices */ const Vertex* vrx = input->getVertices(); for (uint32_t i = 0; i < input->getVerticesCount(); ++i) { auto it = vertexMapping.find(vrx->p); if (it == vertexMapping.end()) { vertexMapping[vrx->p] = i; vertexRemappingArray[i] = i; } else { vertexRemappingArray[i] = it->second; } ++vrx; } const Edge* ed = input->getEdges(); for (uint32_t i = 0; i < input->getEdgesCount(); ++i) { remappedEdges[i].s = vertexRemappingArray[ed->s]; remappedEdges[i].e = vertexRemappingArray[ed->e]; if (remappedEdges[i].e < remappedEdges[i].s) { std::swap(remappedEdges[i].s, remappedEdges[i].e); } ++ed; } std::sort(remappedEdges.begin(), remappedEdges.end()); int32_t collected = 1; for (uint32_t i = 1; i < remappedEdges.size(); ++i) { if (remappedEdges[i - 1].s == remappedEdges[i].s && remappedEdges[i - 1].e == remappedEdges[i].e) { collected++; } else { if (collected & 1) { return true; } else { collected = 1; } } } return collected & 1; } int32_t FractureToolImpl::voronoiFracturing(uint32_t chunkId, uint32_t cellCount, const NvcVec3* cellPointsIn, const NvcVec3& scale, const NvcQuat& rotation, bool replaceChunk) { if (chunkId == 0 && replaceChunk) { return 1; } int32_t chunkInfoIndex = getChunkInfoIndex(chunkId); if (chunkInfoIndex == -1 || cellCount < 2) { return 1; } if (!mChunkData[chunkInfoIndex].isLeaf) { deleteChunkSubhierarchy(chunkId); } chunkInfoIndex = getChunkInfoIndex(chunkId); Mesh* mesh = mChunkData[chunkInfoIndex].getMesh(); const TransformST& tm = mChunkData[chunkInfoIndex].getTmToWorld(); std::vector<NvcVec3> cellPoints(cellCount); for (uint32_t i = 0; i < cellCount; ++i) { cellPoints[i] = tm.invTransformPos(cellPointsIn[i]); toNvShared(cellPoints[i]) = toNvShared(rotation).rotateInv(toNvShared(cellPoints[i])); cellPoints[i].x *= (1.0f / scale.x); cellPoints[i].y *= (1.0f / scale.y); cellPoints[i].z *= (1.0f / scale.z); } /** Prebuild accelerator structure */ BooleanEvaluator eval; BooleanEvaluator 
voronoiMeshEval; BBoxBasedAccelerator spAccel = BBoxBasedAccelerator(mesh, kBBoxBasedAcceleratorDefaultResolution); std::vector<std::vector<std::pair<int32_t, int32_t>>> neighbors; const int32_t neighborCount = findCellBasePlanes(cellPoints, neighbors); /** Fracture */ int32_t parentChunkId = replaceChunk ? mChunkData[chunkInfoIndex].parentChunkId : chunkId; std::vector<uint32_t> newlyCreatedChunksIds; for (uint32_t i = 0; i < cellPoints.size(); ++i) { Mesh* cell = getCellMesh(eval, mPlaneIndexerOffset, i, cellPoints, neighbors, mInteriorMaterialId, cellPoints[i]); if (cell == nullptr) { continue; } for (uint32_t v = 0; v < cell->getVerticesCount(); ++v) { cell->getVerticesWritable()[v].p.x *= scale.x; cell->getVerticesWritable()[v].p.y *= scale.y; cell->getVerticesWritable()[v].p.z *= scale.z; toNvShared(cell->getVerticesWritable()[v].p) = toNvShared(rotation).rotate(toNvShared(cell->getVerticesWritable()[v].p)); } cell->recalculateBoundingBox(); DummyAccelerator dmAccel(cell->getFacetCount()); voronoiMeshEval.performBoolean(mesh, cell, &spAccel, &dmAccel, BooleanConfigurations::BOOLEAN_INTERSECTION()); Mesh* resultMesh = voronoiMeshEval.createNewMesh(); if (resultMesh) { uint32_t ncidx = createNewChunk(parentChunkId); mChunkData[ncidx].isLeaf = true; setChunkInfoMesh(mChunkData[ncidx], resultMesh); newlyCreatedChunksIds.push_back(mChunkData[ncidx].chunkId); } eval.reset(); delete cell; } mChunkData[chunkInfoIndex].isLeaf = false; if (replaceChunk) { deleteChunkSubhierarchy(chunkId, true); } mPlaneIndexerOffset += neighborCount; if (mRemoveIslands) { for (auto chunkToCheck : newlyCreatedChunksIds) { islandDetectionAndRemoving(chunkToCheck); } } return 0; } int32_t FractureToolImpl::slicing(uint32_t chunkId, const SlicingConfiguration& conf, bool replaceChunk, RandomGeneratorBase* rnd) { if (conf.noise.amplitude != 0) { return slicingNoisy(chunkId, conf, replaceChunk, rnd); } if (replaceChunk && chunkId == 0) { return 1; } int32_t chunkInfoIndex = 
    getChunkInfoIndex(chunkId);
    if (chunkInfoIndex == -1)
    {
        return 1;
    }
    if (!mChunkData[chunkInfoIndex].isLeaf)
    {
        // Refracturing a non-leaf discards its previous children first.
        deleteChunkSubhierarchy(chunkId);
    }
    // mChunkData may have been reordered; re-resolve the index.
    chunkInfoIndex = getChunkInfoIndex(chunkId);
    // Work on a copy of the chunk mesh; the copy is consumed by the cutting passes.
    Mesh* mesh = new MeshImpl(*reinterpret_cast<MeshImpl*>(mChunkData[chunkInfoIndex].getMesh()));
    BooleanEvaluator bTool;
    int32_t x_slices = conf.x_slices;
    int32_t y_slices = conf.y_slices;
    int32_t z_slices = conf.z_slices;
    const nvidia::NvBounds3 sourceBBox = toNvShared(mesh->getBoundingBox());
    NvVec3 center = {mesh->getBoundingBox().minimum.x, 0, 0};
    // Spacing between slicing planes along each axis (N slices -> N+1 slabs).
    float x_offset = (sourceBBox.maximum.x - sourceBBox.minimum.x) * (1.0f / (x_slices + 1));
    float y_offset = (sourceBBox.maximum.y - sourceBBox.minimum.y) * (1.0f / (y_slices + 1));
    float z_offset = (sourceBBox.maximum.z - sourceBBox.minimum.z) * (1.0f / (z_slices + 1));
    center.x += x_offset;
    NvVec3 dir = {1, 0, 0};
    // A single cutting box is reused for every plane; setCuttingBox repositions it.
    Mesh* slBox = getCuttingBox(center, dir, 20, 0, mInteriorMaterialId);
    // Template ChunkInfo for every chunk produced by this operation.
    ChunkInfo ch;
    ch.isLeaf = true;
    ch.isChanged = true;
    ch.flags = ChunkInfo::NO_FLAGS;
    ch.parentChunkId = replaceChunk ? mChunkData[chunkInfoIndex].parentChunkId : chunkId;
    std::vector<Mesh*> xSlicedChunks;
    std::vector<Mesh*> ySlicedChunks;
    std::vector<uint32_t> newlyCreatedChunksIds;
    /** Slice along x direction */
    for (int32_t slice = 0; slice < x_slices; ++slice)
    {
        // Perturb the plane normal by angle_variations using a random vector in [-1,1]^3.
        NvVec3 randVect = NvVec3(2 * rnd->getRandomValue() - 1, 2 * rnd->getRandomValue() - 1, 2 * rnd->getRandomValue() - 1);
        NvVec3 lDir = dir + randVect * conf.angle_variations;
        setCuttingBox(center, -lDir, slBox, 20, mPlaneIndexerOffset);
        // Keep the slab on the near side of the plane...
        bTool.performFastCutting(mesh, slBox, BooleanConfigurations::BOOLEAN_INTERSECTION());
        Mesh* xSlice = bTool.createNewMesh();
        if (xSlice != nullptr)
        {
            xSlicedChunks.push_back(xSlice);
        }
        // ...then continue slicing the far-side remainder.
        inverseNormalAndIndices(slBox);
        ++mPlaneIndexerOffset;
        bTool.performFastCutting(mesh, slBox, BooleanConfigurations::BOOLEAN_DIFFERENCE());
        Mesh* result = bTool.createNewMesh();
        delete mesh;
        mesh = result;
        if (mesh == nullptr)
        {
            break;
        }
        center.x += x_offset + (rnd->getRandomValue()) * conf.offset_variations * x_offset;
    }
    if (mesh != nullptr)
    {
        // Final remainder is the last x-slab.
        xSlicedChunks.push_back(mesh);
    }
    /** Slice each x-slab along y */
    for (uint32_t chunk = 0; chunk < xSlicedChunks.size(); ++chunk)
    {
        center = NvVec3(0, sourceBBox.minimum.y, 0);
        center.y += y_offset;
        dir = NvVec3(0, 1, 0);
        mesh = xSlicedChunks[chunk];
        for (int32_t slice = 0; slice < y_slices; ++slice)
        {
            NvVec3 randVect = NvVec3(2 * rnd->getRandomValue() - 1, 2 * rnd->getRandomValue() - 1, 2 * rnd->getRandomValue() - 1);
            NvVec3 lDir = dir + randVect * conf.angle_variations;
            setCuttingBox(center, -lDir, slBox, 20, mPlaneIndexerOffset);
            bTool.performFastCutting(mesh, slBox, BooleanConfigurations::BOOLEAN_INTERSECTION());
            Mesh* ySlice = bTool.createNewMesh();
            if (ySlice != nullptr)
            {
                ySlicedChunks.push_back(ySlice);
            }
            inverseNormalAndIndices(slBox);
            ++mPlaneIndexerOffset;
            bTool.performFastCutting(mesh, slBox, BooleanConfigurations::BOOLEAN_DIFFERENCE());
            Mesh* result = bTool.createNewMesh();
            delete mesh;
            mesh = result;
            if (mesh == nullptr)
            {
                break;
            }
            center.y += y_offset + (rnd->getRandomValue()) * conf.offset_variations * y_offset;
        }
        if (mesh != nullptr)
        {
            ySlicedChunks.push_back(mesh);
        }
    }
    /** Slice each xy-slab along z; z-slabs become the final child chunks */
    for (uint32_t chunk = 0; chunk < ySlicedChunks.size(); ++chunk)
    {
        center = NvVec3(0, 0, sourceBBox.minimum.z);
        center.z += z_offset;
        dir = NvVec3(0, 0, 1);
        mesh = ySlicedChunks[chunk];
        for (int32_t slice = 0; slice < z_slices; ++slice)
        {
            NvVec3 randVect = NvVec3(2 * rnd->getRandomValue() - 1, 2 * rnd->getRandomValue() - 1, 2 * rnd->getRandomValue() - 1);
            NvVec3 lDir = dir + randVect * conf.angle_variations;
            setCuttingBox(center, -lDir, slBox, 20, mPlaneIndexerOffset);
            bTool.performFastCutting(mesh, slBox, BooleanConfigurations::BOOLEAN_INTERSECTION());
            Mesh* ySlice = bTool.createNewMesh();
            if (ySlice != nullptr)
            {
                // Register the slab as a new chunk.
                setChunkInfoMesh(ch, ySlice);
                ch.chunkId = createId();
                newlyCreatedChunksIds.push_back(ch.chunkId);
                mChunkData.push_back(ch);
            }
            inverseNormalAndIndices(slBox);
            ++mPlaneIndexerOffset;
            bTool.performFastCutting(mesh, slBox, BooleanConfigurations::BOOLEAN_DIFFERENCE());
            Mesh* result = bTool.createNewMesh();
            delete mesh;
            mesh = result;
            if (mesh == nullptr)
            {
                break;
            }
            center.z += z_offset + (rnd->getRandomValue()) * conf.offset_variations * z_offset;
        }
        if (mesh != nullptr)
        {
            setChunkInfoMesh(ch, mesh);
            ch.chunkId = createId();
            newlyCreatedChunksIds.push_back(ch.chunkId);
            mChunkData.push_back(ch);
        }
    }
    delete slBox;
    mChunkData[chunkInfoIndex].isLeaf = false;
    if (replaceChunk)
    {
        deleteChunkSubhierarchy(chunkId, true);
    }
    if (mRemoveIslands)
    {
        for (auto chunkToCheck : newlyCreatedChunksIds)
        {
            islandDetectionAndRemoving(chunkToCheck);
        }
    }
    return 0;
}

/**
 * Grid slicing with noisy (displaced) cutting surfaces.
 * Same pass structure as slicing(), but each plane is a noisy cutting-box pair built by
 * getNoisyCuttingBoxPair(), and full performBoolean() with sweeping accelerators replaces
 * the fast-cutting path. Returns 0 on success, 1 on invalid input.
 */
int32_t FractureToolImpl::slicingNoisy(uint32_t chunkId, const SlicingConfiguration& conf, bool replaceChunk, RandomGeneratorBase* rnd)
{
    if (replaceChunk && chunkId == 0)
    {
        return 1;
    }
    int32_t chunkInfoIndex = getChunkInfoIndex(chunkId);
    if (chunkInfoIndex == -1)
    {
        return 1;
    }
    if (!mChunkData[chunkInfoIndex].isLeaf)
    {
        deleteChunkSubhierarchy(chunkId);
    }
    chunkInfoIndex = getChunkInfoIndex(chunkId);
    Mesh* mesh = new
MeshImpl(*reinterpret_cast<MeshImpl*>(mChunkData[chunkInfoIndex].getMesh())); const TransformST& tm = mChunkData[chunkInfoIndex].getTmToWorld(); BooleanEvaluator bTool; int32_t x_slices = conf.x_slices; int32_t y_slices = conf.y_slices; int32_t z_slices = conf.z_slices; const nvidia::NvBounds3 sourceBBox = toNvShared(mesh->getBoundingBox()); NvVec3 center = NvVec3(mesh->getBoundingBox().minimum.x, 0, 0); float x_offset = (sourceBBox.maximum.x - sourceBBox.minimum.x) * (1.0f / (x_slices + 1)); float y_offset = (sourceBBox.maximum.y - sourceBBox.minimum.y) * (1.0f / (y_slices + 1)); float z_offset = (sourceBBox.maximum.z - sourceBBox.minimum.z) * (1.0f / (z_slices + 1)); NvVec3 resolution(tm.s / conf.noise.samplingInterval.x, tm.s / conf.noise.samplingInterval.y, tm.s / conf.noise.samplingInterval.z); center.x += x_offset; NvVec3 dir(1, 0, 0); Mesh* slBox = nullptr; ChunkInfo ch; ch.isLeaf = true; ch.isChanged = true; ch.flags = ChunkInfo::NO_FLAGS; ch.parentChunkId = replaceChunk ? mChunkData[chunkInfoIndex].parentChunkId : chunkId; std::vector<Mesh*> xSlicedChunks; std::vector<Mesh*> ySlicedChunks; std::vector<uint32_t> newlyCreatedChunksIds; float noisyPartSize = 1.2f; // int32_t acceleratorRes = 8; /** Slice along x direction */ for (int32_t slice = 0; slice < x_slices; ++slice) { NvVec3 randVect = NvVec3(2 * rnd->getRandomValue() - 1, 2 * rnd->getRandomValue() - 1, 2 * rnd->getRandomValue() - 1); NvVec3 lDir = dir + randVect * conf.angle_variations; slBox = getNoisyCuttingBoxPair(center, lDir, 40, noisyPartSize, resolution, mPlaneIndexerOffset, conf.noise.amplitude, conf.noise.frequency, conf.noise.octaveNumber, rnd->getRandomValue(), mInteriorMaterialId); // DummyAccelerator accel(mesh->getFacetCount()); SweepingAccelerator accel(mesh); SweepingAccelerator dummy(slBox); bTool.performBoolean(mesh, slBox, &accel, &dummy, BooleanConfigurations::BOOLEAN_DIFFERENCE()); Mesh* xSlice = bTool.createNewMesh(); if (xSlice != nullptr) { xSlicedChunks.push_back(xSlice); } 
inverseNormalAndIndices(slBox); ++mPlaneIndexerOffset; bTool.performBoolean(mesh, slBox, &accel, &dummy, BooleanConfigurations::BOOLEAN_INTERSECTION()); Mesh* result = bTool.createNewMesh(); delete slBox; delete mesh; mesh = result; if (mesh == nullptr) { break; } center.x += x_offset + (rnd->getRandomValue()) * conf.offset_variations * x_offset; } if (mesh != nullptr) { xSlicedChunks.push_back(mesh); } slBox = getCuttingBox(center, dir, 20, 0, mInteriorMaterialId); uint32_t slicedChunkSize = xSlicedChunks.size(); for (uint32_t chunk = 0; chunk < slicedChunkSize; ++chunk) { center = NvVec3(0, sourceBBox.minimum.y, 0); center.y += y_offset; dir = NvVec3(0, 1, 0); mesh = xSlicedChunks[chunk]; for (int32_t slice = 0; slice < y_slices; ++slice) { NvVec3 randVect = NvVec3(2 * rnd->getRandomValue() - 1, 2 * rnd->getRandomValue() - 1, 2 * rnd->getRandomValue() - 1); NvVec3 lDir = dir + randVect * conf.angle_variations; slBox = getNoisyCuttingBoxPair(center, lDir, 40, noisyPartSize, resolution, mPlaneIndexerOffset, conf.noise.amplitude, conf.noise.frequency, conf.noise.octaveNumber, rnd->getRandomValue(), mInteriorMaterialId); // DummyAccelerator accel(mesh->getFacetCount()); SweepingAccelerator accel(mesh); SweepingAccelerator dummy(slBox); bTool.performBoolean(mesh, slBox, &accel, &dummy, BooleanConfigurations::BOOLEAN_DIFFERENCE()); Mesh* ySlice = bTool.createNewMesh(); if (ySlice != nullptr) { ySlicedChunks.push_back(ySlice); } inverseNormalAndIndices(slBox); ++mPlaneIndexerOffset; bTool.performBoolean(mesh, slBox, &accel, &dummy, BooleanConfigurations::BOOLEAN_INTERSECTION()); Mesh* result = bTool.createNewMesh(); delete slBox; delete mesh; mesh = result; if (mesh == nullptr) { break; } center.y += y_offset + (rnd->getRandomValue()) * conf.offset_variations * y_offset; } if (mesh != nullptr) { ySlicedChunks.push_back(mesh); } } for (uint32_t chunk = 0; chunk < ySlicedChunks.size(); ++chunk) { center = NvVec3(0, 0, sourceBBox.minimum.z); center.z += z_offset; dir = 
NvVec3(0, 0, 1); mesh = ySlicedChunks[chunk]; for (int32_t slice = 0; slice < z_slices; ++slice) { NvVec3 randVect = NvVec3(2 * rnd->getRandomValue() - 1, 2 * rnd->getRandomValue() - 1, 2 * rnd->getRandomValue() - 1); NvVec3 lDir = dir + randVect * conf.angle_variations; slBox = getNoisyCuttingBoxPair(center, lDir, 40, noisyPartSize, resolution, mPlaneIndexerOffset, conf.noise.amplitude, conf.noise.frequency, conf.noise.octaveNumber, rnd->getRandomValue(), mInteriorMaterialId); // DummyAccelerator accel(mesh->getFacetCount()); SweepingAccelerator accel(mesh); SweepingAccelerator dummy(slBox); bTool.performBoolean(mesh, slBox, &accel, &dummy, BooleanConfigurations::BOOLEAN_DIFFERENCE()); Mesh* ySlice = bTool.createNewMesh(); if (ySlice != nullptr) { setChunkInfoMesh(ch, ySlice); ch.chunkId = createId(); mChunkData.push_back(ch); newlyCreatedChunksIds.push_back(ch.chunkId); } inverseNormalAndIndices(slBox); ++mPlaneIndexerOffset; bTool.performBoolean(mesh, slBox, &accel, &dummy, BooleanConfigurations::BOOLEAN_INTERSECTION()); Mesh* result = bTool.createNewMesh(); delete mesh; delete slBox; mesh = result; if (mesh == nullptr) { break; } center.z += z_offset + (rnd->getRandomValue()) * conf.offset_variations * z_offset; } if (mesh != nullptr) { setChunkInfoMesh(ch, mesh); ch.chunkId = createId(); newlyCreatedChunksIds.push_back(ch.chunkId); mChunkData.push_back(ch); } } // delete slBox; mChunkData[chunkInfoIndex].isLeaf = false; if (replaceChunk) { deleteChunkSubhierarchy(chunkId, true); } if (mRemoveIslands) { for (auto chunkToCheck : newlyCreatedChunksIds) { islandDetectionAndRemoving(chunkToCheck); } } return 0; } int32_t FractureToolImpl::cut(uint32_t chunkId, const NvcVec3& normal, const NvcVec3& point, const NoiseConfiguration& noise, bool replaceChunk, RandomGeneratorBase* rnd) { if (replaceChunk && chunkId == 0) { return 1; } int32_t chunkInfoIndex = getChunkInfoIndex(chunkId); if (chunkInfoIndex == -1) { return 1; } if (!mChunkData[chunkInfoIndex].isLeaf) { 
        // Refracturing a non-leaf discards its previous children first.
        deleteChunkSubhierarchy(chunkId);
    }
    chunkInfoIndex = getChunkInfoIndex(chunkId);
    // Work on a copy of the chunk mesh; the copy is consumed by the cut.
    Mesh* mesh = new MeshImpl(*reinterpret_cast<MeshImpl*>(mChunkData[chunkInfoIndex].getMesh()));
    BooleanEvaluator bTool;
    const TransformST& tm = mChunkData[chunkInfoIndex].getTmToWorld();
    // Template ChunkInfo for the (up to two) chunks produced by the cut.
    ChunkInfo ch;
    ch.chunkId = -1;
    ch.isLeaf = true;
    ch.isChanged = true;
    ch.flags = ChunkInfo::NO_FLAGS;
    ch.parentChunkId = replaceChunk ? mChunkData[chunkInfoIndex].parentChunkId : chunkId;
    float noisyPartSize = 1.2f;
    // Noise sampling resolution in chunk-local units (tm.s is the chunk scale).
    NvVec3 resolution(tm.s / noise.samplingInterval.x, tm.s / noise.samplingInterval.y, tm.s / noise.samplingInterval.z);
    // Perform cut
    Mesh* slBox = getNoisyCuttingBoxPair(toNvShared(tm.invTransformPos(point)), toNvShared(normal),  // tm doesn't change normals (up to normalization)
                                         40, noisyPartSize, resolution, mPlaneIndexerOffset, noise.amplitude, noise.frequency, noise.octaveNumber, rnd->getRandomValue(), mInteriorMaterialId);
    SweepingAccelerator accel(mesh);
    SweepingAccelerator dummy(slBox);
    // One side of the cut...
    bTool.performBoolean(mesh, slBox, &accel, &dummy, BooleanConfigurations::BOOLEAN_DIFFERENCE());
    setChunkInfoMesh(ch, bTool.createNewMesh());
    // ...and the other.
    inverseNormalAndIndices(slBox);
    ++mPlaneIndexerOffset;
    bTool.performBoolean(mesh, slBox, &accel, &dummy, BooleanConfigurations::BOOLEAN_INTERSECTION());
    Mesh* result = bTool.createNewMesh();
    delete slBox;
    delete mesh;
    mesh = result;
    if (mesh == 0)  // Return if it doesn't cut specified chunk
    {
        // NOTE(review): if the difference side produced a mesh but the intersection side
        // did not, ch's mesh appears to be leaked on this early return -- verify.
        return 1;
    }
    if (!mChunkData[chunkInfoIndex].isLeaf)
    {
        deleteChunkSubhierarchy(chunkId);
    }
    chunkInfoIndex = getChunkInfoIndex(chunkId);
    int32_t firstChunkId = -1;
    if (ch.getMesh() != 0)
    {
        ch.chunkId = createId();
        mChunkData.push_back(ch);
        firstChunkId = ch.chunkId;
    }
    if (mesh != 0)
    {
        ch.chunkId = createId();
        setChunkInfoMesh(ch, mesh);
        mChunkData.push_back(ch);
    }
    mChunkData[chunkInfoIndex].isLeaf = false;
    if (replaceChunk)
    {
        deleteChunkSubhierarchy(chunkId, true);
    }
    if (mRemoveIslands && firstChunkId >= 0)
    {
        islandDetectionAndRemoving(firstChunkId);
        if (mesh != 0)
        {
            islandDetectionAndRemoving(ch.chunkId);
        }
    }
    return 0;
}

/**
 * Strict-weak ordering for NvVec3 used as a map key: lexicographic x, y, z
 * with a 1e-5 tolerance on the earlier components.
 */
bool CmpVec::operator()(const NvVec3& v1, const NvVec3& v2) const
{
    auto v = (v2 - v1).abs();
    if (v.x < 1e-5)
    {
        if (v.y < 1e-5)
        {
            return v1.z < v2.z;
        }
        return v1.y < v2.y;
    }
    return v1.x < v2.x;
}

/**
 * Fracture a chunk with extruded cutout shapes (conf.cutoutSet), optionally noisy and
 * conical, tiled periodically when the cutout set is periodic.
 * Returns 0 on success, 1 on invalid input.
 */
int32_t FractureToolImpl::cutout(uint32_t chunkId, CutoutConfiguration conf, bool replaceChunk, RandomGeneratorBase* rnd)
{
    if ((replaceChunk && chunkId == 0) || conf.cutoutSet == nullptr)
    {
        return 1;
    }
    int32_t chunkInfoIndex = getChunkInfoIndex(chunkId);
    if (chunkInfoIndex == -1)
    {
        return 1;
    }
    if (!mChunkData[chunkInfoIndex].isLeaf)
    {
        deleteChunkSubhierarchy(chunkId);
    }
    chunkInfoIndex = getChunkInfoIndex(chunkId);
    Nv::Blast::CutoutSet& cutoutSet = *conf.cutoutSet;
    const TransformST& tm = mChunkData[chunkInfoIndex].getTmToWorld();
    Mesh* mesh = new MeshImpl(*reinterpret_cast<MeshImpl*>(mChunkData[chunkInfoIndex].getMesh()));
    // Extrusion depth defaults to the chunk's bounding-box diagonal.
    float extrusionLength = toNvShared(mesh->getBoundingBox()).getDimensions().magnitude();
    auto scale = toNvShared(conf.scale);
    conf.transform.p = tm.invTransformPos(conf.transform.p);
    if (scale.x < 0.f || scale.y < 0.f)
    {
        scale = { extrusionLength, extrusionLength };
    }
    if (conf.isRelativeTransform)
    {
        toNvShared(conf.transform.p) += toNvShared(mesh->getBoundingBox()).getCenter() / tm.s;
    }
    conf.noise.samplingInterval = conf.noise.samplingInterval / tm.s;
    float xDim = cutoutSet.getDimensions().x;
    float yDim = cutoutSet.getDimensions().y;
    if (conf.cutoutSet->isPeriodic())  // cutout with periodic boundary do not support noise and conicity
    {
        conf.aperture = 0.f;
        conf.noise.amplitude = 0.f;
    }
    BooleanEvaluator bTool;
    // Template ChunkInfo for every chunk produced by this operation.
    ChunkInfo ch;
    ch.isLeaf = true;
    ch.isChanged = true;
    ch.flags = ChunkInfo::NO_FLAGS;
    ch.parentChunkId = replaceChunk ?
    mChunkData[chunkInfoIndex].parentChunkId : chunkId;
    std::vector<uint32_t> newlyCreatedChunksIds;
    SharedFacesMap sharedFacesMap;
    std::vector<std::vector<NvVec3> > verts;
    std::vector<std::set<int32_t> > smoothingGroups;
    std::vector<uint32_t> cutoutStarts;
    // Gather every cutout loop's vertices, recentred to [-0.5,0.5] and scaled.
    for (uint32_t c = 0; c < cutoutSet.getCutoutCount(); c++)
    {
        cutoutStarts.push_back(verts.size());
        for (uint32_t l = 0; l < cutoutSet.getCutoutLoopCount(c); l++)
        {
            uint32_t vertCount = cutoutSet.getCutoutVertexCount(c, l);
            verts.push_back(std::vector<NvVec3>(vertCount));
            smoothingGroups.push_back(std::set<int32_t>());
            for (uint32_t v = 0; v < vertCount; v++)
            {
                auto vert = cutoutSet.getCutoutVertex(c, l, v);
                vert.x = (vert.x / xDim - 0.5f) * scale.x;
                vert.y = (vert.y / yDim - 0.5f) * scale.y;
                verts.back()[v] = toNvShared(vert);
                if (cutoutSet.isCutoutVertexToggleSmoothingGroup(c, l, v))
                {
                    smoothingGroups.back().insert(v);
                }
            }
        }
    }
    // Conicity: taper factors for the bottom/top of the extruded cone derived from the
    // aperture angle; clamped at zero with the extrusion height shortened to compensate.
    float dimension = scale.magnitude();
    float conicityMultiplierBot = 1.f + 2.f * extrusionLength / dimension * nvidia::NvTan(nvidia::NvClamp(conf.aperture, -179.f, 179.f) * nvidia::NvPi / 360.f);
    float conicityMultiplierTop = 2.f - conicityMultiplierBot;
    float heightBot = extrusionLength, heightTop = extrusionLength;
    if (conicityMultiplierBot < 0.f)
    {
        conicityMultiplierBot = 0.f;
        heightBot = 0.5f * dimension / std::abs(nvidia::NvTan(conf.aperture * nvidia::NvPi / 360.f));
    }
    if (conicityMultiplierTop < 0.f)
    {
        conicityMultiplierTop = 0.f;
        heightTop = 0.5f * dimension / std::abs(nvidia::NvTan(conf.aperture * nvidia::NvPi / 360.f));
    }
    uint32_t seed = rnd->getRandomValue();
    buildCuttingConeFaces(conf, verts, heightBot, heightTop, conicityMultiplierBot, conicityMultiplierTop, mPlaneIndexerOffset, seed, mInteriorMaterialId, sharedFacesMap);
    // Build one cutting cone per cutout loop (loops with < 4 verts are skipped).
    std::vector<std::vector<Mesh*> > cutoutMeshes;
    for (uint32_t c = 0; c < cutoutSet.getCutoutCount(); c++)
    {
        cutoutMeshes.push_back(std::vector<Mesh*>());
        for (uint32_t l = 0; l < cutoutSet.getCutoutLoopCount(c); l++)
        {
            if (verts[cutoutStarts[c] + l].size() < 4)
            {
                continue;
            }
            cutoutMeshes.back().push_back(getCuttingCone(conf, verts[cutoutStarts[c] + l], smoothingGroups[cutoutStarts[c] + l], heightBot, heightTop, conicityMultiplierBot, conicityMultiplierTop, mPlaneIndexerOffset, seed, mInteriorMaterialId, sharedFacesMap, l != 0));
        }
    }
    // Flood-fill over the periodic tiling grid, starting at cell (0,0); non-periodic sets
    // only ever process that single cell.
    std::stack<std::pair<int32_t, int32_t> > cellsStack;
    std::set<std::pair<int32_t, int32_t> > visited;
    cellsStack.push(std::make_pair(0, 0));
    while (!cellsStack.empty())
    {
        auto cell = cellsStack.top();
        auto transformedCell = toNvShared(conf.transform).rotate(NvVec3(cell.first * scale.x, cell.second * scale.y, 0));
        cellsStack.pop();
        if (visited.find(cell) != visited.end())
        {
            continue;
        }
        visited.insert(cell);
        bool hasCutout = false;
        for (uint32_t c = 0; c < cutoutMeshes.size(); c++)
        {
            setChunkInfoMesh(ch, nullptr);
            for (uint32_t l = 0; l < cutoutMeshes[c].size(); l++)
            {
                Mesh* cutoutMesh = cutoutMeshes[c][l];
                if (cutoutMesh == nullptr)
                {
                    continue;
                }
                // Temporarily translate the shared cutting cone into this grid cell.
                auto vertices = cutoutMesh->getVerticesWritable();
                for (uint32_t v = 0; v < cutoutMesh->getVerticesCount(); v++)
                {
                    toNvShared(vertices[v].p) += transformedCell;
                }
                toNvShared(cutoutMesh->getBoundingBoxWritable().minimum) += transformedCell;
                toNvShared(cutoutMesh->getBoundingBoxWritable().maximum) += transformedCell;
                if (l == 0)
                {
                    // Outer loop: intersect the chunk with the cone.
                    SweepingAccelerator accel(mesh);
                    SweepingAccelerator dummy(cutoutMesh);
                    bTool.performBoolean(mesh, cutoutMesh, &accel, &dummy, BooleanConfigurations::BOOLEAN_INTERSECTION());
                    setChunkInfoMesh(ch, bTool.createNewMesh());
                }
                else
                {
                    // Hole loops: subtract from the running result.
                    SweepingAccelerator accel(ch.getMesh());
                    SweepingAccelerator dummy(cutoutMesh);
                    bTool.performBoolean(ch.getMesh(), cutoutMesh, &accel, &dummy, BooleanConfigurations::BOOLEAN_DIFFERENCE());
                    setChunkInfoMesh(ch, bTool.createNewMesh());
                }
                // Undo the temporary translation so the cone can be reused.
                for (uint32_t v = 0; v < cutoutMesh->getVerticesCount(); v++)
                {
                    toNvShared(vertices[v].p) -= transformedCell;
                }
                toNvShared(cutoutMesh->getBoundingBoxWritable().minimum) -= transformedCell;
                toNvShared(cutoutMesh->getBoundingBoxWritable().maximum) -= transformedCell;
            }
            if (ch.getMesh() != 0)
            {
                ch.chunkId = createId();
                newlyCreatedChunksIds.push_back(ch.chunkId);
                mChunkData.push_back(ch);
                hasCutout = true;
            }
        }
        if (hasCutout && cutoutSet.isPeriodic())
        {
            // Enqueue the four diagonal neighbor cells of the periodic tiling.
            for (int32_t i = 0; i < 4; ++i)
            {
                const int32_t i0 = i & 1;
                const int32_t i1 = (i >> 1) & 1;
                auto newCell = std::make_pair(cell.first + i0 - i1, cell.second + i0 + i1 - 1);
                if (visited.find(newCell) == visited.end())
                {
                    cellsStack.push(newCell);
                }
            }
        }
    }
    for (uint32_t c = 0; c < cutoutMeshes.size(); c++)
    {
        for (uint32_t l = 0; l < cutoutMeshes[c].size(); l++)
        {
            SAFE_DELETE(cutoutMeshes[c][l]);
        }
    }
    SAFE_DELETE(mesh);
    mChunkData[chunkInfoIndex].isLeaf = false;
    if (replaceChunk)
    {
        deleteChunkSubhierarchy(chunkId, true);
    }
    if (mRemoveIslands)
    {
        for (auto chunkToCheck : newlyCreatedChunksIds)
        {
            islandDetectionAndRemoving(chunkToCheck);
        }
    }
    return 0;
}

/**
 * Linear search for the mChunkData index holding the given chunk ID.
 * Returns -1 if the ID is unknown.
 */
int32_t FractureToolImpl::getChunkInfoIndex(int32_t chunkId) const
{
    for (uint32_t i = 0; i < mChunkData.size(); ++i)
    {
        if (mChunkData[i].chunkId == chunkId)
        {
            return i;
        }
    }
    return -1;
}

/**
 * Depth of a chunk in the hierarchy (root = 0), walking parent links.
 * Returns -1 for an unknown chunk ID.
 */
int32_t FractureToolImpl::getChunkDepth(int32_t chunkId) const
{
    int32_t chunkInfoIndex = getChunkInfoIndex(chunkId);
    if (chunkInfoIndex == -1)
    {
        return -1;
    }
    int32_t depth = 0;
    while (mChunkData[chunkInfoIndex].parentChunkId != -1)
    {
        ++depth;
        chunkInfoIndex = getChunkInfoIndex(mChunkData[chunkInfoIndex].parentChunkId);
    }
    return depth;
}

/**
 * Collect the IDs of all chunks at the given depth.
 * Allocates chunkIds with operator new[]; the caller owns the returned array.
 * Returns the number of IDs written.
 */
uint32_t FractureToolImpl::getChunksIdAtDepth(uint32_t depth, int32_t*& chunkIds) const
{
    std::vector<int32_t> _chunkIds;
    for (uint32_t i = 0; i < mChunkData.size(); ++i)
    {
        if (getChunkDepth(mChunkData[i].chunkId) == (int32_t)depth)
        {
            _chunkIds.push_back(mChunkData[i].chunkId);
        }
    }
    chunkIds = new int32_t[_chunkIds.size()];
    memcpy(chunkIds, _chunkIds.data(), _chunkIds.size() * sizeof(int32_t));
    return (uint32_t)_chunkIds.size();
}

/**
 * Replace all tool state with the given source meshes (each becomes a root-level chunk).
 * Optional `ids` supplies explicit chunk IDs (use -1 entries for auto-assignment).
 * Returns false (and resets the tool) if any mesh fails to be set up.
 */
bool FractureToolImpl::setSourceMeshes(Mesh const * const * meshes, uint32_t meshesSize, const int32_t* ids /* = nullptr */)
{
    if (meshes == nullptr)
    {
        return false;
    }
    reset();
    for (uint32_t m = 0; m <
    meshesSize; m++)
    {
        const auto mesh = meshes[m];
        const int32_t chunkId = (ids ? ids[m] : -1);
        const int32_t id = setChunkMesh(mesh, -1, chunkId);
        // if any mesh fails to get set up correctly,
        // wipe the data so it isn't in a bad state and report failure
        if (id < 0)
        {
            reset();
            return false;
        }
    }
    // all source meshes were set up correctly, report success
    return true;
}

/**
 * Register a copy of meshInput as a chunk under parentId (-1 = root).
 * chunkId < 0 requests a freshly allocated ID; otherwise the given ID is reserved.
 * Also raises mPlaneIndexerOffset above every facet userData so future fracture
 * surface IDs never collide with existing ones.
 * Returns the chunk ID on success, -1 on failure.
 */
int32_t FractureToolImpl::setChunkMesh(const Mesh* meshInput, int32_t parentId, int32_t chunkId /* = -1 */)
{
    if (chunkId < 0)
    {
        // allocate a new chunk ID
        chunkId = createId();
        if (chunkId < 0)
        {
            return -1;
        }
    }
    else
    {
        // make sure the supplied chunk ID gets reserved
        if (!reserveId(chunkId))
        {
            return -1;
        }
    }
    const int32_t parentInfoIndex = getChunkInfoIndex(parentId);
    if (meshInput == nullptr || (parentInfoIndex == -1 && parentId != -1))
    {
        return -1;
    }
    mChunkData.push_back(ChunkInfo());
    auto& chunk = mChunkData.back();
    chunk.chunkId = chunkId;
    chunk.parentChunkId = parentId;
    chunk.isLeaf = true;
    chunk.isChanged = true;
    chunk.flags = ChunkInfo::NO_FLAGS;
    /** Set mesh; move to origin and scale to unit cube */
    Mesh* mesh = new MeshImpl(*reinterpret_cast<const MeshImpl*>(meshInput));
    setChunkInfoMesh(chunk, mesh, false);
    // parentInfoIndex == -1 wraps to SIZE_MAX under the cast, so roots skip this.
    if ((size_t)parentInfoIndex < mChunkData.size())
    {
        mChunkData[parentInfoIndex].isLeaf = false;
    }
    // Make sure our fracturing surface ID base is greater than any existing ID
    for (uint32_t i = 0; i < mesh->getFacetCount(); ++i)
    {
        const int64_t splitId = std::abs(mesh->getFacet(i)->userData);
        mPlaneIndexerOffset = std::max(mPlaneIndexerOffset, splitId + 1);
    }
    return chunk.chunkId;
}

// Self-destruct entry point matching the SDK's create/release pattern.
void FractureToolImpl::release()
{
    delete this;
}

/**
 * Destroy all triangulators and chunk meshes and restore the tool's initial state.
 */
void FractureToolImpl::reset()
{
    for (uint32_t i = 0; i < mChunkPostprocessors.size(); ++i)
    {
        delete mChunkPostprocessors[i];
    }
    mChunkPostprocessors.clear();
    for (uint32_t i = 0; i < mChunkData.size(); ++i)
    {
        delete mChunkData[i].getMesh();
    }
    mChunkData.clear();
    mPlaneIndexerOffset = 1;
    mNextChunkId = 0;
    mChunkIdsUsed.clear();
    mInteriorMaterialId = kMaterialInteriorId;
}

// Sets the material ID assigned to interior (fracture-created) faces.
void FractureToolImpl::setInteriorMaterialId(int32_t materialId)
{
    mInteriorMaterialId = materialId;
}

/**
 * True if ancestorId is a strict ancestor of chunkId (a chunk is not its own ancestor).
 */
bool FractureToolImpl::isAncestorForChunk(int32_t ancestorId, int32_t chunkId)
{
    if (ancestorId == chunkId)
    {
        return false;
    }
    while (chunkId != -1)
    {
        if (ancestorId == chunkId)
        {
            return true;
        }
        const int32_t chunkInfoIndex = getChunkInfoIndex(chunkId);
        if (chunkInfoIndex == -1)
        {
            return false;
        }
        chunkId = mChunkData[chunkInfoIndex].parentChunkId;
    }
    return false;
}

/**
 * Delete all descendants of chunkId (and chunkId itself when deleteRoot is true),
 * then refresh leaf flags. Returns true if anything was deleted.
 */
bool FractureToolImpl::deleteChunkSubhierarchy(int32_t chunkId, bool deleteRoot /*= false*/)
{
    std::vector<int32_t> chunkToDelete;
    for (uint32_t i = 0; i < mChunkData.size(); ++i)
    {
        if (isAncestorForChunk(chunkId, mChunkData[i].chunkId) || (deleteRoot && chunkId == mChunkData[i].chunkId))
        {
            chunkToDelete.push_back(i);
        }
    }
    // Iterate backwards so swap-and-pop never disturbs a not-yet-processed index.
    for (int32_t i = (int32_t)chunkToDelete.size() - 1; i >= 0; --i)
    {
        int32_t m = chunkToDelete[i];
        delete mChunkData[m].getMesh();
        std::swap(mChunkData.back(), mChunkData[m]);
        mChunkData.pop_back();
    }
    markLeaves();
    return chunkToDelete.size() > 0;
}

/**
 * (Re)triangulate every changed chunk, reusing prior Triangulators for unchanged ones,
 * prune chunks whose triangulation came out empty, and refit interior UVs.
 */
void FractureToolImpl::finalizeFracturing()
{
    std::vector<Triangulator*> oldTriangulators = mChunkPostprocessors;
    std::map<int32_t, int32_t> chunkIdToTriangulator;
    std::set<uint32_t> newChunkMask;
    for (uint32_t i = 0; i < oldTriangulators.size(); ++i)
    {
        chunkIdToTriangulator[oldTriangulators[i]->getParentChunkId()] = i;
    }
    mChunkPostprocessors.clear();
    mChunkPostprocessors.resize(mChunkData.size());
    newChunkMask.insert(0xffffffff);  // To trigger masking mode, if newChunkMask will happen to be empty, all UVs will
                                      // be updated.
    for (uint32_t i = 0; i < mChunkPostprocessors.size(); ++i)
    {
        auto it = chunkIdToTriangulator.find(mChunkData[i].chunkId);
        if (mChunkData[i].isChanged || it == chunkIdToTriangulator.end())
        {
            // Chunk is new or dirty: drop any stale triangulator and rebuild.
            if (it != chunkIdToTriangulator.end())
            {
                delete oldTriangulators[it->second];
                oldTriangulators[it->second] = nullptr;
            }
            mChunkPostprocessors[i] = new Triangulator();
            mChunkPostprocessors[i]->triangulate(mChunkData[i].getMesh());
            mChunkPostprocessors[i]->getParentChunkId() = mChunkData[i].chunkId;
            newChunkMask.insert(mChunkData[i].chunkId);
            mChunkData[i].isChanged = false;
        }
        else
        {
            // Unchanged chunk: keep the existing triangulation.
            mChunkPostprocessors[i] = oldTriangulators[it->second];
        }
    }
    // Remove chunks whose triangulation is empty, reparenting their children.
    std::vector<int32_t> badOnes;
    for (uint32_t i = 0; i < mChunkPostprocessors.size(); ++i)
    {
        if (mChunkPostprocessors[i]->getBaseMesh().empty())
        {
            badOnes.push_back(i);
        }
    }
    for (int32_t i = (int32_t)badOnes.size() - 1; i >= 0; --i)
    {
        int32_t chunkId = mChunkData[badOnes[i]].chunkId;
        for (uint32_t j = 0; j < mChunkData.size(); ++j)
        {
            if (mChunkData[j].parentChunkId == chunkId)
                mChunkData[j].parentChunkId = mChunkData[badOnes[i]].parentChunkId;
        }
        std::swap(mChunkPostprocessors[badOnes[i]], mChunkPostprocessors.back());
        mChunkPostprocessors.pop_back();
        std::swap(mChunkData[badOnes[i]], mChunkData.back());
        mChunkData.pop_back();
    }
    if (!mChunkPostprocessors.empty())  // Failsafe to prevent infinite loop (leading to stack overflow)
    {
        fitAllUvToRect(1.0f, newChunkMask);
    }
}

// Number of chunks currently held by the tool.
uint32_t FractureToolImpl::getChunkCount() const
{
    return (uint32_t)mChunkData.size();
}

// Direct access to a chunk record by info index (no bounds checking).
const ChunkInfo& FractureToolImpl::getChunkInfo(int32_t chunkInfoIndex)
{
    return mChunkData[chunkInfoIndex];
}

/**
 * Copy a chunk's triangulated base mesh into a newly allocated Triangle array
 * (caller owns `output`, allocated with new[]), transforming positions back to
 * world space. Returns the triangle count, or 0 if finalizeFracturing() has not run.
 */
uint32_t FractureToolImpl::getBaseMesh(int32_t chunkInfoIndex, Triangle*& output)
{
    NVBLAST_ASSERT(mChunkPostprocessors.size() > 0);
    if (mChunkPostprocessors.size() == 0)
    {
        return 0;  // finalizeFracturing() should be called before getting mesh!
    }
    auto& baseMesh = mChunkPostprocessors[chunkInfoIndex]->getBaseMesh();
    output = new Triangle[baseMesh.size()];
    memcpy(output, baseMesh.data(), baseMesh.size() * sizeof(Triangle));
    /* Scale mesh back */
    const TransformST& tm = mChunkData[chunkInfoIndex].getTmToWorld();
    for (uint32_t i = 0; i < baseMesh.size(); ++i)
    {
        Triangle& triangle = output[i];
        triangle.a.p = tm.transformPos(triangle.a.p);
        triangle.b.p = tm.transformPos(triangle.b.p);
        triangle.c.p = tm.transformPos(triangle.c.p);
    }
    return baseMesh.size();
}

/**
 * Same as getBaseMesh() but writes into a caller-provided buffer, which must be
 * large enough for the chunk's triangle count.
 */
uint32_t FractureToolImpl::updateBaseMesh(int32_t chunkInfoIndex, Triangle* output)
{
    NVBLAST_ASSERT(mChunkPostprocessors.size() > 0);
    if (mChunkPostprocessors.size() == 0)
    {
        return 0;  // finalizeFracturing() should be called before getting mesh!
    }
    auto& baseMesh = mChunkPostprocessors[chunkInfoIndex]->getBaseMesh();
    memcpy(output, baseMesh.data(), baseMesh.size() * sizeof(Triangle));
    /* Scale mesh back */
    const TransformST& tm = mChunkData[chunkInfoIndex].getTmToWorld();
    for (uint32_t i = 0; i < baseMesh.size(); ++i)
    {
        Triangle& triangle = output[i];
        triangle.a.p = tm.transformPos(triangle.a.p);
        triangle.b.p = tm.transformPos(triangle.b.p);
        triangle.c.p = tm.transformPos(triangle.c.p);
    }
    return baseMesh.size();
}

/**
 * Volume of a closed triangle soup via the signed-tetrahedron sum, computed
 * relative to an approximate centroid for better numerical accuracy.
 * Returns 0 for an empty list.
 */
float getVolume(std::vector<Triangle>& triangles)
{
    if (triangles.size() == 0)
    {
        return 0.0f;
    }
    // Find an approximate centroid for a more accurate calculation
    NvcVec3 centroid = { 0.0f, 0.0f, 0.0f };
    for (size_t i = 0; i < triangles.size(); ++i)
    {
        centroid = centroid + triangles[i].a.p + triangles[i].b.p + triangles[i].c.p;
    }
    centroid = centroid / (3 * triangles.size());
    float volume = 0.0f;
    for (size_t i = 0; i < triangles.size(); ++i)
    {
        const NvcVec3 a = triangles[i].a.p - centroid;
        const NvcVec3 b = triangles[i].b.p - centroid;
        const NvcVec3 c = triangles[i].c.p - centroid;
        // Scalar triple product a . (b x c), expanded.
        volume += (a.x * b.y * c.z - a.x * b.z * c.y - a.y * b.x * c.z + a.y * b.z * c.x + a.z * b.x * c.y - a.z * b.y * c.x);
    }
    return (1.0f / 6.0f) * std::abs(volume);
}

// (Return type of FractureToolImpl::getMeshOverlap; its definition continues below.)
float
FractureToolImpl::getMeshOverlap(const Mesh& meshA, const Mesh& meshB) { BooleanEvaluator bTool; bTool.performBoolean(&meshA, &meshB, BooleanConfigurations::BOOLEAN_INTERSECTION()); Mesh* result = bTool.createNewMesh(); if (result == nullptr) { return 0.0f; } Triangulator postProcessor; postProcessor.triangulate(&meshA); float baseVolume = getVolume(postProcessor.getBaseMesh()); if (baseVolume == 0) { return 0.0f; } postProcessor.triangulate(result); float intrsVolume = getVolume(postProcessor.getBaseMesh()); delete result; return intrsVolume / baseVolume; } void weldVertices(std::map<Vertex, uint32_t, VrtComp>& vertexMapping, std::vector<Vertex>& vertexBuffer, std::vector<uint32_t>& indexBuffer, std::vector<Triangle>& trb) { for (uint32_t i = 0; i < trb.size(); ++i) { auto it = vertexMapping.find(trb[i].a); if (it == vertexMapping.end()) { indexBuffer.push_back(static_cast<uint32_t>(vertexBuffer.size())); vertexMapping[trb[i].a] = static_cast<uint32_t>(vertexBuffer.size()); vertexBuffer.push_back(trb[i].a); } else { indexBuffer.push_back(it->second); } it = vertexMapping.find(trb[i].b); if (it == vertexMapping.end()) { indexBuffer.push_back(static_cast<uint32_t>(vertexBuffer.size())); vertexMapping[trb[i].b] = static_cast<uint32_t>(vertexBuffer.size()); vertexBuffer.push_back(trb[i].b); } else { indexBuffer.push_back(it->second); } it = vertexMapping.find(trb[i].c); if (it == vertexMapping.end()) { indexBuffer.push_back(static_cast<uint32_t>(vertexBuffer.size())); vertexMapping[trb[i].c] = static_cast<uint32_t>(vertexBuffer.size()); vertexBuffer.push_back(trb[i].c); } else { indexBuffer.push_back(it->second); } } } void FractureToolImpl::setRemoveIslands(bool isRemoveIslands) { mRemoveIslands = isRemoveIslands; } int32_t FractureToolImpl::islandDetectionAndRemoving(int32_t chunkId, bool createAtNewDepth) { if (chunkId == 0 && createAtNewDepth == false) { return 0; } int32_t chunkInfoIndex = getChunkInfoIndex(chunkId); Triangulator prc; 
prc.triangulate(mChunkData[chunkInfoIndex].getMesh()); Mesh* chunk = mChunkData[chunkInfoIndex].getMesh(); std::vector<uint32_t>& mapping = prc.getBaseMapping(); std::vector<TriangleIndexed>& trs = prc.getBaseMeshIndexed(); std::vector<std::vector<uint32_t> > graph(prc.getWeldedVerticesCount()); std::vector<int32_t>& pm = prc.getPositionedMapping(); if (pm.size() == 0) { return 0; } /** Chunk graph */ for (uint32_t i = 0; i < trs.size(); ++i) { graph[pm[trs[i].ea]].push_back(pm[trs[i].eb]); graph[pm[trs[i].ea]].push_back(pm[trs[i].ec]); graph[pm[trs[i].ec]].push_back(pm[trs[i].eb]); graph[pm[trs[i].ec]].push_back(pm[trs[i].ea]); graph[pm[trs[i].eb]].push_back(pm[trs[i].ea]); graph[pm[trs[i].eb]].push_back(pm[trs[i].ec]); } for (uint32_t i = 0; i < chunk->getEdgesCount(); ++i) { int v1 = chunk->getEdges()[i].s; int v2 = chunk->getEdges()[i].e; v1 = pm[mapping[v1]]; v2 = pm[mapping[v2]]; graph[v1].push_back(v2); graph[v2].push_back(v1); } /** Walk graph, mark components */ std::vector<int32_t> comps(prc.getWeldedVerticesCount(), -1); std::queue<uint32_t> que; int32_t cComp = 0; for (uint32_t i = 0; i < prc.getWeldedVerticesCount(); ++i) { int32_t to = pm[i]; if (comps[to] != -1) continue; que.push(to); comps[to] = cComp; while (!que.empty()) { int32_t c = que.front(); que.pop(); for (uint32_t j = 0; j < graph[c].size(); ++j) { if (comps[graph[c][j]] == -1) { que.push(graph[c][j]); comps[graph[c][j]] = cComp; } } } cComp++; } for (uint32_t i = 0; i < prc.getWeldedVerticesCount(); ++i) { int32_t to = pm[i]; comps[i] = comps[to]; } std::vector<uint32_t> longComps(chunk->getVerticesCount()); for (uint32_t i = 0; i < chunk->getVerticesCount(); ++i) { int32_t to = mapping[i]; longComps[i] = comps[to]; } if (cComp > 1) { std::vector<std::vector<Vertex> > compVertices(cComp); std::vector<std::vector<Facet> > compFacets(cComp); std::vector<std::vector<Edge> > compEdges(cComp); std::vector<uint32_t> compVertexMapping(chunk->getVerticesCount(), 0); const Vertex* vrts = 
chunk->getVertices(); for (uint32_t v = 0; v < chunk->getVerticesCount(); ++v) { int32_t vComp = comps[mapping[v]]; compVertexMapping[v] = static_cast<uint32_t>(compVertices[vComp].size()); compVertices[vComp].push_back(vrts[v]); } const Facet* fcb = chunk->getFacetsBuffer(); const Edge* edb = chunk->getEdges(); for (uint32_t fc = 0; fc < chunk->getFacetCount(); ++fc) { std::vector<uint32_t> edgesPerComp(cComp, 0); for (uint32_t ep = fcb[fc].firstEdgeNumber; ep < fcb[fc].firstEdgeNumber + fcb[fc].edgesCount; ++ep) { int32_t vComp = comps[mapping[edb[ep].s]]; edgesPerComp[vComp]++; compEdges[vComp].push_back({compVertexMapping[edb[ep].s], compVertexMapping[edb[ep].e]}); } for (int32_t c = 0; c < cComp; ++c) { if (edgesPerComp[c] == 0) { continue; } compFacets[c].push_back(*chunk->getFacet(fc)); compFacets[c].back().edgesCount = edgesPerComp[c]; compFacets[c].back().firstEdgeNumber = static_cast<int32_t>(compEdges[c].size()) - edgesPerComp[c]; } } if (createAtNewDepth == false) { // We need to flag the chunk as changed, in case someone is calling this function directly // Otherwise when called as part of automatic island removal, chunks are already flagged as changed mChunkData[chunkInfoIndex].isChanged = true; delete mChunkData[chunkInfoIndex].getMesh(); Mesh* newMesh0 = new MeshImpl(compVertices[0].data(), compEdges[0].data(), compFacets[0].data(), static_cast<uint32_t>(compVertices[0].size()), static_cast<uint32_t>(compEdges[0].size()), static_cast<uint32_t>(compFacets[0].size())); setChunkInfoMesh(mChunkData[chunkInfoIndex], newMesh0); for (int32_t i = 1; i < cComp; ++i) { mChunkData.push_back(ChunkInfo(mChunkData[chunkInfoIndex])); mChunkData.back().chunkId = createId(); Mesh* newMesh_i = new MeshImpl(compVertices[i].data(), compEdges[i].data(), compFacets[i].data(), static_cast<uint32_t>(compVertices[i].size()), static_cast<uint32_t>(compEdges[i].size()), static_cast<uint32_t>(compFacets[i].size())); setChunkInfoMesh(mChunkData.back(), newMesh_i); } } else { 
deleteChunkSubhierarchy(chunkId); for (int32_t i = 0; i < cComp; ++i) { uint32_t nc = createNewChunk(chunkId); mChunkData[nc].isLeaf = true; mChunkData[nc].flags = ChunkInfo::APPROXIMATE_BONDING; Mesh* newMesh = new MeshImpl(compVertices[i].data(), compEdges[i].data(), compFacets[i].data(), static_cast<uint32_t>(compVertices[i].size()), static_cast<uint32_t>(compEdges[i].size()), static_cast<uint32_t>(compFacets[i].size())); setChunkInfoMesh(mChunkData[nc], newMesh); } mChunkData[chunkInfoIndex].isLeaf = false; } return cComp; } return 0; } uint32_t FractureToolImpl::getBufferedBaseMeshes(Vertex*& vertexBuffer, uint32_t*& indexBuffer, uint32_t*& indexBufferOffsets) { std::map<Vertex, uint32_t, VrtComp> vertexMapping; std::vector<Vertex> _vertexBuffer; std::vector<uint32_t> _indexBuffer; indexBufferOffsets = reinterpret_cast<uint32_t*>(NVBLAST_ALLOC((mChunkPostprocessors.size() + 1) * sizeof(uint32_t))); for (uint32_t ch = 0; ch < mChunkPostprocessors.size(); ++ch) { const TransformST& tm = mChunkData[ch].getTmToWorld(); std::vector<Triangle> trb = mChunkPostprocessors[ch]->getBaseMesh(); for (uint32_t i = 0; i < trb.size(); ++i) { Triangle& tri = trb[i]; tri.a.p = tm.transformPos(tri.a.p); tri.b.p = tm.transformPos(tri.b.p); tri.c.p = tm.transformPos(tri.c.p); } indexBufferOffsets[ch] = _indexBuffer.size(); weldVertices(vertexMapping, _vertexBuffer, _indexBuffer, trb); } indexBufferOffsets[mChunkPostprocessors.size()] = _indexBuffer.size(); vertexBuffer = reinterpret_cast<Vertex*>(NVBLAST_ALLOC(_vertexBuffer.size() * sizeof(Vertex))); indexBuffer = reinterpret_cast<uint32_t*>(NVBLAST_ALLOC(_indexBuffer.size() * sizeof(uint32_t))); memcpy(vertexBuffer, _vertexBuffer.data(), _vertexBuffer.size() * sizeof(Vertex)); memcpy(indexBuffer, _indexBuffer.data(), _indexBuffer.size() * sizeof(uint32_t)); return _vertexBuffer.size(); } int32_t FractureToolImpl::getChunkId(int32_t chunkInfoIndex) const { if (chunkInfoIndex < 0 || static_cast<uint32_t>(chunkInfoIndex) >= 
mChunkData.size()) { return -1; } return mChunkData[chunkInfoIndex].chunkId; } int32_t FractureToolImpl::getInteriorMaterialId() const { return mInteriorMaterialId; } void FractureToolImpl::replaceMaterialId(int32_t oldMaterialId, int32_t newMaterialId) { for (auto& chunkData : mChunkData) { if (chunkData.getMesh()) { chunkData.getMesh()->replaceMaterialId(oldMaterialId, newMaterialId); } } } uint32_t FractureToolImpl::stretchGroup(const std::vector<uint32_t>& grp, std::vector<std::vector<uint32_t> >& graph) { uint32_t parentChunkId = mChunkData[grp[0]].parentChunkId; uint32_t newChunkIndex = createNewChunk(parentChunkId); graph.push_back(std::vector<uint32_t>()); std::vector<Vertex> nVertices; std::vector<Edge> nEdges; std::vector<Facet> nFacets; uint32_t offsetVertices = 0; uint32_t offsetEdges = 0; for (uint32_t i = 0; i < grp.size(); ++i) { mChunkData[grp[i]].parentChunkId = mChunkData[newChunkIndex].chunkId; auto vr = mChunkData[grp[i]].getMesh()->getVertices(); auto ed = mChunkData[grp[i]].getMesh()->getEdges(); auto fc = mChunkData[grp[i]].getMesh()->getFacetsBuffer(); for (uint32_t v = 0; v < mChunkData[grp[i]].getMesh()->getVerticesCount(); ++v) { nVertices.push_back(vr[v]); } for (uint32_t v = 0; v < mChunkData[grp[i]].getMesh()->getEdgesCount(); ++v) { nEdges.push_back(ed[v]); nEdges.back().s += offsetVertices; nEdges.back().e += offsetVertices; } for (uint32_t v = 0; v < mChunkData[grp[i]].getMesh()->getFacetCount(); ++v) { nFacets.push_back(fc[v]); nFacets.back().firstEdgeNumber += offsetEdges; } offsetEdges = nEdges.size(); offsetVertices = nVertices.size(); if (mChunkData[grp[i]].flags & ChunkInfo::APPROXIMATE_BONDING) { mChunkData[newChunkIndex].flags |= ChunkInfo::APPROXIMATE_BONDING; } } std::vector<Facet> finalFacets; std::set<int64_t> hasCutting; for (uint32_t i = 0; i < nFacets.size(); ++i) { if (nFacets[i].userData != 0) hasCutting.insert(nFacets[i].userData); } for (uint32_t i = 0; i < nFacets.size(); ++i) { // N.B. 
This can lead to open meshes for non-voronoi fracturing. // We need to check if the opposing faces match exactly, or even better reconstruct parts that stick out. if (nFacets[i].userData == 0 || (hasCutting.find(-nFacets[i].userData) == hasCutting.end())) { finalFacets.push_back(nFacets[i]); } } Mesh* newMesh = new MeshImpl(nVertices.data(), nEdges.data(), finalFacets.data(), static_cast<uint32_t>(nVertices.size()), static_cast<uint32_t>(nEdges.size()), static_cast<uint32_t>(finalFacets.size())); setChunkInfoMesh(mChunkData[newChunkIndex], newMesh); return newChunkIndex; } uint32_t FractureToolImpl::createNewChunk(uint32_t parentChunkId) { const uint32_t index = static_cast<uint32_t>(mChunkData.size()); mChunkData.push_back(ChunkInfo()); mChunkData.back().parentChunkId = parentChunkId; mChunkData.back().chunkId = createId(); return index; } void FractureToolImpl::fitUvToRect(float side, uint32_t chunk) { int32_t infoIndex = getChunkInfoIndex(chunk); if (mChunkPostprocessors.empty()) // It seems finalize have not been called, call it here. 
{ finalizeFracturing(); } if (infoIndex == -1 || (int32_t)mChunkPostprocessors.size() <= infoIndex) { return; // We dont have such chunk tringulated; } nvidia::NvBounds3 bnd; bnd.setEmpty(); std::vector<Triangle>& ctrs = mChunkPostprocessors[infoIndex]->getBaseMesh(); std::vector<Triangle>& output = mChunkPostprocessors[infoIndex]->getBaseMesh(); for (uint32_t trn = 0; trn < ctrs.size(); ++trn) { if (ctrs[trn].userData == 0) continue; bnd.include(NvVec3(ctrs[trn].a.uv[0].x, ctrs[trn].a.uv[0].y, 0.0f)); bnd.include(NvVec3(ctrs[trn].b.uv[0].x, ctrs[trn].b.uv[0].y, 0.0f)); bnd.include(NvVec3(ctrs[trn].c.uv[0].x, ctrs[trn].c.uv[0].y, 0.0f)); } float xscale = side / (bnd.maximum.x - bnd.minimum.x); float yscale = side / (bnd.maximum.y - bnd.minimum.y); xscale = std::min(xscale, yscale); // To have uniform scaling for (uint32_t trn = 0; trn < ctrs.size(); ++trn) { if (ctrs[trn].userData == 0) continue; output[trn].a.uv[0].x = (ctrs[trn].a.uv[0].x - bnd.minimum.x) * xscale; output[trn].b.uv[0].x = (ctrs[trn].b.uv[0].x - bnd.minimum.x) * xscale; output[trn].c.uv[0].x = (ctrs[trn].c.uv[0].x - bnd.minimum.x) * xscale; output[trn].a.uv[0].y = (ctrs[trn].a.uv[0].y - bnd.minimum.y) * xscale; output[trn].b.uv[0].y = (ctrs[trn].b.uv[0].y - bnd.minimum.y) * xscale; output[trn].c.uv[0].y = (ctrs[trn].c.uv[0].y - bnd.minimum.y) * xscale; } } void FractureToolImpl::fitAllUvToRect(float side) { std::set<uint32_t> mask; fitAllUvToRect(side, mask); } void FractureToolImpl::fitAllUvToRect(float side, std::set<uint32_t>& mask) { if (mChunkPostprocessors.empty()) // It seems finalize have not been called, call it here. { finalizeFracturing(); } if (mChunkPostprocessors.empty()) { return; // We dont have triangulated chunks. 
} nvidia::NvBounds3 bnd; bnd.setEmpty(); for (uint32_t chunk = 0; chunk < mChunkData.size(); ++chunk) { Mesh* m = mChunkData[chunk].getMesh(); const Edge* edges = m->getEdges(); const Vertex* vertices = m->getVertices(); for (uint32_t trn = 0; trn < m->getFacetCount(); ++trn) { if (m->getFacet(trn)->userData == 0) continue; for (uint32_t ei = 0; ei < m->getFacet(trn)->edgesCount; ++ei) { int32_t v1 = edges[m->getFacet(trn)->firstEdgeNumber + ei].s; int32_t v2 = edges[m->getFacet(trn)->firstEdgeNumber + ei].e; bnd.include(NvVec3(vertices[v1].uv[0].x, vertices[v1].uv[0].y, 0.0f)); bnd.include(NvVec3(vertices[v2].uv[0].x, vertices[v2].uv[0].y, 0.0f)); } } } float xscale = side / (bnd.maximum.x - bnd.minimum.x); float yscale = side / (bnd.maximum.y - bnd.minimum.y); xscale = std::min(xscale, yscale); // To have uniform scaling for (uint32_t chunk = 0; chunk < mChunkPostprocessors.size(); ++chunk) { if (!mask.empty() && mask.find(mChunkPostprocessors[chunk]->getParentChunkId()) == mask.end()) continue; std::vector<Triangle>& ctrs = mChunkPostprocessors[chunk]->getBaseMeshNotFitted(); std::vector<Triangle>& output = mChunkPostprocessors[chunk]->getBaseMesh(); for (uint32_t trn = 0; trn < ctrs.size(); ++trn) { if (ctrs[trn].userData == 0) continue; output[trn].a.uv[0].x = (ctrs[trn].a.uv[0].x - bnd.minimum.x) * xscale; output[trn].b.uv[0].x = (ctrs[trn].b.uv[0].x - bnd.minimum.x) * xscale; output[trn].c.uv[0].x = (ctrs[trn].c.uv[0].x - bnd.minimum.x) * xscale; output[trn].a.uv[0].y = (ctrs[trn].a.uv[0].y - bnd.minimum.y) * xscale; output[trn].b.uv[0].y = (ctrs[trn].b.uv[0].y - bnd.minimum.y) * xscale; output[trn].c.uv[0].y = (ctrs[trn].c.uv[0].y - bnd.minimum.y) * xscale; } } } void FractureToolImpl::markLeaves() { for (ChunkInfo& info : mChunkData) { info.isLeaf = true; } for (ChunkInfo& info : mChunkData) { const int32_t infoIndex = getChunkInfoIndex(info.parentChunkId); if (infoIndex >= 0) { mChunkData[infoIndex].isLeaf = false; } } } bool 
FractureToolImpl::setChunkInfoMesh(ChunkInfo& chunkInfo, Mesh* mesh, bool fromTransformed /*= true*/) { // Class to access protected ChunkInfo members struct ChunkInfoAuth : public ChunkInfo { void setMesh(Mesh* mesh, const TransformST& parentTM) { meshData = mesh; if (meshData != nullptr) { // Calculate the world transform meshData->recalculateBoundingBox(); const TransformST localTM = createCubeTMFromBounds(meshData->getBoundingBox()); tmToWorld.s = parentTM.s * localTM.s; tmToWorld.t = parentTM.s * localTM.t + parentTM.t; // Transform vertex buffer to fit in unit cube Vertex* verticesBuffer = meshData->getVerticesWritable(); for (uint32_t i = 0; i < meshData->getVerticesCount(); ++i) { Nv::Blast::Vertex& v = verticesBuffer[i]; v.p = localTM.invTransformPos(v.p); } // If none of chunk.tmToWorld scales are zero (or less than epsilon), then the bounds // will be { {-1.0f, -1.0f, -1.0f}, {1.0f, 1.0f, 1.0f} }. Just in case, we properly // calculate the bounds here. meshData->recalculateBoundingBox(); } else { tmToWorld = TransformST::identity(); } } bool isInitialized() const { return parentChunkId != ChunkInfo::UninitializedID; } }; ChunkInfoAuth* auth = static_cast<ChunkInfoAuth*>(&chunkInfo); if (!auth->isInitialized()) { return false; } const TransformST parentTM = fromTransformed && chunkInfo.parentChunkId >= 0 ? 
mChunkData[getChunkInfoIndex(chunkInfo.parentChunkId)].getTmToWorld() : TransformST::identity(); auth->setMesh(mesh, parentTM); return true; } void FractureToolImpl::rebuildAdjGraph(const std::vector<uint32_t>& chunks, const NvcVec2i* adjChunks, uint32_t adjChunksSize, std::vector<std::vector<uint32_t> >& chunkGraph) { std::vector<std::pair<uint64_t, uint32_t> > planeChunkIndex; for (uint32_t i = 0; i < chunks.size(); ++i) { for (uint32_t fc = 0; fc < mChunkData[chunks[i]].getMesh()->getFacetCount(); ++fc) { if (mChunkData[chunks[i]].getMesh()->getFacet(fc)->userData != 0) { planeChunkIndex.push_back( std::make_pair(std::abs(mChunkData[chunks[i]].getMesh()->getFacet(fc)->userData), chunks[i])); } } } { std::sort(planeChunkIndex.begin(), planeChunkIndex.end()); auto it = std::unique(planeChunkIndex.begin(), planeChunkIndex.end()); planeChunkIndex.resize(it - planeChunkIndex.begin()); } uint32_t a = 0; for (uint32_t i = 1; i < planeChunkIndex.size(); ++i) { if (planeChunkIndex[a].first != planeChunkIndex[i].first) { uint32_t b = i; for (uint32_t p1 = a; p1 < b; ++p1) { for (uint32_t p2 = p1 + 1; p2 < b; ++p2) { if (planeChunkIndex[p1].second == planeChunkIndex[p2].second || mChunkData[planeChunkIndex[p1].second].parentChunkId != mChunkData[planeChunkIndex[p2].second].parentChunkId) { continue; } bool has = false; for (uint32_t k = 0; k < chunkGraph[planeChunkIndex[p1].second].size(); ++k) { if (chunkGraph[planeChunkIndex[p1].second][k] == planeChunkIndex[p2].second) { has = true; break; } } if (!has) { chunkGraph[planeChunkIndex[p1].second].push_back(planeChunkIndex[p2].second); } has = false; for (uint32_t k = 0; k < chunkGraph[planeChunkIndex[p2].second].size(); ++k) { if (chunkGraph[planeChunkIndex[p2].second][k] == planeChunkIndex[p1].second) { has = true; break; } } if (!has) { chunkGraph[planeChunkIndex[p2].second].push_back(planeChunkIndex[p1].second); } } } a = b; } } // Add in extra adjacency info, if we have it if (adjChunks && adjChunksSize) { 
std::set<uint32_t> chunkSet(chunks.begin(), chunks.end()); #if NV_DEBUG || NV_CHECKED // Make sure these arrays are sorted for (std::vector<uint32_t>& adj : chunkGraph) { const bool isSorted = std::is_sorted(adj.begin(), adj.end()); if (!isSorted) { NVBLAST_ASSERT(0); NvBlastGlobalGetErrorCallback()->reportError(nvidia::NvErrorCode::eDEBUG_WARNING, "Adjacency array not sorted; subsequent code assumes it is.", __FILE__, __LINE__); } } #endif for (uint32_t i = 0; i < adjChunksSize; ++i) { const NvcVec2i& pair = adjChunks[i]; if (chunkSet.find((uint32_t)pair.x) == chunkSet.end() || chunkSet.find((uint32_t)pair.y) == chunkSet.end()) { continue; } { std::vector<uint32_t>& adj0 = chunkGraph[pair.x]; std::vector<uint32_t>::iterator it0 = std::lower_bound(adj0.begin(), adj0.end(), (uint32_t)pair.y); if (it0 == adj0.end() || *it0 != (uint32_t)pair.y) { adj0.insert(it0, (uint32_t)pair.y); } } { std::vector<uint32_t>& adj1 = chunkGraph[pair.y]; std::vector<uint32_t>::iterator it1 = std::lower_bound(adj1.begin(), adj1.end(), (uint32_t)pair.x); if (it1 == adj1.end() || *it1 != (uint32_t)pair.x) { adj1.insert(it1, (uint32_t)pair.x); } } } } } bool VecIntComp(const std::pair<NvcVec3, uint32_t>& a, const std::pair<NvcVec3, uint32_t>& b) { if (a.first.x < b.first.x) return true; if (a.first.x > b.first.x) return false; if (a.first.y < b.first.y) return true; if (a.first.y > b.first.y) return false; if (a.first.z < b.first.z) return true; if (a.first.z > b.first.z) return false; return a.second < b.second; } void FractureToolImpl::uniteChunks(uint32_t threshold, uint32_t targetClusterSize, const uint32_t* chunksToMerge, uint32_t mergeChunkCount, const NvcVec2i* adjChunks, uint32_t adjChunksSize, bool removeOriginalChunks /*= false*/) { std::vector<int32_t> depth(mChunkData.size(), 0); std::vector<std::vector<uint32_t> > chunkGraph(mChunkData.size()); std::vector<uint32_t> atEachDepth; std::vector<uint32_t> childNumber(mChunkData.size(), 0); std::vector<uint32_t> chunksToRemove; enum 
ChunkFlags { Mergeable = (1 << 0), Merged = (1 << 1) }; std::vector<uint32_t> chunkFlags(mChunkData.size()); if (chunksToMerge == nullptr) { std::fill(chunkFlags.begin(), chunkFlags.end(), Mergeable); } else { // Seed all mergeable chunks with Mergeable flag for (uint32_t chunkN = 0; chunkN < mergeChunkCount; ++chunkN) { const uint32_t chunkIndex = chunksToMerge[chunkN]; chunkFlags[chunkIndex] |= Mergeable; } // Make all descendants mergable too std::vector<int32_t> treeWalk; for (uint32_t chunkInfoIndex = 0; chunkInfoIndex < mChunkData.size(); ++chunkInfoIndex) { treeWalk.clear(); int32_t walkInfoIndex = (int32_t)chunkInfoIndex; do { if (chunkFlags[walkInfoIndex] & Mergeable) { std::for_each(treeWalk.begin(), treeWalk.end(), [&chunkFlags](int32_t index) {chunkFlags[index] |= Mergeable; }); break; } treeWalk.push_back(walkInfoIndex); } while ((walkInfoIndex = getChunkInfoIndex(mChunkData[walkInfoIndex].parentChunkId)) >= 0); } } int32_t maxDepth = 0; for (uint32_t i = 0; i < mChunkData.size(); ++i) { if (mChunkData[i].parentChunkId != -1) childNumber[getChunkInfoIndex(mChunkData[i].parentChunkId)]++; depth[i] = getChunkDepth(mChunkData[i].chunkId); NVBLAST_ASSERT(depth[i] >= 0); maxDepth = std::max(maxDepth, depth[i]); } for (int32_t level = maxDepth; level > 0; --level) // go from leaves to trunk and rebuild hierarchy { std::vector<uint32_t> cGroup; std::vector<uint32_t> chunksToUnify; NvcVec3 minPoint = {MAXIMUM_EXTENT, MAXIMUM_EXTENT, MAXIMUM_EXTENT}; VrtPositionComparator posc; for (uint32_t ch = 0; ch < depth.size(); ++ch) { if (depth[ch] == level && childNumber[getChunkInfoIndex(mChunkData[ch].parentChunkId)] > threshold && (chunkFlags[ch] & Mergeable) != 0) { chunksToUnify.push_back(ch); NvcVec3 cp = fromNvShared(toNvShared(mChunkData[ch].getMesh()->getBoundingBox()).getCenter()); if (posc(cp, minPoint)) { minPoint = cp; } } } std::vector<std::pair<float, uint32_t> > distances; for (uint32_t i = 0; i < chunksToUnify.size(); ++i) { float d = 
(toNvShared(minPoint) - toNvShared(mChunkData[chunksToUnify[i]].getMesh()->getBoundingBox()).getCenter()).magnitude(); distances.push_back(std::make_pair(d, chunksToUnify[i])); } std::sort(distances.begin(), distances.end()); for (uint32_t i = 0; i < chunksToUnify.size(); ++i) { chunksToUnify[i] = distances[i].second; } rebuildAdjGraph(chunksToUnify, adjChunks, adjChunksSize, chunkGraph); for (uint32_t iter = 0; iter < 32 && chunksToUnify.size() > threshold; ++iter) { std::vector<uint32_t> newChunksToUnify; for (uint32_t c = 0; c < chunksToUnify.size(); ++c) { if ((chunkFlags[chunksToUnify[c]] & Mergeable) == 0) continue; chunkFlags[chunksToUnify[c]] &= ~Mergeable; cGroup.push_back(chunksToUnify[c]); for (uint32_t sc = 0; sc < cGroup.size() && cGroup.size() < targetClusterSize; ++sc) { uint32_t sid = cGroup[sc]; for (uint32_t neighbN = 0; neighbN < chunkGraph[sid].size() && cGroup.size() < targetClusterSize; ++neighbN) { const uint32_t chunkNeighb = chunkGraph[sid][neighbN]; if (mChunkData[chunkNeighb].parentChunkId != mChunkData[sid].parentChunkId) continue; if ((chunkFlags[chunkNeighb] & Mergeable) == 0) continue; chunkFlags[chunkNeighb] &= ~Mergeable; cGroup.push_back(chunkNeighb); } } if (cGroup.size() > 1) { uint32_t newChunk = stretchGroup(cGroup, chunkGraph); for (uint32_t chunk : cGroup) { if (removeOriginalChunks && !(chunkFlags[chunk] & Merged)) { chunksToRemove.push_back(chunk); } } cGroup.clear(); newChunksToUnify.push_back(newChunk); chunkFlags.push_back(Merged); } else { cGroup.clear(); } } chunksToUnify = newChunksToUnify; rebuildAdjGraph(chunksToUnify, adjChunks, adjChunksSize, chunkGraph); } } // Remove chunks std::vector<uint32_t> remap(mChunkData.size(), 0xFFFFFFFF); std::sort(chunksToRemove.begin(), chunksToRemove.end()); std::vector<uint32_t>::iterator removeIt = chunksToRemove.begin(); size_t chunkWriteIndex = 0; for (size_t chunkReadIndex = 0; chunkReadIndex < mChunkData.size(); ++chunkReadIndex) { if (removeIt < chunksToRemove.end()) { if 
(*removeIt == chunkReadIndex) { ++removeIt; continue; } } if (chunkReadIndex != chunkWriteIndex) { mChunkData[chunkWriteIndex] = mChunkData[chunkReadIndex]; } remap[chunkReadIndex] = chunkWriteIndex++; } mChunkData.resize(chunkWriteIndex); for (ChunkInfo& chunkInfo : mChunkData) { if (chunkInfo.parentChunkId >= 0) { const uint32_t mappedParentIndex = remap[getChunkInfoIndex(chunkInfo.parentChunkId)]; NVBLAST_ASSERT(mappedParentIndex < mChunkData.size()); if (mappedParentIndex < mChunkData.size()) { chunkInfo.parentChunkId = mChunkData[mappedParentIndex].chunkId; } } } } bool FractureToolImpl::setApproximateBonding(uint32_t chunkIndex, bool useApproximateBonding) { if ((size_t)chunkIndex >= mChunkData.size()) { return false; } if (useApproximateBonding) { mChunkData[chunkIndex].flags |= (uint32_t)ChunkInfo::APPROXIMATE_BONDING; } else { mChunkData[chunkIndex].flags &= ~(uint32_t)ChunkInfo::APPROXIMATE_BONDING; } return true; } int32_t FractureToolImpl::createId() { // make sure there is a free ID to be returned if (mChunkIdsUsed.size() >= (size_t)INT32_MAX + 1) { NvBlastGlobalGetErrorCallback()->reportError(nvidia::NvErrorCode::eINTERNAL_ERROR, "Chunk IDs exhausted.", __FILE__, __LINE__); return -1; } // find the next free ID while (mChunkIdsUsed.count(mNextChunkId)) { // handle wrapping if (++mNextChunkId < 0) mNextChunkId = 0; } // step the counter and handle wrapping const int32_t id = mNextChunkId++; if (mNextChunkId < 0) mNextChunkId = 0; return (reserveId(id) ? id : -1); } bool FractureToolImpl::reserveId(int32_t id) { // add it to the used set and make sure it wasn't already in there const auto ret = mChunkIdsUsed.insert(id); NVBLAST_ASSERT_WITH_MESSAGE(ret.second, "Request to reserve ID, but it is already in use"); return ret.second; } } // namespace Blast } // namespace Nv
98,101
C++
33.567301
180
0.569036
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/NvBlastExtAuthoringPerlinNoise.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved. #ifndef NVBLASTEXTAUTHORINGPERLINNOISE_H #define NVBLASTEXTAUTHORINGPERLINNOISE_H #include <NvBlastExtAuthoringFractureTool.h> #include "NvVec4.h" #include "NvVec3.h" #define PERLIN_NOISE_SAMPLE_TABLE 512 using nvidia::NvVec3; namespace Nv { namespace Blast { /*********** Noise generation routines, copied from Apex. 
*/ NV_INLINE float at3(const float& rx, const float& ry, const float& rz, const NvVec3 q) { return rx * q[0] + ry * q[1] + rz * q[2]; } NV_INLINE float fade(float t) { return t * t * t * (t * (t * 6.0f - 15.0f) + 10.0f); } NV_INLINE float lerp(float t, float a, float b) { return a + t * (b - a); } NV_INLINE void setup(int i, NvVec3 point, float& t, int& b0, int& b1, float& r0, float& r1) { t = point[i] + (0x1000); b0 = ((int)t) & (PERLIN_NOISE_SAMPLE_TABLE - 1); b1 = (b0 + 1) & (PERLIN_NOISE_SAMPLE_TABLE - 1); r0 = t - (int)t; r1 = r0 - 1.0f; } NV_INLINE float noiseSample(NvVec3 point, int* p, NvVec3* g) { int bx0, bx1, by0, by1, bz0, bz1, b00, b10, b01, b11; float rx0, rx1, ry0, ry1, rz0, rz1, sy, sz, a, b, c, d, t, u, v; NvVec3 q; int i, j; setup(0, point, t, bx0, bx1, rx0, rx1); setup(1, point, t, by0, by1, ry0, ry1); setup(2, point, t, bz0, bz1, rz0, rz1); i = p[bx0]; j = p[bx1]; b00 = p[i + by0]; b10 = p[j + by0]; b01 = p[i + by1]; b11 = p[j + by1]; t = fade(rx0); sy = fade(ry0); sz = fade(rz0); q = g[b00 + bz0]; u = at3(rx0, ry0, rz0, q); q = g[b10 + bz0]; v = at3(rx1, ry0, rz0, q); a = lerp(t, u, v); q = g[b01 + bz0]; u = at3(rx0, ry1, rz0, q); q = g[b11 + bz0]; v = at3(rx1, ry1, rz0, q); b = lerp(t, u, v); c = lerp(sy, a, b); q = g[b00 + bz1]; u = at3(rx0, ry0, rz1, q); q = g[b10 + bz1]; v = at3(rx1, ry0, rz1, q); a = lerp(t, u, v); q = g[b01 + bz1]; u = at3(rx0, ry1, rz1, q); q = g[b11 + bz1]; v = at3(rx1, ry1, rz1, q); b = lerp(t, u, v); d = lerp(sy, a, b); return lerp(sz, c, d); } /** Perlin Noise generation tool */ class PerlinNoise { public: /** \param[in] rnd Random value generator \param[in] octaves Number of noise octaves \param[in] frequency Frequency of noise \param[in] amplitude Amplitude of noise */ PerlinNoise(Nv::Blast::RandomGeneratorBase* rnd, int octaves = 1, float frequency = 1., float amplitude = 1.) 
: mRnd(rnd), mOctaves(octaves), mFrequency(frequency), mAmplitude(amplitude), mbInit(false) { } /* Reset state of noise generator \param[in] octaves Number of noise octaves \param[in] frequency Frequency of noise \param[in] amplitude Amplitude of noise */ void reset(int octaves = 1, float frequency = 1.f, float amplitude = 1.f) { mOctaves = octaves; mFrequency = frequency; mAmplitude = amplitude; init(); } /** Get Perlin Noise value at given point */ float sample(const nvidia::NvVec3& point) { return perlinNoise(point); } private: PerlinNoise& operator=(const PerlinNoise&); float perlinNoise(nvidia::NvVec3 point) { if (!mbInit) init(); const int octaves = mOctaves; const float frequency = mFrequency; float amplitude = mAmplitude; float result = 0.0f; point *= frequency; for (int i = 0; i < octaves; ++i) { NvVec3 lpnt; lpnt[0] = point.x; lpnt[1] = point.y; lpnt[2] = point.z; result += (noiseSample(lpnt, p, g)) * amplitude; point *= 2.0f; amplitude *= 0.5f; } return result; } void init(void) { mbInit = true; unsigned i, j; int k; for (i = 0; i < (unsigned)PERLIN_NOISE_SAMPLE_TABLE; i++) { p[i] = (int)i; for (j = 0; j < 3; ++j) g[i][j] = mRnd->getRandomValue(); g[i].normalize(); } while (--i) { k = p[i]; j = static_cast<uint32_t>(mRnd->getRandomValue() * PERLIN_NOISE_SAMPLE_TABLE); p[i] = p[j]; p[j] = k; } for (i = 0; i < PERLIN_NOISE_SAMPLE_TABLE + 2; ++i) { p[(unsigned)PERLIN_NOISE_SAMPLE_TABLE + i] = p[i]; for (j = 0; j < 3; ++j) g[(unsigned)PERLIN_NOISE_SAMPLE_TABLE + i][j] = g[i][j]; } } Nv::Blast::RandomGeneratorBase* mRnd; int mOctaves; float mFrequency; float mAmplitude; // Permutation vector int p[(unsigned)(PERLIN_NOISE_SAMPLE_TABLE + PERLIN_NOISE_SAMPLE_TABLE + 2)]; // Gradient vector NvVec3 g[(unsigned)(PERLIN_NOISE_SAMPLE_TABLE + PERLIN_NOISE_SAMPLE_TABLE + 2)]; bool mbInit; }; /** Simplex noise generation tool */ class SimplexNoise { int32_t mOctaves; float mAmplitude; float mFrequency; int32_t mSeed; static const int X_NOISE_GEN = 1619; static const int 
Y_NOISE_GEN = 31337; static const int Z_NOISE_GEN = 6971; static const int W_NOISE_GEN = 1999; static const int SEED_NOISE_GEN = 1013; static const int SHIFT_NOISE_GEN = 8; NV_INLINE int fastfloor(float x) { return (x >= 0) ? (int)x : (int)(x - 1); } SimplexNoise& operator=(const SimplexNoise&) { return *this; } public: /** \param[in] ampl Amplitude of noise \param[in] freq Frequency of noise \param[in] octaves Number of noise octaves \param[in] seed Random seed value */ SimplexNoise(float ampl, float freq, int32_t octaves, int32_t seed) : mOctaves(octaves), mAmplitude(ampl), mFrequency(freq), mSeed(seed) {}; // 4D simplex noise // returns: (x,y,z) = noise grad, w = noise value /** Evaluate noise at given 4d-point \param[in] x x coordinate of point \param[in] y y coordinate of point \param[in] z z coordinate of point \param[in] w w coordinate of point \param[in] seed Random seed value \return Noise valued vector (x,y,z) and scalar (w) */ nvidia::NvVec4 eval4D(float x, float y, float z, float w, int seed) { // The skewing and unskewing factors are hairy again for the 4D case const float F4 = (nvidia::NvSqrt(5.0f) - 1.0f) / 4.0f; const float G4 = (5.0f - nvidia::NvSqrt(5.0f)) / 20.0f; // Skew the (x,y,z,w) space to determine which cell of 24 simplices we're in float s = (x + y + z + w) * F4; // Factor for 4D skewing int ix = fastfloor(x + s); int iy = fastfloor(y + s); int iz = fastfloor(z + s); int iw = fastfloor(w + s); float tu = (ix + iy + iz + iw) * G4; // Factor for 4D unskewing // Unskew the cell origin back to (x,y,z,w) space float x0 = x - (ix - tu); // The x,y,z,w distances from the cell origin float y0 = y - (iy - tu); float z0 = z - (iz - tu); float w0 = w - (iw - tu); int c = (x0 > y0) ? (1 << 0) : (1 << 2); c += (x0 > z0) ? (1 << 0) : (1 << 4); c += (x0 > w0) ? (1 << 0) : (1 << 6); c += (y0 > z0) ? (1 << 2) : (1 << 4); c += (y0 > w0) ? (1 << 2) : (1 << 6); c += (z0 > w0) ? 
(1 << 4) : (1 << 6); nvidia::NvVec4 res; res.setZero(); // Calculate the contribution from the five corners for (int p = 4; p >= 0; --p) { int ixp = ((c >> 0) & 3) >= p ? 1 : 0; int iyp = ((c >> 2) & 3) >= p ? 1 : 0; int izp = ((c >> 4) & 3) >= p ? 1 : 0; int iwp = ((c >> 6) & 3) >= p ? 1 : 0; float xp = x0 - ixp + (4 - p) * G4; float yp = y0 - iyp + (4 - p) * G4; float zp = z0 - izp + (4 - p) * G4; float wp = w0 - iwp + (4 - p) * G4; float t = 0.6f - xp * xp - yp * yp - zp * zp - wp * wp; if (t > 0) { //get index int gradIndex = int(( X_NOISE_GEN * (ix + ixp) + Y_NOISE_GEN * (iy + iyp) + Z_NOISE_GEN * (iz + izp) + W_NOISE_GEN * (iw + iwp) + SEED_NOISE_GEN * seed) & 0xffffffff); gradIndex ^= (gradIndex >> SHIFT_NOISE_GEN); gradIndex &= 31; nvidia::NvVec4 g; { const int h = gradIndex; const int hs = 2 - (h >> 4); const int h1 = (h >> 3); g.x = (h1 == 0) ? 0.0f : ((h & 4) ? -1.0f : 1.0f); g.y = (h1 == 1) ? 0.0f : ((h & (hs << 1)) ? -1.0f : 1.0f); g.z = (h1 == 2) ? 0.0f : ((h & hs) ? -1.0f : 1.0f); g.w = (h1 == 3) ? 0.0f : ((h & 1) ? -1.0f : 1.0f); } float gdot = (g.x * xp + g.y * yp + g.z * zp + g.w * wp); float t2 = t * t; float t3 = t2 * t; float t4 = t3 * t; float dt4gdot = 8 * t3 * gdot; res.x += t4 * g.x - dt4gdot * xp; res.y += t4 * g.y - dt4gdot * yp; res.z += t4 * g.z - dt4gdot * zp; res.w += t4 * gdot; } } // scale the result to cover the range [-1,1] res *= 27; return res; } /** Evaluate noise at given 3d-point \param[in] p Point in which noise will be evaluated \return Noise value at given point */ float sample(nvidia::NvVec3 p) { p *= mFrequency; float result = 0.0f; float alpha = 1; for (int32_t i = 1; i <= mOctaves; ++i) { result += eval4D(p.x * i, p.y * i, p.z * i, i * 5.0f, mSeed).w * alpha; alpha *= 0.45f; } return result * mAmplitude; } }; } // Blast namespace } // Nv namespace #endif
11,924
C
29.655527
144
0.507632
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/NvBlastExtAuthoringFractureToolImpl.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #ifndef NVBLASTAUTHORINGFRACTURETOOLIMPL_H #define NVBLASTAUTHORINGFRACTURETOOLIMPL_H #include "NvBlastExtAuthoringFractureTool.h" #include "NvBlastExtAuthoringMesh.h" #include <vector> #include <set> namespace Nv { namespace Blast { class SpatialAccelerator; class Triangulator; /** Class for voronoi sites generation inside supplied mesh. 
*/
class VoronoiSitesGeneratorImpl : public VoronoiSitesGenerator
{
public:
    /**
        Voronoi sites should not be generated outside of the fractured mesh, so VoronoiSitesGenerator
        should be supplied with fracture mesh.
        \param[in] mesh Fracture mesh
        \param[in] rnd  User supplied random value generator.
    */
    VoronoiSitesGeneratorImpl(const Mesh* mesh, RandomGeneratorBase* rnd);
    ~VoronoiSitesGeneratorImpl();

    /** Releases this object. */
    void release() override;

    /**
        Set base fracture mesh
    */
    void setBaseMesh(const Mesh* m) override;

    /**
        Access to generated voronoi sites.
        \note NOTE(review): the original comment instructed calling NVBLAST_FREE on "hulls and
        hullsOffset", which appears copy-pasted from the collision-hull API; confirm whether the
        caller owns the returned buffer before freeing it.
        \param[out] sites Pointer to generated voronoi sites
        \return Count of generated voronoi sites.
    */
    uint32_t getVoronoiSites(const NvcVec3*& sites) override;

    /**
        Add site in particular point
        \param[in] site Site coordinates
    */
    void addSite(const NvcVec3& site) override;

    /**
        Uniformly generate sites inside the mesh
        \param[in] numberOfSites Number of generated sites
    */
    void uniformlyGenerateSitesInMesh(uint32_t numberOfSites) override;

    /**
        Generate sites in clustered fashion
        \param[in] numberOfClusters Number of generated clusters
        \param[in] sitesPerCluster  Number of sites in each cluster
        \param[in] clusterRadius    Voronoi cells cluster radius
    */
    void clusteredSitesGeneration(uint32_t numberOfClusters, uint32_t sitesPerCluster, float clusterRadius) override;

    /**
        Radial pattern of sites generation
        \param[in] center       Center of generated pattern
        \param[in] normal       Normal to plane in which sites are generated
        \param[in] radius       Pattern radius
        \param[in] angularSteps Number of angular steps
        \param[in] radialSteps  Number of radial steps
        \param[in] angleOffset  Angle offset at each radial step
        \param[in] variability  Randomness of sites distribution
    */
    void radialPattern(const NvcVec3& center, const NvcVec3& normal, float radius, int32_t angularSteps,
                       int32_t radialSteps, float angleOffset = 0.0f, float variability = 0.0f) override;

    /**
        Generate sites inside sphere
        \param[in] count  Count of generated sites
        \param[in] radius Radius of sphere
        \param[in] center Center of sphere
    */
    void generateInSphere(const uint32_t count, const float radius, const NvcVec3& center) override;

    /**
        Set stencil mesh. With stencil mesh sites are generated only inside both of fracture and stencil meshes.
        \param[in] stencil Stencil mesh.
    */
    void setStencil(const Mesh* stencil) override;

    /**
        Removes stencil mesh
    */
    void clearStencil() override;

    /**
        Deletes sites inside supplied sphere
        \param[in] radius            Radius of sphere
        \param[in] center            Center of sphere
        \param[in] eraserProbability Probability of removing some particular site
    */
    void deleteInSphere(const float radius, const NvcVec3& center, const float eraserProbability = 1) override;

private:
    // Sites accumulated so far; exposed (read-only) through getVoronoiSites().
    std::vector <NvcVec3> mGeneratedSites;
    // Fracture mesh; sites are only generated inside it.
    const Mesh* mMesh;
    // Optional stencil mesh; when set, sites must also lie inside it.
    const Mesh* mStencil;
    // User-supplied random number generator (not owned).
    RandomGeneratorBase* mRnd;
    // Acceleration structure used for point-in-mesh queries.
    SpatialAccelerator* mAccelerator;
};

/**
    FractureTool class provides methods to fracture provided mesh and generate Blast asset data
*/
class FractureToolImpl : public FractureTool
{
public:
    /**
        Constructs the tool in its default (reset) state.
    */
    FractureToolImpl() : mRemoveIslands(false)
    {
        reset();
    }

    ~FractureToolImpl()
    {
        reset();
    }

    void release() override;

    /**
        Reset FractureTool state.
    */
    void reset() override;

    /**
        Set the material id to use for new interior faces. Defaults to kMaterialInteriorId
    */
    void setInteriorMaterialId(int32_t materialId) override;

    /**
        Gets the material id to use for new interior faces
    */
    int32_t getInteriorMaterialId() const override;

    /**
        Replaces a material id on faces with a new one
    */
    void replaceMaterialId(int32_t oldMaterialId, int32_t newMaterialId) override;

    /**
        Set input meshes which will be fractured, FractureTool will be reset.
        If ids != nullptr, it must point to an array of length meshSizes.
        Each mesh will be assigned to a chunk with ID given by the corresponding element in ids.
        If the corresponding element is negative, or ids is NULL, then the chunk will be assigned
        an arbitrary (but currently unused) ID.
        Returns true iff all meshes were assigned chunks with valid IDs.
    */
    bool setSourceMeshes(Mesh const * const * meshes, uint32_t meshesSize, const int32_t* ids = nullptr) override;

    /**
        Set chunk mesh, parentId should be valid, return ID of new chunk.
        If chunkId >= 0 and currently unused, then that ID will be used (and returned).
        Otherwise an arbitrary (but currently unused) ID will be used and returned.
    */
    int32_t setChunkMesh(const Mesh* mesh, int32_t parentId, int32_t chunkId = -1) override;

    /**
        Get chunk mesh in polygonal representation
    */
    Mesh* createChunkMesh(int32_t chunkInfoIndex, bool splitUVs = true) override;

    /**
        Fractures specified chunk with voronoi method.
        \param[in] chunkId      Chunk to fracture
        \param[in] cellCount    Number of voronoi sites
        \param[in] cellPoints   Array of voronoi sites
        \param[in] replaceChunk if 'true', newly generated chunks will replace source chunk, if 'false', newly
        generated chunks will be at next depth level, source chunk will be parent for them.
        Case replaceChunk == true && chunkId == 0 considered as wrong input parameters
        \return If 0, fracturing is successful.
    */
    int32_t voronoiFracturing(uint32_t chunkId, uint32_t cellCount, const NvcVec3* cellPoints, bool replaceChunk) override;

    /**
        Fractures specified chunk with voronoi method. Cells can be scaled along x,y,z axes.
        \param[in] chunkId      Chunk to fracture
        \param[in] cellCount    Number of voronoi sites
        \param[in] cellPoints   Array of voronoi sites
        \param[in] scale        Voronoi cells scaling factor
        \param[in] rotation     Voronoi cells rotation. Has no effect without cells scale factor
        \param[in] replaceChunk if 'true', newly generated chunks will replace source chunk, if 'false', newly
        generated chunks will be at next depth level, source chunk will be parent for them.
        Case replaceChunk == true && chunkId == 0 considered as wrong input parameters
        \return If 0, fracturing is successful.
    */
    int32_t voronoiFracturing(uint32_t chunkId, uint32_t cellCount, const NvcVec3* cellPoints, const NvcVec3& scale,
                              const NvcQuat& rotation, bool replaceChunk) override;

    /**
        Fractures specified chunk with slicing method.
        \param[in] chunkId      Chunk to fracture
        \param[in] conf         Slicing parameters, see SlicingConfiguration.
        \param[in] replaceChunk if 'true', newly generated chunks will replace source chunk, if 'false', newly
        generated chunks will be at next depth level, source chunk will be parent for them.
        Case replaceChunk == true && chunkId == 0 considered as wrong input parameters
        \param[in] rnd          User supplied random number generator
        \return If 0, fracturing is successful.
    */
    int32_t slicing(uint32_t chunkId, const SlicingConfiguration& conf, bool replaceChunk, RandomGeneratorBase* rnd) override;

    /**
        Cut chunk with plane.
        \param[in] chunkId      Chunk to fracture
        \param[in] normal       Plane normal
        \param[in] position     Point on plane
        \param[in] noise        Noise configuration for plane-chunk intersection, see NoiseConfiguration.
        \param[in] replaceChunk if 'true', newly generated chunks will replace source chunk, if 'false', newly
        generated chunks will be at next depth level, source chunk will be parent for them.
        Case replaceChunk == true && chunkId == 0 considered as wrong input parameters
        \param[in] rnd          User supplied random number generator
        \return If 0, fracturing is successful.
    */
    int32_t cut(uint32_t chunkId, const NvcVec3& normal, const NvcVec3& position, const NoiseConfiguration& noise,
                bool replaceChunk, RandomGeneratorBase* rnd) override;

    /**
        Cutout fracture for specified chunk.
        \param[in] chunkId      Chunk to fracture
        \param[in] conf         Cutout parameters, see CutoutConfiguration.
        \param[in] replaceChunk if 'true', newly generated chunks will replace source chunk, if 'false', newly
        generated chunks will be at next depth level, source chunk will be parent for them.
        Case replaceChunk == true && chunkId == 0 considered as wrong input parameters
        \param[in] rnd          User supplied random number generator
        \return If 0, fracturing is successful.
    */
    int32_t cutout(uint32_t chunkId, CutoutConfiguration conf, bool replaceChunk, RandomGeneratorBase* rnd) override;

    /**
        Creates resulting fractured mesh geometry from intermediate format
    */
    void finalizeFracturing() override;

    uint32_t getChunkCount() const override;

    /**
        Get chunk information
    */
    const ChunkInfo& getChunkInfo(int32_t chunkInfoIndex) override;

    /**
        Get percentage of mesh overlap.
        Percentage computed as volume(intersection(meshA, meshB)) / volume(meshA)
        \param[in] meshA Mesh A
        \param[in] meshB Mesh B
        \return mesh overlap percentage
    */
    float getMeshOverlap(const Mesh& meshA, const Mesh& meshB) override;

    /**
        Get chunk base mesh
        \note User should call NVBLAST_FREE for output when it is no longer needed
        \param[in]  chunkIndex Chunk index
        \param[out] output     Array of triangles to be filled
        \return number of triangles in base mesh
    */
    uint32_t getBaseMesh(int32_t chunkIndex, Triangle*& output) override;

    /**
        Update chunk base mesh
        \note Does not allocate the output array; Triangle* output should be preallocated by user
        \param[in]  chunkIndex Chunk index
        \param[out] output     Array of triangles to be filled
        \return number of triangles in base mesh
    */
    uint32_t updateBaseMesh(int32_t chunkIndex, Triangle* output) override;

    /**
        Return info index of chunk with specified chunkId
        \param[in] chunkId Chunk ID
        \return Chunk index in internal buffer, if not exist -1 is returned.
    */
    int32_t getChunkInfoIndex(int32_t chunkId) const override;

    /**
        Return id of chunk with specified index.
        \param[in] chunkInfoIndex Chunk info index
        \return Chunk id or -1 if there is no such chunk.
    */
    int32_t getChunkId(int32_t chunkInfoIndex) const override;

    /**
        Return depth level of the given chunk
        \param[in] chunkId Chunk ID
        \return Chunk depth or -1 if there is no such chunk.
    */
    int32_t getChunkDepth(int32_t chunkId) const override;

    /**
        Return array of chunks IDs with given depth.
        \note User should call NVBLAST_FREE for chunkIds when it is no longer needed
        \param[in]  depth    Chunk depth
        \param[out] chunkIds Pointer to array of chunk IDs
        \return Number of chunks in array
    */
    uint32_t getChunksIdAtDepth(uint32_t depth, int32_t*& chunkIds) const override;

    /**
        Get result geometry without noise as vertex and index buffers, where index buffers contain
        series of triplets which represent triangles.
        \note User should call NVBLAST_FREE for vertexBuffer, indexBuffer and indexBufferOffsets when they are
        no longer needed
        \param[out] vertexBuffer       Array of vertices to be filled
        \param[out] indexBuffer        Array of indices to be filled
        \param[out] indexBufferOffsets Array of offsets in indexBuffer for each base mesh.
                    Contains getChunkCount() + 1 elements. Last one is indexBuffer size
        \return Number of vertices in vertexBuffer
    */
    uint32_t getBufferedBaseMeshes(Vertex*& vertexBuffer, uint32_t*& indexBuffer, uint32_t*& indexBufferOffsets) override;

    /**
        Set automatic islands removing. May cause instabilities.
        \param[in] isRemoveIslands Flag whether remove or not islands.
    */
    void setRemoveIslands(bool isRemoveIslands) override;

    /**
        Try to find islands and remove them in a specific chunk. If the chunk has children,
        island removing can lead to wrong results! Apply it before further chunk splitting.
        \param[in] chunkId Chunk ID which should be checked for islands
        \return Number of found islands is returned
    */
    int32_t islandDetectionAndRemoving(int32_t chunkId, bool createAtNewDepth = false) override;

    /**
        Check if input mesh contains open edges. Open edges can lead to wrong fracturing results.
        \return true if mesh contains open edges
    */
    bool isMeshContainOpenEdges(const Mesh* input) override;

    // Delete the chunk with the given ID and all of its descendants (optionally the root itself).
    bool deleteChunkSubhierarchy(int32_t chunkId, bool deleteRoot = false) override;

    // Merge the listed chunks into clusters; adjacency may be supplied explicitly via adjChunks.
    void uniteChunks(uint32_t threshold, uint32_t targetClusterSize, const uint32_t* chunksToMerge,
                     uint32_t mergeChunkCount, const NvcVec2i* adjChunks, uint32_t adjChunksSize,
                     bool removeOriginalChunks = false) override;

    // Enable/disable approximate bonding for the given chunk.
    bool setApproximateBonding(uint32_t chunkId, bool useApproximateBonding) override;

    /**
        Rescale interior uv coordinates of given chunk to fit square of given size.
        \param[in] side    Size of square side
        \param[in] chunkId Chunk ID for which UVs should be scaled.
    */
    void fitUvToRect(float side, uint32_t chunkId) override;

    /**
        Rescale interior uv coordinates of all existing chunks to fit square of given size,
        relative sizes will be preserved.
        \param[in] side Size of square side
    */
    void fitAllUvToRect(float side) override;

private:
    // True iff the chunk with ID ancestorId is an ancestor of the chunk with ID chunkId.
    bool isAncestorForChunk(int32_t ancestorId, int32_t chunkId);
    // Slicing variant that applies noise to the cutting surfaces.
    int32_t slicingNoisy(uint32_t chunkId, const SlicingConfiguration& conf, bool replaceChunk, RandomGeneratorBase* rnd);
    // Grow a merge group along the adjacency graph; returns the new group size.
    uint32_t stretchGroup(const std::vector<uint32_t>& group, std::vector<std::vector<uint32_t>>& graph);
    // Rebuild the chunk adjacency graph for the given chunks (explicit adjacency optional).
    void rebuildAdjGraph(const std::vector<uint32_t>& chunksToRebuild, const NvcVec2i* adjChunks,
                         uint32_t adjChunksSize, std::vector<std::vector<uint32_t> >& chunkGraph);
    // UV-fit restricted to the chunk-info indices in mask.
    void fitAllUvToRect(float side, std::set<uint32_t>& mask);
    // Mark chunks without children as leaves.
    void markLeaves();

    /*
     * Meshes are transformed to fit a unit cube, for algorithmic stability. This transform is stored
     * in the ChunkInfo. Some meshes are created from already-transformed chunks. If so, set
     * fromTransformed = true, so that the transform-to-world can be concatenated with the source mesh's.
     *
     * chunkInfo.parentChunkId must be valid if fromTransformed == true.
     *
     * Returns true iff successful.
     */
    bool setChunkInfoMesh(ChunkInfo& chunkInfo, Mesh* mesh, bool fromTransformed = true);

    /**
        Returns newly created chunk index in mChunkData.
    */
    uint32_t createNewChunk(uint32_t parentChunkId);

    /**
     * Returns a previously unused ID.
     */
    int32_t createId();

    /**
     * Mark the given ID as being used. Returns false if that ID was already marked as in use, true otherwise
     */
    bool reserveId(int32_t id);

protected:
    /* Chunk mesh wrappers */
    std::vector<Triangulator*> mChunkPostprocessors;

    // Running offset used when indexing cutting planes.
    int64_t mPlaneIndexerOffset;
    // Next candidate chunk ID handed out by createId().
    int32_t mNextChunkId;
    // Set of chunk IDs currently in use (see reserveId()).
    std::set<int32_t> mChunkIdsUsed;
    // Per-chunk data, indexed by chunk info index.
    std::vector<ChunkInfo> mChunkData;
    // When true, islands are removed automatically after fracturing.
    bool mRemoveIslands;
    // Material id assigned to newly created interior faces.
    int32_t mInteriorMaterialId;
};

// Computes, for every voronoi site, the list of (neighbor site, plane index) pairs bounding its cell.
int32_t findCellBasePlanes(const std::vector<NvcVec3>& sites, std::vector<std::vector<std::pair<int32_t, int32_t>>>& neighbors);
// Builds the polygonal mesh of a single voronoi cell from the precomputed neighbor planes.
Mesh* getCellMesh(class BooleanEvaluator& eval, int32_t planeIndexerOffset, int32_t cellId,
                  const std::vector<NvcVec3>& sites, const std::vector<std::vector<std::pair<int32_t, int32_t>>>& neighbors,
                  int32_t interiorMaterialId, NvcVec3 origin);

}  // namespace Blast
}  // namespace Nv

#endif // ifndef NVBLASTAUTHORINGFRACTURETOOLIMPL_H
20,783
C
45.084257
243
0.615407
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/NvBlastExtAuthoringBondGeneratorImpl.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved. 
// This warning arises when using some stl containers with older versions of VC // c:\program files (x86)\microsoft visual studio 12.0\vc\include\xtree(1826): warning C4702: unreachable code #include "NvPreprocessor.h" #if NV_VC && NV_VC < 14 #pragma warning(disable : 4702) #endif #include <NvBlastExtAuthoringBondGeneratorImpl.h> #include <NvBlast.h> #include <NvBlastGlobals.h> #include <NvBlastNvSharedHelpers.h> #include "NvBlastExtTriangleProcessor.h" #include "NvBlastExtApexSharedParts.h" #include "NvBlastExtAuthoringInternalCommon.h" #include "NvBlastExtAuthoringTypes.h" #include <vector> #include <map> #include "NvPlane.h" #include <algorithm> #include <cmath> #include <memory> #include <set> #define SAFE_ARRAY_NEW(T, x) ((x) > 0) ? reinterpret_cast<T*>(NVBLAST_ALLOC(sizeof(T) * (x))) : nullptr; //#define DEBUG_OUTPUT #ifdef DEBUG_OUTPUT void saveGeometryToObj(std::vector<NvVec3>& triangles, const char* filepath) { FILE* outStream = fopen(filepath, "w"); for (uint32_t i = 0; i < triangles.size(); ++i) { fprintf(outStream, "v %lf %lf %lf\n", triangles[i].x, triangles[i].y, triangles[i].z); ++i; fprintf(outStream, "v %lf %lf %lf\n", triangles[i].x, triangles[i].y, triangles[i].z); ++i; fprintf(outStream, "v %lf %lf %lf\n", triangles[i].x, triangles[i].y, triangles[i].z); } for (uint32_t i = 0; i < triangles.size() / 3; ++i) { NvVec3 normal = (triangles[3 * i + 2] - triangles[3 * i]).cross((triangles[3 * i + 1] - triangles[3 * i])).getNormalized(); fprintf(outStream, "vn %lf %lf %lf\n", normal.x, normal.y, normal.z); fprintf(outStream, "vn %lf %lf %lf\n", normal.x, normal.y, normal.z); fprintf(outStream, "vn %lf %lf %lf\n", normal.x, normal.y, normal.z); } int indx = 1; for (uint32_t i = 0; i < triangles.size() / 3; ++i) { fprintf(outStream, "f %d//%d ", indx, indx); indx++; fprintf(outStream, "%d//%d ", indx, indx); indx++; fprintf(outStream, "%d//%d \n", indx, indx); indx++; } fclose(outStream); } std::vector<NvVec3> intersectionBuffer; std::vector<NvVec3> 
meshBuffer; #endif namespace Nv { namespace Blast { #define EPS_PLANE 0.0001f nvidia::NvVec3 getNormal(const Triangle& t) { return toNvShared(t.b.p - t.a.p).cross(toNvShared(t.c.p - t.a.p)); } bool planeComparer(const PlaneChunkIndexer& as, const PlaneChunkIndexer& bs) { const NvcPlane& a = as.plane; const NvcPlane& b = bs.plane; if (a.d + EPS_PLANE < b.d) return true; if (a.d - EPS_PLANE > b.d) return false; if (a.n.x + EPS_PLANE < b.n.x) return true; if (a.n.x - EPS_PLANE > b.n.x) return false; if (a.n.y + EPS_PLANE < b.n.y) return true; if (a.n.y - EPS_PLANE > b.n.y) return false; return a.n.z + EPS_PLANE < b.n.z; } struct Bond { int32_t m_chunkId; int32_t m_planeIndex; int32_t triangleIndex; bool operator<(const Bond& inp) const { if (abs(m_planeIndex) == abs(inp.m_planeIndex)) { return m_chunkId < inp.m_chunkId; } else { return abs(m_planeIndex) < abs(inp.m_planeIndex); } } }; struct BondInfo { float area; nvidia::NvBounds3 m_bb; nvidia::NvVec3 centroid; nvidia::NvVec3 normal; int32_t m_chunkId; }; inline nvidia::NvVec3 getVertex(const Triangle& t, uint32_t i) { return toNvShared((&t.a)[i].p); } void AddTtAnchorPoints(const Triangle* a, const Triangle* b, std::vector<NvVec3>& points) { nvidia::NvVec3 na = getNormal(*a).getNormalized(); nvidia::NvVec3 nb = getNormal(*b).getNormalized(); nvidia::NvPlane pla(toNvShared(a->a.p), na); nvidia::NvPlane plb(toNvShared(b->a.p), nb); ProjectionDirections da = getProjectionDirection(na); ProjectionDirections db = getProjectionDirection(nb); TriangleProcessor prc; TrPrcTriangle2d ta(getProjectedPoint(toNvShared(a->a.p), da), getProjectedPoint(toNvShared(a->b.p), da), getProjectedPoint(toNvShared(a->c.p), da)); TrPrcTriangle2d tb(getProjectedPoint(toNvShared(b->a.p), db), getProjectedPoint(toNvShared(b->b.p), db), getProjectedPoint(toNvShared(b->c.p), db)); /** Compute */ for (uint32_t i = 0; i < 3; ++i) { nvidia::NvVec3 pt; if (getPlaneSegmentIntersection(pla, getVertex(*b, i), getVertex(*b, (i + 1) % 3), pt)) { 
nvidia::NvVec2 pt2 = getProjectedPoint(pt, da); if (prc.isPointInside(pt2, ta)) { points.push_back(pt); } } if (getPlaneSegmentIntersection(plb, getVertex(*a, i), getVertex(*a, (i + 1) % 3), pt)) { NvVec2 pt2 = getProjectedPoint(pt, db); if (prc.isPointInside(pt2, tb)) { points.push_back(pt); } } } } inline bool pointInsidePoly(const NvVec3& pt, const uint8_t* indices, uint16_t indexCount, const NvVec3* verts, const NvVec3& n) { int s = 0; for (uint16_t i = 0; i < indexCount; ++i) { const NvVec3 r0 = verts[indices[i]] - pt; const NvVec3 r1 = verts[indices[(i + 1) % indexCount]] - pt; const float cn = r0.cross(r1).dot(n); const int cns = cn >= 0 ? 1 : -1; if (!s) { s = cns; } if (cns * s < 0) { return false; } } return true; } void AddPpAnchorPoints(const uint8_t* indicesA, uint16_t indexCountA, const NvVec3* vertsA, const float planeA[4], const uint8_t* indicesB, uint16_t indexCountB, const NvVec3* vertsB, const float planeB[4], std::vector<NvVec3>& points) { NvPlane pla(planeA[0], planeA[1], planeA[2], planeA[3]); NvPlane plb(planeB[0], planeB[1], planeB[2], planeB[3]); for (uint16_t iA = 0; iA < indexCountA; ++iA) { NvVec3 pt; if (getPlaneSegmentIntersection(plb, vertsA[indicesA[iA]], vertsA[indicesA[(iA + 1) % indexCountA]], pt)) { if (pointInsidePoly(pt, indicesB, indexCountB, vertsB, plb.n)) { points.push_back(pt); } } } for (uint16_t iB = 0; iB < indexCountA; ++iB) { NvVec3 pt; if (getPlaneSegmentIntersection(pla, vertsB[indicesB[iB]], vertsB[indicesA[(iB + 1) % indexCountB]], pt)) { if (pointInsidePoly(pt, indicesA, indexCountA, vertsA, pla.n)) { points.push_back(pt); } } } } float BlastBondGeneratorImpl::processWithMidplanes(TriangleProcessor* trProcessor, const Triangle* mA, uint32_t mavc, const Triangle* mB, uint32_t mbvc, const CollisionHull* hull1, const CollisionHull* hull2, const std::vector<NvVec3>& hull1p, const std::vector<NvVec3>& hull2p, NvVec3& normal, NvVec3& centroid, float maxRelSeparation) { NvBounds3 bounds; NvBounds3 aBounds; NvBounds3 
bBounds; bounds.setEmpty(); aBounds.setEmpty(); bBounds.setEmpty(); NvVec3 chunk1Centroid(0, 0, 0); NvVec3 chunk2Centroid(0, 0, 0); /////////////////////////////////////////////////////////////////////////////////// if (hull1p.size() < 4 || hull2p.size() < 4) { return 0.0f; } for (uint32_t i = 0; i < hull1p.size(); ++i) { chunk1Centroid += hull1p[i]; bounds.include(hull1p[i]); aBounds.include(hull1p[i]); } for (uint32_t i = 0; i < hull2p.size(); ++i) { chunk2Centroid += hull2p[i]; bounds.include(hull2p[i]); bBounds.include(hull2p[i]); } chunk1Centroid *= (1.0f / hull1p.size()); chunk2Centroid *= (1.0f / hull2p.size()); const float maxSeparation = maxRelSeparation * std::sqrt(std::max(aBounds.getExtents().magnitudeSquared(), bBounds.getExtents().magnitudeSquared())); Separation separation; if (!importerHullsInProximityApexFree(hull1p.size(), hull1p.data(), aBounds, NvTransform(NvIdentity), NvVec3(1, 1, 1), hull2p.size(), hull2p.data(), bBounds, NvTransform(NvIdentity), NvVec3(1, 1, 1), 2.0f * maxSeparation, &separation)) { return 0.0f; } const bool have_geometry = (mA != nullptr && mB != nullptr) || (hull1 != nullptr && hull2 != nullptr); if (separation.getDistance() > 0 || !have_geometry) // If chunks don't intersect then use midplane to produce bond, // otherwise midplane can be wrong (only if we have geometry) { // Build first plane interface NvPlane midplane = separation.plane; if (!midplane.n.isFinite()) { return 0.0f; } std::vector<NvVec3> interfacePoints; float firstCentroidSide = (midplane.distance(chunk1Centroid) > 0) ? 1 : -1; float secondCentroidSide = (midplane.distance(chunk2Centroid) > 0) ? 
1 : -1; for (uint32_t i = 0; i < hull1p.size(); ++i) { float dst = midplane.distance(hull1p[i]); if (dst * firstCentroidSide < maxSeparation) { interfacePoints.push_back(hull1p[i]); } } for (uint32_t i = 0; i < hull2p.size(); ++i) { float dst = midplane.distance(hull2p[i]); if (dst * secondCentroidSide < maxSeparation) { interfacePoints.push_back(hull2p[i]); } } std::vector<NvVec3> convexHull; trProcessor->buildConvexHull(interfacePoints, convexHull, midplane.n); float area = 0; NvVec3 centroidLocal(0, 0, 0); if (convexHull.size() < 3) { return 0.0f; } for (uint32_t i = 0; i < convexHull.size() - 1; ++i) { centroidLocal += convexHull[i]; area += (convexHull[i] - convexHull[0]).cross((convexHull[i + 1] - convexHull[0])).magnitude(); } centroidLocal += convexHull.back(); centroidLocal *= (1.0f / convexHull.size()); float direction = midplane.n.dot(chunk2Centroid - chunk1Centroid); if (direction < 0) { normal = -1.0f * normal; } normal = midplane.n; centroid = centroidLocal; return area * 0.5f; } else { float area = 0.0f; std::vector<NvVec3> intersectionAnchors; if (hull1 != nullptr && hull2 != nullptr) // Use hulls { for (uint32_t i1 = 0; i1 < hull1->polygonDataCount; ++i1) { HullPolygon& poly1 = hull1->polygonData[i1]; for (uint32_t i2 = 0; i2 < hull2->polygonDataCount; ++i2) { HullPolygon& poly2 = hull2->polygonData[i2]; AddPpAnchorPoints(reinterpret_cast<uint8_t*>(hull1->indices) + poly1.indexBase, poly1.vertexCount, toNvShared(hull1->points), poly1.plane, reinterpret_cast<uint8_t*>(hull2->indices) + poly2.indexBase, poly2.vertexCount, toNvShared(hull2->points), poly2.plane, intersectionAnchors); } } } else if (mA != nullptr && mB != nullptr) // Use triangles { for (uint32_t i = 0; i < mavc; ++i) { for (uint32_t j = 0; j < mbvc; ++j) { AddTtAnchorPoints(mA + i, mB + j, intersectionAnchors); } } } else { NVBLAST_ASSERT_WITH_MESSAGE(false, "collision hulls and triangle data are both invalid, this shouldn't happen"); return 0.0f; } NvVec3 lcoid(0, 0, 0); for 
(uint32_t i = 0; i < intersectionAnchors.size(); ++i) { lcoid += intersectionAnchors[i]; } lcoid *= (1.0f / intersectionAnchors.size()); centroid = lcoid; if (intersectionAnchors.size() < 2) { return 0.0f; } NvVec3 dir1 = intersectionAnchors[0] - lcoid; NvVec3 dir2 = chunk2Centroid - chunk1Centroid; // A more reasonable fallback than (0,0,0) float maxMagn = 0.0f; float maxDist = 0.0f; for (uint32_t j = 0; j < intersectionAnchors.size(); ++j) { float d = (intersectionAnchors[j] - lcoid).magnitude(); NvVec3 tempNormal = (intersectionAnchors[j] - lcoid).cross(dir1); maxDist = std::max(d, maxDist); if (tempNormal.magnitude() > maxMagn) { dir2 = tempNormal; } } normal = dir2.getNormalized(); area = (maxDist * maxDist) * 3.14f; // Compute area like circle area; return area; } } struct BondGenerationCandidate { NvVec3 point; bool end; uint32_t parentChunk; uint32_t parentComponent; BondGenerationCandidate(); BondGenerationCandidate(const NvVec3& p, bool isEnd, uint32_t pr, uint32_t c) : point(p), end(isEnd), parentChunk(pr), parentComponent(c){}; bool operator<(const BondGenerationCandidate& in) const { if (point.x < in.point.x) return true; if (point.x > in.point.x) return false; if (point.y < in.point.y) return true; if (point.y > in.point.y) return false; if (point.z < in.point.z) return true; if (point.z > in.point.z) return false; return end < in.end; }; }; int32_t BlastBondGeneratorImpl::createFullBondListAveraged(uint32_t meshCount, const uint32_t* geometryOffset, const Triangle* geometry, const CollisionHull** chunkHulls, const bool* supportFlags, const uint32_t* meshGroups, NvBlastBondDesc*& resultBondDescs, BondGenerationConfig conf, std::set<std::pair<uint32_t, uint32_t> >* pairNotToTest) { std::vector<std::vector<NvcVec3> > chunksPoints(meshCount); std::vector<NvBounds3> bounds(meshCount); if (!chunkHulls) { for (uint32_t i = 0; i < meshCount; ++i) { bounds[i].setEmpty(); if (!supportFlags[i]) { continue; } uint32_t count = geometryOffset[i + 1] - 
geometryOffset[i];
        for (uint32_t j = 0; j < count; ++j)
        {
            // Gather raw triangle vertices as candidate hull points and grow the chunk AABB.
            chunksPoints[i].push_back(geometry[geometryOffset[i] + j].a.p);
            chunksPoints[i].push_back(geometry[geometryOffset[i] + j].b.p);
            chunksPoints[i].push_back(geometry[geometryOffset[i] + j].c.p);
            bounds[i].include(toNvShared(geometry[geometryOffset[i] + j].a.p));
            bounds[i].include(toNvShared(geometry[geometryOffset[i] + j].b.p));
            bounds[i].include(toNvShared(geometry[geometryOffset[i] + j].c.p));
        }
    }
}
std::vector<std::vector<std::vector<NvVec3> > > hullPoints(meshCount);
std::vector<BondGenerationCandidate> candidates;
std::vector<CollisionHull*> tempChunkHulls(meshCount, nullptr);
for (uint32_t chunk = 0; chunk < meshCount; ++chunk)
{
    if (!supportFlags[chunk])
    {
        continue;
    }
    NvBounds3 bnd(NvBounds3::empty());
    uint32_t hullCountForMesh = 0;
    const CollisionHull** beginChunkHulls = nullptr;
    if (chunkHulls)
    {
        // Caller supplied hulls: the offset table addresses hulls per chunk.
        hullCountForMesh = geometryOffset[chunk + 1] - geometryOffset[chunk];
        beginChunkHulls = chunkHulls + geometryOffset[chunk];
    }
    else
    {
        // build a convex hull and store it in the temp slot
        tempChunkHulls[chunk] =
            mConvexMeshBuilder->buildCollisionGeometry(chunksPoints[chunk].size(), chunksPoints[chunk].data());
        hullCountForMesh = 1;
        beginChunkHulls = const_cast<const CollisionHull**>(&tempChunkHulls[chunk]);
    }
    hullPoints[chunk].resize(hullCountForMesh);
    for (uint32_t hull = 0; hull < hullCountForMesh; ++hull)
    {
        // Convert hull points to the shared NvVec3 type once, and grow the chunk bound.
        auto& curHull = hullPoints[chunk][hull];
        const uint32_t pointCount = beginChunkHulls[hull]->pointsCount;
        curHull.resize(pointCount);
        for (uint32_t i = 0; i < pointCount; ++i)
        {
            curHull[i] = toNvShared(beginChunkHulls[hull]->points[i]);
            bnd.include(curHull[i]);
        }
    }
    float minSide = bnd.getDimensions().abs().minElement();
    if (minSide > 0.f)
    {
        // NOTE(review): scales the whole AABB by max(1.1, maxSeparation / minSide);
        // presumably intended to give at least conf.maxSeparation of slack on the
        // thinnest axis - confirm the intended expansion semantics.
        float scaling = std::max(1.1f, conf.maxSeparation / (minSide));
        bnd.scaleFast(scaling);
    }
    // Sweep-and-prune endpoints: min corner opens the chunk's interval, max corner closes it.
    candidates.push_back(
        BondGenerationCandidate(bnd.minimum, false, chunk, meshGroups != nullptr ? meshGroups[chunk] : 0));
    candidates.push_back(
        BondGenerationCandidate(bnd.maximum, true, chunk, meshGroups != nullptr ? meshGroups[chunk] : 0));
}
std::sort(candidates.begin(), candidates.end());

std::set<uint32_t> listOfActiveChunks;
std::vector<std::vector<uint32_t> > possibleBondGraph(meshCount);
// Sweep over sorted interval endpoints; any chunk active when another opens is a
// potential bond partner.
for (uint32_t idx = 0; idx < candidates.size(); ++idx)
{
    if (!candidates[idx].end)  // If new candidate
    {
        for (uint32_t activeChunk : listOfActiveChunks)
        {
            if (meshGroups != nullptr && (meshGroups[activeChunk] == candidates[idx].parentComponent))
                continue;  // Don't connect components with itself.
            possibleBondGraph[activeChunk].push_back(candidates[idx].parentChunk);
        }
        listOfActiveChunks.insert(candidates[idx].parentChunk);
    }
    else
    {
        listOfActiveChunks.erase(candidates[idx].parentChunk);
    }
}
TriangleProcessor trProcessor;
std::vector<NvBlastBondDesc> mResultBondDescs;
// Test every candidate pair hull-by-hull; a positive interface area produces a bond.
for (uint32_t i = 0; i < meshCount; ++i)
{
    const uint32_t ihullCount = hullPoints[i].size();
    for (uint32_t tj = 0; tj < possibleBondGraph[i].size(); ++tj)
    {
        uint32_t j = possibleBondGraph[i][tj];
        auto pr = (i < j) ? std::make_pair(i, j) : std::make_pair(j, i);
        if (pairNotToTest != nullptr && pairNotToTest->find(pr) != pairNotToTest->end())
        {
            continue;  // This chunks should not generate bonds. This is used for mixed generation with bondFrom
        }
        const uint32_t jhullCount = hullPoints[j].size();
        for (uint32_t ihull = 0; ihull < ihullCount; ++ihull)
        {
            for (uint32_t jhull = 0; jhull < jhullCount; ++jhull)
            {
                NvVec3 normal;
                NvVec3 centroid;
                // Estimate the contact interface between the two hulls (0 means no bond).
                float area = processWithMidplanes(
                    &trProcessor, geometry ? geometry + geometryOffset[i] : nullptr,
                    geometryOffset[i + 1] - geometryOffset[i], geometry ? geometry + geometryOffset[j] : nullptr,
                    geometryOffset[j + 1] - geometryOffset[j],
                    chunkHulls ? chunkHulls[geometryOffset[i] + ihull] : tempChunkHulls[i],
                    chunkHulls ? chunkHulls[geometryOffset[j] + jhull] : tempChunkHulls[j], hullPoints[i][ihull],
                    hullPoints[j][jhull], normal, centroid, conf.maxSeparation);
                if (area > 0)
                {
                    NvBlastBondDesc bDesc = NvBlastBondDesc();
                    bDesc.chunkIndices[0] = i;
                    bDesc.chunkIndices[1] = j;
                    bDesc.bond.area = area;
                    bDesc.bond.centroid[0] = centroid.x;
                    bDesc.bond.centroid[1] = centroid.y;
                    bDesc.bond.centroid[2] = centroid.z;
                    // Orient the normal consistently: flip it if it points toward the
                    // higher-indexed chunk's bounds center.
                    uint32_t maxIndex = std::max(i, j);
                    if ((bounds[maxIndex].getCenter() - centroid).dot(normal) < 0)
                    {
                        normal = -normal;
                    }
                    bDesc.bond.normal[0] = normal.x;
                    bDesc.bond.normal[1] = normal.y;
                    bDesc.bond.normal[2] = normal.z;
                    mResultBondDescs.push_back(bDesc);
                }
            }
        }
    }
}
// release any temp hulls allocated
for (CollisionHull* tempHullPtr : tempChunkHulls)
{
    if (tempHullPtr)
    {
        mConvexMeshBuilder->releaseCollisionHull(tempHullPtr);
    }
}
// Copy the accumulated descriptors into a caller-owned NVBLAST_ALLOC'd array.
resultBondDescs = SAFE_ARRAY_NEW(NvBlastBondDesc, mResultBondDescs.size());
memcpy(resultBondDescs, mResultBondDescs.data(), sizeof(NvBlastBondDesc) * mResultBondDescs.size());
return mResultBondDescs.size();
}

// Component-wise plane comparison with EPS_PLANE tolerance; returns 1 when equal, 0 otherwise.
uint32_t isSamePlane(NvcPlane& a, NvcPlane& b)
{
    if (NvAbs(a.d - b.d) > EPS_PLANE)
        return 0;
    if (NvAbs(a.n.x - b.n.x) > EPS_PLANE)
        return 0;
    if (NvAbs(a.n.y - b.n.y) > EPS_PLANE)
        return 0;
    if (NvAbs(a.n.z - b.n.z) > EPS_PLANE)
        return 0;
    return 1;
}

// Exact bond generation: index every support-chunk triangle by the plane it lies on,
// sort by plane, then hand off to the internal pass that matches coplanar triangles.
int32_t BlastBondGeneratorImpl::createFullBondListExact(uint32_t meshCount, const uint32_t* geometryOffset,
                                                        const Triangle* geometry, const bool* supportFlags,
                                                        NvBlastBondDesc*& resultBondDescs, BondGenerationConfig conf)
{
    std::vector<PlaneChunkIndexer> planeTriangleMapping;
    NV_UNUSED(conf);

    for (uint32_t i = 0; i < meshCount; ++i)
    {
        if (!supportFlags[i])
        {
            continue;
        }
        uint32_t count = geometryOffset[i + 1] - geometryOffset[i];
        for (uint32_t j = 0; j < count; ++j)
        {
#ifdef DEBUG_OUTPUT
            meshBuffer.push_back(geometry[geometryOffset[i] + j].a.p);
            meshBuffer.push_back(geometry[geometryOffset[i] + j].b.p);
            meshBuffer.push_back(geometry[geometryOffset[i] + j].c.p);
#endif
            NvcPlane nPlane =
                fromNvShared(nvidia::NvPlane(toNvShared(geometry[geometryOffset[i] + j].a.p),
                                             toNvShared(geometry[geometryOffset[i] + j].b.p),
                                             toNvShared(geometry[geometryOffset[i] + j].c.p)));
            planeTriangleMapping.push_back({ (int32_t)i, (int32_t)j, nPlane });
        }
    }

    // Sort so triangles lying on (nearly) the same plane become contiguous.
    std::sort(planeTriangleMapping.begin(), planeTriangleMapping.end(), planeComparer);
    return createFullBondListExactInternal(meshCount, geometryOffset, geometry, planeTriangleMapping, resultBondDescs);
}

// Populate the per-chunk caches (triangle copies, triangle planes, convex hulls,
// hull points and bounds) used by AVERAGE-mode bond generation.
void BlastBondGeneratorImpl::buildGeometryCache(uint32_t meshCount, const uint32_t* geometryOffset,
                                                const Triangle* geometry)
{
    uint32_t geometryCount = geometryOffset[meshCount];

    for (uint32_t i = 0; i < meshCount; i++)
    {
        // Deep-copy each chunk's triangles into the cache.
        mGeometryCache.push_back(std::vector<Triangle>());
        uint32_t count = geometryOffset[i + 1] - geometryOffset[i];
        mGeometryCache.back().resize(count);
        memcpy(mGeometryCache.back().data(), geometry + geometryOffset[i], sizeof(Triangle) * count);
    }
    mHullsPointsCache.resize(geometryCount);
    mBoundsCache.resize(geometryCount);
    mCHullCache.resize(geometryCount);

    for (uint32_t i = 0; i < mGeometryCache.size(); ++i)
    {
        for (uint32_t j = 0; j < mGeometryCache[i].size(); ++j)
        {
            // Cache the plane of every triangle, indexed by (chunk, triangle).
            NvcPlane nPlane = fromNvShared(nvidia::NvPlane(toNvShared(mGeometryCache[i][j].a.p),
                                                           toNvShared(mGeometryCache[i][j].b.p),
                                                           toNvShared(mGeometryCache[i][j].c.p)));
            mPlaneCache.push_back({ (int32_t)i, (int32_t)j, nPlane });
        }
    }

    for (uint32_t ch = 0; ch < mGeometryCache.size(); ++ch)
    {
        // Build one convex hull per chunk from all triangle vertices, then cache the
        // hull's points and AABB.
        std::vector<NvcVec3> chunksPoints(mGeometryCache[ch].size() * 3);
        int32_t sp = 0;
        for (uint32_t i = 0; i < mGeometryCache[ch].size(); ++i)
        {
            chunksPoints[sp++] = mGeometryCache[ch][i].a.p;
            chunksPoints[sp++] = mGeometryCache[ch][i].b.p;
            chunksPoints[sp++] = mGeometryCache[ch][i].c.p;
        }
        mCHullCache[ch] = mConvexMeshBuilder->buildCollisionGeometry(chunksPoints.size(), chunksPoints.data());
        mHullsPointsCache[ch].resize(mCHullCache[ch]->pointsCount);
        mBoundsCache[ch].setEmpty();
        for (uint32_t i = 0; i < mCHullCache[ch]->pointsCount; ++i)
        {
            mHullsPointsCache[ch][i] = toNvShared(mCHullCache[ch]->points[i]);
            mBoundsCache[ch].include(mHullsPointsCache[ch][i]);
        }
    }
}

// Drop all cached geometry and return cached convex hulls to the builder.
void BlastBondGeneratorImpl::resetGeometryCache()
{
    mGeometryCache.clear();
    mPlaneCache.clear();
    mHullsPointsCache.clear();
    for (auto h : mCHullCache)
    {
        mConvexMeshBuilder->releaseCollisionHull(h);
    }
    mCHullCache.clear();
    mBoundsCache.clear();
}

// EXACT-mode core: for each triangle, find coplanar triangles on the opposite-facing
// plane in other chunks, intersect them, and accumulate per-chunk-pair bond area/centroid.
int32_t BlastBondGeneratorImpl::createFullBondListExactInternal(uint32_t meshCount, const uint32_t* geometryOffset,
                                                                const Triangle* geometry,
                                                                std::vector<PlaneChunkIndexer>& planeTriangleMapping,
                                                                NvBlastBondDesc*& resultBondDescs)
{
    NV_UNUSED(meshCount);
    // Keyed by (chunkA, chunkB); value holds the accumulating descriptor plus the
    // number of collected vertices (used to average the centroid at the end).
    std::map<std::pair<int32_t, int32_t>, std::pair<NvBlastBondDesc, int32_t> > bonds;
    TriangleProcessor trPrc;
    std::vector<NvVec3> intersectionBufferLocal;
    NvBlastBondDesc cleanBond = NvBlastBondDesc();
    memset(&cleanBond, 0, sizeof(NvBlastBondDesc));

    for (uint32_t tIndex = 0; tIndex < planeTriangleMapping.size(); ++tIndex)
    {
        // Search the sorted mapping for triangles on the opposite-facing plane.
        PlaneChunkIndexer opp = planeTriangleMapping[tIndex];
        opp.plane.d *= -1;
        opp.plane.n = opp.plane.n * -1;
        uint32_t startIndex = (uint32_t)(std::lower_bound(planeTriangleMapping.begin(), planeTriangleMapping.end(),
                                                          opp, planeComparer) -
                                         planeTriangleMapping.begin());
        uint32_t endIndex = (uint32_t)(std::upper_bound(planeTriangleMapping.begin(), planeTriangleMapping.end(), opp,
                                                        planeComparer) -
                                       planeTriangleMapping.begin());

        // uint32_t startIndex = 0;
        // uint32_t endIndex = (uint32_t)planeTriangleMapping.size();

        PlaneChunkIndexer& mappedTr = planeTriangleMapping[tIndex];
        const Triangle& trl = geometry[geometryOffset[mappedTr.chunkId] + mappedTr.trId];
        NvPlane pln = toNvShared(mappedTr.plane);
        TrPrcTriangle trp(toNvShared(trl.a.p), toNvShared(trl.b.p), toNvShared(trl.c.p));
        // Work in triangle-centroid-local coordinates for better precision.
        NvVec3 trCentroid = toNvShared(trl.a.p + trl.b.p + trl.c.p) * (1.0f / 3.0f);
        trp.points[0] -= trCentroid;
        trp.points[1] -= trCentroid;
        trp.points[2] -= trCentroid;
        ProjectionDirections pDir = getProjectionDirection(pln.n);
        TrPrcTriangle2d trp2d;
        trp2d.points[0] =
            getProjectedPointWithWinding(trp.points[0], pDir);
        trp2d.points[1] = getProjectedPointWithWinding(trp.points[1], pDir);
        trp2d.points[2] = getProjectedPointWithWinding(trp.points[2], pDir);

        for (uint32_t i = startIndex; i <= endIndex && i < planeTriangleMapping.size(); ++i)
        {
            PlaneChunkIndexer& mappedTr2 = planeTriangleMapping[i];
            // NOTE(review): this compares a triangle id against a chunk id; presumably it
            // was meant to skip the source triangle itself - confirm.
            if (mappedTr2.trId == opp.chunkId)
            {
                continue;
            }
            if (!isSamePlane(opp.plane, mappedTr2.plane))
            {
                continue;
            }
            if (mappedTr.chunkId == mappedTr2.chunkId)
            {
                continue;
            }
            std::pair<int32_t, int32_t> bondEndPoints = std::make_pair(mappedTr.chunkId, mappedTr2.chunkId);
            // Only accumulate pairs in canonical (low, high) chunk order.
            if (bondEndPoints.second < bondEndPoints.first)
                continue;
            std::pair<int32_t, int32_t> bondEndPointsSwapped = std::make_pair(mappedTr2.chunkId, mappedTr.chunkId);
            if (bonds.find(bondEndPoints) == bonds.end() && bonds.find(bondEndPointsSwapped) != bonds.end())
            {
                continue;  // We do not need account interface surface twice
            }
            if (bonds.find(bondEndPoints) == bonds.end())
            {
                // First contribution for this pair: start from a zeroed descriptor.
                bonds[bondEndPoints].second = 0;
                bonds[bondEndPoints].first = cleanBond;
                bonds[bondEndPoints].first.chunkIndices[0] = bondEndPoints.first;
                bonds[bondEndPoints].first.chunkIndices[1] = bondEndPoints.second;
                bonds[bondEndPoints].first.bond.normal[0] = pln.n[0];
                bonds[bondEndPoints].first.bond.normal[1] = pln.n[1];
                bonds[bondEndPoints].first.bond.normal[2] = pln.n[2];
            }
            const Triangle& trl2 = geometry[geometryOffset[mappedTr2.chunkId] + mappedTr2.trId];
            TrPrcTriangle trp2(toNvShared(trl2.a.p), toNvShared(trl2.b.p), toNvShared(trl2.c.p));

            intersectionBufferLocal.clear();
            intersectionBufferLocal.reserve(32);
            trPrc.getTriangleIntersection(trp, trp2d, trp2, trCentroid, intersectionBufferLocal, pln.n);
            NvVec3 centroidPoint(0, 0, 0);
            int32_t collectedVerticesCount = 0;
            float area = 0;
            if (intersectionBufferLocal.size() >= 3)
            {
#ifdef DEBUG_OUTPUT
                for (uint32_t p = 1; p < intersectionBufferLocal.size() - 1; ++p)
                {
                    intersectionBuffer.push_back(intersectionBufferLocal[0]);
                    intersectionBuffer.push_back(intersectionBufferLocal[p]);
                    intersectionBuffer.push_back(intersectionBufferLocal[p + 1]);
                }
#endif
                // Fan-accumulate the intersection polygon's vertex sum and doubled area
                // (halved below when added to bond.area).
                centroidPoint = intersectionBufferLocal[0] + intersectionBufferLocal.back();
                collectedVerticesCount = 2;
                for (uint32_t j = 1; j < intersectionBufferLocal.size() - 1; ++j)
                {
                    ++collectedVerticesCount;
                    centroidPoint += intersectionBufferLocal[j];
                    area += (intersectionBufferLocal[j + 1] - intersectionBufferLocal[0])
                                .cross(intersectionBufferLocal[j] - intersectionBufferLocal[0])
                                .magnitude();
                }
            }
            if (area > 0.00001f)
            {
                bonds[bondEndPoints].second += collectedVerticesCount;
                bonds[bondEndPoints].first.bond.area += area * 0.5f;
                bonds[bondEndPoints].first.bond.centroid[0] += (centroidPoint.x);
                bonds[bondEndPoints].first.bond.centroid[1] += (centroidPoint.y);
                bonds[bondEndPoints].first.bond.centroid[2] += (centroidPoint.z);
            }
        }
    }

    std::vector<NvBlastBondDesc> mResultBondDescs;
    // Iterates by value on purpose: the copy is normalized and then appended.
    for (auto it : bonds)
    {
        if (it.second.first.bond.area > 0)
        {
            // Average the accumulated centroid over the collected vertex count.
            float mlt = 1.0f / (it.second.second);
            it.second.first.bond.centroid[0] *= mlt;
            it.second.first.bond.centroid[1] *= mlt;
            it.second.first.bond.centroid[2] *= mlt;
            mResultBondDescs.push_back(it.second.first);
        }
    }
#ifdef DEBUG_OUTPUT
    saveGeometryToObj(meshBuffer, "Mesh.obj");
    saveGeometryToObj(intersectionBuffer, "inter.obj");
#endif
    resultBondDescs = SAFE_ARRAY_NEW(NvBlastBondDesc, mResultBondDescs.size());
    memcpy(resultBondDescs, mResultBondDescs.data(), sizeof(NvBlastBondDesc) * mResultBondDescs.size());
    return mResultBondDescs.size();
}

// Force a bond between two convex hulls: find a separating/mid plane, cut each hull
// with it, and combine the two cross-sections into one bond. Returns 0 on success,
// 1 if no usable plane was found.
int32_t BlastBondGeneratorImpl::createBondForcedInternal(const std::vector<NvVec3>& hull0,
                                                         const std::vector<NvVec3>& hull1, const CollisionHull& cHull0,
                                                         const CollisionHull& cHull1, NvBounds3 bound0,
                                                         NvBounds3 bound1, NvBlastBond& resultBond, float overlapping)
{
    TriangleProcessor trProcessor;
    Separation separation;
    importerHullsInProximityApexFree(hull0.size(), hull0.data(), bound0, NvTransform(NvIdentity), NvVec3(1, 1, 1),
                                     hull1.size(), hull1.data(), bound1, NvTransform(NvIdentity), NvVec3(1, 1, 1),
                                     0.000, &separation);
    if
(std::isnan(separation.plane.d)) { importerHullsInProximityApexFree( hull0.size(), hull0.data(), bound0, NvTransform(NvVec3(0.000001f, 0.000001f, 0.000001f)), NvVec3(1, 1, 1), hull1.size(), hull1.data(), bound1, NvTransform(NvIdentity), NvVec3(1, 1, 1), 0.000, &separation); if (std::isnan(separation.plane.d)) { return 1; } } NvPlane pl = separation.plane; std::vector<NvVec3> ifsPoints[2]; float dst[2][2]; dst[0][0] = 0; dst[0][1] = MAXIMUM_EXTENT; for (uint32_t p = 0; p < cHull0.pointsCount; ++p) { float d = pl.distance(toNvShared(cHull0.points[p])); if (NvAbs(d) > NvAbs(dst[0][0])) { dst[0][0] = d; } if (NvAbs(d) < NvAbs(dst[0][1])) { dst[0][1] = d; } } dst[1][0] = 0; dst[1][1] = MAXIMUM_EXTENT; for (uint32_t p = 0; p < cHull1.pointsCount; ++p) { float d = pl.distance(toNvShared(cHull0.points[p])); if (NvAbs(d) > NvAbs(dst[1][0])) { dst[1][0] = d; } if (NvAbs(d) < NvAbs(dst[1][1])) { dst[1][1] = d; } } float cvOffset[2] = { dst[0][1] + (dst[0][0] - dst[0][1]) * overlapping, dst[1][1] + (dst[1][0] - dst[1][1]) * overlapping }; for (uint32_t i = 0; i < cHull0.polygonDataCount; ++i) { auto& pd = cHull0.polygonData[i]; NvVec3 result; for (uint32_t j = 0; j < pd.vertexCount; ++j) { uint32_t nxj = (j + 1) % pd.vertexCount; const uint32_t* ind = cHull0.indices; NvVec3 a = hull0[ind[j + pd.indexBase]] - pl.n * cvOffset[0]; NvVec3 b = hull0[ind[nxj + pd.indexBase]] - pl.n * cvOffset[0]; if (getPlaneSegmentIntersection(pl, a, b, result)) { ifsPoints[0].push_back(result); } } } for (uint32_t i = 0; i < cHull1.polygonDataCount; ++i) { auto& pd = cHull1.polygonData[i]; NvVec3 result; for (uint32_t j = 0; j < pd.vertexCount; ++j) { uint32_t nxj = (j + 1) % pd.vertexCount; const uint32_t* ind = cHull1.indices; NvVec3 a = hull1[ind[j + pd.indexBase]] - pl.n * cvOffset[1]; NvVec3 b = hull1[ind[nxj + pd.indexBase]] - pl.n * cvOffset[1]; if (getPlaneSegmentIntersection(pl, a, b, result)) { ifsPoints[1].push_back(result); } } } std::vector<NvVec3> convexes[2]; 
    trProcessor.buildConvexHull(ifsPoints[0], convexes[0], pl.n);
    trProcessor.buildConvexHull(ifsPoints[1], convexes[1], pl.n);
    float areas[2] = { 0, 0 };
    NvVec3 centroids[2] = { NvVec3(0, 0, 0), NvVec3(0, 0, 0) };
    // Fan-triangulate each cross-section polygon to accumulate its area and centroid.
    for (uint32_t cv = 0; cv < 2; ++cv)
    {
        if (convexes[cv].size() == 0)
        {
            continue;
        }
        centroids[cv] = convexes[cv][0] + convexes[cv].back();
        for (uint32_t i = 1; i < convexes[cv].size() - 1; ++i)
        {
            centroids[cv] += convexes[cv][i];
            areas[cv] += (convexes[cv][i + 1] - convexes[cv][0]).cross(convexes[cv][i] - convexes[cv][0]).magnitude();
#ifdef DEBUG_OUTPUT
            intersectionBuffer.push_back(convexes[cv][0]);
            intersectionBuffer.push_back(convexes[cv][i]);
            intersectionBuffer.push_back(convexes[cv][i + 1]);
#endif
        }
        centroids[cv] *= (1.0f / convexes[cv].size());
        areas[cv] = NvAbs(areas[cv]);
    }
    // NOTE(review): each areas[cv] is twice the polygon area (no 0.5 factor above), so
    // (areas[0] + areas[1]) * 0.5f equals the SUM of the true section areas, not their
    // average - confirm the intended normalization.
    resultBond.area = (areas[0] + areas[1]) * 0.5f;
    resultBond.centroid[0] = (centroids[0][0] + centroids[1][0]) * 0.5f;
    resultBond.centroid[1] = (centroids[0][1] + centroids[1][1]) * 0.5f;
    resultBond.centroid[2] = (centroids[0][2] + centroids[1][2]) * 0.5f;
    resultBond.normal[0] = pl.n[0];
    resultBond.normal[1] = pl.n[1];
    resultBond.normal[2] = pl.n[2];
    resultBond.userData = 0;

#ifdef DEBUG_OUTPUT
    saveGeometryToObj(meshBuffer, "ArbitMeshes.obj");
    saveGeometryToObj(intersectionBuffer, "inter.obj");
#endif
    return 0;
}

// Build NvBlast chunk and bond descriptors from a FractureTool's internal fracture result.
// Bonds come from grouping interface triangles by fracture-plane id (trRef.userData) and
// pairing opposite-facing groups; chunks flagged APPROXIMATE_BONDING get an extra
// AVERAGE-mode pass (continued in the following lines).
int32_t BlastBondGeneratorImpl::buildDescFromInternalFracture(FractureTool* tool, const bool* chunkIsSupport,
                                                              NvBlastBondDesc*& resultBondDescs,
                                                              NvBlastChunkDesc*& resultChunkDescriptors)
{
    uint32_t chunkCount = tool->getChunkCount();
    std::vector<uint32_t> trianglesCount(chunkCount);
    std::vector<std::shared_ptr<Triangle> > trianglesBuffer;

    // Take ownership of each chunk's base mesh; custom deleter frees with delete[].
    for (uint32_t i = 0; i < chunkCount; ++i)
    {
        Triangle* t;
        trianglesCount[i] = tool->getBaseMesh(i, t);
        trianglesBuffer.push_back(std::shared_ptr<Triangle>(t, [](Triangle* t) { delete[] t; }));
    }
    if (chunkCount == 0)
    {
        return 0;
    }
    resultChunkDescriptors = SAFE_ARRAY_NEW(NvBlastChunkDesc, trianglesBuffer.size());
    std::vector<Bond> bondDescriptors;
    bool hasApproximateBonding = false;

    for (uint32_t i = 0; i < chunkCount; ++i)
    {
        NvBlastChunkDesc& desc = resultChunkDescriptors[i];
        desc.userData = tool->getChunkId(i);
        desc.parentChunkDescIndex = tool->getChunkInfoIndex(tool->getChunkInfo(i).parentChunkId);
        desc.flags = NvBlastChunkDesc::NoFlags;
        hasApproximateBonding |= !!(tool->getChunkInfo(i).flags & ChunkInfo::APPROXIMATE_BONDING);
        if (chunkIsSupport[i])
        {
            desc.flags = NvBlastChunkDesc::SupportFlag;
        }

        NvVec3 chunkCentroid(0, 0, 0);
        for (uint32_t tr = 0; tr < trianglesCount[i]; ++tr)
        {
            auto& trRef = trianglesBuffer[i].get()[tr];
            chunkCentroid += toNvShared(trRef.a.p);
            chunkCentroid += toNvShared(trRef.b.p);
            chunkCentroid += toNvShared(trRef.c.p);
            // userData != 0 marks an interface triangle; the value's magnitude is the
            // fracture-plane index and its sign distinguishes the two sides of the cut
            // (see the forward/backward split below).
            int32_t id = trRef.userData;
            if (id == 0)
                continue;
            bondDescriptors.push_back(Bond());
            Bond& bond = bondDescriptors.back();
            bond.m_chunkId = i;
            bond.m_planeIndex = id;
            bond.triangleIndex = tr;
        }
        chunkCentroid *= (1.0f / (3 * trianglesCount[i]));
        desc.centroid[0] = chunkCentroid[0];
        desc.centroid[1] = chunkCentroid[1];
        desc.centroid[2] = chunkCentroid[2];
    }

    std::sort(bondDescriptors.begin(), bondDescriptors.end());
    std::vector<NvBlastBondDesc> mResultBondDescs;
    if (!bondDescriptors.empty())
    {
        int32_t chunkId, planeId;
        chunkId = bondDescriptors[0].m_chunkId;
        planeId = bondDescriptors[0].m_planeIndex;
        std::vector<BondInfo> forwardChunks;
        std::vector<BondInfo> backwardChunks;
        float area = 0;
        NvVec3 normal(0, 0, 0);
        NvVec3 centroid(0, 0, 0);
        int32_t collected = 0;
        NvBounds3 bb = NvBounds3::empty();
        chunkId = -1;
        planeId = bondDescriptors[0].m_planeIndex;
        // One extra iteration (i == size) flushes the last accumulated group.
        for (uint32_t i = 0; i <= bondDescriptors.size(); ++i)
        {
            // Group boundary: chunk or |plane| changed (or end) - emit the accumulated patch.
            if (i == bondDescriptors.size() ||
                (chunkId != bondDescriptors[i].m_chunkId || abs(planeId) != abs(bondDescriptors[i].m_planeIndex)))
            {
                if (chunkId != -1)
                {
                    // The accumulated (unnormalized) normal's length is twice the patch area.
                    area = 0.5f * normal.normalize();
                    centroid /= 3.0f * collected;
                    if (bondDescriptors[i - 1].m_planeIndex > 0)
                    {
                        forwardChunks.push_back(BondInfo());
                        forwardChunks.back().area = area;
                        forwardChunks.back().normal = normal;
                        forwardChunks.back().centroid = centroid;
                        forwardChunks.back().m_chunkId = chunkId;
                        forwardChunks.back().m_bb = bb;
                    }
                    else
                    {
                        backwardChunks.push_back(BondInfo());
                        backwardChunks.back().area = area;
                        backwardChunks.back().normal = normal;
                        backwardChunks.back().centroid = centroid;
                        backwardChunks.back().m_chunkId = chunkId;
                        backwardChunks.back().m_bb = bb;
                    }
                }
                bb.setEmpty();
                collected = 0;
                area = 0;
                normal = NvVec3(0, 0, 0);
                centroid = NvVec3(0, 0, 0);
                if (i != bondDescriptors.size())
                    chunkId = bondDescriptors[i].m_chunkId;
            }
            // Plane boundary: pair forward- and backward-facing patches into bonds.
            if (i == bondDescriptors.size() || abs(planeId) != abs(bondDescriptors[i].m_planeIndex))
            {
                for (uint32_t fchunk = 0; fchunk < forwardChunks.size(); ++fchunk)
                {
                    const BondInfo& fInfo = forwardChunks[fchunk];
                    if (chunkIsSupport[fInfo.m_chunkId] == false)
                    {
                        continue;
                    }
                    for (uint32_t bchunk = 0; bchunk < backwardChunks.size(); ++bchunk)
                    {
                        const BondInfo& bInfo = backwardChunks[bchunk];
                        if (weakBoundingBoxIntersection(fInfo.m_bb, bInfo.m_bb) == 0)
                        {
                            continue;
                        }
                        if (chunkIsSupport[bInfo.m_chunkId] == false)
                        {
                            continue;
                        }
                        mResultBondDescs.push_back(NvBlastBondDesc());
                        NvBlastBondDesc& bondDesc = mResultBondDescs.back();
                        // Use the minimum-area patch for the bond area and centroid
                        if (fInfo.area < bInfo.area)
                        {
                            bondDesc.bond.area = fInfo.area;
                            bondDesc.bond.centroid[0] = fInfo.centroid.x;
                            bondDesc.bond.centroid[1] = fInfo.centroid.y;
                            bondDesc.bond.centroid[2] = fInfo.centroid.z;
                            bondDesc.bond.normal[0] = fInfo.normal.x;
                            bondDesc.bond.normal[1] = fInfo.normal.y;
                            bondDesc.bond.normal[2] = fInfo.normal.z;
                        }
                        else
                        {
                            // Backward patch faces the other way; negate to keep a
                            // consistent bond orientation.
                            bondDesc.bond.area = bInfo.area;
                            bondDesc.bond.centroid[0] = bInfo.centroid.x;
                            bondDesc.bond.centroid[1] = bInfo.centroid.y;
                            bondDesc.bond.centroid[2] = bInfo.centroid.z;
                            bondDesc.bond.normal[0] = -bInfo.normal.x;
                            bondDesc.bond.normal[1] = -bInfo.normal.y;
                            bondDesc.bond.normal[2] = -bInfo.normal.z;
                        }
                        bondDesc.chunkIndices[0] = fInfo.m_chunkId;
                        bondDesc.chunkIndices[1] = bInfo.m_chunkId;
                    }
                }
                forwardChunks.clear();
                backwardChunks.clear();
                if (i != bondDescriptors.size())
                {
                    planeId = bondDescriptors[i].m_planeIndex;
                }
                else
                {
                    break;
                }
            }
            // Accumulate the current triangle into the running patch.
            collected++;
            auto& trRef = trianglesBuffer[chunkId].get()[bondDescriptors[i].triangleIndex];
            normal += getNormal(trRef);
            centroid += toNvShared(trRef.a.p);
            centroid += toNvShared(trRef.b.p);
            centroid += toNvShared(trRef.c.p);
            bb.include(toNvShared(trRef.a.p));
            bb.include(toNvShared(trRef.b.p));
            bb.include(toNvShared(trRef.c.p));
        }
    }

    if (hasApproximateBonding)
    {
        std::vector<Triangle> chunkTriangles;
        std::vector<uint32_t> chunkTrianglesOffsets;
        std::set<std::pair<uint32_t, uint32_t> > pairsAlreadyCreated;
        // Remember pairs already bonded above so the AVERAGE pass can skip them.
        for (uint32_t i = 0; i < mResultBondDescs.size(); ++i)
        {
            auto pr = (mResultBondDescs[i].chunkIndices[0] < mResultBondDescs[i].chunkIndices[1]) ?
                          std::make_pair(mResultBondDescs[i].chunkIndices[0], mResultBondDescs[i].chunkIndices[1]) :
                          std::make_pair(mResultBondDescs[i].chunkIndices[1], mResultBondDescs[i].chunkIndices[0]);
            pairsAlreadyCreated.insert(pr);
        }
        const float EXPANSION = 0.01f;
        chunkTrianglesOffsets.push_back(0);
        for (uint32_t i = 0; i < chunkCount; ++i)
        {
            const float SCALE_FACTOR = 1.001f;  // NOTE(review): unused - EXPANSION is applied instead.
            NvcVec3 centroid = {resultChunkDescriptors[i].centroid[0], resultChunkDescriptors[i].centroid[1],
                                resultChunkDescriptors[i].centroid[2]};
            for (uint32_t k = 0; k < trianglesCount[i]; ++k)
            {
                chunkTriangles.push_back(trianglesBuffer[i].get()[k]);
                // inflate mesh a bit
                chunkTriangles.back().a.p = chunkTriangles.back().a.p + (chunkTriangles.back().a.p - centroid) * EXPANSION;
                chunkTriangles.back().b.p = chunkTriangles.back().b.p + (chunkTriangles.back().b.p - centroid) * EXPANSION;
                chunkTriangles.back().c.p = chunkTriangles.back().c.p + (chunkTriangles.back().c.p - centroid) * EXPANSION;
            }
            chunkTrianglesOffsets.push_back(chunkTriangles.size());
        }
        NvBlastBondDesc* adsc;
        BondGenerationConfig cfg;
        cfg.bondMode = BondGenerationConfig::AVERAGE;
        cfg.maxSeparation = EXPANSION;
        uint32_t nbListSize = createFullBondListAveraged(chunkCount,
chunkTrianglesOffsets.data(), chunkTriangles.data(), nullptr, chunkIsSupport, nullptr, adsc, cfg, &pairsAlreadyCreated); for (uint32_t i = 0; i < nbListSize; ++i) { mResultBondDescs.push_back(adsc[i]); } NVBLAST_FREE(adsc); } resultBondDescs = SAFE_ARRAY_NEW(NvBlastBondDesc, mResultBondDescs.size()); memcpy(resultBondDescs, mResultBondDescs.data(), sizeof(NvBlastBondDesc) * mResultBondDescs.size()); return mResultBondDescs.size(); } int32_t BlastBondGeneratorImpl::createBondBetweenMeshes(uint32_t meshCount, const uint32_t* geometryOffset, const Triangle* geometry, uint32_t overlapsCount, const uint32_t* overlapsA, const uint32_t* overlapsB, NvBlastBondDesc*& resultBond, BondGenerationConfig cfg) { if (cfg.bondMode == BondGenerationConfig::AVERAGE) { resetGeometryCache(); buildGeometryCache(meshCount, geometryOffset, geometry); } resultBond = SAFE_ARRAY_NEW(NvBlastBondDesc, overlapsCount); if (cfg.bondMode == BondGenerationConfig::EXACT) { for (uint32_t i = 0; i < overlapsCount; ++i) { NvBlastBondDesc& desc = resultBond[i]; desc.chunkIndices[0] = overlapsA[i]; desc.chunkIndices[1] = overlapsB[i]; uint32_t meshACount = geometryOffset[overlapsA[i] + 1] - geometryOffset[overlapsA[i]]; uint32_t meshBCount = geometryOffset[overlapsB[i] + 1] - geometryOffset[overlapsB[i]]; createBondBetweenMeshes(meshACount, geometry + geometryOffset[overlapsA[i]], meshBCount, geometry + geometryOffset[overlapsB[i]], desc.bond, cfg); } } else { for (uint32_t i = 0; i < overlapsCount; ++i) { NvBlastBondDesc& desc = resultBond[i]; desc.chunkIndices[0] = overlapsA[i]; desc.chunkIndices[1] = overlapsB[i]; createBondForcedInternal(mHullsPointsCache[overlapsA[i]], mHullsPointsCache[overlapsB[i]], *mCHullCache[overlapsA[i]], *mCHullCache[overlapsB[i]], mBoundsCache[overlapsA[i]], mBoundsCache[overlapsB[i]], desc.bond, 0.3f); } } return overlapsCount; } int32_t BlastBondGeneratorImpl::createBondBetweenMeshes(uint32_t meshACount, const Triangle* meshA, uint32_t meshBCount, const Triangle* meshB, 
NvBlastBond& resultBond, BondGenerationConfig conf) { float overlapping = 0.3f; if (conf.bondMode == BondGenerationConfig::EXACT) { std::vector<uint32_t> chunksOffsets = { 0, meshACount, meshACount + meshBCount }; std::vector<Triangle> chunks; chunks.resize(meshACount + meshBCount); memcpy(chunks.data(), meshA, sizeof(Triangle) * meshACount); memcpy(chunks.data() + meshACount, meshB, sizeof(Triangle) * meshBCount); std::shared_ptr<bool> isSupport(new bool[2]{ true, true }, [](bool* b) { delete[] b; }); NvBlastBondDesc* desc; uint32_t descSize = createFullBondListExact(2, chunksOffsets.data(), chunks.data(), isSupport.get(), desc, conf); if (descSize > 0) { resultBond = desc->bond; } else { memset(&resultBond, 0, sizeof(NvBlastBond)); return 1; } return 0; } std::vector<NvcVec3> chunksPoints1(meshACount * 3); std::vector<NvcVec3> chunksPoints2(meshBCount * 3); int32_t sp = 0; for (uint32_t i = 0; i < meshACount; ++i) { chunksPoints1[sp++] = meshA[i].a.p; chunksPoints1[sp++] = meshA[i].b.p; chunksPoints1[sp++] = meshA[i].c.p; #ifdef DEBUG_OUTPUT meshBuffer.push_back(meshA[i].a.p); meshBuffer.push_back(meshA[i].b.p); meshBuffer.push_back(meshA[i].c.p); #endif } sp = 0; for (uint32_t i = 0; i < meshBCount; ++i) { chunksPoints2[sp++] = meshB[i].a.p; chunksPoints2[sp++] = meshB[i].b.p; chunksPoints2[sp++] = meshB[i].c.p; #ifdef DEBUG_OUTPUT meshBuffer.push_back(meshB[i].a.p); meshBuffer.push_back(meshB[i].b.p); meshBuffer.push_back(meshB[i].c.p); #endif } CollisionHull* cHull[2]; cHull[0] = mConvexMeshBuilder->buildCollisionGeometry(chunksPoints1.size(), chunksPoints1.data()); cHull[1] = mConvexMeshBuilder->buildCollisionGeometry(chunksPoints2.size(), chunksPoints2.data()); std::vector<NvVec3> hullPoints[2]; hullPoints[0].resize(cHull[0]->pointsCount); hullPoints[1].resize(cHull[1]->pointsCount); NvBounds3 bb[2]; bb[0].setEmpty(); bb[1].setEmpty(); for (uint32_t cv = 0; cv < 2; ++cv) { for (uint32_t i = 0; i < cHull[cv]->pointsCount; ++i) { hullPoints[cv][i] = 
toNvShared(cHull[cv]->points[i]); bb[cv].include(hullPoints[cv][i]); } } auto ret = createBondForcedInternal(hullPoints[0], hullPoints[1], *cHull[0], *cHull[1], bb[0], bb[1], resultBond, overlapping); mConvexMeshBuilder->releaseCollisionHull(cHull[0]); mConvexMeshBuilder->releaseCollisionHull(cHull[1]); return ret; } int32_t BlastBondGeneratorImpl::bondsFromPrefractured(uint32_t meshCount, const uint32_t* geometryCount, const Triangle* geometry, const bool* chunkIsSupport, NvBlastBondDesc*& resultBondDescs, BondGenerationConfig conf) { int32_t ret_val = 0; switch (conf.bondMode) { case BondGenerationConfig::AVERAGE: ret_val = createFullBondListAveraged(meshCount, geometryCount, geometry, nullptr, chunkIsSupport, nullptr, resultBondDescs, conf); break; case BondGenerationConfig::EXACT: ret_val = createFullBondListExact(meshCount, geometryCount, geometry, chunkIsSupport, resultBondDescs, conf); break; } return ret_val; } int32_t BlastBondGeneratorImpl::bondsFromPrefractured(uint32_t meshCount, const uint32_t* convexHullOffset, const CollisionHull** chunkHulls, const bool* chunkIsSupport, const uint32_t* meshGroups, NvBlastBondDesc*& resultBondDescs, float maxSeparation) { BondGenerationConfig conf; conf.maxSeparation = maxSeparation; conf.bondMode = BondGenerationConfig::AVERAGE; return createFullBondListAveraged(meshCount, convexHullOffset, nullptr, chunkHulls, chunkIsSupport, meshGroups, resultBondDescs, conf); } void BlastBondGeneratorImpl::release() { delete this; } } // namespace Blast } // namespace Nv
56,069
C++
37.091033
153
0.547433
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/NvBlastExtAuthoringCutoutImpl.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. 
#include "NvBlastGlobals.h" #include <NvBlastAssert.h> #include "NvBounds3.h" #include "NvMath.h" #include "NvAssert.h" #include <NvBlastNvSharedHelpers.h> #include "NvBlastExtAuthoringCutoutImpl.h" #include <algorithm> #include <set> #include <map> #include <stack> #define CUTOUT_DISTANCE_THRESHOLD (0.7f) #define CUTOUT_DISTANCE_EPS (0.01f) using namespace Nv::Blast; // Unsigned modulus uint32_t mod(int32_t n, uint32_t modulus) { const int32_t d = n/(int32_t)modulus; const int32_t m = n - d*(int32_t)modulus; return m >= 0 ? (uint32_t)m : (uint32_t)m + modulus; } float square(float x) { return x * x; } // 2D cross product float dotXY(const nvidia::NvVec3& v, const nvidia::NvVec3& w) { return v.x * w.x + v.y * w.y; } // Z-component of cross product float crossZ(const nvidia::NvVec3& v, const nvidia::NvVec3& w) { return v.x * w.y - v.y * w.x; } // z coordinates may be used to store extra info - only deal with x and y float perpendicularDistanceSquared(const nvidia::NvVec3& v0, const nvidia::NvVec3& v1, const nvidia::NvVec3& v2) { const nvidia::NvVec3 base = v2 - v0; const nvidia::NvVec3 leg = v1 - v0; const float baseLen2 = dotXY(base, base); return baseLen2 > NV_EPS_F32 * dotXY(leg, leg) ? square(crossZ(base, leg)) / baseLen2 : 0.0f; } // z coordinates may be used to store extra info - only deal with x and y float perpendicularDistanceSquared(const std::vector< nvidia::NvVec3 >& cutout, uint32_t index) { const uint32_t size = cutout.size(); return perpendicularDistanceSquared(cutout[(index + size - 1) % size], cutout[index], cutout[(index + 1) % size]); } //////////////////////////////////////////////// // ApexShareUtils - Begin //////////////////////////////////////////////// struct BoundsRep { BoundsRep() : type(0) { aabb.setEmpty(); } nvidia::NvBounds3 aabb; uint32_t type; // By default only reports if subtypes are the same, configurable. 
Valid range {0...7} }; struct IntPair { void set(int32_t _i0, int32_t _i1) { i0 = _i0; i1 = _i1; } int32_t i0, i1; static int compare(const void* a, const void* b) { const int32_t diff0 = ((IntPair*)a)->i0 - ((IntPair*)b)->i0; return diff0 ? diff0 : (((IntPair*)a)->i1 - ((IntPair*)b)->i1); } }; struct BoundsInteractions { BoundsInteractions() : bits(0x8040201008040201ULL) {} BoundsInteractions(bool setAll) : bits(setAll ? 0xFFFFFFFFFFFFFFFFULL : 0x0000000000000000ULL) {} bool set(unsigned group1, unsigned group2, bool interacts) { if (group1 >= 8 || group2 >= 8) { return false; } const uint64_t mask = (uint64_t)1 << ((group1 << 3) + group2) | (uint64_t)1 << ((group2 << 3) + group1); if (interacts) { bits |= mask; } else { bits &= ~mask; } return true; } uint64_t bits; }; enum Bounds3Axes { Bounds3X = 1, Bounds3Y = 2, Bounds3Z = 4, Bounds3XY = Bounds3X | Bounds3Y, Bounds3YZ = Bounds3Y | Bounds3Z, Bounds3ZX = Bounds3Z | Bounds3X, Bounds3XYZ = Bounds3X | Bounds3Y | Bounds3Z }; void boundsCalculateOverlaps(std::vector<IntPair>& overlaps, Bounds3Axes axesToUse, const BoundsRep* bounds, uint32_t boundsCount, uint32_t boundsByteStride, const BoundsInteractions& interactions = BoundsInteractions(), bool append = false); void createIndexStartLookup(std::vector<uint32_t>& lookup, int32_t indexBase, uint32_t indexRange, int32_t* indexSource, uint32_t indexCount, uint32_t indexByteStride); /* Index bank - double-sided free list for O(1) borrow/return of unique IDs Type IndexType should be an unsigned integer type or something that can be cast to and from an integer */ template <class IndexType> class IndexBank { public: IndexBank<IndexType>(uint32_t capacity = 0) : indexCount(0), capacityLocked(false) { maxCapacity = calculateMaxCapacity(); reserve_internal(capacity); } // Copy constructor IndexBank<IndexType>(const IndexBank<IndexType>& other) { *this = other; } virtual ~IndexBank<IndexType>() {} // Assignment operator IndexBank<IndexType>& operator = (const 
IndexBank<IndexType>& other) { indices = other.indices; ranks = other.ranks; maxCapacity = other.maxCapacity; indexCount = other.indexCount; capacityLocked = other.capacityLocked; return *this; } void setIndicesAndRanks(uint16_t* indicesIn, uint16_t* ranksIn, uint32_t capacityIn, uint32_t usedCountIn) { indexCount = usedCountIn; reserve_internal(capacityIn); for (uint32_t i = 0; i < capacityIn; ++i) { indices[i] = indicesIn[i]; ranks[i] = ranksIn[i]; } } void clear(uint32_t capacity = 0, bool used = false) { capacityLocked = false; indices.reset(); ranks.reset(); reserve_internal(capacity); if (used) { indexCount = capacity; indices.resize(capacity); for (IndexType i = (IndexType)0; i < (IndexType)capacity; ++i) { indices[i] = i; } } else { indexCount = 0; } } // Equivalent to calling freeLastUsed() until the used list is empty. void clearFast() { indexCount = 0; } // This is the reserve size. The bank can only grow, due to shuffling of indices virtual void reserve(uint32_t capacity) { reserve_internal(capacity); } // If lock = true, keeps bank from automatically resizing void lockCapacity(bool lock) { capacityLocked = lock; } bool isCapacityLocked() const { return capacityLocked; } void setMaxCapacity(uint32_t inMaxCapacity) { // Cannot drop below current capacity, nor above max set by data types maxCapacity = nvidia::NvClamp(inMaxCapacity, capacity(), calculateMaxCapacity()); } uint32_t capacity() const { return indices.size(); } uint32_t usedCount() const { return indexCount; } uint32_t freeCount() const { return capacity() - usedCount(); } // valid from [0] to [size()-1] const IndexType* usedIndices() const { return indices.data(); } // valid from [0] to [free()-1] const IndexType* freeIndices() const { return indices.begin() + usedCount(); } bool isValid(IndexType index) const { return index < (IndexType)capacity(); } bool isUsed(IndexType index) const { return isValid(index) && (ranks[index] < (IndexType)usedCount()); } bool isFree(IndexType index) const { 
return isValid(index) && !isUsed(); } IndexType getRank(IndexType index) const { return ranks[index]; } // Gets the next available index, if any bool useNextFree(IndexType& index) { if (freeCount() == 0) { if (capacityLocked) { return false; } if (capacity() >= maxCapacity) { return false; } reserve(nvidia::NvClamp(capacity() * 2, (uint32_t)1, maxCapacity)); NVBLAST_ASSERT(freeCount() > 0); } index = indices[indexCount++]; return true; } // Frees the last used index, if any bool freeLastUsed(IndexType& index) { if (usedCount() == 0) { return false; } index = indices[--indexCount]; return true; } // Requests a particular index. If that index is available, it is borrowed and the function // returns true. Otherwise nothing happens and the function returns false. bool use(IndexType index) { if (!indexIsValidForUse(index)) { return false; } IndexType oldRank; placeIndexAtRank(index, (IndexType)indexCount++, oldRank); return true; } bool free(IndexType index) { if (!indexIsValidForFreeing(index)) { return false; } IndexType oldRank; placeIndexAtRank(index, (IndexType)--indexCount, oldRank); return true; } bool useAndReturnRanks(IndexType index, IndexType& newRank, IndexType& oldRank) { if (!indexIsValidForUse(index)) { return false; } newRank = (IndexType)indexCount++; placeIndexAtRank(index, newRank, oldRank); return true; } bool freeAndReturnRanks(IndexType index, IndexType& newRank, IndexType& oldRank) { if (!indexIsValidForFreeing(index)) { return false; } newRank = (IndexType)--indexCount; placeIndexAtRank(index, newRank, oldRank); return true; } protected: bool indexIsValidForUse(IndexType index) { if (!isValid(index)) { if (capacityLocked) { return false; } if (capacity() >= maxCapacity) { return false; } reserve(nvidia::NvClamp(2 * (uint32_t)index, (uint32_t)1, maxCapacity)); NVBLAST_ASSERT(isValid(index)); } return !isUsed(index); } bool indexIsValidForFreeing(IndexType index) { if (!isValid(index)) { // Invalid index return false; } return isUsed(index); } // 
This is the reserve size. The bank can only grow, due to shuffling of indices void reserve_internal(uint32_t capacity) { capacity = std::min(capacity, maxCapacity); const uint32_t oldCapacity = indices.size(); if (capacity > oldCapacity) { indices.resize(capacity); ranks.resize(capacity); for (IndexType i = (IndexType)oldCapacity; i < (IndexType)capacity; ++i) { indices[i] = i; ranks[i] = i; } } } private: void placeIndexAtRank(IndexType index, IndexType newRank, IndexType& oldRank) // returns old rank { const IndexType replacementIndex = indices[newRank]; oldRank = ranks[index]; indices[oldRank] = replacementIndex; indices[newRank] = index; ranks[replacementIndex] = oldRank; ranks[index] = newRank; } uint32_t calculateMaxCapacity() { #pragma warning(push) #pragma warning(disable: 4127) // conditional expression is constant if (sizeof(IndexType) >= sizeof(uint32_t)) { return 0xFFFFFFFF; // Limited by data type we use to report capacity } else { return (1u << (8 * std::min((uint32_t)sizeof(IndexType), 3u))) - 1; // Limited by data type we use for indices } #pragma warning(pop) } protected: std::vector<IndexType> indices; std::vector<IndexType> ranks; uint32_t maxCapacity; uint32_t indexCount; bool capacityLocked; }; struct Marker { float pos; uint32_t id; // lsb = type (0 = max, 1 = min), other bits used for object index void set(float _pos, int32_t _id) { pos = _pos; id = (uint32_t)_id; } }; static int compareMarkers(const void* A, const void* B) { // Sorts by value. If values equal, sorts min types greater than max types, to reduce the # of overlaps const float delta = ((Marker*)A)->pos - ((Marker*)B)->pos; return delta != 0 ? (delta < 0 ? 
-1 : 1) : ((int)(((Marker*)A)->id & 1) - (int)(((Marker*)B)->id & 1)); } void boundsCalculateOverlaps(std::vector<IntPair>& overlaps, Bounds3Axes axesToUse, const BoundsRep* bounds, uint32_t boundsCount, uint32_t boundsByteStride, const BoundsInteractions& interactions, bool append) { if (!append) { overlaps.clear(); } uint32_t D = 0; uint32_t axisNums[3]; for (unsigned i = 0; i < 3; ++i) { if ((axesToUse >> i) & 1) { axisNums[D++] = i; } } if (D == 0 || D > 3) { return; } std::vector< std::vector<Marker> > axes; axes.resize(D); uint32_t overlapCount[3]; for (uint32_t n = 0; n < D; ++n) { const uint32_t axisNum = axisNums[n]; std::vector<Marker>& axis = axes[n]; overlapCount[n] = 0; axis.resize(2 * boundsCount); uint8_t* boundsPtr = (uint8_t*)bounds; for (uint32_t i = 0; i < boundsCount; ++i, boundsPtr += boundsByteStride) { const BoundsRep& boundsRep = *(const BoundsRep*)boundsPtr; const nvidia::NvBounds3& box = boundsRep.aabb; float min = box.minimum[axisNum]; float max = box.maximum[axisNum]; if (min >= max) { const float mid = 0.5f * (min + max); float pad = 0.000001f * fabsf(mid); min = mid - pad; max = mid + pad; } axis[i << 1].set(min, (int32_t)i << 1 | 1); axis[i << 1 | 1].set(max, (int32_t)i << 1); } qsort(axis.data(), axis.size(), sizeof(Marker), compareMarkers); uint32_t localOverlapCount = 0; for (uint32_t i = 0; i < axis.size(); ++i) { Marker& marker = axis[i]; if (marker.id & 1) { overlapCount[n] += localOverlapCount; ++localOverlapCount; } else { --localOverlapCount; } } } unsigned int axis0; unsigned int axis1; unsigned int axis2; unsigned int maxBin; if (D == 1) { maxBin = 0; axis0 = axisNums[0]; axis1 = axis0; axis2 = axis0; } else if (D == 2) { if (overlapCount[0] < overlapCount[1]) { maxBin = 0; axis0 = axisNums[0]; axis1 = axisNums[1]; axis2 = axis0; } else { maxBin = 1; axis0 = axisNums[1]; axis1 = axisNums[0]; axis2 = axis0; } } else { maxBin = overlapCount[0] < overlapCount[1] ? (overlapCount[0] < overlapCount[2] ? 
0U : 2U) : (overlapCount[1] < overlapCount[2] ? 1U : 2U); axis0 = axisNums[maxBin]; axis1 = (axis0 + 1) % 3; axis2 = (axis0 + 2) % 3; } const uint64_t interactionBits = interactions.bits; IndexBank<uint32_t> localOverlaps(boundsCount); std::vector<Marker>& axis = axes[maxBin]; float boxMin1 = 0.0f; float boxMax1 = 0.0f; float boxMin2 = 0.0f; float boxMax2 = 0.0f; for (uint32_t i = 0; i < axis.size(); ++i) { Marker& marker = axis[i]; const uint32_t index = marker.id >> 1; if (marker.id & 1) { const BoundsRep& boundsRep = *(const BoundsRep*)((uint8_t*)bounds + index*boundsByteStride); const uint8_t interaction = (uint8_t)((interactionBits >> (boundsRep.type << 3)) & 0xFF); const nvidia::NvBounds3& box = boundsRep.aabb; // These conditionals compile out with optimization: if (D > 1) { boxMin1 = box.minimum[axis1]; boxMax1 = box.maximum[axis1]; if (D == 3) { boxMin2 = box.minimum[axis2]; boxMax2 = box.maximum[axis2]; } } const uint32_t localOverlapCount = localOverlaps.usedCount(); const uint32_t* localOverlapIndices = localOverlaps.usedIndices(); for (uint32_t j = 0; j < localOverlapCount; ++j) { const uint32_t overlapIndex = localOverlapIndices[j]; const BoundsRep& overlapBoundsRep = *(const BoundsRep*)((uint8_t*)bounds + overlapIndex*boundsByteStride); if ((interaction >> overlapBoundsRep.type) & 1) { const nvidia::NvBounds3& overlapBox = overlapBoundsRep.aabb; // These conditionals compile out with optimization: if (D > 1) { if (boxMin1 >= overlapBox.maximum[axis1] || boxMax1 <= overlapBox.minimum[axis1]) { continue; } if (D == 3) { if (boxMin2 >= overlapBox.maximum[axis2] || boxMax2 <= overlapBox.minimum[axis2]) { continue; } } } // Add overlap IntPair pair; pair.i0 = (int32_t)index; pair.i1 = (int32_t)overlapIndex; overlaps.push_back(pair); } } NVBLAST_ASSERT(localOverlaps.isValid(index)); NVBLAST_ASSERT(!localOverlaps.isUsed(index)); localOverlaps.use(index); } else { // Remove local overlap NVBLAST_ASSERT(localOverlaps.isValid(index)); 
localOverlaps.free(index); } } } void createIndexStartLookup(std::vector<uint32_t>& lookup, int32_t indexBase, uint32_t indexRange, int32_t* indexSource, uint32_t indexCount, uint32_t indexByteStride) { if (indexRange == 0) { lookup.resize(std::max(indexRange + 1, 2u)); lookup[0] = 0; lookup[1] = indexCount; } else { lookup.resize(indexRange + 1); uint32_t indexPos = 0; for (uint32_t i = 0; i < indexRange; ++i) { for (; indexPos < indexCount; ++indexPos, indexSource = (int32_t*)((uintptr_t)indexSource + indexByteStride)) { if (*indexSource >= (int32_t)i + indexBase) { lookup[i] = indexPos; break; } } if (indexPos == indexCount) { lookup[i] = indexPos; } } lookup[indexRange] = indexCount; } } //////////////////////////////////////////////// // ApexShareUtils - End //////////////////////////////////////////////// struct CutoutVert { int32_t cutoutIndex; int32_t vertIndex; void set(int32_t _cutoutIndex, int32_t _vertIndex) { cutoutIndex = _cutoutIndex; vertIndex = _vertIndex; } }; struct NewVertex { CutoutVert vertex; float edgeProj; }; static int compareNewVertices(const void* a, const void* b) { const int32_t cutoutDiff = ((NewVertex*)a)->vertex.cutoutIndex - ((NewVertex*)b)->vertex.cutoutIndex; if (cutoutDiff) { return cutoutDiff; } const int32_t vertDiff = ((NewVertex*)a)->vertex.vertIndex - ((NewVertex*)b)->vertex.vertIndex; if (vertDiff) { return vertDiff; } const float projDiff = ((NewVertex*)a)->edgeProj - ((NewVertex*)b)->edgeProj; return projDiff ? (projDiff < 0.0f ? 
-1 : 1) : 0; } template<typename T> class Map2d { public: Map2d(uint32_t width, uint32_t height) { create_internal(width, height, NULL); } Map2d(uint32_t width, uint32_t height, T fillValue) { create_internal(width, height, &fillValue); } Map2d(const Map2d& map) { *this = map; } Map2d& operator = (const Map2d& map) { mMem.clear(); create_internal(map.mWidth, map.mHeight, NULL); return *this; } void create(uint32_t width, uint32_t height) { return create_internal(width, height, NULL); } void create(uint32_t width, uint32_t height, T fillValue) { create_internal(width, height, &fillValue); } //void clear(const T value) //{ // for (auto it = mMem.begin(); it != mMem.end(); it++) // { // for (auto it2 = it->begin(); it2 != it->end(); it2++) // { // *it2 = value; // } // } //} void setOrigin(uint32_t x, uint32_t y) { mOriginX = x; mOriginY = y; } const T& operator()(int32_t x, int32_t y) const { x = (int32_t)mod(x+(int32_t)mOriginX, mWidth); y = (int32_t)mod(y+(int32_t)mOriginY, mHeight); return mMem[y][x]; } T& operator()(int32_t x, int32_t y) { x = (int32_t)mod(x+(int32_t)mOriginX, mWidth); y = (int32_t)mod(y+(int32_t)mOriginY, mHeight); return mMem[y][x]; } private: void create_internal(uint32_t width, uint32_t height, T* val) { mMem.clear(); mWidth = width; mHeight = height; mMem.resize(mHeight); for (auto it = mMem.begin(); it != mMem.end(); it++) { it->resize(mWidth, val ? 
*val : 0); } mOriginX = 0; mOriginY = 0; } std::vector<std::vector<T>> mMem; uint32_t mWidth; uint32_t mHeight; uint32_t mOriginX; uint32_t mOriginY; }; class BitMap { public: BitMap() : mMem(NULL) {} BitMap(uint32_t width, uint32_t height) : mMem(NULL) { create_internal(width, height, NULL); } BitMap(uint32_t width, uint32_t height, bool fillValue) : mMem(NULL) { create_internal(width, height, &fillValue); } BitMap(const BitMap& map) { *this = map; } ~BitMap() { delete [] mMem; } BitMap& operator = (const BitMap& map) { delete [] mMem; mMem = NULL; if (map.mMem) { create_internal(map.mWidth, map.mHeight, NULL); memcpy(mMem, map.mMem, mHeight * mRowBytes); } return *this; } void create(uint32_t width, uint32_t height) { return create_internal(width, height, NULL); } void create(uint32_t width, uint32_t height, bool fillValue) { create_internal(width, height, &fillValue); } void clear(bool value) { memset(mMem, value ? 0xFF : 0x00, mRowBytes * mHeight); } void setOrigin(uint32_t x, uint32_t y) { mOriginX = x; mOriginY = y; } bool read(int32_t x, int32_t y) const { x = (int32_t)mod(x+(int32_t)mOriginX, mWidth); y = (int32_t)mod(y+(int32_t)mOriginY, mHeight); return ((mMem[(x >> 3) + y * mRowBytes] >> (x & 7)) & 1) != 0; } void set(int32_t x, int32_t y) { x = (int32_t)mod(x+(int32_t)mOriginX, mWidth); y = (int32_t)mod(y+(int32_t)mOriginY, mHeight); mMem[(x >> 3) + y * mRowBytes] |= 1 << (x & 7); } void reset(int32_t x, int32_t y) { x = (int32_t)mod(x+(int32_t)mOriginX, mWidth); y = (int32_t)mod(y+(int32_t)mOriginY, mHeight); mMem[(x >> 3) + y * mRowBytes] &= ~(1 << (x & 7)); } private: void create_internal(uint32_t width, uint32_t height, bool* val) { delete [] mMem; mRowBytes = (width + 7) >> 3; const uint32_t bytes = mRowBytes * height; if (bytes == 0) { mWidth = mHeight = 0; mMem = NULL; return; } mWidth = width; mHeight = height; mMem = new uint8_t[bytes]; mOriginX = 0; mOriginY = 0; if (val) { clear(*val); } } uint8_t* mMem; uint32_t mWidth; uint32_t mHeight; 
uint32_t mRowBytes; uint32_t mOriginX; uint32_t mOriginY; }; int32_t taxicabSine(int32_t i) { // 0 1 1 1 0 -1 -1 -1 return (int32_t)((0x01A9 >> ((i & 7) << 1)) & 3) - 1; } // Only looks at x and y components bool directionsXYOrderedCCW(const nvidia::NvVec3& d0, const nvidia::NvVec3& d1, const nvidia::NvVec3& d2) { const bool ccw02 = crossZ(d0, d2) > 0.0f; const bool ccw01 = crossZ(d0, d1) > 0.0f; const bool ccw21 = crossZ(d2, d1) > 0.0f; return ccw02 ? ccw01 && ccw21 : ccw01 || ccw21; } std::pair<float, float> compareTraceSegmentToLineSegment(const std::vector<POINT2D>& trace, int _start, int delta, float distThreshold, uint32_t width, uint32_t height, bool hasBorder) { if (delta < 2) { return std::make_pair(0.0f, 0.0f); } const uint32_t size = trace.size(); uint32_t start = (uint32_t)_start, end = (uint32_t)(_start + delta) % size; const bool startIsOnBorder = hasBorder && (trace[start].x == -1 || trace[start].x == (int)width || trace[start].y == -1 || trace[start].y == (int)height); const bool endIsOnBorder = hasBorder && (trace[end].x == -1 || trace[end].x == (int)width || trace[end].y == -1 || trace[end].y == (int)height); if (startIsOnBorder || endIsOnBorder) { if ((trace[start].x == -1 && trace[end].x == -1) || (trace[start].y == -1 && trace[end].y == -1) || (trace[start].x == (int)width && trace[end].x == (int)width) || (trace[start].y == (int)height && trace[end].y == (int)height)) { return std::make_pair(0.0f, 0.0f); } return std::make_pair(NV_MAX_F32, NV_MAX_F32); } nvidia::NvVec3 orig((float)trace[start].x, (float)trace[start].y, 0); nvidia::NvVec3 dest((float)trace[end].x, (float)trace[end].y, 0); nvidia::NvVec3 dir = dest - orig; dir.normalize(); float aveError = 0.0f; float aveError2 = 0.0f; for (;;) { if (++start >= size) { start = 0; } if (start == end) { break; } nvidia::NvVec3 testDisp((float)trace[start].x, (float)trace[start].y, 0); testDisp -= orig; aveError += (float)(nvidia::NvAbs(testDisp.x * dir.y - testDisp.y * dir.x) >= distThreshold); 
aveError2 += nvidia::NvAbs(testDisp.x * dir.y - testDisp.y * dir.x); } aveError /= delta - 1; aveError2 /= delta - 1; return std::make_pair(aveError, aveError2); } // Segment i starts at vi and ends at vi+ei // Tests for overlap in segments' projection onto xy plane // Returns distance between line segments. (Negative value indicates overlap.) float segmentsIntersectXY(const nvidia::NvVec3& v0, const nvidia::NvVec3& e0, const nvidia::NvVec3& v1, const nvidia::NvVec3& e1) { const nvidia::NvVec3 dv = v1 - v0; nvidia::NvVec3 d0 = e0; d0.normalize(); nvidia::NvVec3 d1 = e1; d1.normalize(); const float c10 = crossZ(dv, d0); const float d10 = crossZ(e1, d0); float a1 = nvidia::NvAbs(c10); float b1 = nvidia::NvAbs(c10 + d10); if (c10 * (c10 + d10) < 0.0f) { if (a1 < b1) { a1 = -a1; } else { b1 = -b1; } } const float c01 = crossZ(d1, dv); const float d01 = crossZ(e0, d1); float a2 = nvidia::NvAbs(c01); float b2 = nvidia::NvAbs(c01 + d01); if (c01 * (c01 + d01) < 0.0f) { if (a2 < b2) { a2 = -a2; } else { b2 = -b2; } } return nvidia::NvMax(nvidia::NvMin(a1, b1), nvidia::NvMin(a2, b2)); } // If point projects onto segment, returns true and proj is set to a // value in the range [0,1], indicating where along the segment (from v0 to v1) // the projection lies, and dist2 is set to the distance squared from point to // the line segment. Otherwise, returns false. // Note, if v1 = v0, then the function returns true with proj = 0. bool projectOntoSegmentXY(float& proj, float& dist2, const nvidia::NvVec3& point, const nvidia::NvVec3& v0, const nvidia::NvVec3& v1, float margin) { const nvidia::NvVec3 seg = v1 - v0; const nvidia::NvVec3 x = point - v0; const float seg2 = dotXY(seg, seg); const float d = dotXY(x, seg); if (d < 0.0f || d > seg2) { return false; } const float margin2 = margin * margin; const float p = seg2 > 0.0f ? 
d / seg2 : 0.0f; const float lineDist2 = d * p; if (lineDist2 < margin2) { return false; } const float pPrime = 1.0f - p; const float dPrime = seg2 - d; const float lineDistPrime2 = dPrime * pPrime; if (lineDistPrime2 < margin2) { return false; } proj = p; dist2 = dotXY(x, x) - lineDist2; return true; } bool isOnBorder(const nvidia::NvVec3& v, uint32_t width, uint32_t height) { return v.x < -0.5f || v.x >= width - 0.5f || v.y < -0.5f || v.y >= height - 0.5f; } static void createCutout(Nv::Blast::Cutout& cutout, const std::vector<POINT2D>& trace, float segmentationErrorThreshold, float snapThreshold, uint32_t width, uint32_t height, bool hasBorder) { cutout.vertices.clear(); cutout.smoothingGroups.clear(); std::vector<int> smoothingGroups; const uint32_t traceSize = trace.size(); if (traceSize == 0) { return; // Nothing to do } uint32_t size = traceSize; std::vector<int> vertexIndices; const float pixelCenterOffset = hasBorder ? 0.5f : 0.0f; // Find best segment uint32_t start = 0; uint32_t delta = 0; float err2 = 0.f; for (uint32_t iStart = 0; iStart < size; ++iStart) { uint32_t iDelta = (size >> 1) + (size & 1); for (; iDelta > 1; --iDelta) { auto fit = compareTraceSegmentToLineSegment(trace, (int32_t)iStart, (int32_t)iDelta, CUTOUT_DISTANCE_THRESHOLD, width, height, hasBorder); if (fit.first < segmentationErrorThreshold) { err2 = fit.second; break; } } if (iDelta > delta) { start = iStart; delta = iDelta; } } if (err2 < segmentationErrorThreshold) { smoothingGroups.push_back(cutout.vertices.size()); } cutout.vertices.push_back(nvidia::NvVec3((float)trace[start].x + pixelCenterOffset, (float)trace[start].y + pixelCenterOffset, 0)); // Now complete the loop while ((size -= delta) > 0) { start = (start + delta) % traceSize; cutout.vertices.push_back(nvidia::NvVec3((float)trace[start].x + pixelCenterOffset, (float)trace[start].y + pixelCenterOffset, 0)); if (size == 1) { delta = 1; break; } bool sg = true; for (delta = size - 1; delta > 1; --delta) { auto fit = 
compareTraceSegmentToLineSegment(trace, (int32_t)start, (int32_t)delta, CUTOUT_DISTANCE_THRESHOLD, width, height, hasBorder); if (fit.first < segmentationErrorThreshold) { if (fit.second > segmentationErrorThreshold) { sg = false; } break; } } if (sg) { smoothingGroups.push_back(cutout.vertices.size()); } } const float snapThresh2 = square(snapThreshold); // Use the snapThreshold to clean up while ((size = cutout.vertices.size()) >= 4) { bool reduced = false; for (uint32_t i = 0; i < size; ++i) { const uint32_t i1 = (i + 1) % size; const uint32_t i2 = (i + 2) % size; const uint32_t i3 = (i + 3) % size; nvidia::NvVec3& v0 = cutout.vertices[i]; nvidia::NvVec3& v1 = cutout.vertices[i1]; nvidia::NvVec3& v2 = cutout.vertices[i2]; nvidia::NvVec3& v3 = cutout.vertices[i3]; const nvidia::NvVec3 d0 = v1 - v0; const nvidia::NvVec3 d1 = v2 - v1; const nvidia::NvVec3 d2 = v3 - v2; const float den = crossZ(d0, d2); if (den != 0) { const float recipDen = 1.0f / den; const float s0 = crossZ(d1, d2) * recipDen; const float s2 = crossZ(d0, d1) * recipDen; if (s0 >= 0 || s2 >= 0) { if (d0.magnitudeSquared()*s0* s0 <= snapThresh2 && d2.magnitudeSquared()*s2* s2 <= snapThresh2) { v1 += d0 * s0; //uint32_t index = (uint32_t)(&v2 - cutout.vertices.begin()); int dist = std::distance(cutout.vertices.data(), &v2); cutout.vertices.erase(cutout.vertices.begin() + dist); for (auto& idx : smoothingGroups) { if (idx > dist) { idx--; } } reduced = true; break; } } } } if (!reduced) { break; } } for (size_t i = 0; i < smoothingGroups.size(); i++) { if (i > 0 && smoothingGroups[i] == smoothingGroups[i - 1]) { continue; } if (smoothingGroups[i] < static_cast<int>(cutout.vertices.size())) { cutout.smoothingGroups.push_back(cutout.vertices[smoothingGroups[i]]); } } } static void splitTJunctions(Nv::Blast::CutoutSetImpl& cutoutSet, float threshold) { // Set bounds reps std::vector<BoundsRep> bounds; std::vector<CutoutVert> cutoutMap; // maps bounds # -> ( cutout #, vertex # ). 
std::vector<IntPair> overlaps; const float distThreshold2 = threshold * threshold; // Split T-junctions uint32_t edgeCount = 0; for (uint32_t i = 0; i < cutoutSet.cutoutLoops.size(); ++i) { edgeCount += cutoutSet.cutoutLoops[i].vertices.size(); } bounds.resize(edgeCount); cutoutMap.resize(edgeCount); edgeCount = 0; for (uint32_t i = 0; i < cutoutSet.cutoutLoops.size(); ++i) { Nv::Blast::Cutout& cutout = cutoutSet.cutoutLoops[i]; const uint32_t cutoutSize = cutout.vertices.size(); for (uint32_t j = 0; j < cutoutSize; ++j) { bounds[edgeCount].aabb.include(cutout.vertices[j]); bounds[edgeCount].aabb.include(cutout.vertices[(j + 1) % cutoutSize]); NVBLAST_ASSERT(!bounds[edgeCount].aabb.isEmpty()); bounds[edgeCount].aabb.fattenFast(threshold); cutoutMap[edgeCount].set((int32_t)i, (int32_t)j); ++edgeCount; } } // Find bounds overlaps if (bounds.size() > 0) { boundsCalculateOverlaps(overlaps, Bounds3XY, &bounds[0], bounds.size(), sizeof(bounds[0])); } std::vector<NewVertex> newVertices; for (uint32_t overlapIndex = 0; overlapIndex < overlaps.size(); ++overlapIndex) { const IntPair& mapPair = overlaps[overlapIndex]; const CutoutVert& seg0Map = cutoutMap[(uint32_t)mapPair.i0]; const CutoutVert& seg1Map = cutoutMap[(uint32_t)mapPair.i1]; if (seg0Map.cutoutIndex == seg1Map.cutoutIndex) { // Only split based on vertex/segment junctions from different cutouts continue; } NewVertex newVertex; float dist2 = 0; const Nv::Blast::Cutout& cutout0 = cutoutSet.cutoutLoops[(uint32_t)seg0Map.cutoutIndex]; const uint32_t cutoutSize0 = cutout0.vertices.size(); const Nv::Blast::Cutout& cutout1 = cutoutSet.cutoutLoops[(uint32_t)seg1Map.cutoutIndex]; const uint32_t cutoutSize1 = cutout1.vertices.size(); if (projectOntoSegmentXY(newVertex.edgeProj, dist2, cutout0.vertices[(uint32_t)seg0Map.vertIndex], cutout1.vertices[(uint32_t)seg1Map.vertIndex], cutout1.vertices[(uint32_t)(seg1Map.vertIndex + 1) % cutoutSize1], 0.25f)) { if (dist2 <= distThreshold2) { newVertex.vertex = seg1Map; 
newVertices.push_back(newVertex); } } if (projectOntoSegmentXY(newVertex.edgeProj, dist2, cutout1.vertices[(uint32_t)seg1Map.vertIndex], cutout0.vertices[(uint32_t)seg0Map.vertIndex], cutout0.vertices[(uint32_t)(seg0Map.vertIndex + 1) % cutoutSize0], 0.25f)) { if (dist2 <= distThreshold2) { newVertex.vertex = seg0Map; newVertices.push_back(newVertex); } } } if (newVertices.size()) { // Sort new vertices qsort(newVertices.data(), newVertices.size(), sizeof(NewVertex), compareNewVertices); // Insert new vertices uint32_t lastCutoutIndex = 0xFFFFFFFF; uint32_t lastVertexIndex = 0xFFFFFFFF; float lastProj = 1.0f; for (uint32_t newVertexIndex = newVertices.size(); newVertexIndex--;) { const NewVertex& newVertex = newVertices[newVertexIndex]; if (newVertex.vertex.cutoutIndex != (int32_t)lastCutoutIndex) { lastCutoutIndex = (uint32_t)newVertex.vertex.cutoutIndex; lastVertexIndex = 0xFFFFFFFF; } if (newVertex.vertex.vertIndex != (int32_t)lastVertexIndex) { lastVertexIndex = (uint32_t)newVertex.vertex.vertIndex; lastProj = 1.0f; } Nv::Blast::Cutout& cutout = cutoutSet.cutoutLoops[(uint32_t)newVertex.vertex.cutoutIndex]; const float proj = lastProj > 0.0f ? 
newVertex.edgeProj / lastProj : 0.0f; const nvidia::NvVec3 pos = (1.0f - proj) * cutout.vertices[(uint32_t)newVertex.vertex.vertIndex] + proj * cutout.vertices[(uint32_t)(newVertex.vertex.vertIndex + 1) % cutout.vertices.size()]; cutout.vertices.push_back(nvidia::NvVec3()); for (uint32_t n = cutout.vertices.size(); --n > (uint32_t)newVertex.vertex.vertIndex + 1;) { cutout.vertices[n] = cutout.vertices[n - 1]; } cutout.vertices[(uint32_t)newVertex.vertex.vertIndex + 1] = pos; lastProj = newVertex.edgeProj; } } } static void mergeVertices(Nv::Blast::CutoutSetImpl& cutoutSet, float threshold, uint32_t width, uint32_t height) { // Set bounds reps uint32_t vertexCount = 0; for (uint32_t i = 0; i < cutoutSet.cutoutLoops.size(); ++i) { vertexCount += cutoutSet.cutoutLoops[i].vertices.size(); } std::vector<BoundsRep> bounds; std::vector<CutoutVert> cutoutMap; // maps bounds # -> ( cutout #, vertex # ). bounds.resize(vertexCount); cutoutMap.resize(vertexCount); vertexCount = 0; for (uint32_t i = 0; i < cutoutSet.cutoutLoops.size(); ++i) { Nv::Blast::Cutout& cutout = cutoutSet.cutoutLoops[i]; for (uint32_t j = 0; j < cutout.vertices.size(); ++j) { nvidia::NvVec3& vertex = cutout.vertices[j]; nvidia::NvVec3 min(vertex.x - threshold, vertex.y - threshold, 0.0f); nvidia::NvVec3 max(vertex.x + threshold, vertex.y + threshold, 0.0f); bounds[vertexCount].aabb = nvidia::NvBounds3(min, max); cutoutMap[vertexCount].set((int32_t)i, (int32_t)j); ++vertexCount; } } // Find bounds overlaps std::vector<IntPair> overlaps; if (bounds.size() > 0) { boundsCalculateOverlaps(overlaps, Bounds3XY, &bounds[0], bounds.size(), sizeof(bounds[0])); } uint32_t overlapCount = overlaps.size(); if (overlapCount == 0) { return; } // Sort by first index qsort(overlaps.data(), overlapCount, sizeof(IntPair), IntPair::compare); const float threshold2 = threshold * threshold; std::vector<IntPair> pairs; // Group by first index std::vector<uint32_t> lookup; createIndexStartLookup(lookup, 0, vertexCount, 
&overlaps.begin()->i0, overlapCount, sizeof(IntPair)); for (uint32_t i = 0; i < vertexCount; ++i) { const uint32_t start = lookup[i]; const uint32_t stop = lookup[i + 1]; if (start == stop) { continue; } const CutoutVert& cutoutVert0 = cutoutMap[(uint32_t)overlaps[start].i0]; const nvidia::NvVec3& vert0 = cutoutSet.cutoutLoops[(uint32_t)cutoutVert0.cutoutIndex].vertices[(uint32_t)cutoutVert0.vertIndex]; const bool isOnBorder0 = !cutoutSet.periodic && isOnBorder(vert0, width, height); for (uint32_t j = start; j < stop; ++j) { const CutoutVert& cutoutVert1 = cutoutMap[(uint32_t)overlaps[j].i1]; if (cutoutVert0.cutoutIndex == cutoutVert1.cutoutIndex) { // No pairs from the same cutout continue; } const nvidia::NvVec3& vert1 = cutoutSet.cutoutLoops[(uint32_t)cutoutVert1.cutoutIndex].vertices[(uint32_t)cutoutVert1.vertIndex]; const bool isOnBorder1 = !cutoutSet.periodic && isOnBorder(vert1, width, height); if (isOnBorder0 != isOnBorder1) { // No border/non-border pairs continue; } if ((vert0 - vert1).magnitudeSquared() > threshold2) { // Distance outside threshold continue; } // A keeper. 
Keep a symmetric list IntPair overlap = overlaps[j]; pairs.push_back(overlap); const int32_t i0 = overlap.i0; overlap.i0 = overlap.i1; overlap.i1 = i0; pairs.push_back(overlap); } } if (pairs.size() == 0) { return; } // Sort by first index qsort(pairs.data(), pairs.size(), sizeof(IntPair), IntPair::compare); // For every vertex, only keep closest neighbor from each cutout createIndexStartLookup(lookup, 0, vertexCount, &pairs.begin()->i0, pairs.size(), sizeof(IntPair)); for (uint32_t i = 0; i < vertexCount; ++i) { const uint32_t start = lookup[i]; const uint32_t stop = lookup[i + 1]; if (start == stop) { continue; } const CutoutVert& cutoutVert0 = cutoutMap[(uint32_t)pairs[start].i0]; const nvidia::NvVec3& vert0 = cutoutSet.cutoutLoops[(uint32_t)cutoutVert0.cutoutIndex].vertices[(uint32_t)cutoutVert0.vertIndex]; uint32_t groupStart = start; while (groupStart < stop) { uint32_t next = groupStart; const CutoutVert& cutoutVert1 = cutoutMap[(uint32_t)pairs[next].i1]; int32_t currentOtherCutoutIndex = cutoutVert1.cutoutIndex; const nvidia::NvVec3& vert1 = cutoutSet.cutoutLoops[(uint32_t)currentOtherCutoutIndex].vertices[(uint32_t)cutoutVert1.vertIndex]; uint32_t keep = groupStart; float minDist2 = (vert0 - vert1).magnitudeSquared(); while (++next < stop) { const CutoutVert& cutoutVertNext = cutoutMap[(uint32_t)pairs[next].i1]; if (currentOtherCutoutIndex != cutoutVertNext.cutoutIndex) { break; } const nvidia::NvVec3& vertNext = cutoutSet.cutoutLoops[(uint32_t)cutoutVertNext.cutoutIndex].vertices[(uint32_t)cutoutVertNext.vertIndex]; const float dist2 = (vert0 - vertNext).magnitudeSquared(); if (dist2 < minDist2) { pairs[keep].set(-1, -1); // Invalidate keep = next; minDist2 = dist2; } else { pairs[next].set(-1, -1); // Invalidate } } groupStart = next; } } // Eliminate invalid pairs (compactify) uint32_t pairCount = 0; for (uint32_t i = 0; i < pairs.size(); ++i) { if (pairs[i].i0 >= 0 && pairs[i].i1 >= 0) { pairs[pairCount++] = pairs[i]; } } pairs.resize(pairCount); // 
Snap points together std::vector<bool> pinned(vertexCount, false); for (uint32_t i = 0; i < pairCount; ++i) { const uint32_t i0 = (uint32_t)pairs[i].i0; if (pinned[i0]) { continue; } const CutoutVert& cutoutVert0 = cutoutMap[i0]; nvidia::NvVec3& vert0 = cutoutSet.cutoutLoops[(uint32_t)cutoutVert0.cutoutIndex].vertices[(uint32_t)cutoutVert0.vertIndex]; const uint32_t i1 = (uint32_t)pairs[i].i1; const CutoutVert& cutoutVert1 = cutoutMap[i1]; nvidia::NvVec3& vert1 = cutoutSet.cutoutLoops[(uint32_t)cutoutVert1.cutoutIndex].vertices[(uint32_t)cutoutVert1.vertIndex]; const nvidia::NvVec3 disp = vert1 - vert0; // Move and pin pinned[i0] = true; if (pinned[i1]) { vert0 = vert1; } else { vert0 += 0.5f * disp; vert1 = vert0; pinned[i1] = true; } } } static void eliminateStraightAngles(Nv::Blast::CutoutSetImpl& cutoutSet) { // Eliminate straight angles for (uint32_t i = 0; i < cutoutSet.cutoutLoops.size(); ++i) { Nv::Blast::Cutout& cutout = cutoutSet.cutoutLoops[i]; uint32_t oldSize; do { oldSize = cutout.vertices.size(); for (uint32_t j = 0; j < cutout.vertices.size();) { // if( isOnBorder( cutout.vertices[j], width, height ) ) // { // Don't eliminate border vertices // ++j; // continue; // } if (perpendicularDistanceSquared(cutout.vertices, j) < CUTOUT_DISTANCE_EPS * CUTOUT_DISTANCE_EPS) { cutout.vertices.erase(cutout.vertices.begin() + j); } else { ++j; } } } while (cutout.vertices.size() != oldSize); } } static void removeTheSamePoints(Nv::Blast::CutoutSetImpl& cutoutSet) { for (uint32_t i = 0; i < cutoutSet.cutoutLoops.size(); ++i) { Nv::Blast::Cutout& cutout = cutoutSet.cutoutLoops[i]; uint32_t oldSize; do { oldSize = cutout.vertices.size(); for (uint32_t j = 0; j < cutout.vertices.size();) { if ((cutout.vertices[(j + cutout.vertices.size() - 1) % cutout.vertices.size()] - cutout.vertices[j]).magnitudeSquared() < CUTOUT_DISTANCE_EPS * CUTOUT_DISTANCE_EPS) { cutout.vertices.erase(cutout.vertices.begin() + j); } else { ++j; } } } while (cutout.vertices.size() != oldSize); 
} } static void simplifyCutoutSetImpl(Nv::Blast::CutoutSetImpl& cutoutSet, float threshold, uint32_t width, uint32_t height) { splitTJunctions(cutoutSet, 1.0f); mergeVertices(cutoutSet, threshold, width, height); eliminateStraightAngles(cutoutSet); splitTJunctions(cutoutSet, 1.0f); removeTheSamePoints(cutoutSet); } //static void cleanCutout(Nv::Blast::Cutout& cutout, uint32_t loopIndex, float tolerance) //{ // Nv::Blast::ConvexLoop& loop = cutout.convexLoops[loopIndex]; // const float tolerance2 = tolerance * tolerance; // uint32_t oldSize; // do // { // oldSize = loop.polyVerts.size(); // uint32_t size = oldSize; // for (uint32_t i = 0; i < size; ++i) // { // Nv::Blast::PolyVert& v0 = loop.polyVerts[(i + size - 1) % size]; // Nv::Blast::PolyVert& v1 = loop.polyVerts[i]; // Nv::Blast::PolyVert& v2 = loop.polyVerts[(i + 1) % size]; // if (perpendicularDistanceSquared(cutout.vertices[v0.index], cutout.vertices[v1.index], cutout.vertices[v2.index]) <= tolerance2) // { // loop.polyVerts.erase(loop.polyVerts.begin() + i); // --size; // --i; // } // } // } // while (loop.polyVerts.size() != oldSize); //} //static bool decomposeCutoutIntoConvexLoops(Nv::Blast::Cutout& cutout, float cleanupTolerance = 0.0f) //{ // const uint32_t size = cutout.vertices.size(); // // if (size < 3) // { // return false; // } // // // Initialize to one loop, which may not be convex // cutout.convexLoops.resize(1); // cutout.convexLoops[0].polyVerts.resize(size); // // // See if the winding is ccw: // // // Scale to normalized size to avoid overflows // nvidia::NvBounds3 bounds; // bounds.setEmpty(); // for (uint32_t i = 0; i < size; ++i) // { // bounds.include(cutout.vertices[i]); // } // nvidia::NvVec3 center = bounds.getCenter(); // nvidia::NvVec3 extent = bounds.getExtents(); // if (extent[0] < NV_EPS_F32 || extent[1] < NV_EPS_F32) // { // return false; // } // const nvidia::NvVec3 scale(1.0f / extent[0], 1.0f / extent[1], 0.0f); // // // Find "area" (it will only be correct in sign!) 
// nvidia::NvVec3 prevV = (cutout.vertices[size - 1] - center).multiply(scale); // float area = 0.0f; // for (uint32_t i = 0; i < size; ++i) // { // const nvidia::NvVec3 v = (cutout.vertices[i] - center).multiply(scale); // area += crossZ(prevV, v); // prevV = v; // } // // if (nvidia::NvAbs(area) < NV_EPS_F32 * NV_EPS_F32) // { // return false; // } // // const bool ccw = area > 0.0f; // // for (uint32_t i = 0; i < size; ++i) // { // Nv::Blast::PolyVert& vert = cutout.convexLoops[0].polyVerts[i]; // vert.index = (uint16_t)(ccw ? i : size - i - 1); // vert.flags = 0; // } // // const float cleanupTolerance2 = square(cleanupTolerance); // // // Find reflex vertices // for (uint32_t i = 0; i < cutout.convexLoops.size();) // { // Nv::Blast::ConvexLoop& loop = cutout.convexLoops[i]; // const uint32_t loopSize = loop.polyVerts.size(); // if (loopSize <= 3) // { // ++i; // continue; // } // uint32_t j = 0; // for (; j < loopSize; ++j) // { // const nvidia::NvVec3& v0 = cutout.vertices[loop.polyVerts[(j + loopSize - 1) % loopSize].index]; // const nvidia::NvVec3& v1 = cutout.vertices[loop.polyVerts[j].index]; // const nvidia::NvVec3& v2 = cutout.vertices[loop.polyVerts[(j + 1) % loopSize].index]; // const nvidia::NvVec3 e0 = v1 - v0; // if (crossZ(e0, v2 - v1) < 0.0f) // { // // reflex // break; // } // } // if (j < loopSize) // { // // Find a vertex // float minLen2 = NV_MAX_F32; // float maxMinDist = -NV_MAX_F32; // uint32_t kToUse = 0; // uint32_t mToUse = 2; // bool cleanSliceFound = false; // A transversal is parallel with an edge // for (uint32_t k = 0; k < loopSize; ++k) // { // const nvidia::NvVec3& vkPrev = cutout.vertices[loop.polyVerts[(k + loopSize - 1) % loopSize].index]; // const nvidia::NvVec3& vk = cutout.vertices[loop.polyVerts[k].index]; // const nvidia::NvVec3& vkNext = cutout.vertices[loop.polyVerts[(k + 1) % loopSize].index]; // const uint32_t mStop = k ? 
loopSize : loopSize - 1; // for (uint32_t m = k + 2; m < mStop; ++m) // { // const nvidia::NvVec3& vmPrev = cutout.vertices[loop.polyVerts[(m + loopSize - 1) % loopSize].index]; // const nvidia::NvVec3& vm = cutout.vertices[loop.polyVerts[m].index]; // const nvidia::NvVec3& vmNext = cutout.vertices[loop.polyVerts[(m + 1) % loopSize].index]; // const nvidia::NvVec3 newEdge = vm - vk; // if (!directionsXYOrderedCCW(vk - vkPrev, newEdge, vkNext - vk) || // !directionsXYOrderedCCW(vm - vmPrev, -newEdge, vmNext - vm)) // { // continue; // } // const float len2 = newEdge.magnitudeSquared(); // float minDist = NV_MAX_F32; // for (uint32_t l = 0; l < loopSize; ++l) // { // const uint32_t l1 = (l + 1) % loopSize; // if (l == k || l1 == k || l == m || l1 == m) // { // continue; // } // const nvidia::NvVec3& vl = cutout.vertices[loop.polyVerts[l].index]; // const nvidia::NvVec3& vl1 = cutout.vertices[loop.polyVerts[l1].index]; // const float dist = segmentsIntersectXY(vl, vl1 - vl, vk, newEdge); // if (dist < minDist) // { // minDist = dist; // } // } // if (minDist <= 0.0f) // { // if (minDist > maxMinDist) // { // maxMinDist = minDist; // kToUse = k; // mToUse = m; // } // } // else // { // if (perpendicularDistanceSquared(vkPrev, vk, vm) <= cleanupTolerance2 || // perpendicularDistanceSquared(vk, vm, vmNext) <= cleanupTolerance2) // { // if (!cleanSliceFound) // { // minLen2 = len2; // kToUse = k; // mToUse = m; // } // else // { // if (len2 < minLen2) // { // minLen2 = len2; // kToUse = k; // mToUse = m; // } // } // cleanSliceFound = true; // } // else if (!cleanSliceFound && len2 < minLen2) // { // minLen2 = len2; // kToUse = k; // mToUse = m; // } // } // } // } // cutout.convexLoops.push_back(Nv::Blast::ConvexLoop()); // Nv::Blast::ConvexLoop& newLoop = cutout.convexLoops.back(); // Nv::Blast::ConvexLoop& oldLoop = cutout.convexLoops[i]; // newLoop.polyVerts.resize(mToUse - kToUse + 1); // for (uint32_t n = 0; n <= mToUse - kToUse; ++n) // { // newLoop.polyVerts[n] = 
oldLoop.polyVerts[kToUse + n]; // } // newLoop.polyVerts[mToUse - kToUse].flags = 1; // Mark this vertex (and edge that follows) as a split edge // oldLoop.polyVerts[kToUse].flags = 1; // Mark this vertex (and edge that follows) as a split edge // oldLoop.polyVerts.erase(oldLoop.polyVerts.begin() + kToUse + 1, oldLoop.polyVerts.begin() + (mToUse - (kToUse + 1))); // if (cleanupTolerance > 0.0f) // { // cleanCutout(cutout, i, cleanupTolerance); // cleanCutout(cutout, cutout.convexLoops.size() - 1, cleanupTolerance); // } // } // else // { // if (cleanupTolerance > 0.0f) // { // cleanCutout(cutout, i, cleanupTolerance); // } // ++i; // } // } // // return true; //} static void traceRegion(std::vector<POINT2D>& trace, Map2d<uint32_t>& regions, Map2d<uint8_t>& pathCounts, uint32_t regionIndex, const POINT2D& startPoint) { POINT2D t = startPoint; trace.clear(); trace.push_back(t); ++pathCounts(t.x, t.y); // Increment path count // Find initial path direction int32_t dirN; uint32_t previousRegion = 0xFFFFFFFF; for (dirN = 0; dirN < 8; ++dirN) //TODO Should we start from dirN = 0? 
{ const POINT2D t1 = POINT2D(t.x + taxicabSine(dirN + 2), t.y + taxicabSine(dirN)); if (regions(t1.x, t1.y) != regionIndex && previousRegion == regionIndex) { break; } previousRegion = regions(t1.x, t1.y); } bool done = false; do { for (int32_t i = 1; i < 8; ++i) // Skip direction we just came from { --dirN; const POINT2D t1 = POINT2D(t.x + taxicabSine(dirN + 2), t.y + taxicabSine(dirN)); if (regions(t1.x, t1.y) != regionIndex) { if (t1.x == trace[0].x && t1.y == trace[0].y) { done = true; break; } trace.push_back(t1); t = t1; ++pathCounts(t.x, t.y); // Increment path count dirN += 4; break; } } } while (!done && dirN >= 0); //NvBlast GWD-399: Try to fix bad corners int32_t sz = (int32_t)trace.size(); if (sz > 4) { struct CornerPixel { int32_t id; POINT2D p; CornerPixel(int32_t id, int32_t x, int32_t y) : id(id), p(x, y) { } }; std::vector <CornerPixel> cp; int32_t xb = 0, yb = 0; //bit buffer stores 1 if value do not changed from preview point and 0 otherwise (5 bits is used) for (int32_t i = -4; i < sz; i++) //fill buffer with 4 elements from the end of trace { //idx, idx - 1, idx - 2, idx - 3 values with correct indexing to trace int32_t idx = (sz + i) % sz, idx_ = (sz + i - 1) % sz, idx__ = (sz + i - 2) % sz, idx___ = (sz + i - 3) % sz; //update buffer xb <<= 1; yb <<= 1; xb += (trace[idx].x - trace[idx_].x) == 0; yb += (trace[idx].y - trace[idx_].y) == 0; //filter buffer for 11100-00111 or 00111-11100 corner patterns if (i >= 0 && ((xb & 0x1F) ^ (yb & 0x1F)) == 0x1B) { if ((xb & 3) == 3) { if (((yb >> 3) & 3) == 3) { cp.push_back(CornerPixel(idx__, trace[idx].x, trace[idx___].y)); } } else if ((yb & 3) == 3) { if (((xb >> 3) & 3) == 3) { cp.push_back(CornerPixel(idx__, trace[idx___].x, trace[idx].y)); } } } } std::sort(cp.begin(), cp.end(), [](const CornerPixel& cp1, const CornerPixel& cp2) -> bool { return cp1.id > cp2.id; }); for (auto it = cp.begin(); it != cp.end(); it++) { trace.insert(trace.begin() + it->id, it->p); ++pathCounts(it->p.x, it->p.y); } } } 
void Nv::Blast::createCutoutSet(Nv::Blast::CutoutSetImpl& cutoutSet, const uint8_t* pixelBuffer, uint32_t bufferWidth, uint32_t bufferHeight, float segmentationErrorThreshold, float snapThreshold, bool periodic, bool expandGaps) { cutoutSet.cutouts.clear(); cutoutSet.cutoutLoops.clear(); cutoutSet.periodic = periodic; cutoutSet.dimensions = nvidia::NvVec2((float)bufferWidth, (float)bufferHeight); if (!periodic) { cutoutSet.dimensions[0] += 1.0f; cutoutSet.dimensions[1] += 1.0f; } if (pixelBuffer == NULL || bufferWidth == 0 || bufferHeight == 0) { return; } const int borderPad = periodic ? 0 : 2; // Padded for borders if not periodic const int originCoord = periodic ? 0 : 1; BitMap map(bufferWidth + borderPad, bufferHeight + borderPad, 0); map.setOrigin((uint32_t)originCoord, (uint32_t)originCoord); bool hasBorder = false; for (uint32_t y = 0; y < bufferHeight; ++y) { for (uint32_t x = 0; x < bufferWidth; ++x) { const uint32_t pix = 5033165 * (uint32_t)pixelBuffer[0] + 9898557 * (uint32_t)pixelBuffer[1] + 1845494 * (uint32_t)pixelBuffer[2]; pixelBuffer += 3; if ((pix >> 28) != 0) { map.set((int32_t)x, (int32_t)y); hasBorder = true; } } } // Add borders if not tiling if (!periodic) { for (int32_t x = -1; x <= (int32_t)bufferWidth; ++x) { map.set(x, -1); map.set(x, (int32_t)bufferHeight); } for (int32_t y = -1; y <= (int32_t)bufferHeight; ++y) { map.set(-1, y); map.set((int32_t)bufferWidth, y); } } // Now search for regions // Create a region map Map2d<uint32_t> regions(bufferWidth + borderPad, bufferHeight + borderPad, 0xFFFFFFFF); // Initially an invalid value regions.setOrigin((uint32_t)originCoord, (uint32_t)originCoord); // Create a path counting map Map2d<uint8_t> pathCounts(bufferWidth + borderPad, bufferHeight + borderPad, 0); pathCounts.setOrigin((uint32_t)originCoord, (uint32_t)originCoord); // Bump path counts on borders if (!periodic) { for (int32_t x = -1; x <= (int32_t)bufferWidth; ++x) { pathCounts(x, -1) = 1; pathCounts(x, (int32_t)bufferHeight) = 1; } 
for (int32_t y = -1; y <= (int32_t)bufferHeight; ++y) { pathCounts(-1, y) = 1; pathCounts((int32_t)bufferWidth, y) = 1; } } std::vector<POINT2D> stack; std::vector<uint32_t> newCutout; std::vector<POINT2D> traceStarts; std::vector<std::vector<POINT2D>* > traces; std::set<uint64_t> regionBoundary; // Initial fill of region maps and path maps for (int32_t y = 0; y < (int32_t)bufferHeight; ++y) { for (int32_t x = 0; x < (int32_t)bufferWidth; ++x) { if (map.read(x - 1, y) && !map.read(x, y)) { // Found an empty spot next to a filled spot POINT2D t(x - 1, y); const uint32_t regionIndex = traceStarts.size(); newCutout.push_back(traces.size()); traceStarts.push_back(t); // Save off initial point traces.push_back(new std::vector<POINT2D>()); NVBLAST_ASSERT(traces.size() == traceStarts.size()); // This must be the same size as traceStarts //traces.back() = (std::vector<POINT2D>*)NVBLAST_ALLOC(sizeof(std::vector<POINT2D>), NV_DEBUG_EXP("CutoutPoint2DSet")); //new(traces.back()) std::vector<POINT2D>; // Flood fill region map std::set<uint64_t> visited; stack.push_back(POINT2D(x, y)); #define COMPRESS(x, y) (((uint64_t)(x) << 32) + (y)) visited.insert(COMPRESS(x, y)); do { const POINT2D s = stack.back(); stack.pop_back(); map.set(s.x, s.y); regions(s.x, s.y) = regionIndex; POINT2D n; for (int32_t i = 0; i < 4; ++i) { const int32_t i0 = i & 1; const int32_t i1 = (i >> 1) & 1; n.x = s.x + i0 - i1; n.y = s.y + i0 + i1 - 1; if (visited.find(COMPRESS(n.x, n.y)) == visited.end()) { if (!map.read(n.x, n.y)) { stack.push_back(n); visited.insert(COMPRESS(n.x, n.y)); } else { regionBoundary.insert(COMPRESS(n.x, n.y)); } } } } while (stack.size()); // Trace region NVBLAST_ASSERT(map.read(t.x, t.y)); std::vector<POINT2D>* trace = traces.back(); traceRegion(*trace, regions, pathCounts, regionIndex, t); //Find innner traces while(true) { for (auto& point : *trace) { regionBoundary.erase(COMPRESS(point.x, point.y)); } if (trace->size() < 4) { trace->~vector<POINT2D>(); delete trace; 
traces.pop_back(); traceStarts.pop_back(); } if (!regionBoundary.empty()) { auto it = regionBoundary.begin(); t.x = *it >> 32; t.y = *it & 0xFFFFFFFF; traces.push_back(new std::vector<POINT2D>()); traceStarts.push_back(t); trace = traces.back(); traceRegion(*trace, regions, pathCounts, regionIndex, t); continue; } break; } #undef COMPRESS } } } uint32_t cutoutCount = traces.size(); //find internal traces // Now expand regions until the paths completely overlap if (expandGaps) { bool somePathChanged; int sanityCounter = 1000; bool abort = false; do { somePathChanged = false; for (uint32_t i = 0; i < cutoutCount; ++i) { if (traces[i] == nullptr) { continue; } uint32_t regionIndex = 0; for (uint32_t c : newCutout) { if (i >= c) { regionIndex = c; } else { break; } } bool pathChanged = false; std::vector<POINT2D>& trace = *traces[i]; for (size_t j = 0; j < trace.size(); ++j) { const POINT2D& t = trace[j]; if (pathCounts(t.x, t.y) == 1) { if (regions(t.x, t.y) == 0xFFFFFFFF) { regions(t.x, t.y) = regionIndex; pathChanged = true; } else { trace.erase(trace.begin() + j--); } } } if (pathChanged) { // Recalculate cutout // Decrement pathCounts for (uint32_t j = 0; j < trace.size(); ++j) { const POINT2D& t = trace[j]; --pathCounts(t.x, t.y); } // Erase trace // Calculate new start point POINT2D& t = traceStarts[i]; POINT2D t1 = t; abort = true; for (int32_t dirN = 0; dirN < 8; ++dirN) { t1 = POINT2D(t.x + taxicabSine(dirN + 2), t.y + taxicabSine(dirN)); if (regions(t1.x, t1.y) != regionIndex) { t = t1; abort = false; break; } } if (abort) { break; } traceRegion(trace, regions, pathCounts, regionIndex, t); somePathChanged = true; } } if (--sanityCounter <= 0) { abort = true; break; } } while (somePathChanged); if (abort) { for (uint32_t i = 0; i < cutoutCount; ++i) { traces[i]->~vector<POINT2D>(); delete traces[i]; } cutoutCount = 0; } } // Create cutouts cutoutSet.cutouts = newCutout; cutoutSet.cutouts.push_back(cutoutCount); cutoutSet.cutoutLoops.resize(cutoutCount); for 
(uint32_t i = 0; i < cutoutCount; ++i) { createCutout(cutoutSet.cutoutLoops[i], *traces[i], segmentationErrorThreshold, snapThreshold, bufferWidth, bufferHeight, !cutoutSet.periodic); } if (expandGaps) { simplifyCutoutSetImpl(cutoutSet, snapThreshold, bufferWidth, bufferHeight); } // Release traces for (uint32_t i = 0; i < cutoutCount; ++i) { if (traces[i] != nullptr) { traces[i]->~vector<POINT2D>(); delete traces[i]; } } // Decompose each cutout in the set into convex loops //uint32_t cutoutSetSize = 0; //for (uint32_t i = 0; i < cutoutSet.cutoutLoops.size(); ++i) //{ // bool success = decomposeCutoutIntoConvexLoops(cutoutSet.cutoutLoops[i]); // if (success) // { // if (cutoutSetSize != i) // { // cutoutSet.cutouts[cutoutSetSize] = cutoutSet.cutoutLoops[i]; // } // ++cutoutSetSize; // } //} //cutoutSet.cutoutLoops.resize(cutoutSetSize); //Check if single cutout spread to the whole area for non periodic (no need to cutout then) if (!periodic && cutoutSet.cutoutLoops.size() == 1 && (expandGaps || !hasBorder)) { cutoutSet.cutoutLoops.clear(); } } class Matrix22 { public: //! Default constructor Matrix22() {} //! Construct from two base vectors Matrix22(const nvidia::NvVec2& col0, const nvidia::NvVec2& col1) : column0(col0), column1(col1) {} //! Construct from float[4] explicit Matrix22(float values[]): column0(values[0],values[1]), column1(values[2],values[3]) { } //! Copy constructor Matrix22(const Matrix22& other) : column0(other.column0), column1(other.column1) {} //! Assignment operator Matrix22& operator=(const Matrix22& other) { column0 = other.column0; column1 = other.column1; return *this; } //! Set to identity matrix static Matrix22 createIdentity() { return Matrix22(nvidia::NvVec2(1,0), nvidia::NvVec2(0,1)); } //! Set to zero matrix static Matrix22 createZero() { return Matrix22(nvidia::NvVec2(0.0f), nvidia::NvVec2(0.0f)); } //! Construct from diagonal, off-diagonals are zero. 
static Matrix22 createDiagonal(const nvidia::NvVec2& d) { return Matrix22(nvidia::NvVec2(d.x,0.0f), nvidia::NvVec2(0.0f,d.y)); } //! Get transposed matrix Matrix22 getTranspose() const { const nvidia::NvVec2 v0(column0.x, column1.x); const nvidia::NvVec2 v1(column0.y, column1.y); return Matrix22(v0,v1); } //! Get the real inverse Matrix22 getInverse() const { const float det = getDeterminant(); Matrix22 inverse; if(det != 0) { const float invDet = 1.0f/det; inverse.column0[0] = invDet * column1[1]; inverse.column0[1] = invDet * (-column0[1]); inverse.column1[0] = invDet * (-column1[0]); inverse.column1[1] = invDet * column0[0]; return inverse; } else { return createIdentity(); } } //! Get determinant float getDeterminant() const { return column0[0] * column1[1] - column0[1] * column1[0]; } //! Unary minus Matrix22 operator-() const { return Matrix22(-column0, -column1); } //! Add Matrix22 operator+(const Matrix22& other) const { return Matrix22( column0+other.column0, column1+other.column1); } //! Subtract Matrix22 operator-(const Matrix22& other) const { return Matrix22( column0-other.column0, column1-other.column1); } //! Scalar multiplication Matrix22 operator*(float scalar) const { return Matrix22(column0*scalar, column1*scalar); } //! Matrix vector multiplication (returns 'this->transform(vec)') nvidia::NvVec2 operator*(const nvidia::NvVec2& vec) const { return transform(vec); } //! Matrix multiplication Matrix22 operator*(const Matrix22& other) const { //Rows from this <dot> columns from other //column0 = transform(other.column0) etc return Matrix22(transform(other.column0), transform(other.column1)); } // a <op>= b operators //! Equals-add Matrix22& operator+=(const Matrix22& other) { column0 += other.column0; column1 += other.column1; return *this; } //! Equals-sub Matrix22& operator-=(const Matrix22& other) { column0 -= other.column0; column1 -= other.column1; return *this; } //! 
Equals scalar multiplication Matrix22& operator*=(float scalar) { column0 *= scalar; column1 *= scalar; return *this; } //! Element access, mathematical way! float operator()(unsigned int row, unsigned int col) const { return (*this)[col][(int)row]; } //! Element access, mathematical way! float& operator()(unsigned int row, unsigned int col) { return (*this)[col][(int)row]; } // Transform etc //! Transform vector by matrix, equal to v' = M*v nvidia::NvVec2 transform(const nvidia::NvVec2& other) const { return column0*other.x + column1*other.y; } nvidia::NvVec2& operator[](unsigned int num) {return (&column0)[num];} const nvidia::NvVec2& operator[](unsigned int num) const {return (&column0)[num];} //Data, see above for format! nvidia::NvVec2 column0, column1; //the two base vectors }; bool calculateUVMapping(const Nv::Blast::Triangle& triangle, nvidia::NvMat33& theResultMapping) { nvidia::NvMat33 rMat; nvidia::NvMat33 uvMat; for (unsigned col = 0; col < 3; ++col) { auto v = (&triangle.a)[col]; rMat[col] = toNvShared(v.p); uvMat[col] = nvidia::NvVec3(v.uv[0].x, v.uv[0].y, 1.0f); } if (uvMat.getDeterminant() == 0.0f) { return false; } theResultMapping = rMat*uvMat.getInverse(); return true; } //static bool calculateUVMapping(ExplicitHierarchicalMesh& theHMesh, const nvidia::NvVec3& theDir, nvidia::NvMat33& theResultMapping) //{ // nvidia::NvVec3 cutoutDir( theDir ); // cutoutDir.normalize( ); // // const float cosineThreshold = nvidia::NvCos(3.141593f / 180); // 1 degree // // ExplicitRenderTriangle* triangleToUse = NULL; // float greatestCosine = -NV_MAX_F32; // float greatestArea = 0.0f; // for normals within the threshold // for ( uint32_t partIndex = 0; partIndex < theHMesh.partCount(); ++partIndex ) // { // ExplicitRenderTriangle* theTriangles = theHMesh.meshTriangles( partIndex ); // uint32_t triangleCount = theHMesh.meshTriangleCount( partIndex ); // for ( uint32_t tIndex = 0; tIndex < triangleCount; ++tIndex ) // { // ExplicitRenderTriangle& theTriangle = 
theTriangles[tIndex]; // nvidia::NvVec3 theEdge1 = theTriangle.vertices[1].position - theTriangle.vertices[0].position; // nvidia::NvVec3 theEdge2 = theTriangle.vertices[2].position - theTriangle.vertices[0].position; // nvidia::NvVec3 theNormal = theEdge1.cross( theEdge2 ); // float theArea = theNormal.normalize(); // twice the area, but that's ok // // if (theArea == 0.0f) // { // continue; // } // // const float cosine = cutoutDir.dot(theNormal); // // if (cosine < cosineThreshold) // { // if (cosine > greatestCosine && greatestArea == 0.0f) // { // greatestCosine = cosine; // triangleToUse = &theTriangle; // } // } // else // { // if (theArea > greatestArea) // { // greatestArea = theArea; // triangleToUse = &theTriangle; // } // } // } // } // // if (triangleToUse == NULL) // { // return false; // } // // return calculateUVMapping(*triangleToUse, theResultMapping); //} //bool calculateCutoutUVMapping(ExplicitHierarchicalMesh& hMesh, const nvidia::NvVec3& targetDirection, nvidia::NvMat33& theMapping) //{ // return ::calculateUVMapping(hMesh, targetDirection, theMapping); //} //bool calculateCutoutUVMapping(const Nv::Blast::Triangle& targetDirection, nvidia::NvMat33& theMapping) //{ // return ::calculateUVMapping(targetDirection, theMapping); //} const NvcVec3& CutoutSetImpl::getCutoutVertex(uint32_t cutoutIndex, uint32_t loopIndex, uint32_t vertexIndex) const { return fromNvShared(cutoutLoops[cutouts[cutoutIndex] + loopIndex].vertices[vertexIndex]); } const NvcVec2& CutoutSetImpl::getDimensions() const { return fromNvShared(dimensions); }
80,008
C++
30.787445
190
0.519748
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/NvBlastExtAuthoringCutoutImpl.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. 
#ifndef NVBLASTAUTHORINGFCUTOUTIMPL_H #define NVBLASTAUTHORINGFCUTOUTIMPL_H #include "NvBlastExtAuthoringCutout.h" #include <vector> #include "NvVec2.h" #include "NvVec3.h" #include "NvMat44.h" namespace Nv { namespace Blast { struct PolyVert { uint16_t index; uint16_t flags; }; struct ConvexLoop { std::vector<PolyVert> polyVerts; }; struct Cutout { std::vector<nvidia::NvVec3> vertices; //std::vector<ConvexLoop> convexLoops; std::vector<nvidia::NvVec3> smoothingGroups; }; struct POINT2D { POINT2D() {} POINT2D(int32_t _x, int32_t _y) : x(_x), y(_y) {} int32_t x; int32_t y; bool operator==(const POINT2D& other) const { return x == other.x && y == other.y; } bool operator<(const POINT2D& other) const { if (x == other.x) return y < other.y; return x < other.x; } }; struct CutoutSetImpl : public CutoutSet { CutoutSetImpl() : periodic(false), dimensions(0.0f) { } uint32_t getCutoutCount() const { return (uint32_t)cutouts.size() - 1; } uint32_t getCutoutVertexCount(uint32_t cutoutIndex, uint32_t loopIndex) const { return (uint32_t)cutoutLoops[cutouts[cutoutIndex] + loopIndex].vertices.size(); } uint32_t getCutoutLoopCount(uint32_t cutoutIndex) const { return (uint32_t)cutouts[cutoutIndex + 1] - cutouts[cutoutIndex]; } const NvcVec3& getCutoutVertex(uint32_t cutoutIndex, uint32_t loopIndex, uint32_t vertexIndex) const; bool isCutoutVertexToggleSmoothingGroup(uint32_t cutoutIndex, uint32_t loopIndex, uint32_t vertexIndex) const { auto& vRef = cutoutLoops[cutouts[cutoutIndex] + loopIndex].vertices[vertexIndex]; for (auto& v : cutoutLoops[cutouts[cutoutIndex] + loopIndex].smoothingGroups) { if ((vRef - v).magnitudeSquared() < 1e-5) { return true; } } return false; } bool isPeriodic() const { return periodic; } const NvcVec2& getDimensions() const; //void serialize(nvidia::NvFileBuf& stream) const; //void deserialize(nvidia::NvFileBuf& stream); void release() { delete this; } std::vector<Cutout> cutoutLoops; std::vector<uint32_t> cutouts; bool periodic; nvidia::NvVec2 
dimensions; }; void createCutoutSet(Nv::Blast::CutoutSetImpl& cutoutSet, const uint8_t* pixelBuffer, uint32_t bufferWidth, uint32_t bufferHeight, float segmentationErrorThreshold, float snapThreshold, bool periodic, bool expandGaps); } // namespace Blast } // namespace Nv #endif // ifndef NVBLASTAUTHORINGFCUTOUTIMPL_H
4,391
C
29.929577
130
0.670462
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/NvBlastExtAuthoringCollisionBuilderImpl.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. 
#include <NvBlastGlobals.h> #include "NvBlastExtAuthoringCollisionBuilderImpl.h" #include <NvBlastExtApexSharedParts.h> #include <NvBlastExtAuthoringInternalCommon.h> #include <NvBlastExtAuthoringBooleanToolImpl.h> #include <NvBlastExtAuthoringMeshImpl.h> #include <NvBlastExtAuthoringMeshUtils.h> #include <NvBlastNvSharedHelpers.h> #include <VHACD.h> #include <vector> using namespace nvidia; namespace Nv { namespace Blast { #define SAFE_ARRAY_NEW(T, x) ((x) > 0) ? reinterpret_cast<T*>(NVBLAST_ALLOC(sizeof(T) * (x))) : nullptr; #define SAFE_ARRAY_DELETE(x) if (x != nullptr) {NVBLAST_FREE(x); x = nullptr;} void trimCollisionGeometry(ConvexMeshBuilder& cmb, uint32_t chunksCount, CollisionHull** in, const uint32_t* chunkDepth) { std::vector<std::vector<NvPlane> > chunkMidplanes(chunksCount); std::vector<NvVec3> centers(chunksCount); std::vector<NvBounds3> hullsBounds(chunksCount); for (uint32_t i = 0; i < chunksCount; ++i) { hullsBounds[i].setEmpty(); centers[i] = NvVec3(0, 0, 0); for (uint32_t p = 0; p < in[i]->pointsCount; ++p) { centers[i] += toNvShared(in[i]->points[p]); hullsBounds[i].include(toNvShared(in[i]->points[p])); } centers[i] = hullsBounds[i].getCenter(); } Separation params; for (uint32_t hull = 0; hull < chunksCount; ++hull) { for (uint32_t hull2 = hull + 1; hull2 < chunksCount; ++hull2) { if (chunkDepth[hull] != chunkDepth[hull2]) { continue; } if (importerHullsInProximityApexFree(in[hull]->pointsCount, toNvShared(in[hull]->points), hullsBounds[hull], NvTransform(NvIdentity), NvVec3(1, 1, 1), in[hull2]->pointsCount, toNvShared(in[hull2]->points), hullsBounds[hull2], NvTransform(NvIdentity), NvVec3(1, 1, 1), 0.0, &params) == false) { continue; } NvVec3 c1 = centers[hull]; NvVec3 c2 = centers[hull2]; float d = FLT_MAX; NvVec3 n1; NvVec3 n2; for (uint32_t p = 0; p < in[hull]->pointsCount; ++p) { float ld = (toNvShared(in[hull]->points[p]) - c2).magnitude(); if (ld < d) { n1 = toNvShared(in[hull]->points[p]); d = ld; } } d = FLT_MAX; for (uint32_t p = 0; 
p < in[hull2]->pointsCount; ++p) { float ld = (toNvShared(in[hull2]->points[p]) - c1).magnitude(); if (ld < d) { n2 = toNvShared(in[hull2]->points[p]); d = ld; } } NvVec3 dir = c2 - c1; NvPlane pl = NvPlane((n1 + n2) * 0.5, dir.getNormalized()); chunkMidplanes[hull].push_back(pl); NvPlane pl2 = NvPlane((n1 + n2) * 0.5, -dir.getNormalized()); chunkMidplanes[hull2].push_back(pl2); } } std::vector<NvVec3> hPoints; for (uint32_t i = 0; i < chunksCount; ++i) { std::vector<Facet> facets; std::vector<Vertex> vertices; std::vector<Edge> edges; for (uint32_t fc = 0; fc < in[i]->polygonDataCount; ++fc) { Facet nFc; nFc.firstEdgeNumber = edges.size(); auto& pd = in[i]->polygonData[fc]; uint32_t n = pd.vertexCount; for (uint32_t ed = 0; ed < n; ++ed) { uint32_t vr1 = in[i]->indices[(ed) + pd.indexBase]; uint32_t vr2 = in[i]->indices[(ed + 1) % n + pd.indexBase]; edges.push_back({vr1, vr2}); } nFc.edgesCount = n; facets.push_back(nFc); } vertices.resize(in[i]->pointsCount); for (uint32_t vr = 0; vr < in[i]->pointsCount; ++vr) { vertices[vr].p = in[i]->points[vr]; } Mesh* hullMesh = new MeshImpl(vertices.data(), edges.data(), facets.data(), vertices.size(), edges.size(), facets.size()); BooleanEvaluator evl; //I think the material ID is unused for collision meshes so harcoding MATERIAL_INTERIOR is ok Mesh* cuttingMesh = getCuttingBox(NvVec3(0, 0, 0), NvVec3(0, 0, 1), 40, 0, kMaterialInteriorId); for (uint32_t p = 0; p < chunkMidplanes[i].size(); ++p) { NvPlane& pl = chunkMidplanes[i][p]; setCuttingBox(pl.pointInPlane(), pl.n.getNormalized(), cuttingMesh, 60, 0); evl.performFastCutting(hullMesh, cuttingMesh, BooleanConfigurations::BOOLEAN_DIFFERENCE()); Mesh* result = evl.createNewMesh(); if (result == nullptr) { break; } delete hullMesh; hullMesh = result; } delete cuttingMesh; if (hullMesh == nullptr) { continue; } hPoints.clear(); hPoints.resize(hullMesh->getVerticesCount()); for (uint32_t v = 0; v < hullMesh->getVerticesCount(); ++v) { hPoints[v] = 
toNvShared(hullMesh->getVertices()[v].p); } delete hullMesh; if (in[i] != nullptr) { delete in[i]; } in[i] = cmb.buildCollisionGeometry(hPoints.size(), fromNvShared(hPoints.data())); } } int32_t buildMeshConvexDecomposition(ConvexMeshBuilder& cmb, const Triangle* mesh, uint32_t triangleCount, const ConvexDecompositionParams& iparams, CollisionHull**& convexes) { std::vector<float> coords(triangleCount * 9); std::vector<uint32_t> indices(triangleCount * 3); uint32_t indx = 0; uint32_t indxCoord = 0; NvBounds3 chunkBound = NvBounds3::empty(); for (uint32_t i = 0; i < triangleCount; ++i) { for (auto& t : { mesh[i].a.p , mesh[i].b.p , mesh[i].c.p }) { chunkBound.include(toNvShared(t)); coords[indxCoord] = t.x; coords[indxCoord + 1] = t.y; coords[indxCoord + 2] = t.z; indxCoord += 3; } indices[indx] = indx; indices[indx + 1] = indx + 1; indices[indx + 2] = indx + 2; indx += 3; } NvVec3 rsc = chunkBound.getDimensions(); for (uint32_t i = 0; i < coords.size(); i += 3) { coords[i] = (coords[i] - chunkBound.minimum.x) / rsc.x; coords[i + 1] = (coords[i + 1] - chunkBound.minimum.y) / rsc.y; coords[i + 2] = (coords[i + 2] - chunkBound.minimum.z) / rsc.z; } VHACD::IVHACD* decomposer = VHACD::CreateVHACD(); VHACD::IVHACD::Parameters vhacdParam; vhacdParam.m_maxConvexHulls = iparams.maximumNumberOfHulls; vhacdParam.m_resolution = iparams.voxelGridResolution; vhacdParam.m_concavity = iparams.concavity; vhacdParam.m_oclAcceleration = false; //TODO vhacdParam.m_callback vhacdParam.m_minVolumePerCH = 0.003f; // 1.f / (3 * vhacdParam.m_resolution ^ (1 / 3)); decomposer->Compute(coords.data(), triangleCount * 3, indices.data(), triangleCount, vhacdParam); const uint32_t nConvexHulls = decomposer->GetNConvexHulls(); convexes = SAFE_ARRAY_NEW(CollisionHull*, nConvexHulls); for (uint32_t i = 0; i < nConvexHulls; ++i) { VHACD::IVHACD::ConvexHull hl; decomposer->GetConvexHull(i, hl); std::vector<NvVec3> vertices; for (uint32_t v = 0; v < hl.m_nPoints; ++v) { 
vertices.push_back(NvVec3(hl.m_points[v * 3], hl.m_points[v * 3 + 1], hl.m_points[v * 3 + 2])); vertices.back().x = vertices.back().x * rsc.x + chunkBound.minimum.x; vertices.back().y = vertices.back().y * rsc.y + chunkBound.minimum.y; vertices.back().z = vertices.back().z * rsc.z + chunkBound.minimum.z; } convexes[i] = cmb.buildCollisionGeometry(vertices.size(), fromNvShared(vertices.data())); } //VHACD::~VHACD called from release does nothign and does not call Clean() decomposer->Clean(); decomposer->Release(); return nConvexHulls; } } // namespace Blast } // namespace Nv
9,910
C++
37.866667
130
0.580626
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/NvBlastExtAuthoringMeshUtils.h
#include <cinttypes> #include <map> #include <set> #include <vector> #include "NvBlastExtAuthoringTypes.h" namespace nvidia { class NvVec3; }; namespace Nv { namespace Blast { class Mesh; /** Helper functions */ /** Set cutting box at some particular position. \param[in] point Cutting face center \param[in] normal Cutting face normal \param[in] mesh Cutting box mesh \param[in] size Cutting box size \param[in] id Cutting box ID */ void setCuttingBox(const nvidia::NvVec3& point, const nvidia::NvVec3& normal, Mesh* mesh, float size, int64_t id); /** Create cutting box at some particular position. \param[in] point Cutting face center \param[in] normal Cutting face normal \param[in] size Cutting box size \param[in] id Cutting box ID */ Mesh* getCuttingBox(const nvidia::NvVec3& point, const nvidia::NvVec3& normal, float size, int64_t id, int32_t interiorMaterialId); /** Create box at some particular position. \param[in] point Cutting face center \param[in] size Cutting box size */ Mesh* getBigBox(const nvidia::NvVec3& point, float size, int32_t interiorMaterialId); /** Create slicing box with noisy cutting surface. \param[in] point Cutting face center \param[in] normal Cutting face normal \param[in] size Cutting box size \param[in] jaggedPlaneSize Noisy surface size \param[in] resolution Noisy surface resolution \param[in] id Cutting box ID \param[in] amplitude Noise amplitude \param[in] frequency Noise frequency \param[in] octaves Noise octaves \param[in] seed Random generator seed, used for noise generation. */ Mesh* getNoisyCuttingBoxPair(const nvidia::NvVec3& point, const nvidia::NvVec3& normal, float size, float jaggedPlaneSize, nvidia::NvVec3 resolution, int64_t id, float amplitude, float frequency, int32_t octaves, int32_t seed, int32_t interiorMaterialId); /** Inverses normals of cutting box and sets indices. 
\param[in] mesh Cutting box mesh */ void inverseNormalAndIndices(Mesh* mesh); struct CmpVec { bool operator()(const nvidia::NvVec3& v1, const nvidia::NvVec3& v2) const; }; typedef std::map<nvidia::NvVec3, std::map<uint32_t, uint32_t>, CmpVec> PointMap; struct SharedFace { SharedFace() {} SharedFace(uint32_t inW, uint32_t inH, int64_t inUD, int32_t inMatId) : w(inW), h(inH), f(Facet( 0, 3, inUD, inMatId )) { vertices.reserve((w + 1) * (h + 1)); } uint32_t w, h; Facet f; std::vector<Nv::Blast::Vertex> vertices; std::vector<Nv::Blast::Edge> edges; std::vector<Nv::Blast::Facet> facets; }; struct CmpSharedFace { bool operator()(const std::pair<nvidia::NvVec3, nvidia::NvVec3>& pv1, const std::pair<nvidia::NvVec3, nvidia::NvVec3>& pv2) const; }; typedef std::map<std::pair<nvidia::NvVec3, nvidia::NvVec3>, SharedFace, CmpSharedFace> SharedFacesMap; struct CutoutConfiguration; void buildCuttingConeFaces(const CutoutConfiguration& conf, const std::vector<std::vector<nvidia::NvVec3>>& points, float heightBot, float heightTop, float conicityBot, float conicityTop, int64_t& id, int32_t seed, int32_t interiorMaterialId, SharedFacesMap& sharedFacesMap); /** Create cutting cone at some particular position. 
\param[in] conf Cutout configuration parameters and data \param[in] meshId Cutout index \param[in] points Array of points for loop \param[in] smoothingGroups Array of point indices at which smoothing group should be toggled \param[in] heightBot Cutting cone bottom height (below z = 0) \param[in] heightTop Cutting cone top height (below z = 0) \param[in] conicityBot Cutting cone bottom points multiplier \param[in] conicityTop Cutting cone top points multiplier \param[in] id Cutting cylinder ID \param[in] seed Seed for RNG \param[in] interiorMaterialId Interior material index \param[in] sharedFacesMap Shared faces for noisy fracture */ Mesh* getCuttingCone(const CutoutConfiguration& conf, const std::vector<nvidia::NvVec3>& points, const std::set<int32_t>& smoothingGroups, float heightBot, float heightTop, float conicityBot, float conicityTop, int64_t& id, int32_t seed, int32_t interiorMaterialId, const SharedFacesMap& sharedFacesMap, bool inverseNormals = false); }; };
4,321
C
33.576
255
0.714881
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/VHACD/src/vhacdVolume.cpp
/* Copyright (c) 2011 Khaled Mamou (kmamou at gmail dot com) All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. The names of the contributors may not be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #define _CRT_SECURE_NO_WARNINGS #include "btConvexHullComputer.h" #include "vhacdVolume.h" #include <algorithm> #include <float.h> #include <math.h> #include <queue> #include <string.h> #ifdef _MSC_VER #pragma warning(disable:4458 4100) #endif namespace VHACD { /********************************************************/ /* AABB-triangle overlap test code */ /* by Tomas Akenine-Meuller */ /* Function: int32_t triBoxOverlap(float boxcenter[3], */ /* float boxhalfsize[3],float triverts[3][3]); */ /* History: */ /* 2001-03-05: released the code in its first version */ /* 2001-06-18: changed the order of the tests, faster */ /* */ /* Acknowledgement: Many thanks to Pierre Terdiman for */ /* suggestions and discussions on how to optimize code. */ /* Thanks to David Hunt for finding a ">="-bug! */ /********************************************************/ #define X 0 #define Y 1 #define Z 2 #define FINDMINMAX(x0, x1, x2, min, max) \ min = max = x0; \ if (x1 < min) \ min = x1; \ if (x1 > max) \ max = x1; \ if (x2 < min) \ min = x2; \ if (x2 > max) \ max = x2; #define AXISTEST_X01(a, b, fa, fb) \ p0 = a * v0[Y] - b * v0[Z]; \ p2 = a * v2[Y] - b * v2[Z]; \ if (p0 < p2) { \ min = p0; \ max = p2; \ } \ else { \ min = p2; \ max = p0; \ } \ rad = fa * boxhalfsize[Y] + fb * boxhalfsize[Z]; \ if (min > rad || max < -rad) \ return 0; #define AXISTEST_X2(a, b, fa, fb) \ p0 = a * v0[Y] - b * v0[Z]; \ p1 = a * v1[Y] - b * v1[Z]; \ if (p0 < p1) { \ min = p0; \ max = p1; \ } \ else { \ min = p1; \ max = p0; \ } \ rad = fa * boxhalfsize[Y] + fb * boxhalfsize[Z]; \ if (min > rad || max < -rad) \ return 0; #define AXISTEST_Y02(a, b, fa, fb) \ p0 = -a * v0[X] + b * v0[Z]; \ p2 = -a * v2[X] + b * v2[Z]; \ if (p0 < p2) { \ min = p0; \ max = p2; \ } \ else { \ min = p2; \ max = p0; \ } \ rad = fa * boxhalfsize[X] + fb * boxhalfsize[Z]; \ if (min > rad || max < -rad) \ return 0; #define AXISTEST_Y1(a, b, fa, fb) \ p0 = -a * v0[X] + b * v0[Z]; \ p1 = -a * v1[X] + b * v1[Z]; \ if (p0 < p1) { \ 
min = p0; \ max = p1; \ } \ else { \ min = p1; \ max = p0; \ } \ rad = fa * boxhalfsize[X] + fb * boxhalfsize[Z]; \ if (min > rad || max < -rad) \ return 0; #define AXISTEST_Z12(a, b, fa, fb) \ p1 = a * v1[X] - b * v1[Y]; \ p2 = a * v2[X] - b * v2[Y]; \ if (p2 < p1) { \ min = p2; \ max = p1; \ } \ else { \ min = p1; \ max = p2; \ } \ rad = fa * boxhalfsize[X] + fb * boxhalfsize[Y]; \ if (min > rad || max < -rad) \ return 0; #define AXISTEST_Z0(a, b, fa, fb) \ p0 = a * v0[X] - b * v0[Y]; \ p1 = a * v1[X] - b * v1[Y]; \ if (p0 < p1) { \ min = p0; \ max = p1; \ } \ else { \ min = p1; \ max = p0; \ } \ rad = fa * boxhalfsize[X] + fb * boxhalfsize[Y]; \ if (min > rad || max < -rad) \ return 0; int32_t PlaneBoxOverlap(const Vec3<double>& normal, const Vec3<double>& vert, const Vec3<double>& maxbox) { int32_t q; Vec3<double> vmin, vmax; double v; for (q = X; q <= Z; q++) { v = vert[q]; if (normal[q] > 0.0) { vmin[q] = -maxbox[q] - v; vmax[q] = maxbox[q] - v; } else { vmin[q] = maxbox[q] - v; vmax[q] = -maxbox[q] - v; } } if (normal * vmin > 0.0) return 0; if (normal * vmax >= 0.0) return 1; return 0; } int32_t TriBoxOverlap(const Vec3<double>& boxcenter, const Vec3<double>& boxhalfsize, const Vec3<double>& triver0, const Vec3<double>& triver1, const Vec3<double>& triver2) { /* use separating axis theorem to test overlap between triangle and box */ /* need to test for overlap in these directions: */ /* 1) the {x,y,z}-directions (actually, since we use the AABB of the triangle */ /* we do not even need to test these) */ /* 2) normal of the triangle */ /* 3) crossproduct(edge from tri, {x,y,z}-directin) */ /* this gives 3x3=9 more tests */ Vec3<double> v0, v1, v2; double min, max, p0, p1, p2, rad, fex, fey, fez; // -NJMP- "d" local variable removed Vec3<double> normal, e0, e1, e2; /* This is the fastest branch on Sun */ /* move everything so that the boxcenter is in (0,0,0) */ v0 = triver0 - boxcenter; v1 = triver1 - boxcenter; v2 = triver2 - boxcenter; /* compute triangle 
edges */ e0 = v1 - v0; /* tri edge 0 */ e1 = v2 - v1; /* tri edge 1 */ e2 = v0 - v2; /* tri edge 2 */ /* Bullet 3: */ /* test the 9 tests first (this was faster) */ fex = fabs(e0[X]); fey = fabs(e0[Y]); fez = fabs(e0[Z]); AXISTEST_X01(e0[Z], e0[Y], fez, fey); AXISTEST_Y02(e0[Z], e0[X], fez, fex); AXISTEST_Z12(e0[Y], e0[X], fey, fex); fex = fabs(e1[X]); fey = fabs(e1[Y]); fez = fabs(e1[Z]); AXISTEST_X01(e1[Z], e1[Y], fez, fey); AXISTEST_Y02(e1[Z], e1[X], fez, fex); AXISTEST_Z0(e1[Y], e1[X], fey, fex); fex = fabs(e2[X]); fey = fabs(e2[Y]); fez = fabs(e2[Z]); AXISTEST_X2(e2[Z], e2[Y], fez, fey); AXISTEST_Y1(e2[Z], e2[X], fez, fex); AXISTEST_Z12(e2[Y], e2[X], fey, fex); /* Bullet 1: */ /* first test overlap in the {x,y,z}-directions */ /* find min, max of the triangle each direction, and test for overlap in */ /* that direction -- this is equivalent to testing a minimal AABB around */ /* the triangle against the AABB */ /* test in X-direction */ FINDMINMAX(v0[X], v1[X], v2[X], min, max); if (min > boxhalfsize[X] || max < -boxhalfsize[X]) return 0; /* test in Y-direction */ FINDMINMAX(v0[Y], v1[Y], v2[Y], min, max); if (min > boxhalfsize[Y] || max < -boxhalfsize[Y]) return 0; /* test in Z-direction */ FINDMINMAX(v0[Z], v1[Z], v2[Z], min, max); if (min > boxhalfsize[Z] || max < -boxhalfsize[Z]) return 0; /* Bullet 2: */ /* test if the box intersects the plane of the triangle */ /* compute plane equation of triangle: normal*x+d=0 */ normal = e0 ^ e1; if (!PlaneBoxOverlap(normal, v0, boxhalfsize)) return 0; return 1; /* box and triangle overlaps */ } // Slightly modified version of Stan Melax's code for 3x3 matrix diagonalization (Thanks Stan!) // source: http://www.melax.com/diag.html?attredirects=0 void Diagonalize(const double (&A)[3][3], double (&Q)[3][3], double (&D)[3][3]) { // A must be a symmetric matrix. // returns Q and D such that // Diagonal matrix D = QT * A * Q; and A = Q*D*QT const int32_t maxsteps = 24; // certainly wont need that many. 
int32_t k0, k1, k2; double o[3], m[3]; double q[4] = { 0.0, 0.0, 0.0, 1.0 }; double jr[4]; double sqw, sqx, sqy, sqz; double tmp1, tmp2, mq; double AQ[3][3]; double thet, sgn, t, c; for (int32_t i = 0; i < maxsteps; ++i) { // quat to matrix sqx = q[0] * q[0]; sqy = q[1] * q[1]; sqz = q[2] * q[2]; sqw = q[3] * q[3]; Q[0][0] = (sqx - sqy - sqz + sqw); Q[1][1] = (-sqx + sqy - sqz + sqw); Q[2][2] = (-sqx - sqy + sqz + sqw); tmp1 = q[0] * q[1]; tmp2 = q[2] * q[3]; Q[1][0] = 2.0 * (tmp1 + tmp2); Q[0][1] = 2.0 * (tmp1 - tmp2); tmp1 = q[0] * q[2]; tmp2 = q[1] * q[3]; Q[2][0] = 2.0 * (tmp1 - tmp2); Q[0][2] = 2.0 * (tmp1 + tmp2); tmp1 = q[1] * q[2]; tmp2 = q[0] * q[3]; Q[2][1] = 2.0 * (tmp1 + tmp2); Q[1][2] = 2.0 * (tmp1 - tmp2); // AQ = A * Q AQ[0][0] = Q[0][0] * A[0][0] + Q[1][0] * A[0][1] + Q[2][0] * A[0][2]; AQ[0][1] = Q[0][1] * A[0][0] + Q[1][1] * A[0][1] + Q[2][1] * A[0][2]; AQ[0][2] = Q[0][2] * A[0][0] + Q[1][2] * A[0][1] + Q[2][2] * A[0][2]; AQ[1][0] = Q[0][0] * A[0][1] + Q[1][0] * A[1][1] + Q[2][0] * A[1][2]; AQ[1][1] = Q[0][1] * A[0][1] + Q[1][1] * A[1][1] + Q[2][1] * A[1][2]; AQ[1][2] = Q[0][2] * A[0][1] + Q[1][2] * A[1][1] + Q[2][2] * A[1][2]; AQ[2][0] = Q[0][0] * A[0][2] + Q[1][0] * A[1][2] + Q[2][0] * A[2][2]; AQ[2][1] = Q[0][1] * A[0][2] + Q[1][1] * A[1][2] + Q[2][1] * A[2][2]; AQ[2][2] = Q[0][2] * A[0][2] + Q[1][2] * A[1][2] + Q[2][2] * A[2][2]; // D = Qt * AQ D[0][0] = AQ[0][0] * Q[0][0] + AQ[1][0] * Q[1][0] + AQ[2][0] * Q[2][0]; D[0][1] = AQ[0][0] * Q[0][1] + AQ[1][0] * Q[1][1] + AQ[2][0] * Q[2][1]; D[0][2] = AQ[0][0] * Q[0][2] + AQ[1][0] * Q[1][2] + AQ[2][0] * Q[2][2]; D[1][0] = AQ[0][1] * Q[0][0] + AQ[1][1] * Q[1][0] + AQ[2][1] * Q[2][0]; D[1][1] = AQ[0][1] * Q[0][1] + AQ[1][1] * Q[1][1] + AQ[2][1] * Q[2][1]; D[1][2] = AQ[0][1] * Q[0][2] + AQ[1][1] * Q[1][2] + AQ[2][1] * Q[2][2]; D[2][0] = AQ[0][2] * Q[0][0] + AQ[1][2] * Q[1][0] + AQ[2][2] * Q[2][0]; D[2][1] = AQ[0][2] * Q[0][1] + AQ[1][2] * Q[1][1] + AQ[2][2] * Q[2][1]; D[2][2] = AQ[0][2] * Q[0][2] + 
AQ[1][2] * Q[1][2] + AQ[2][2] * Q[2][2]; o[0] = D[1][2]; o[1] = D[0][2]; o[2] = D[0][1]; m[0] = fabs(o[0]); m[1] = fabs(o[1]); m[2] = fabs(o[2]); k0 = (m[0] > m[1] && m[0] > m[2]) ? 0 : (m[1] > m[2]) ? 1 : 2; // index of largest element of offdiag k1 = (k0 + 1) % 3; k2 = (k0 + 2) % 3; if (o[k0] == 0.0) { break; // diagonal already } thet = (D[k2][k2] - D[k1][k1]) / (2.0 * o[k0]); sgn = (thet > 0.0) ? 1.0 : -1.0; thet *= sgn; // make it positive t = sgn / (thet + ((thet < 1.E6) ? sqrt(thet * thet + 1.0) : thet)); // sign(T)/(|T|+sqrt(T^2+1)) c = 1.0 / sqrt(t * t + 1.0); // c= 1/(t^2+1) , t=s/c if (c == 1.0) { break; // no room for improvement - reached machine precision. } jr[0] = jr[1] = jr[2] = jr[3] = 0.0; jr[k0] = sgn * sqrt((1.0 - c) / 2.0); // using 1/2 angle identity sin(a/2) = sqrt((1-cos(a))/2) jr[k0] *= -1.0; // since our quat-to-matrix convention was for v*M instead of M*v jr[3] = sqrt(1.0 - jr[k0] * jr[k0]); if (jr[3] == 1.0) { break; // reached limits of floating point precision } q[0] = (q[3] * jr[0] + q[0] * jr[3] + q[1] * jr[2] - q[2] * jr[1]); q[1] = (q[3] * jr[1] - q[0] * jr[2] + q[1] * jr[3] + q[2] * jr[0]); q[2] = (q[3] * jr[2] + q[0] * jr[1] - q[1] * jr[0] + q[2] * jr[3]); q[3] = (q[3] * jr[3] - q[0] * jr[0] - q[1] * jr[1] - q[2] * jr[2]); mq = sqrt(q[0] * q[0] + q[1] * q[1] + q[2] * q[2] + q[3] * q[3]); q[0] /= mq; q[1] /= mq; q[2] /= mq; q[3] /= mq; } } const double TetrahedronSet::EPS = 0.0000000000001; VoxelSet::VoxelSet() { m_minBB[0] = m_minBB[1] = m_minBB[2] = 0.0; m_minBBVoxels[0] = m_minBBVoxels[1] = m_minBBVoxels[2] = 0; m_maxBBVoxels[0] = m_maxBBVoxels[1] = m_maxBBVoxels[2] = 1; m_minBBPts[0] = m_minBBPts[1] = m_minBBPts[2] = 0; m_maxBBPts[0] = m_maxBBPts[1] = m_maxBBPts[2] = 1; m_barycenter[0] = m_barycenter[1] = m_barycenter[2] = 0; m_barycenterPCA[0] = m_barycenterPCA[1] = m_barycenterPCA[2] = 0.0; m_scale = 1.0; m_unitVolume = 1.0; m_numVoxelsOnSurface = 0; m_numVoxelsInsideSurface = 0; memset(m_Q, 0, sizeof(double) * 9); 
memset(m_D, 0, sizeof(double) * 9); } VoxelSet::~VoxelSet(void) { } void VoxelSet::ComputeBB() { const size_t nVoxels = m_voxels.Size(); if (nVoxels == 0) return; for (int32_t h = 0; h < 3; ++h) { m_minBBVoxels[h] = m_voxels[0].m_coord[h]; m_maxBBVoxels[h] = m_voxels[0].m_coord[h]; } Vec3<double> bary(0.0); for (size_t p = 0; p < nVoxels; ++p) { for (int32_t h = 0; h < 3; ++h) { bary[h] += m_voxels[p].m_coord[h]; if (m_minBBVoxels[h] > m_voxels[p].m_coord[h]) m_minBBVoxels[h] = m_voxels[p].m_coord[h]; if (m_maxBBVoxels[h] < m_voxels[p].m_coord[h]) m_maxBBVoxels[h] = m_voxels[p].m_coord[h]; } } bary /= (double)nVoxels; for (int32_t h = 0; h < 3; ++h) { m_minBBPts[h] = m_minBBVoxels[h] * m_scale + m_minBB[h]; m_maxBBPts[h] = m_maxBBVoxels[h] * m_scale + m_minBB[h]; m_barycenter[h] = (short)(bary[h] + 0.5); } } void VoxelSet::ComputeConvexHull(Mesh& meshCH, const size_t sampling) const { const size_t CLUSTER_SIZE = 65536; const size_t nVoxels = m_voxels.Size(); if (nVoxels == 0) return; SArray<Vec3<double> > cpoints; Vec3<double>* points = new Vec3<double>[CLUSTER_SIZE]; size_t p = 0; size_t s = 0; short i, j, k; while (p < nVoxels) { size_t q = 0; while (q < CLUSTER_SIZE && p < nVoxels) { if (m_voxels[p].m_data == PRIMITIVE_ON_SURFACE) { ++s; if (s == sampling) { s = 0; i = m_voxels[p].m_coord[0]; j = m_voxels[p].m_coord[1]; k = m_voxels[p].m_coord[2]; Vec3<double> p0((i - 0.5) * m_scale, (j - 0.5) * m_scale, (k - 0.5) * m_scale); Vec3<double> p1((i + 0.5) * m_scale, (j - 0.5) * m_scale, (k - 0.5) * m_scale); Vec3<double> p2((i + 0.5) * m_scale, (j + 0.5) * m_scale, (k - 0.5) * m_scale); Vec3<double> p3((i - 0.5) * m_scale, (j + 0.5) * m_scale, (k - 0.5) * m_scale); Vec3<double> p4((i - 0.5) * m_scale, (j - 0.5) * m_scale, (k + 0.5) * m_scale); Vec3<double> p5((i + 0.5) * m_scale, (j - 0.5) * m_scale, (k + 0.5) * m_scale); Vec3<double> p6((i + 0.5) * m_scale, (j + 0.5) * m_scale, (k + 0.5) * m_scale); Vec3<double> p7((i - 0.5) * m_scale, (j + 0.5) * m_scale, (k + 
0.5) * m_scale); points[q++] = p0 + m_minBB; points[q++] = p1 + m_minBB; points[q++] = p2 + m_minBB; points[q++] = p3 + m_minBB; points[q++] = p4 + m_minBB; points[q++] = p5 + m_minBB; points[q++] = p6 + m_minBB; points[q++] = p7 + m_minBB; } } ++p; } btConvexHullComputer ch; ch.compute((double*)points, 3 * sizeof(double), (int32_t)q, -1.0, -1.0); for (int32_t v = 0; v < ch.vertices.size(); v++) { cpoints.PushBack(Vec3<double>(ch.vertices[v].getX(), ch.vertices[v].getY(), ch.vertices[v].getZ())); } } delete[] points; points = cpoints.Data(); btConvexHullComputer ch; ch.compute((double*)points, 3 * sizeof(double), (int32_t)cpoints.Size(), -1.0, -1.0); meshCH.ResizePoints(0); meshCH.ResizeTriangles(0); for (int32_t v = 0; v < ch.vertices.size(); v++) { meshCH.AddPoint(Vec3<double>(ch.vertices[v].getX(), ch.vertices[v].getY(), ch.vertices[v].getZ())); } const int32_t nt = ch.faces.size(); for (int32_t t = 0; t < nt; ++t) { const btConvexHullComputer::Edge* sourceEdge = &(ch.edges[ch.faces[t]]); int32_t a = sourceEdge->getSourceVertex(); int32_t b = sourceEdge->getTargetVertex(); const btConvexHullComputer::Edge* edge = sourceEdge->getNextEdgeOfFace(); int32_t c = edge->getTargetVertex(); while (c != a) { meshCH.AddTriangle(Vec3<int32_t>(a, b, c)); edge = edge->getNextEdgeOfFace(); b = c; c = edge->getTargetVertex(); } } } void VoxelSet::GetPoints(const Voxel& voxel, Vec3<double>* const pts) const { short i = voxel.m_coord[0]; short j = voxel.m_coord[1]; short k = voxel.m_coord[2]; pts[0][0] = (i - 0.5) * m_scale + m_minBB[0]; pts[1][0] = (i + 0.5) * m_scale + m_minBB[0]; pts[2][0] = (i + 0.5) * m_scale + m_minBB[0]; pts[3][0] = (i - 0.5) * m_scale + m_minBB[0]; pts[4][0] = (i - 0.5) * m_scale + m_minBB[0]; pts[5][0] = (i + 0.5) * m_scale + m_minBB[0]; pts[6][0] = (i + 0.5) * m_scale + m_minBB[0]; pts[7][0] = (i - 0.5) * m_scale + m_minBB[0]; pts[0][1] = (j - 0.5) * m_scale + m_minBB[1]; pts[1][1] = (j - 0.5) * m_scale + m_minBB[1]; pts[2][1] = (j + 0.5) * m_scale + 
m_minBB[1]; pts[3][1] = (j + 0.5) * m_scale + m_minBB[1]; pts[4][1] = (j - 0.5) * m_scale + m_minBB[1]; pts[5][1] = (j - 0.5) * m_scale + m_minBB[1]; pts[6][1] = (j + 0.5) * m_scale + m_minBB[1]; pts[7][1] = (j + 0.5) * m_scale + m_minBB[1]; pts[0][2] = (k - 0.5) * m_scale + m_minBB[2]; pts[1][2] = (k - 0.5) * m_scale + m_minBB[2]; pts[2][2] = (k - 0.5) * m_scale + m_minBB[2]; pts[3][2] = (k - 0.5) * m_scale + m_minBB[2]; pts[4][2] = (k + 0.5) * m_scale + m_minBB[2]; pts[5][2] = (k + 0.5) * m_scale + m_minBB[2]; pts[6][2] = (k + 0.5) * m_scale + m_minBB[2]; pts[7][2] = (k + 0.5) * m_scale + m_minBB[2]; } void VoxelSet::Intersect(const Plane& plane, SArray<Vec3<double> >* const positivePts, SArray<Vec3<double> >* const negativePts, const size_t sampling) const { const size_t nVoxels = m_voxels.Size(); if (nVoxels == 0) return; const double d0 = m_scale; double d; Vec3<double> pts[8]; Vec3<double> pt; Voxel voxel; size_t sp = 0; size_t sn = 0; for (size_t v = 0; v < nVoxels; ++v) { voxel = m_voxels[v]; pt = GetPoint(voxel); d = plane.m_a * pt[0] + plane.m_b * pt[1] + plane.m_c * pt[2] + plane.m_d; // if (d >= 0.0 && d <= d0) positivePts->PushBack(pt); // else if (d < 0.0 && -d <= d0) negativePts->PushBack(pt); if (d >= 0.0) { if (d <= d0) { GetPoints(voxel, pts); for (int32_t k = 0; k < 8; ++k) { positivePts->PushBack(pts[k]); } } else { if (++sp == sampling) { // positivePts->PushBack(pt); GetPoints(voxel, pts); for (int32_t k = 0; k < 8; ++k) { positivePts->PushBack(pts[k]); } sp = 0; } } } else { if (-d <= d0) { GetPoints(voxel, pts); for (int32_t k = 0; k < 8; ++k) { negativePts->PushBack(pts[k]); } } else { if (++sn == sampling) { // negativePts->PushBack(pt); GetPoints(voxel, pts); for (int32_t k = 0; k < 8; ++k) { negativePts->PushBack(pts[k]); } sn = 0; } } } } } void VoxelSet::ComputeExteriorPoints(const Plane& plane, const Mesh& mesh, SArray<Vec3<double> >* const exteriorPts) const { const size_t nVoxels = m_voxels.Size(); if (nVoxels == 0) return; double 
d; Vec3<double> pt; Vec3<double> pts[8]; Voxel voxel; for (size_t v = 0; v < nVoxels; ++v) { voxel = m_voxels[v]; pt = GetPoint(voxel); d = plane.m_a * pt[0] + plane.m_b * pt[1] + plane.m_c * pt[2] + plane.m_d; if (d >= 0.0) { if (!mesh.IsInside(pt)) { GetPoints(voxel, pts); for (int32_t k = 0; k < 8; ++k) { exteriorPts->PushBack(pts[k]); } } } } } void VoxelSet::ComputeClippedVolumes(const Plane& plane, double& positiveVolume, double& negativeVolume) const { negativeVolume = 0.0; positiveVolume = 0.0; const size_t nVoxels = m_voxels.Size(); if (nVoxels == 0) return; double d; Vec3<double> pt; size_t nPositiveVoxels = 0; for (size_t v = 0; v < nVoxels; ++v) { pt = GetPoint(m_voxels[v]); d = plane.m_a * pt[0] + plane.m_b * pt[1] + plane.m_c * pt[2] + plane.m_d; nPositiveVoxels += (d >= 0.0); } size_t nNegativeVoxels = nVoxels - nPositiveVoxels; positiveVolume = m_unitVolume * nPositiveVoxels; negativeVolume = m_unitVolume * nNegativeVoxels; } void VoxelSet::SelectOnSurface(PrimitiveSet* const onSurfP) const { VoxelSet* const onSurf = (VoxelSet*)onSurfP; const size_t nVoxels = m_voxels.Size(); if (nVoxels == 0) return; for (int32_t h = 0; h < 3; ++h) { onSurf->m_minBB[h] = m_minBB[h]; } onSurf->m_voxels.Resize(0); onSurf->m_scale = m_scale; onSurf->m_unitVolume = m_unitVolume; onSurf->m_numVoxelsOnSurface = 0; onSurf->m_numVoxelsInsideSurface = 0; Voxel voxel; for (size_t v = 0; v < nVoxels; ++v) { voxel = m_voxels[v]; if (voxel.m_data == PRIMITIVE_ON_SURFACE) { onSurf->m_voxels.PushBack(voxel); ++onSurf->m_numVoxelsOnSurface; } } } void VoxelSet::Clip(const Plane& plane, PrimitiveSet* const positivePartP, PrimitiveSet* const negativePartP) const { VoxelSet* const positivePart = (VoxelSet*)positivePartP; VoxelSet* const negativePart = (VoxelSet*)negativePartP; const size_t nVoxels = m_voxels.Size(); if (nVoxels == 0) return; for (int32_t h = 0; h < 3; ++h) { negativePart->m_minBB[h] = positivePart->m_minBB[h] = m_minBB[h]; } positivePart->m_voxels.Resize(0); 
negativePart->m_voxels.Resize(0); positivePart->m_voxels.Allocate(nVoxels); negativePart->m_voxels.Allocate(nVoxels); negativePart->m_scale = positivePart->m_scale = m_scale; negativePart->m_unitVolume = positivePart->m_unitVolume = m_unitVolume; negativePart->m_numVoxelsOnSurface = positivePart->m_numVoxelsOnSurface = 0; negativePart->m_numVoxelsInsideSurface = positivePart->m_numVoxelsInsideSurface = 0; double d; Vec3<double> pt; Voxel voxel; const double d0 = m_scale; for (size_t v = 0; v < nVoxels; ++v) { voxel = m_voxels[v]; pt = GetPoint(voxel); d = plane.m_a * pt[0] + plane.m_b * pt[1] + plane.m_c * pt[2] + plane.m_d; if (d >= 0.0) { if (voxel.m_data == PRIMITIVE_ON_SURFACE || d <= d0) { voxel.m_data = PRIMITIVE_ON_SURFACE; positivePart->m_voxels.PushBack(voxel); ++positivePart->m_numVoxelsOnSurface; } else { positivePart->m_voxels.PushBack(voxel); ++positivePart->m_numVoxelsInsideSurface; } } else { if (voxel.m_data == PRIMITIVE_ON_SURFACE || -d <= d0) { voxel.m_data = PRIMITIVE_ON_SURFACE; negativePart->m_voxels.PushBack(voxel); ++negativePart->m_numVoxelsOnSurface; } else { negativePart->m_voxels.PushBack(voxel); ++negativePart->m_numVoxelsInsideSurface; } } } } void VoxelSet::Convert(Mesh& mesh, const VOXEL_VALUE value) const { const size_t nVoxels = m_voxels.Size(); if (nVoxels == 0) return; Voxel voxel; Vec3<double> pts[8]; for (size_t v = 0; v < nVoxels; ++v) { voxel = m_voxels[v]; if (voxel.m_data == value) { GetPoints(voxel, pts); int32_t s = (int32_t)mesh.GetNPoints(); for (int32_t k = 0; k < 8; ++k) { mesh.AddPoint(pts[k]); } mesh.AddTriangle(Vec3<int32_t>(s + 0, s + 2, s + 1)); mesh.AddTriangle(Vec3<int32_t>(s + 0, s + 3, s + 2)); mesh.AddTriangle(Vec3<int32_t>(s + 4, s + 5, s + 6)); mesh.AddTriangle(Vec3<int32_t>(s + 4, s + 6, s + 7)); mesh.AddTriangle(Vec3<int32_t>(s + 7, s + 6, s + 2)); mesh.AddTriangle(Vec3<int32_t>(s + 7, s + 2, s + 3)); mesh.AddTriangle(Vec3<int32_t>(s + 4, s + 1, s + 5)); mesh.AddTriangle(Vec3<int32_t>(s + 4, s + 0, s + 
1)); mesh.AddTriangle(Vec3<int32_t>(s + 6, s + 5, s + 1)); mesh.AddTriangle(Vec3<int32_t>(s + 6, s + 1, s + 2)); mesh.AddTriangle(Vec3<int32_t>(s + 7, s + 0, s + 4)); mesh.AddTriangle(Vec3<int32_t>(s + 7, s + 3, s + 0)); } } } void VoxelSet::ComputePrincipalAxes() { const size_t nVoxels = m_voxels.Size(); if (nVoxels == 0) return; m_barycenterPCA[0] = m_barycenterPCA[1] = m_barycenterPCA[2] = 0.0; for (size_t v = 0; v < nVoxels; ++v) { Voxel& voxel = m_voxels[v]; m_barycenterPCA[0] += voxel.m_coord[0]; m_barycenterPCA[1] += voxel.m_coord[1]; m_barycenterPCA[2] += voxel.m_coord[2]; } m_barycenterPCA /= (double)nVoxels; double covMat[3][3] = { { 0.0, 0.0, 0.0 }, { 0.0, 0.0, 0.0 }, { 0.0, 0.0, 0.0 } }; double x, y, z; for (size_t v = 0; v < nVoxels; ++v) { Voxel& voxel = m_voxels[v]; x = voxel.m_coord[0] - m_barycenter[0]; y = voxel.m_coord[1] - m_barycenter[1]; z = voxel.m_coord[2] - m_barycenter[2]; covMat[0][0] += x * x; covMat[1][1] += y * y; covMat[2][2] += z * z; covMat[0][1] += x * y; covMat[0][2] += x * z; covMat[1][2] += y * z; } covMat[0][0] /= nVoxels; covMat[1][1] /= nVoxels; covMat[2][2] /= nVoxels; covMat[0][1] /= nVoxels; covMat[0][2] /= nVoxels; covMat[1][2] /= nVoxels; covMat[1][0] = covMat[0][1]; covMat[2][0] = covMat[0][2]; covMat[2][1] = covMat[1][2]; Diagonalize(covMat, m_Q, m_D); } Volume::Volume() { m_dim[0] = m_dim[1] = m_dim[2] = 0; m_minBB[0] = m_minBB[1] = m_minBB[2] = 0.0; m_maxBB[0] = m_maxBB[1] = m_maxBB[2] = 1.0; m_numVoxelsOnSurface = 0; m_numVoxelsInsideSurface = 0; m_numVoxelsOutsideSurface = 0; m_scale = 1.0; m_data = 0; } Volume::~Volume(void) { delete[] m_data; } void Volume::Allocate() { delete[] m_data; size_t size = m_dim[0] * m_dim[1] * m_dim[2]; m_data = new unsigned char[size]; memset(m_data, PRIMITIVE_UNDEFINED, sizeof(unsigned char) * size); } void Volume::Free() { delete[] m_data; m_data = 0; } void Volume::FillOutsideSurface(const size_t i0, const size_t j0, const size_t k0, const size_t i1, const size_t j1, const size_t 
k1) { const short neighbours[6][3] = { { 1, 0, 0 }, { 0, 1, 0 }, { 0, 0, 1 }, { -1, 0, 0 }, { 0, -1, 0 }, { 0, 0, -1 } }; std::queue<Vec3<short> > fifo; Vec3<short> current; short a, b, c; for (size_t i = i0; i < i1; ++i) { for (size_t j = j0; j < j1; ++j) { for (size_t k = k0; k < k1; ++k) { if (GetVoxel(i, j, k) == PRIMITIVE_UNDEFINED) { current[0] = (short)i; current[1] = (short)j; current[2] = (short)k; fifo.push(current); GetVoxel(current[0], current[1], current[2]) = PRIMITIVE_OUTSIDE_SURFACE; ++m_numVoxelsOutsideSurface; while (fifo.size() > 0) { current = fifo.front(); fifo.pop(); for (int32_t h = 0; h < 6; ++h) { a = current[0] + neighbours[h][0]; b = current[1] + neighbours[h][1]; c = current[2] + neighbours[h][2]; if (a < 0 || a >= (int32_t)m_dim[0] || b < 0 || b >= (int32_t)m_dim[1] || c < 0 || c >= (int32_t)m_dim[2]) { continue; } unsigned char& v = GetVoxel(a, b, c); if (v == PRIMITIVE_UNDEFINED) { v = PRIMITIVE_OUTSIDE_SURFACE; ++m_numVoxelsOutsideSurface; fifo.push(Vec3<short>(a, b, c)); } } } } } } } } void Volume::FillInsideSurface() { const size_t i0 = m_dim[0]; const size_t j0 = m_dim[1]; const size_t k0 = m_dim[2]; for (size_t i = 0; i < i0; ++i) { for (size_t j = 0; j < j0; ++j) { for (size_t k = 0; k < k0; ++k) { unsigned char& v = GetVoxel(i, j, k); if (v == PRIMITIVE_UNDEFINED) { v = PRIMITIVE_INSIDE_SURFACE; ++m_numVoxelsInsideSurface; } } } } } void Volume::Convert(Mesh& mesh, const VOXEL_VALUE value) const { const size_t i0 = m_dim[0]; const size_t j0 = m_dim[1]; const size_t k0 = m_dim[2]; for (size_t i = 0; i < i0; ++i) { for (size_t j = 0; j < j0; ++j) { for (size_t k = 0; k < k0; ++k) { const unsigned char& voxel = GetVoxel(i, j, k); if (voxel == value) { Vec3<double> p0((i - 0.5) * m_scale, (j - 0.5) * m_scale, (k - 0.5) * m_scale); Vec3<double> p1((i + 0.5) * m_scale, (j - 0.5) * m_scale, (k - 0.5) * m_scale); Vec3<double> p2((i + 0.5) * m_scale, (j + 0.5) * m_scale, (k - 0.5) * m_scale); Vec3<double> p3((i - 0.5) * m_scale, (j + 
0.5) * m_scale, (k - 0.5) * m_scale); Vec3<double> p4((i - 0.5) * m_scale, (j - 0.5) * m_scale, (k + 0.5) * m_scale); Vec3<double> p5((i + 0.5) * m_scale, (j - 0.5) * m_scale, (k + 0.5) * m_scale); Vec3<double> p6((i + 0.5) * m_scale, (j + 0.5) * m_scale, (k + 0.5) * m_scale); Vec3<double> p7((i - 0.5) * m_scale, (j + 0.5) * m_scale, (k + 0.5) * m_scale); int32_t s = (int32_t)mesh.GetNPoints(); mesh.AddPoint(p0 + m_minBB); mesh.AddPoint(p1 + m_minBB); mesh.AddPoint(p2 + m_minBB); mesh.AddPoint(p3 + m_minBB); mesh.AddPoint(p4 + m_minBB); mesh.AddPoint(p5 + m_minBB); mesh.AddPoint(p6 + m_minBB); mesh.AddPoint(p7 + m_minBB); mesh.AddTriangle(Vec3<int32_t>(s + 0, s + 2, s + 1)); mesh.AddTriangle(Vec3<int32_t>(s + 0, s + 3, s + 2)); mesh.AddTriangle(Vec3<int32_t>(s + 4, s + 5, s + 6)); mesh.AddTriangle(Vec3<int32_t>(s + 4, s + 6, s + 7)); mesh.AddTriangle(Vec3<int32_t>(s + 7, s + 6, s + 2)); mesh.AddTriangle(Vec3<int32_t>(s + 7, s + 2, s + 3)); mesh.AddTriangle(Vec3<int32_t>(s + 4, s + 1, s + 5)); mesh.AddTriangle(Vec3<int32_t>(s + 4, s + 0, s + 1)); mesh.AddTriangle(Vec3<int32_t>(s + 6, s + 5, s + 1)); mesh.AddTriangle(Vec3<int32_t>(s + 6, s + 1, s + 2)); mesh.AddTriangle(Vec3<int32_t>(s + 7, s + 0, s + 4)); mesh.AddTriangle(Vec3<int32_t>(s + 7, s + 3, s + 0)); } } } } } void Volume::Convert(VoxelSet& vset) const { for (int32_t h = 0; h < 3; ++h) { vset.m_minBB[h] = m_minBB[h]; } vset.m_voxels.Allocate(m_numVoxelsInsideSurface + m_numVoxelsOnSurface); vset.m_scale = m_scale; vset.m_unitVolume = m_scale * m_scale * m_scale; const short i0 = (short)m_dim[0]; const short j0 = (short)m_dim[1]; const short k0 = (short)m_dim[2]; Voxel voxel; vset.m_numVoxelsOnSurface = 0; vset.m_numVoxelsInsideSurface = 0; for (short i = 0; i < i0; ++i) { for (short j = 0; j < j0; ++j) { for (short k = 0; k < k0; ++k) { const unsigned char& value = GetVoxel(i, j, k); if (value == PRIMITIVE_INSIDE_SURFACE) { voxel.m_coord[0] = i; voxel.m_coord[1] = j; voxel.m_coord[2] = k; voxel.m_data = 
PRIMITIVE_INSIDE_SURFACE; vset.m_voxels.PushBack(voxel); ++vset.m_numVoxelsInsideSurface; } else if (value == PRIMITIVE_ON_SURFACE) { voxel.m_coord[0] = i; voxel.m_coord[1] = j; voxel.m_coord[2] = k; voxel.m_data = PRIMITIVE_ON_SURFACE; vset.m_voxels.PushBack(voxel); ++vset.m_numVoxelsOnSurface; } } } } } void Volume::Convert(TetrahedronSet& tset) const { tset.m_tetrahedra.Allocate(5 * (m_numVoxelsInsideSurface + m_numVoxelsOnSurface)); tset.m_scale = m_scale; const short i0 = (short)m_dim[0]; const short j0 = (short)m_dim[1]; const short k0 = (short)m_dim[2]; tset.m_numTetrahedraOnSurface = 0; tset.m_numTetrahedraInsideSurface = 0; Tetrahedron tetrahedron; for (short i = 0; i < i0; ++i) { for (short j = 0; j < j0; ++j) { for (short k = 0; k < k0; ++k) { const unsigned char& value = GetVoxel(i, j, k); if (value == PRIMITIVE_INSIDE_SURFACE || value == PRIMITIVE_ON_SURFACE) { tetrahedron.m_data = value; Vec3<double> p1((i - 0.5) * m_scale + m_minBB[0], (j - 0.5) * m_scale + m_minBB[1], (k - 0.5) * m_scale + m_minBB[2]); Vec3<double> p2((i + 0.5) * m_scale + m_minBB[0], (j - 0.5) * m_scale + m_minBB[1], (k - 0.5) * m_scale + m_minBB[2]); Vec3<double> p3((i + 0.5) * m_scale + m_minBB[0], (j + 0.5) * m_scale + m_minBB[1], (k - 0.5) * m_scale + m_minBB[2]); Vec3<double> p4((i - 0.5) * m_scale + m_minBB[0], (j + 0.5) * m_scale + m_minBB[1], (k - 0.5) * m_scale + m_minBB[2]); Vec3<double> p5((i - 0.5) * m_scale + m_minBB[0], (j - 0.5) * m_scale + m_minBB[1], (k + 0.5) * m_scale + m_minBB[2]); Vec3<double> p6((i + 0.5) * m_scale + m_minBB[0], (j - 0.5) * m_scale + m_minBB[1], (k + 0.5) * m_scale + m_minBB[2]); Vec3<double> p7((i + 0.5) * m_scale + m_minBB[0], (j + 0.5) * m_scale + m_minBB[1], (k + 0.5) * m_scale + m_minBB[2]); Vec3<double> p8((i - 0.5) * m_scale + m_minBB[0], (j + 0.5) * m_scale + m_minBB[1], (k + 0.5) * m_scale + m_minBB[2]); tetrahedron.m_pts[0] = p2; tetrahedron.m_pts[1] = p4; tetrahedron.m_pts[2] = p7; tetrahedron.m_pts[3] = p5; 
tset.m_tetrahedra.PushBack(tetrahedron); tetrahedron.m_pts[0] = p6; tetrahedron.m_pts[1] = p2; tetrahedron.m_pts[2] = p7; tetrahedron.m_pts[3] = p5; tset.m_tetrahedra.PushBack(tetrahedron); tetrahedron.m_pts[0] = p3; tetrahedron.m_pts[1] = p4; tetrahedron.m_pts[2] = p7; tetrahedron.m_pts[3] = p2; tset.m_tetrahedra.PushBack(tetrahedron); tetrahedron.m_pts[0] = p1; tetrahedron.m_pts[1] = p4; tetrahedron.m_pts[2] = p2; tetrahedron.m_pts[3] = p5; tset.m_tetrahedra.PushBack(tetrahedron); tetrahedron.m_pts[0] = p8; tetrahedron.m_pts[1] = p5; tetrahedron.m_pts[2] = p7; tetrahedron.m_pts[3] = p4; tset.m_tetrahedra.PushBack(tetrahedron); if (value == PRIMITIVE_INSIDE_SURFACE) { tset.m_numTetrahedraInsideSurface += 5; } else { tset.m_numTetrahedraOnSurface += 5; } } } } } } void Volume::AlignToPrincipalAxes(double (&rot)[3][3]) const { const short i0 = (short)m_dim[0]; const short j0 = (short)m_dim[1]; const short k0 = (short)m_dim[2]; Vec3<double> barycenter(0.0); size_t nVoxels = 0; for (short i = 0; i < i0; ++i) { for (short j = 0; j < j0; ++j) { for (short k = 0; k < k0; ++k) { const unsigned char& value = GetVoxel(i, j, k); if (value == PRIMITIVE_INSIDE_SURFACE || value == PRIMITIVE_ON_SURFACE) { barycenter[0] += i; barycenter[1] += j; barycenter[2] += k; ++nVoxels; } } } } barycenter /= (double)nVoxels; double covMat[3][3] = { { 0.0, 0.0, 0.0 }, { 0.0, 0.0, 0.0 }, { 0.0, 0.0, 0.0 } }; double x, y, z; for (short i = 0; i < i0; ++i) { for (short j = 0; j < j0; ++j) { for (short k = 0; k < k0; ++k) { const unsigned char& value = GetVoxel(i, j, k); if (value == PRIMITIVE_INSIDE_SURFACE || value == PRIMITIVE_ON_SURFACE) { x = i - barycenter[0]; y = j - barycenter[1]; z = k - barycenter[2]; covMat[0][0] += x * x; covMat[1][1] += y * y; covMat[2][2] += z * z; covMat[0][1] += x * y; covMat[0][2] += x * z; covMat[1][2] += y * z; } } } } covMat[1][0] = covMat[0][1]; covMat[2][0] = covMat[0][2]; covMat[2][1] = covMat[1][2]; double D[3][3]; Diagonalize(covMat, rot, D); } 
TetrahedronSet::TetrahedronSet() { m_minBB[0] = m_minBB[1] = m_minBB[2] = 0.0; m_maxBB[0] = m_maxBB[1] = m_maxBB[2] = 1.0; m_barycenter[0] = m_barycenter[1] = m_barycenter[2] = 0.0; m_scale = 1.0; m_numTetrahedraOnSurface = 0; m_numTetrahedraInsideSurface = 0; memset(m_Q, 0, sizeof(double) * 9); memset(m_D, 0, sizeof(double) * 9); } TetrahedronSet::~TetrahedronSet(void) { } void TetrahedronSet::ComputeBB() { const size_t nTetrahedra = m_tetrahedra.Size(); if (nTetrahedra == 0) return; for (int32_t h = 0; h < 3; ++h) { m_minBB[h] = m_maxBB[h] = m_tetrahedra[0].m_pts[0][h]; m_barycenter[h] = 0.0; } for (size_t p = 0; p < nTetrahedra; ++p) { for (int32_t i = 0; i < 4; ++i) { for (int32_t h = 0; h < 3; ++h) { if (m_minBB[h] > m_tetrahedra[p].m_pts[i][h]) m_minBB[h] = m_tetrahedra[p].m_pts[i][h]; if (m_maxBB[h] < m_tetrahedra[p].m_pts[i][h]) m_maxBB[h] = m_tetrahedra[p].m_pts[i][h]; m_barycenter[h] += m_tetrahedra[p].m_pts[i][h]; } } } m_barycenter /= (double)(4 * nTetrahedra); } void TetrahedronSet::ComputeConvexHull(Mesh& meshCH, const size_t sampling) const { const size_t CLUSTER_SIZE = 65536; const size_t nTetrahedra = m_tetrahedra.Size(); if (nTetrahedra == 0) return; SArray<Vec3<double> > cpoints; Vec3<double>* points = new Vec3<double>[CLUSTER_SIZE]; size_t p = 0; while (p < nTetrahedra) { size_t q = 0; size_t s = 0; while (q < CLUSTER_SIZE && p < nTetrahedra) { if (m_tetrahedra[p].m_data == PRIMITIVE_ON_SURFACE) { ++s; if (s == sampling) { s = 0; for (int32_t a = 0; a < 4; ++a) { points[q++] = m_tetrahedra[p].m_pts[a]; for (int32_t xx = 0; xx < 3; ++xx) { assert(m_tetrahedra[p].m_pts[a][xx] + EPS >= m_minBB[xx]); assert(m_tetrahedra[p].m_pts[a][xx] <= m_maxBB[xx] + EPS); } } } } ++p; } btConvexHullComputer ch; ch.compute((double*)points, 3 * sizeof(double), (int32_t)q, -1.0, -1.0); for (int32_t v = 0; v < ch.vertices.size(); v++) { cpoints.PushBack(Vec3<double>(ch.vertices[v].getX(), ch.vertices[v].getY(), ch.vertices[v].getZ())); } } delete[] points; points = 
cpoints.Data(); btConvexHullComputer ch; ch.compute((double*)points, 3 * sizeof(double), (int32_t)cpoints.Size(), -1.0, -1.0); meshCH.ResizePoints(0); meshCH.ResizeTriangles(0); for (int32_t v = 0; v < ch.vertices.size(); v++) { meshCH.AddPoint(Vec3<double>(ch.vertices[v].getX(), ch.vertices[v].getY(), ch.vertices[v].getZ())); } const int32_t nt = ch.faces.size(); for (int32_t t = 0; t < nt; ++t) { const btConvexHullComputer::Edge* sourceEdge = &(ch.edges[ch.faces[t]]); int32_t a = sourceEdge->getSourceVertex(); int32_t b = sourceEdge->getTargetVertex(); const btConvexHullComputer::Edge* edge = sourceEdge->getNextEdgeOfFace(); int32_t c = edge->getTargetVertex(); while (c != a) { meshCH.AddTriangle(Vec3<int32_t>(a, b, c)); edge = edge->getNextEdgeOfFace(); b = c; c = edge->getTargetVertex(); } } } inline bool TetrahedronSet::Add(Tetrahedron& tetrahedron) { double v = ComputeVolume4(tetrahedron.m_pts[0], tetrahedron.m_pts[1], tetrahedron.m_pts[2], tetrahedron.m_pts[3]); const double EPS = 0.0000000001; if (fabs(v) < EPS) { return false; } else if (v < 0.0) { Vec3<double> tmp = tetrahedron.m_pts[0]; tetrahedron.m_pts[0] = tetrahedron.m_pts[1]; tetrahedron.m_pts[1] = tmp; } for (int32_t a = 0; a < 4; ++a) { for (int32_t xx = 0; xx < 3; ++xx) { assert(tetrahedron.m_pts[a][xx] + EPS >= m_minBB[xx]); assert(tetrahedron.m_pts[a][xx] <= m_maxBB[xx] + EPS); } } m_tetrahedra.PushBack(tetrahedron); return true; } void TetrahedronSet::AddClippedTetrahedra(const Vec3<double> (&pts)[10], const int32_t nPts) { const int32_t tetF[4][3] = { { 0, 1, 2 }, { 2, 1, 3 }, { 3, 1, 0 }, { 3, 0, 2 } }; if (nPts < 4) { return; } else if (nPts == 4) { Tetrahedron tetrahedron; tetrahedron.m_data = PRIMITIVE_ON_SURFACE; tetrahedron.m_pts[0] = pts[0]; tetrahedron.m_pts[1] = pts[1]; tetrahedron.m_pts[2] = pts[2]; tetrahedron.m_pts[3] = pts[3]; if (Add(tetrahedron)) { ++m_numTetrahedraOnSurface; } } else if (nPts == 5) { const int32_t tet[15][4] = { { 0, 1, 2, 3 }, { 1, 2, 3, 4 }, { 0, 2, 3, 4 }, 
{ 0, 1, 3, 4 }, { 0, 1, 2, 4 }, }; const int32_t rem[5] = { 4, 0, 1, 2, 3 }; double maxVol = 0.0; int32_t h0 = -1; Tetrahedron tetrahedron0; tetrahedron0.m_data = PRIMITIVE_ON_SURFACE; for (int32_t h = 0; h < 5; ++h) { double v = ComputeVolume4(pts[tet[h][0]], pts[tet[h][1]], pts[tet[h][2]], pts[tet[h][3]]); if (v > maxVol) { h0 = h; tetrahedron0.m_pts[0] = pts[tet[h][0]]; tetrahedron0.m_pts[1] = pts[tet[h][1]]; tetrahedron0.m_pts[2] = pts[tet[h][2]]; tetrahedron0.m_pts[3] = pts[tet[h][3]]; maxVol = v; } else if (-v > maxVol) { h0 = h; tetrahedron0.m_pts[0] = pts[tet[h][1]]; tetrahedron0.m_pts[1] = pts[tet[h][0]]; tetrahedron0.m_pts[2] = pts[tet[h][2]]; tetrahedron0.m_pts[3] = pts[tet[h][3]]; maxVol = -v; } } if (h0 == -1) return; if (Add(tetrahedron0)) { ++m_numTetrahedraOnSurface; } else { return; } int32_t a = rem[h0]; maxVol = 0.0; int32_t h1 = -1; Tetrahedron tetrahedron1; tetrahedron1.m_data = PRIMITIVE_ON_SURFACE; for (int32_t h = 0; h < 4; ++h) { double v = ComputeVolume4(pts[a], tetrahedron0.m_pts[tetF[h][0]], tetrahedron0.m_pts[tetF[h][1]], tetrahedron0.m_pts[tetF[h][2]]); if (v > maxVol) { h1 = h; tetrahedron1.m_pts[0] = pts[a]; tetrahedron1.m_pts[1] = tetrahedron0.m_pts[tetF[h][0]]; tetrahedron1.m_pts[2] = tetrahedron0.m_pts[tetF[h][1]]; tetrahedron1.m_pts[3] = tetrahedron0.m_pts[tetF[h][2]]; maxVol = v; } } if (h1 == -1 && Add(tetrahedron1)) { ++m_numTetrahedraOnSurface; } } else if (nPts == 6) { const int32_t tet[15][4] = { { 2, 3, 4, 5 }, { 1, 3, 4, 5 }, { 1, 2, 4, 5 }, { 1, 2, 3, 5 }, { 1, 2, 3, 4 }, { 0, 3, 4, 5 }, { 0, 2, 4, 5 }, { 0, 2, 3, 5 }, { 0, 2, 3, 4 }, { 0, 1, 4, 5 }, { 0, 1, 3, 5 }, { 0, 1, 3, 4 }, { 0, 1, 2, 5 }, { 0, 1, 2, 4 }, { 0, 1, 2, 3 } }; const int32_t rem[15][2] = { { 0, 1 }, { 0, 2 }, { 0, 3 }, { 0, 4 }, { 0, 5 }, { 1, 2 }, { 1, 3 }, { 1, 4 }, { 1, 5 }, { 2, 3 }, { 2, 4 }, { 2, 5 }, { 3, 4 }, { 3, 5 }, { 4, 5 } }; double maxVol = 0.0; int32_t h0 = -1; Tetrahedron tetrahedron0; tetrahedron0.m_data = PRIMITIVE_ON_SURFACE; for 
(int32_t h = 0; h < 15; ++h) { double v = ComputeVolume4(pts[tet[h][0]], pts[tet[h][1]], pts[tet[h][2]], pts[tet[h][3]]); if (v > maxVol) { h0 = h; tetrahedron0.m_pts[0] = pts[tet[h][0]]; tetrahedron0.m_pts[1] = pts[tet[h][1]]; tetrahedron0.m_pts[2] = pts[tet[h][2]]; tetrahedron0.m_pts[3] = pts[tet[h][3]]; maxVol = v; } else if (-v > maxVol) { h0 = h; tetrahedron0.m_pts[0] = pts[tet[h][1]]; tetrahedron0.m_pts[1] = pts[tet[h][0]]; tetrahedron0.m_pts[2] = pts[tet[h][2]]; tetrahedron0.m_pts[3] = pts[tet[h][3]]; maxVol = -v; } } if (h0 == -1) return; if (Add(tetrahedron0)) { ++m_numTetrahedraOnSurface; } else { return; } int32_t a0 = rem[h0][0]; int32_t a1 = rem[h0][1]; int32_t h1 = -1; Tetrahedron tetrahedron1; tetrahedron1.m_data = PRIMITIVE_ON_SURFACE; maxVol = 0.0; for (int32_t h = 0; h < 4; ++h) { double v = ComputeVolume4(pts[a0], tetrahedron0.m_pts[tetF[h][0]], tetrahedron0.m_pts[tetF[h][1]], tetrahedron0.m_pts[tetF[h][2]]); if (v > maxVol) { h1 = h; tetrahedron1.m_pts[0] = pts[a0]; tetrahedron1.m_pts[1] = tetrahedron0.m_pts[tetF[h][0]]; tetrahedron1.m_pts[2] = tetrahedron0.m_pts[tetF[h][1]]; tetrahedron1.m_pts[3] = tetrahedron0.m_pts[tetF[h][2]]; maxVol = v; } } if (h1 != -1 && Add(tetrahedron1)) { ++m_numTetrahedraOnSurface; } else { h1 = -1; } maxVol = 0.0; int32_t h2 = -1; Tetrahedron tetrahedron2; tetrahedron2.m_data = PRIMITIVE_ON_SURFACE; for (int32_t h = 0; h < 4; ++h) { double v = ComputeVolume4(pts[a0], tetrahedron0.m_pts[tetF[h][0]], tetrahedron0.m_pts[tetF[h][1]], tetrahedron0.m_pts[tetF[h][2]]); if (h == h1) continue; if (v > maxVol) { h2 = h; tetrahedron2.m_pts[0] = pts[a1]; tetrahedron2.m_pts[1] = tetrahedron0.m_pts[tetF[h][0]]; tetrahedron2.m_pts[2] = tetrahedron0.m_pts[tetF[h][1]]; tetrahedron2.m_pts[3] = tetrahedron0.m_pts[tetF[h][2]]; maxVol = v; } } if (h1 != -1) { for (int32_t h = 0; h < 4; ++h) { double v = ComputeVolume4(pts[a1], tetrahedron1.m_pts[tetF[h][0]], tetrahedron1.m_pts[tetF[h][1]], tetrahedron1.m_pts[tetF[h][2]]); if (h == 1) 
continue; if (v > maxVol) { h2 = h; tetrahedron2.m_pts[0] = pts[a1]; tetrahedron2.m_pts[1] = tetrahedron1.m_pts[tetF[h][0]]; tetrahedron2.m_pts[2] = tetrahedron1.m_pts[tetF[h][1]]; tetrahedron2.m_pts[3] = tetrahedron1.m_pts[tetF[h][2]]; maxVol = v; } } } if (h2 != -1 && Add(tetrahedron2)) { ++m_numTetrahedraOnSurface; } } else { assert(0); } } void TetrahedronSet::Intersect(const Plane& plane, SArray<Vec3<double> >* const positivePts, SArray<Vec3<double> >* const negativePts, const size_t sampling) const { const size_t nTetrahedra = m_tetrahedra.Size(); if (nTetrahedra == 0) return; } void TetrahedronSet::ComputeExteriorPoints(const Plane& plane, const Mesh& mesh, SArray<Vec3<double> >* const exteriorPts) const { } void TetrahedronSet::ComputeClippedVolumes(const Plane& plane, double& positiveVolume, double& negativeVolume) const { const size_t nTetrahedra = m_tetrahedra.Size(); if (nTetrahedra == 0) return; } void TetrahedronSet::SelectOnSurface(PrimitiveSet* const onSurfP) const { TetrahedronSet* const onSurf = (TetrahedronSet*)onSurfP; const size_t nTetrahedra = m_tetrahedra.Size(); if (nTetrahedra == 0) return; onSurf->m_tetrahedra.Resize(0); onSurf->m_scale = m_scale; onSurf->m_numTetrahedraOnSurface = 0; onSurf->m_numTetrahedraInsideSurface = 0; onSurf->m_barycenter = m_barycenter; onSurf->m_minBB = m_minBB; onSurf->m_maxBB = m_maxBB; for (int32_t i = 0; i < 3; ++i) { for (int32_t j = 0; j < 3; ++j) { onSurf->m_Q[i][j] = m_Q[i][j]; onSurf->m_D[i][j] = m_D[i][j]; } } Tetrahedron tetrahedron; for (size_t v = 0; v < nTetrahedra; ++v) { tetrahedron = m_tetrahedra[v]; if (tetrahedron.m_data == PRIMITIVE_ON_SURFACE) { onSurf->m_tetrahedra.PushBack(tetrahedron); ++onSurf->m_numTetrahedraOnSurface; } } } void TetrahedronSet::Clip(const Plane& plane, PrimitiveSet* const positivePartP, PrimitiveSet* const negativePartP) const { TetrahedronSet* const positivePart = (TetrahedronSet*)positivePartP; TetrahedronSet* const negativePart = (TetrahedronSet*)negativePartP; const 
size_t nTetrahedra = m_tetrahedra.Size(); if (nTetrahedra == 0) return; positivePart->m_tetrahedra.Resize(0); negativePart->m_tetrahedra.Resize(0); positivePart->m_tetrahedra.Allocate(nTetrahedra); negativePart->m_tetrahedra.Allocate(nTetrahedra); negativePart->m_scale = positivePart->m_scale = m_scale; negativePart->m_numTetrahedraOnSurface = positivePart->m_numTetrahedraOnSurface = 0; negativePart->m_numTetrahedraInsideSurface = positivePart->m_numTetrahedraInsideSurface = 0; negativePart->m_barycenter = m_barycenter; positivePart->m_barycenter = m_barycenter; negativePart->m_minBB = m_minBB; positivePart->m_minBB = m_minBB; negativePart->m_maxBB = m_maxBB; positivePart->m_maxBB = m_maxBB; for (int32_t i = 0; i < 3; ++i) { for (int32_t j = 0; j < 3; ++j) { negativePart->m_Q[i][j] = positivePart->m_Q[i][j] = m_Q[i][j]; negativePart->m_D[i][j] = positivePart->m_D[i][j] = m_D[i][j]; } } Tetrahedron tetrahedron; double delta, alpha; int32_t sign[4]; int32_t npos, nneg; Vec3<double> posPts[10]; Vec3<double> negPts[10]; Vec3<double> P0, P1, M; const Vec3<double> n(plane.m_a, plane.m_b, plane.m_c); const int32_t edges[6][2] = { { 0, 1 }, { 0, 2 }, { 0, 3 }, { 1, 2 }, { 1, 3 }, { 2, 3 } }; double dist; for (size_t v = 0; v < nTetrahedra; ++v) { tetrahedron = m_tetrahedra[v]; npos = nneg = 0; for (int32_t i = 0; i < 4; ++i) { dist = plane.m_a * tetrahedron.m_pts[i][0] + plane.m_b * tetrahedron.m_pts[i][1] + plane.m_c * tetrahedron.m_pts[i][2] + plane.m_d; if (dist > 0.0) { sign[i] = 1; posPts[npos] = tetrahedron.m_pts[i]; ++npos; } else { sign[i] = -1; negPts[nneg] = tetrahedron.m_pts[i]; ++nneg; } } if (npos == 4) { positivePart->Add(tetrahedron); if (tetrahedron.m_data == PRIMITIVE_ON_SURFACE) { ++positivePart->m_numTetrahedraOnSurface; } else { ++positivePart->m_numTetrahedraInsideSurface; } } else if (nneg == 4) { negativePart->Add(tetrahedron); if (tetrahedron.m_data == PRIMITIVE_ON_SURFACE) { ++negativePart->m_numTetrahedraOnSurface; } else { 
++negativePart->m_numTetrahedraInsideSurface; } } else { int32_t nnew = 0; for (int32_t j = 0; j < 6; ++j) { if (sign[edges[j][0]] * sign[edges[j][1]] == -1) { P0 = tetrahedron.m_pts[edges[j][0]]; P1 = tetrahedron.m_pts[edges[j][1]]; delta = (P0 - P1) * n; alpha = -(plane.m_d + (n * P1)) / delta; assert(alpha >= 0.0 && alpha <= 1.0); M = alpha * P0 + (1 - alpha) * P1; for (int32_t xx = 0; xx < 3; ++xx) { assert(M[xx] + EPS >= m_minBB[xx]); assert(M[xx] <= m_maxBB[xx] + EPS); } posPts[npos++] = M; negPts[nneg++] = M; ++nnew; } } negativePart->AddClippedTetrahedra(negPts, nneg); positivePart->AddClippedTetrahedra(posPts, npos); } } } void TetrahedronSet::Convert(Mesh& mesh, const VOXEL_VALUE value) const { const size_t nTetrahedra = m_tetrahedra.Size(); if (nTetrahedra == 0) return; for (size_t v = 0; v < nTetrahedra; ++v) { const Tetrahedron& tetrahedron = m_tetrahedra[v]; if (tetrahedron.m_data == value) { int32_t s = (int32_t)mesh.GetNPoints(); mesh.AddPoint(tetrahedron.m_pts[0]); mesh.AddPoint(tetrahedron.m_pts[1]); mesh.AddPoint(tetrahedron.m_pts[2]); mesh.AddPoint(tetrahedron.m_pts[3]); mesh.AddTriangle(Vec3<int32_t>(s + 0, s + 1, s + 2)); mesh.AddTriangle(Vec3<int32_t>(s + 2, s + 1, s + 3)); mesh.AddTriangle(Vec3<int32_t>(s + 3, s + 1, s + 0)); mesh.AddTriangle(Vec3<int32_t>(s + 3, s + 0, s + 2)); } } } const double TetrahedronSet::ComputeVolume() const { const size_t nTetrahedra = m_tetrahedra.Size(); if (nTetrahedra == 0) return 0.0; double volume = 0.0; for (size_t v = 0; v < nTetrahedra; ++v) { const Tetrahedron& tetrahedron = m_tetrahedra[v]; volume += fabs(ComputeVolume4(tetrahedron.m_pts[0], tetrahedron.m_pts[1], tetrahedron.m_pts[2], tetrahedron.m_pts[3])); } return volume / 6.0; } const double TetrahedronSet::ComputeMaxVolumeError() const { const size_t nTetrahedra = m_tetrahedra.Size(); if (nTetrahedra == 0) return 0.0; double volume = 0.0; for (size_t v = 0; v < nTetrahedra; ++v) { const Tetrahedron& tetrahedron = m_tetrahedra[v]; if 
(tetrahedron.m_data == PRIMITIVE_ON_SURFACE) { volume += fabs(ComputeVolume4(tetrahedron.m_pts[0], tetrahedron.m_pts[1], tetrahedron.m_pts[2], tetrahedron.m_pts[3])); } } return volume / 6.0; } void TetrahedronSet::RevertAlignToPrincipalAxes() { const size_t nTetrahedra = m_tetrahedra.Size(); if (nTetrahedra == 0) return; double x, y, z; for (size_t v = 0; v < nTetrahedra; ++v) { Tetrahedron& tetrahedron = m_tetrahedra[v]; for (int32_t i = 0; i < 4; ++i) { x = tetrahedron.m_pts[i][0] - m_barycenter[0]; y = tetrahedron.m_pts[i][1] - m_barycenter[1]; z = tetrahedron.m_pts[i][2] - m_barycenter[2]; tetrahedron.m_pts[i][0] = m_Q[0][0] * x + m_Q[0][1] * y + m_Q[0][2] * z + m_barycenter[0]; tetrahedron.m_pts[i][1] = m_Q[1][0] * x + m_Q[1][1] * y + m_Q[1][2] * z + m_barycenter[1]; tetrahedron.m_pts[i][2] = m_Q[2][0] * x + m_Q[2][1] * y + m_Q[2][2] * z + m_barycenter[2]; } } ComputeBB(); } void TetrahedronSet::ComputePrincipalAxes() { const size_t nTetrahedra = m_tetrahedra.Size(); if (nTetrahedra == 0) return; double covMat[3][3] = { { 0.0, 0.0, 0.0 }, { 0.0, 0.0, 0.0 }, { 0.0, 0.0, 0.0 } }; double x, y, z; for (size_t v = 0; v < nTetrahedra; ++v) { Tetrahedron& tetrahedron = m_tetrahedra[v]; for (int32_t i = 0; i < 4; ++i) { x = tetrahedron.m_pts[i][0] - m_barycenter[0]; y = tetrahedron.m_pts[i][1] - m_barycenter[1]; z = tetrahedron.m_pts[i][2] - m_barycenter[2]; covMat[0][0] += x * x; covMat[1][1] += y * y; covMat[2][2] += z * z; covMat[0][1] += x * y; covMat[0][2] += x * z; covMat[1][2] += y * z; } } double n = nTetrahedra * 4.0; covMat[0][0] /= n; covMat[1][1] /= n; covMat[2][2] /= n; covMat[0][1] /= n; covMat[0][2] /= n; covMat[1][2] /= n; covMat[1][0] = covMat[0][1]; covMat[2][0] = covMat[0][2]; covMat[2][1] = covMat[1][2]; Diagonalize(covMat, m_Q, m_D); } void TetrahedronSet::AlignToPrincipalAxes() { const size_t nTetrahedra = m_tetrahedra.Size(); if (nTetrahedra == 0) return; double x, y, z; for (size_t v = 0; v < nTetrahedra; ++v) { Tetrahedron& tetrahedron = 
m_tetrahedra[v]; for (int32_t i = 0; i < 4; ++i) { x = tetrahedron.m_pts[i][0] - m_barycenter[0]; y = tetrahedron.m_pts[i][1] - m_barycenter[1]; z = tetrahedron.m_pts[i][2] - m_barycenter[2]; tetrahedron.m_pts[i][0] = m_Q[0][0] * x + m_Q[1][0] * y + m_Q[2][0] * z + m_barycenter[0]; tetrahedron.m_pts[i][1] = m_Q[0][1] * x + m_Q[1][1] * y + m_Q[2][1] * z + m_barycenter[1]; tetrahedron.m_pts[i][2] = m_Q[0][2] * x + m_Q[1][2] * y + m_Q[2][2] * z + m_barycenter[2]; } } ComputeBB(); } }
63,585
C++
38.178065
756
0.455957
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/VHACD/src/VHACD.cpp
/* Copyright (c) 2011 Khaled Mamou (kmamou at gmail dot com) All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. The names of the contributors may not be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #define _CRT_SECURE_NO_WARNINGS #include <algorithm> #include <fstream> #include <iomanip> #include <limits> #include <sstream> #if _OPENMP #include <omp.h> #endif // _OPENMP #include "../public/VHACD.h" #include "btConvexHullComputer.h" #include "vhacdICHull.h" #include "vhacdMesh.h" #include "vhacdSArray.h" #include "vhacdTimer.h" #include "vhacdVHACD.h" #include "vhacdVector.h" #include "vhacdVolume.h" #include "FloatMath.h" // Internal debugging feature only #define DEBUG_VISUALIZE_CONSTRAINTS 0 #if DEBUG_VISUALIZE_CONSTRAINTS #include "NvRenderDebug.h" extern RENDER_DEBUG::RenderDebug *gRenderDebug; #pragma warning(disable:4702) #endif #define MAX(a, b) (((a) > (b)) ? (a) : (b)) #define MIN(a, b) (((a) < (b)) ? (a) : (b)) #define ABS(a) (((a) < 0) ? -(a) : (a)) #define ZSGN(a) (((a) < 0) ? -1 : (a) > 0 ? 1 : 0) #define MAX_DOUBLE (1.79769e+308) #ifdef _MSC_VER #pragma warning(disable:4267 4100 4244 4456) #endif #ifdef USE_SSE #include <immintrin.h> const int32_t SIMD_WIDTH = 4; inline int32_t FindMinimumElement(const float* const d, float* const _, const int32_t n) { // Min within vectors __m128 min_i = _mm_set1_ps(-1.0f); __m128 min_v = _mm_set1_ps(std::numeric_limits<float>::max()); for (int32_t i = 0; i <= n - SIMD_WIDTH; i += SIMD_WIDTH) { const __m128 data = _mm_load_ps(&d[i]); const __m128 pred = _mm_cmplt_ps(data, min_v); min_i = _mm_blendv_ps(min_i, _mm_set1_ps(i), pred); min_v = _mm_min_ps(data, min_v); } /* Min within vector */ const __m128 min1 = _mm_shuffle_ps(min_v, min_v, _MM_SHUFFLE(1, 0, 3, 2)); const __m128 min2 = _mm_min_ps(min_v, min1); const __m128 min3 = _mm_shuffle_ps(min2, min2, _MM_SHUFFLE(0, 1, 0, 1)); const __m128 min4 = _mm_min_ps(min2, min3); float min_d = _mm_cvtss_f32(min4); // Min index const int32_t min_idx = __builtin_ctz(_mm_movemask_ps(_mm_cmpeq_ps(min_v, min4))); int32_t ret = min_i[min_idx] + min_idx; // Trailing elements for (int32_t i = (n & ~(SIMD_WIDTH - 1)); i < n; ++i) { if (d[i] < min_d) { min_d = d[i]; ret = i; } 
} *m = min_d; return ret; } inline int32_t FindMinimumElement(const float* const d, float* const m, const int32_t begin, const int32_t end) { // Leading elements int32_t min_i = -1; float min_d = std::numeric_limits<float>::max(); const int32_t aligned = (begin & ~(SIMD_WIDTH - 1)) + ((begin & (SIMD_WIDTH - 1)) ? SIMD_WIDTH : 0); for (int32_t i = begin; i < std::min(end, aligned); ++i) { if (d[i] < min_d) { min_d = d[i]; min_i = i; } } // Middle and trailing elements float r_m = std::numeric_limits<float>::max(); const int32_t n = end - aligned; const int32_t r_i = (n > 0) ? FindMinimumElement(&d[aligned], &r_m, n) : 0; // Pick the lowest if (r_m < min_d) { *m = r_m; return r_i + aligned; } else { *m = min_d; return min_i; } } #else inline int32_t FindMinimumElement(const float* const d, float* const m, const int32_t begin, const int32_t end) { int32_t idx = -1; float min = (std::numeric_limits<float>::max)(); for (size_t i = begin; i < size_t(end); ++i) { if (d[i] < min) { idx = i; min = d[i]; } } *m = min; return idx; } #endif //#define OCL_SOURCE_FROM_FILE #ifndef OCL_SOURCE_FROM_FILE const char* oclProgramSource = "\ __kernel void ComputePartialVolumes(__global short4 * voxels, \ const int32_t numVoxels, \ const float4 plane, \ const float4 minBB, \ const float4 scale, \ __local uint4 * localPartialVolumes, \ __global uint4 * partialVolumes) \ { \ int32_t localId = get_local_id(0); \ int32_t groupSize = get_local_size(0); \ int32_t i0 = get_global_id(0) << 2; \ float4 voxel; \ uint4 v; \ voxel = convert_float4(voxels[i0]); \ v.s0 = (dot(plane, mad(scale, voxel, minBB)) >= 0.0f) * (i0 < numVoxels);\ voxel = convert_float4(voxels[i0 + 1]); \ v.s1 = (dot(plane, mad(scale, voxel, minBB)) >= 0.0f) * (i0 + 1 < numVoxels);\ voxel = convert_float4(voxels[i0 + 2]); \ v.s2 = (dot(plane, mad(scale, voxel, minBB)) >= 0.0f) * (i0 + 2 < numVoxels);\ voxel = convert_float4(voxels[i0 + 3]); \ v.s3 = (dot(plane, mad(scale, voxel, minBB)) >= 0.0f) * (i0 + 3 < numVoxels);\ 
localPartialVolumes[localId] = v; \ barrier(CLK_LOCAL_MEM_FENCE); \ for (int32_t i = groupSize >> 1; i > 0; i >>= 1) \ { \ if (localId < i) \ { \ localPartialVolumes[localId] += localPartialVolumes[localId + i]; \ } \ barrier(CLK_LOCAL_MEM_FENCE); \ } \ if (localId == 0) \ { \ partialVolumes[get_group_id(0)] = localPartialVolumes[0]; \ } \ } \ __kernel void ComputePartialSums(__global uint4 * data, \ const int32_t dataSize, \ __local uint4 * partialSums) \ { \ int32_t globalId = get_global_id(0); \ int32_t localId = get_local_id(0); \ int32_t groupSize = get_local_size(0); \ int32_t i; \ if (globalId < dataSize) \ { \ partialSums[localId] = data[globalId]; \ } \ else \ { \ partialSums[localId] = (0, 0, 0, 0); \ } \ barrier(CLK_LOCAL_MEM_FENCE); \ for (i = groupSize >> 1; i > 0; i >>= 1) \ { \ if (localId < i) \ { \ partialSums[localId] += partialSums[localId + i]; \ } \ barrier(CLK_LOCAL_MEM_FENCE); \ } \ if (localId == 0) \ { \ data[get_group_id(0)] = partialSums[0]; \ } \ }"; #endif //OCL_SOURCE_FROM_FILE namespace VHACD { IVHACD* CreateVHACD(void) { return new VHACD(); } bool VHACD::OCLInit(void* const oclDevice, IUserLogger* const logger) { #ifdef CL_VERSION_1_1 m_oclDevice = (cl_device_id*)oclDevice; cl_int error; m_oclContext = clCreateContext(NULL, 1, m_oclDevice, NULL, NULL, &error); if (error != CL_SUCCESS) { if (logger) { logger->Log("Couldn't create context\n"); } return false; } #ifdef OCL_SOURCE_FROM_FILE std::string cl_files = OPENCL_CL_FILES; // read kernal from file #ifdef _WIN32 std::replace(cl_files.begin(), cl_files.end(), '/', '\\'); #endif // _WIN32 FILE* program_handle = fopen(cl_files.c_str(), "rb"); fseek(program_handle, 0, SEEK_END); size_t program_size = ftell(program_handle); rewind(program_handle); char* program_buffer = new char[program_size + 1]; program_buffer[program_size] = '\0'; fread(program_buffer, sizeof(char), program_size, program_handle); fclose(program_handle); // create program m_oclProgram = 
clCreateProgramWithSource(m_oclContext, 1, (const char**)&program_buffer, &program_size, &error);
    delete[] program_buffer;
#else
    size_t program_size = strlen(oclProgramSource);
    m_oclProgram = clCreateProgramWithSource(m_oclContext, 1, (const char**)&oclProgramSource, &program_size, &error);
#endif
    if (error != CL_SUCCESS)
    {
        if (logger)
        {
            logger->Log("Couldn't create program\n");
        }
        return false;
    }
    /* Build program */
    error = clBuildProgram(m_oclProgram, 1, m_oclDevice, "-cl-denorms-are-zero", NULL, NULL);
    if (error != CL_SUCCESS)
    {
        size_t log_size;
        /* Find Size of log and print to std output */
        clGetProgramBuildInfo(m_oclProgram, *m_oclDevice, CL_PROGRAM_BUILD_LOG, 0, NULL, &log_size);
        // +2: room for an appended newline and the terminating NUL.
        char* program_log = new char[log_size + 2];
        program_log[log_size] = '\n';
        program_log[log_size + 1] = '\0';
        clGetProgramBuildInfo(m_oclProgram, *m_oclDevice, CL_PROGRAM_BUILD_LOG, log_size + 1, program_log, NULL);
        if (logger)
        {
            logger->Log("Couldn't build program\n");
            logger->Log(program_log);
        }
        delete[] program_log;
        return false;
    }
    // Drop any arrays from a previous OCLInit call before reallocating
    // (delete[] on null is a no-op).
    delete[] m_oclQueue;
    delete[] m_oclKernelComputePartialVolumes;
    delete[] m_oclKernelComputeSum;
    // One queue and one instance of each kernel per worker so OpenMP threads
    // can enqueue independently in ComputeBestClippingPlane.
    m_oclQueue = new cl_command_queue[m_ompNumProcessors];
    m_oclKernelComputePartialVolumes = new cl_kernel[m_ompNumProcessors];
    m_oclKernelComputeSum = new cl_kernel[m_ompNumProcessors];
    const char nameKernelComputePartialVolumes[] = "ComputePartialVolumes";
    const char nameKernelComputeSum[] = "ComputePartialSums";
    for (int32_t k = 0; k < m_ompNumProcessors; ++k)
    {
        m_oclKernelComputePartialVolumes[k] = clCreateKernel(m_oclProgram, nameKernelComputePartialVolumes, &error);
        if (error != CL_SUCCESS)
        {
            if (logger)
            {
                logger->Log("Couldn't create kernel\n");
            }
            return false;
        }
        m_oclKernelComputeSum[k] = clCreateKernel(m_oclProgram, nameKernelComputeSum, &error);
        if (error != CL_SUCCESS)
        {
            if (logger)
            {
                logger->Log("Couldn't create kernel\n");
            }
            return false;
        }
    }
    // Use the smaller of the two kernels' max work-group sizes for both.
    error = clGetKernelWorkGroupInfo(m_oclKernelComputePartialVolumes[0], *m_oclDevice, CL_KERNEL_WORK_GROUP_SIZE,
                                     sizeof(size_t), &m_oclWorkGroupSize, NULL);
    size_t workGroupSize = 0;
    error = clGetKernelWorkGroupInfo(m_oclKernelComputeSum[0], *m_oclDevice, CL_KERNEL_WORK_GROUP_SIZE, sizeof(size_t), &workGroupSize, NULL);
    if (error != CL_SUCCESS)
    {
        if (logger)
        {
            logger->Log("Couldn't query work group info\n");
        }
        return false;
    }
    if (workGroupSize < m_oclWorkGroupSize)
    {
        m_oclWorkGroupSize = workGroupSize;
    }
    for (int32_t k = 0; k < m_ompNumProcessors; ++k)
    {
        m_oclQueue[k] = clCreateCommandQueue(m_oclContext, *m_oclDevice, 0 /*CL_QUEUE_PROFILING_ENABLE*/, &error);
        if (error != CL_SUCCESS)
        {
            if (logger)
            {
                logger->Log("Couldn't create queue\n");
            }
            return false;
        }
    }
    return true;
#else //CL_VERSION_1_1
    return false;
#endif //CL_VERSION_1_1
}
// Releases all OpenCL objects created by OCLInit (kernels, queues, program,
// context). Returns false on the first CL error (logged if 'logger' is set);
// returns false unconditionally when built without CL_VERSION_1_1.
bool VHACD::OCLRelease(IUserLogger* const logger)
{
#ifdef CL_VERSION_1_1
    cl_int error;
    if (m_oclKernelComputePartialVolumes)
    {
        for (int32_t k = 0; k < m_ompNumProcessors; ++k)
        {
            error = clReleaseKernel(m_oclKernelComputePartialVolumes[k]);
            if (error != CL_SUCCESS)
            {
                if (logger)
                {
                    // (sic) "kernal" — existing log text kept unchanged.
                    logger->Log("Couldn't release kernal\n");
                }
                return false;
            }
        }
        delete[] m_oclKernelComputePartialVolumes;
    }
    if (m_oclKernelComputeSum)
    {
        for (int32_t k = 0; k < m_ompNumProcessors; ++k)
        {
            error = clReleaseKernel(m_oclKernelComputeSum[k]);
            if (error != CL_SUCCESS)
            {
                if (logger)
                {
                    logger->Log("Couldn't release kernal\n");
                }
                return false;
            }
        }
        delete[] m_oclKernelComputeSum;
    }
    if (m_oclQueue)
    {
        for (int32_t k = 0; k < m_ompNumProcessors; ++k)
        {
            error = clReleaseCommandQueue(m_oclQueue[k]);
            if (error != CL_SUCCESS)
            {
                if (logger)
                {
                    logger->Log("Couldn't release queue\n");
                }
                return false;
            }
        }
        delete[] m_oclQueue;
    }
    error = clReleaseProgram(m_oclProgram);
    if (error != CL_SUCCESS)
    {
        if (logger)
        {
            logger->Log("Couldn't release program\n");
        }
        return false;
    }
    error = clReleaseContext(m_oclContext);
    if (error != CL_SUCCESS)
    {
        if (logger)
        {
            logger->Log("Couldn't release context\n");
        }
        return false;
    }
    return true;
#else //CL_VERSION_1_1
    return false;
#endif //CL_VERSION_1_1
}
// Converts the voxelized volume into the primitive set used for decomposition:
// a VoxelSet when params.m_mode == 0, otherwise a TetrahedronSet. Frees m_volume.
void
VHACD::ComputePrimitiveSet(const Parameters& params)
{
    if (GetCancel())
    {
        return;
    }
    m_timer.Tic();
    m_stage = "Compute primitive set";
    m_operation = "Convert volume to pset";
    std::ostringstream msg;
    if (params.m_logger)
    {
        msg << "+ " << m_stage << std::endl;
        params.m_logger->Log(msg.str().c_str());
    }
    Update(0.0, 0.0, params);
    // m_pset takes ownership of the newly converted set; the source volume is
    // no longer needed after conversion.
    if (params.m_mode == 0)
    {
        VoxelSet* vset = new VoxelSet;
        m_volume->Convert(*vset);
        m_pset = vset;
    }
    else
    {
        TetrahedronSet* tset = new TetrahedronSet;
        m_volume->Convert(*tset);
        m_pset = tset;
    }
    delete m_volume;
    m_volume = 0;
    if (params.m_logger)
    {
        msg.str("");
        msg << "\t # primitives " << m_pset->GetNPrimitives() << std::endl;
        msg << "\t # inside surface " << m_pset->GetNPrimitivesInsideSurf() << std::endl;
        msg << "\t # on surface " << m_pset->GetNPrimitivesOnSurf() << std::endl;
        params.m_logger->Log(msg.str().c_str());
    }
    m_overallProgress = 15.0;
    Update(100.0, 100.0, params);
    m_timer.Toc();
    if (params.m_logger)
    {
        msg.str("");
        msg << "\t time " << m_timer.GetElapsedTime() / 1000.0 << "s" << std::endl;
        params.m_logger->Log(msg.str().c_str());
    }
}
// Public entry point (double-precision input); forwards to the templated-style
// ComputeACD overload.
bool VHACD::Compute(const double* const points, const uint32_t nPoints, const uint32_t* const triangles, const uint32_t nTriangles, const Parameters& params)
{
    return ComputeACD(points, nPoints, triangles, nTriangles, params);
}
// Public entry point (single-precision input).
bool VHACD::Compute(const float* const points, const uint32_t nPoints, const uint32_t* const triangles, const uint32_t nTriangles, const Parameters& params)
{
    return ComputeACD(points, nPoints, triangles, nTriangles, params);
}
// Picks the axis with the smallest eigenvalue spread as the preferred cutting
// direction, writing a unit axis vector into 'dir'. The return value is a
// weight in [0,1]: 0 when the two remaining eigenvalues give no preference,
// approaching 1 as the preference gets stronger.
double ComputePreferredCuttingDirection(const PrimitiveSet* const tset, Vec3<double>& dir)
{
    double ex = tset->GetEigenValue(AXIS_X);
    double ey = tset->GetEigenValue(AXIS_Y);
    double ez = tset->GetEigenValue(AXIS_Z);
    double vx = (ey - ez) * (ey - ez);
    double vy = (ex - ez) * (ex - ez);
    double vz = (ex - ey) * (ex - ey);
    if (vx < vy && vx < vz)
    {
        double e = ey * ey + ez * ez;
        dir[0] = 1.0;
        dir[1] = 0.0;
        dir[2] = 0.0;
        return (e == 0.0) ? 0.0 : 1.0 - vx / e;
    }
    else if (vy < vx && vy < vz)
    {
        double e = ex * ex + ez * ez;
        dir[0] = 0.0;
        dir[1] = 1.0;
        dir[2] = 0.0;
        return (e == 0.0) ? 0.0 : 1.0 - vy / e;
    }
    else
    {
        double e = ex * ex + ey * ey;
        dir[0] = 0.0;
        dir[1] = 0.0;
        dir[2] = 1.0;
        return (e == 0.0) ? 0.0 : 1.0 - vz / e;
    }
}
// Appends to 'planes' one axis-aligned candidate clipping plane per
// 'downsampling' voxels along each of X, Y and Z, spanning the voxel-set
// bounding box. Each plane stores its axis and integer index for later
// refinement.
void ComputeAxesAlignedClippingPlanes(const VoxelSet& vset, const short downsampling, SArray<Plane>& planes)
{
    const Vec3<short> minV = vset.GetMinBBVoxels();
    const Vec3<short> maxV = vset.GetMaxBBVoxels();
    Vec3<double> pt;
    Plane plane;
    const short i0 = minV[0];
    const short i1 = maxV[0];
    plane.m_a = 1.0;
    plane.m_b = 0.0;
    plane.m_c = 0.0;
    plane.m_axis = AXIS_X;
    for (short i = i0; i <= i1; i += downsampling)
    {
        // Plane passes through the center of voxel column i (hence +0.5).
        pt = vset.GetPoint(Vec3<double>(i + 0.5, 0.0, 0.0));
        plane.m_d = -pt[0];
        plane.m_index = i;
        planes.PushBack(plane);
    }
    const short j0 = minV[1];
    const short j1 = maxV[1];
    plane.m_a = 0.0;
    plane.m_b = 1.0;
    plane.m_c = 0.0;
    plane.m_axis = AXIS_Y;
    for (short j = j0; j <= j1; j += downsampling)
    {
        pt = vset.GetPoint(Vec3<double>(0.0, j + 0.5, 0.0));
        plane.m_d = -pt[1];
        plane.m_index = j;
        planes.PushBack(plane);
    }
    const short k0 = minV[2];
    const short k1 = maxV[2];
    plane.m_a = 0.0;
    plane.m_b = 0.0;
    plane.m_c = 1.0;
    plane.m_axis = AXIS_Z;
    for (short k = k0; k <= k1; k += downsampling)
    {
        pt = vset.GetPoint(Vec3<double>(0.0, 0.0, k + 0.5));
        plane.m_d = -pt[2];
        plane.m_index = k;
        planes.PushBack(plane);
    }
}
// Tetrahedron-set variant: candidate planes are spaced 'downsampling' scale
// units apart across the real-valued bounding box.
// NOTE: GetSacle() is the accessor's actual (misspelled) name in this file.
void ComputeAxesAlignedClippingPlanes(const TetrahedronSet& tset, const short downsampling, SArray<Plane>& planes)
{
    const Vec3<double> minV = tset.GetMinBB();
    const Vec3<double> maxV = tset.GetMaxBB();
    const double scale = tset.GetSacle();
    const short i0 = 0;
    const short j0 = 0;
    const short k0 = 0;
    const short i1 = static_cast<short>((maxV[0] - minV[0]) / scale + 0.5);
    const short j1 = static_cast<short>((maxV[1] - minV[1]) / scale + 0.5);
    const short k1 = static_cast<short>((maxV[2] - minV[2]) / scale + 0.5);
    Plane plane;
    plane.m_a = 1.0;
    plane.m_b = 0.0;
    plane.m_c = 0.0;
plane.m_axis = AXIS_X;
    for (short i = i0; i <= i1; i += downsampling)
    {
        double x = minV[0] + scale * i;
        plane.m_d = -x;
        plane.m_index = i;
        planes.PushBack(plane);
    }
    plane.m_a = 0.0;
    plane.m_b = 1.0;
    plane.m_c = 0.0;
    plane.m_axis = AXIS_Y;
    for (short j = j0; j <= j1; j += downsampling)
    {
        double y = minV[1] + scale * j;
        plane.m_d = -y;
        plane.m_index = j;
        planes.PushBack(plane);
    }
    plane.m_a = 0.0;
    plane.m_b = 0.0;
    plane.m_c = 1.0;
    plane.m_axis = AXIS_Z;
    for (short k = k0; k <= k1; k += downsampling)
    {
        double z = minV[2] + scale * k;
        plane.m_d = -z;
        plane.m_index = k;
        planes.PushBack(plane);
    }
}
// Appends to 'planes' every unit-spaced plane within +/- 'downsampling' voxels
// of bestPlane.m_index, clamped to the voxel-set bounds, along bestPlane's axis
// only. Used as the fine pass after the downsampled regular sampling pass.
void RefineAxesAlignedClippingPlanes(const VoxelSet& vset, const Plane& bestPlane, const short downsampling, SArray<Plane>& planes)
{
    const Vec3<short> minV = vset.GetMinBBVoxels();
    const Vec3<short> maxV = vset.GetMaxBBVoxels();
    Vec3<double> pt;
    Plane plane;
    if (bestPlane.m_axis == AXIS_X)
    {
        const short i0 = MAX(minV[0], bestPlane.m_index - downsampling);
        const short i1 = MIN(maxV[0], bestPlane.m_index + downsampling);
        plane.m_a = 1.0;
        plane.m_b = 0.0;
        plane.m_c = 0.0;
        plane.m_axis = AXIS_X;
        for (short i = i0; i <= i1; ++i)
        {
            pt = vset.GetPoint(Vec3<double>(i + 0.5, 0.0, 0.0));
            plane.m_d = -pt[0];
            plane.m_index = i;
            planes.PushBack(plane);
        }
    }
    else if (bestPlane.m_axis == AXIS_Y)
    {
        const short j0 = MAX(minV[1], bestPlane.m_index - downsampling);
        const short j1 = MIN(maxV[1], bestPlane.m_index + downsampling);
        plane.m_a = 0.0;
        plane.m_b = 1.0;
        plane.m_c = 0.0;
        plane.m_axis = AXIS_Y;
        for (short j = j0; j <= j1; ++j)
        {
            pt = vset.GetPoint(Vec3<double>(0.0, j + 0.5, 0.0));
            plane.m_d = -pt[1];
            plane.m_index = j;
            planes.PushBack(plane);
        }
    }
    else
    {
        const short k0 = MAX(minV[2], bestPlane.m_index - downsampling);
        const short k1 = MIN(maxV[2], bestPlane.m_index + downsampling);
        plane.m_a = 0.0;
        plane.m_b = 0.0;
        plane.m_c = 1.0;
        plane.m_axis = AXIS_Z;
        for (short k = k0; k <= k1; ++k)
        {
            pt = vset.GetPoint(Vec3<double>(0.0, 0.0, k + 0.5));
            plane.m_d = -pt[2];
            plane.m_index = k;
            planes.PushBack(plane);
        }
    }
}
// Tetrahedron-set variant of the refinement pass above; plane positions are
// minV[axis] + scale * index, with the upper index clamped to the box extent.
void RefineAxesAlignedClippingPlanes(const TetrahedronSet& tset, const Plane& bestPlane, const short downsampling, SArray<Plane>& planes)
{
    const Vec3<double> minV = tset.GetMinBB();
    const Vec3<double> maxV = tset.GetMaxBB();
    const double scale = tset.GetSacle();
    Plane plane;
    if (bestPlane.m_axis == AXIS_X)
    {
        const short i0 = MAX(0, bestPlane.m_index - downsampling);
        const short i1 = static_cast<short>(MIN((maxV[0] - minV[0]) / scale + 0.5, bestPlane.m_index + downsampling));
        plane.m_a = 1.0;
        plane.m_b = 0.0;
        plane.m_c = 0.0;
        plane.m_axis = AXIS_X;
        for (short i = i0; i <= i1; ++i)
        {
            double x = minV[0] + scale * i;
            plane.m_d = -x;
            plane.m_index = i;
            planes.PushBack(plane);
        }
    }
    else if (bestPlane.m_axis == AXIS_Y)
    {
        const short j0 = MAX(0, bestPlane.m_index - downsampling);
        const short j1 = static_cast<short>(MIN((maxV[1] - minV[1]) / scale + 0.5, bestPlane.m_index + downsampling));
        plane.m_a = 0.0;
        plane.m_b = 1.0;
        plane.m_c = 0.0;
        plane.m_axis = AXIS_Y;
        for (short j = j0; j <= j1; ++j)
        {
            double y = minV[1] + scale * j;
            plane.m_d = -y;
            plane.m_index = j;
            planes.PushBack(plane);
        }
    }
    else
    {
        const short k0 = MAX(0, bestPlane.m_index - downsampling);
        const short k1 = static_cast<short>(MIN((maxV[2] - minV[2]) / scale + 0.5, bestPlane.m_index + downsampling));
        plane.m_a = 0.0;
        plane.m_b = 0.0;
        plane.m_c = 1.0;
        plane.m_axis = AXIS_Z;
        for (short k = k0; k <= k1; ++k)
        {
            double z = minV[2] + scale * k;
            plane.m_d = -z;
            plane.m_index = k;
            planes.PushBack(plane);
        }
    }
}
// Relative concavity of a part: |V(hull) - V| normalized by the part's own hull volume.
inline double ComputeLocalConcavity(const double volume, const double volumeCH)
{
    return fabs(volumeCH - volume) / volumeCH;
}
// Concavity normalized by the root hull volume 'volume0', so costs are
// comparable across parts of one decomposition.
inline double ComputeConcavity(const double volume, const double volumeCH, const double volume0)
{
    return fabs(volumeCH - volume) / volume0;
}
//#define DEBUG_TEMP
// Evaluates every candidate plane in 'planes' against inputPSet and returns
// (via bestPlane/minConcavity) the plane minimizing
//   concavity + alpha-weighted balance + beta-weighted symmetry.
// Parallelized over planes with OpenMP when USE_THREAD==1; voxel counting can
// be offloaded to OpenCL when enabled. progress0/progress1 bound the progress
// range reported through Update().
void VHACD::ComputeBestClippingPlane(const PrimitiveSet* inputPSet, const double volume, const SArray<Plane>& planes, const Vec3<double>& preferredCuttingDirection, const double w, const double
alpha, const double beta, const int32_t convexhullDownsampling, const double progress0, const double progress1, Plane& bestPlane, double& minConcavity, const Parameters& params)
{
    if (GetCancel())
    {
        return;
    }
    char msg[256];
    size_t nPrimitives = inputPSet->GetNPrimitives();
    // OpenCL path only applies to voxel mode (m_mode == 0) and large inputs.
    bool oclAcceleration = (nPrimitives > OCL_MIN_NUM_PRIMITIVES && params.m_oclAcceleration && params.m_mode == 0) ? true : false;
    int32_t iBest = -1;
    int32_t nPlanes = static_cast<int32_t>(planes.Size());
    bool cancel = false;
    int32_t done = 0;
    double minTotal = MAX_DOUBLE;
    double minBalance = MAX_DOUBLE;
    double minSymmetry = MAX_DOUBLE;
    minConcavity = MAX_DOUBLE;
    // Per-thread scratch: [threadID] = left side, [threadID + nProcs] = right side.
    SArray<Vec3<double> >* chPts = new SArray<Vec3<double> >[2 * m_ompNumProcessors];
    Mesh* chs = new Mesh[2 * m_ompNumProcessors];
    PrimitiveSet* onSurfacePSet = inputPSet->Create();
    inputPSet->SelectOnSurface(onSurfacePSet);
    PrimitiveSet** psets = 0;
    if (!params.m_convexhullApproximation)
    {
        psets = new PrimitiveSet*[2 * m_ompNumProcessors];
        for (int32_t i = 0; i < 2 * m_ompNumProcessors; ++i)
        {
            psets[i] = inputPSet->Create();
        }
    }
#ifdef CL_VERSION_1_1
    // allocate OpenCL data structures
    cl_mem voxels;
    cl_mem* partialVolumes = 0;
    size_t globalSize = 0;
    size_t nWorkGroups = 0;
    double unitVolume = 0.0;
    if (oclAcceleration)
    {
        VoxelSet* vset = (VoxelSet*)inputPSet;
        const Vec3<double> minBB = vset->GetMinBB();
        const float fMinBB[4] = { (float)minBB[0], (float)minBB[1], (float)minBB[2], 1.0f };
        // (sic) fSclae — existing identifier kept; holds the uniform voxel scale.
        const float fSclae[4] = { (float)vset->GetScale(), (float)vset->GetScale(), (float)vset->GetScale(), 0.0f };
        const int32_t nVoxels = (int32_t)nPrimitives;
        unitVolume = vset->GetUnitVolume();
        // Each work-item processes 4 voxels (kernel indexes i0 = gid << 2).
        nWorkGroups = (nPrimitives + 4 * m_oclWorkGroupSize - 1) / (4 * m_oclWorkGroupSize);
        globalSize = nWorkGroups * m_oclWorkGroupSize;
        cl_int error;
        voxels = clCreateBuffer(m_oclContext, CL_MEM_READ_ONLY | CL_MEM_COPY_HOST_PTR, sizeof(Voxel) * nPrimitives, vset->GetVoxels(), &error);
        if (error != CL_SUCCESS)
        {
            if (params.m_logger)
            {
                params.m_logger->Log("Couldn't create buffer\n");
            }
            SetCancel(true);
        }
        partialVolumes = new cl_mem[m_ompNumProcessors];
        for (int32_t i = 0; i < m_ompNumProcessors; ++i)
        {
            partialVolumes[i] = clCreateBuffer(m_oclContext, CL_MEM_WRITE_ONLY, sizeof(uint32_t) * 4 * nWorkGroups, NULL, &error);
            if (error != CL_SUCCESS)
            {
                if (params.m_logger)
                {
                    params.m_logger->Log("Couldn't create buffer\n");
                }
                SetCancel(true);
                break;
            }
            // Arg 2 (the plane) is set per-candidate inside the loop below.
            error = clSetKernelArg(m_oclKernelComputePartialVolumes[i], 0, sizeof(cl_mem), &voxels);
            error |= clSetKernelArg(m_oclKernelComputePartialVolumes[i], 1, sizeof(uint32_t), &nVoxels);
            error |= clSetKernelArg(m_oclKernelComputePartialVolumes[i], 3, sizeof(float) * 4, fMinBB);
            error |= clSetKernelArg(m_oclKernelComputePartialVolumes[i], 4, sizeof(float) * 4, &fSclae);
            error |= clSetKernelArg(m_oclKernelComputePartialVolumes[i], 5, sizeof(uint32_t) * 4 * m_oclWorkGroupSize, NULL);
            error |= clSetKernelArg(m_oclKernelComputePartialVolumes[i], 6, sizeof(cl_mem), &(partialVolumes[i]));
            error |= clSetKernelArg(m_oclKernelComputeSum[i], 0, sizeof(cl_mem), &(partialVolumes[i]));
            error |= clSetKernelArg(m_oclKernelComputeSum[i], 2, sizeof(uint32_t) * 4 * m_oclWorkGroupSize, NULL);
            if (error != CL_SUCCESS)
            {
                if (params.m_logger)
                {
                    // (sic) "atguments" — existing log text kept unchanged.
                    params.m_logger->Log("Couldn't kernel atguments \n");
                }
                SetCancel(true);
            }
        }
    }
#else // CL_VERSION_1_1
    oclAcceleration = false;
#endif // CL_VERSION_1_1
#ifdef DEBUG_TEMP
    Timer timerComputeCost;
    timerComputeCost.Tic();
#endif // DEBUG_TEMP
#if USE_THREAD == 1 && _OPENMP
#pragma omp parallel for
#endif
    for (int32_t x = 0; x < nPlanes; ++x)
    {
        int32_t threadID = 0;
#if USE_THREAD == 1 && _OPENMP
        threadID = omp_get_thread_num();
#pragma omp flush(cancel)
#endif
        if (!cancel)
        {
            // Update progress
            if (GetCancel())
            {
                cancel = true;
#if USE_THREAD == 1 && _OPENMP
#pragma omp flush(cancel)
#endif
            }
            Plane plane = planes[x];
            if (oclAcceleration)
            {
#ifdef CL_VERSION_1_1
                // Count voxels on the positive side of 'plane' on the device,
                // then tree-reduce until a single uint4 remains in the buffer.
                const float fPlane[4] = { (float)plane.m_a, (float)plane.m_b, (float)plane.m_c, (float)plane.m_d };
                cl_int error = clSetKernelArg(m_oclKernelComputePartialVolumes[threadID], 2, sizeof(float) * 4, fPlane);
                if (error != CL_SUCCESS)
                {
                    if (params.m_logger)
                    {
                        params.m_logger->Log("Couldn't kernel atguments \n");
                    }
                    SetCancel(true);
                }
                error = clEnqueueNDRangeKernel(m_oclQueue[threadID], m_oclKernelComputePartialVolumes[threadID], 1, NULL, &globalSize, &m_oclWorkGroupSize, 0, NULL, NULL);
                if (error != CL_SUCCESS)
                {
                    if (params.m_logger)
                    {
                        params.m_logger->Log("Couldn't run kernel \n");
                    }
                    SetCancel(true);
                }
                int32_t nValues = (int32_t)nWorkGroups;
                while (nValues > 1)
                {
                    error = clSetKernelArg(m_oclKernelComputeSum[threadID], 1, sizeof(int32_t), &nValues);
                    if (error != CL_SUCCESS)
                    {
                        if (params.m_logger)
                        {
                            params.m_logger->Log("Couldn't kernel atguments \n");
                        }
                        SetCancel(true);
                    }
                    // These locals deliberately shadow the outer nWorkGroups/globalSize.
                    size_t nWorkGroups = (nValues + m_oclWorkGroupSize - 1) / m_oclWorkGroupSize;
                    size_t globalSize = nWorkGroups * m_oclWorkGroupSize;
                    error = clEnqueueNDRangeKernel(m_oclQueue[threadID], m_oclKernelComputeSum[threadID], 1, NULL, &globalSize, &m_oclWorkGroupSize, 0, NULL, NULL);
                    if (error != CL_SUCCESS)
                    {
                        if (params.m_logger)
                        {
                            params.m_logger->Log("Couldn't run kernel \n");
                        }
                        SetCancel(true);
                    }
                    nValues = (int32_t)nWorkGroups;
                }
#endif // CL_VERSION_1_1
            }
            Mesh& leftCH = chs[threadID];
            Mesh& rightCH = chs[threadID + m_ompNumProcessors];
            rightCH.ResizePoints(0);
            leftCH.ResizePoints(0);
            rightCH.ResizeTriangles(0);
            leftCH.ResizeTriangles(0);
            // compute convex-hulls
#ifdef TEST_APPROX_CH
            double volumeLeftCH1;
            double volumeRightCH1;
#endif //TEST_APPROX_CH
            if (params.m_convexhullApproximation)
            {
                // Approximate hulls from surface samples + clipped parent hull.
                SArray<Vec3<double> >& leftCHPts = chPts[threadID];
                SArray<Vec3<double> >& rightCHPts = chPts[threadID + m_ompNumProcessors];
                rightCHPts.Resize(0);
                leftCHPts.Resize(0);
                onSurfacePSet->Intersect(plane, &rightCHPts, &leftCHPts, convexhullDownsampling * 32);
                inputPSet->GetConvexHull().Clip(plane, rightCHPts, leftCHPts);
                rightCH.ComputeConvexHull((double*)rightCHPts.Data(), rightCHPts.Size());
                leftCH.ComputeConvexHull((double*)leftCHPts.Data(), leftCHPts.Size());
#ifdef TEST_APPROX_CH
                Mesh leftCH1;
                Mesh rightCH1;
                VoxelSet right;
                VoxelSet left;
                onSurfacePSet->Clip(plane, &right, &left);
                right.ComputeConvexHull(rightCH1, convexhullDownsampling);
                left.ComputeConvexHull(leftCH1, convexhullDownsampling);
                volumeLeftCH1 = leftCH1.ComputeVolume();
                volumeRightCH1 = rightCH1.ComputeVolume();
#endif //TEST_APPROX_CH
            }
            else
            {
                // Exact path: clip the surface primitives and hull each side.
                PrimitiveSet* const right = psets[threadID];
                PrimitiveSet* const left = psets[threadID + m_ompNumProcessors];
                onSurfacePSet->Clip(plane, right, left);
                right->ComputeConvexHull(rightCH, convexhullDownsampling);
                left->ComputeConvexHull(leftCH, convexhullDownsampling);
            }
            double volumeLeftCH = leftCH.ComputeVolume();
            double volumeRightCH = rightCH.ComputeVolume();
            // compute clipped volumes
            double volumeLeft = 0.0;
            double volumeRight = 0.0;
            if (oclAcceleration)
            {
#ifdef CL_VERSION_1_1
                // Blocking read of the reduced uint4 voxel count for the right side.
                uint32_t volumes[4];
                cl_int error = clEnqueueReadBuffer(m_oclQueue[threadID], partialVolumes[threadID], CL_TRUE, 0, sizeof(uint32_t) * 4, volumes, 0, NULL, NULL);
                size_t nPrimitivesRight = volumes[0] + volumes[1] + volumes[2] + volumes[3];
                size_t nPrimitivesLeft = nPrimitives - nPrimitivesRight;
                volumeRight = nPrimitivesRight * unitVolume;
                volumeLeft = nPrimitivesLeft * unitVolume;
                if (error != CL_SUCCESS)
                {
                    if (params.m_logger)
                    {
                        params.m_logger->Log("Couldn't read buffer \n");
                    }
                    SetCancel(true);
                }
#endif // CL_VERSION_1_1
            }
            else
            {
                inputPSet->ComputeClippedVolumes(plane, volumeRight, volumeLeft);
            }
            double concavityLeft = ComputeConcavity(volumeLeft, volumeLeftCH, m_volumeCH0);
            double concavityRight = ComputeConcavity(volumeRight, volumeRightCH, m_volumeCH0);
            double concavity = (concavityLeft + concavityRight);
            // compute cost
            double balance = alpha * fabs(volumeLeft - volumeRight) / m_volumeCH0;
            double d = w * (preferredCuttingDirection[0] * plane.m_a + preferredCuttingDirection[1] * plane.m_b + preferredCuttingDirection[2] * plane.m_c);
            double symmetry = beta * d;
double total = concavity + balance + symmetry;
#if USE_THREAD == 1 && _OPENMP
#pragma omp critical
#endif
            {
                // Tie-break on lower plane index so the result is deterministic
                // regardless of thread scheduling.
                if (total < minTotal || (total == minTotal && x < iBest))
                {
                    minConcavity = concavity;
                    minBalance = balance;
                    minSymmetry = symmetry;
                    bestPlane = plane;
                    minTotal = total;
                    iBest = x;
                }
                ++done;
                if (!(done & 127)) // reduce update frequency
                {
                    double progress = done * (progress1 - progress0) / nPlanes + progress0;
                    Update(m_stageProgress, progress, params);
                }
            }
        }
    }
#ifdef DEBUG_TEMP
    timerComputeCost.Toc();
    printf_s("Cost[%i] = %f\n", nPlanes, timerComputeCost.GetElapsedTime());
#endif // DEBUG_TEMP
#ifdef CL_VERSION_1_1
    if (oclAcceleration)
    {
        clReleaseMemObject(voxels);
        for (int32_t i = 0; i < m_ompNumProcessors; ++i)
        {
            clReleaseMemObject(partialVolumes[i]);
        }
        delete[] partialVolumes;
    }
#endif // CL_VERSION_1_1
    if (psets)
    {
        for (int32_t i = 0; i < 2 * m_ompNumProcessors; ++i)
        {
            delete psets[i];
        }
        delete[] psets;
    }
    delete onSurfacePSet;
    delete[] chPts;
    delete[] chs;
    if (params.m_logger)
    {
        // msg is 256 bytes; the fixed-width format below stays well under that.
        sprintf(msg, "\n\t\t\t Best %04i T=%2.6f C=%2.6f B=%2.6f S=%2.6f (%1.1f, %1.1f, %1.1f, %3.3f)\n\n", iBest, minTotal, minConcavity, minBalance, minSymmetry, bestPlane.m_a, bestPlane.m_b, bestPlane.m_c, bestPlane.m_d);
        params.m_logger->Log(msg);
    }
}
// Main approximate-convex-decomposition driver: recursively splits m_pset with
// the best clipping planes up to a depth derived from params.m_maxConvexHulls,
// then builds the output hull meshes (transformed back through m_rot/m_barycenter).
void VHACD::ComputeACD(const Parameters& params)
{
    if (GetCancel())
    {
        return;
    }
    m_timer.Tic();
    m_stage = "Approximate Convex Decomposition";
    m_stageProgress = 0.0;
    std::ostringstream msg;
    if (params.m_logger)
    {
        msg << "+ " << m_stage << std::endl;
        params.m_logger->Log(msg.str().c_str());
    }
    SArray<PrimitiveSet*> parts;       // finished (no further split) parts
    SArray<PrimitiveSet*> inputParts;  // parts to process at the current level
    SArray<PrimitiveSet*> temp;        // parts produced for the next level
    inputParts.PushBack(m_pset);
    m_pset = 0;
    SArray<Plane> planes;
    SArray<Plane> planesRef;
    uint32_t sub = 0;
    bool firstIteration = true;
    m_volumeCH0 = 1.0;
    // Compute the decomposition depth based on the number of convex hulls being requested..
    uint32_t hullCount = 2;
    uint32_t depth = 1;
    while (params.m_maxConvexHulls > hullCount)
    {
        depth++;
        hullCount *= 2;
    }
    // We must always increment the decomposition depth one higher than the maximum number of hulls requested.
    // The reason for this is as follows.
    // Say, for example, the user requests 32 convex hulls exactly. This would be a decomposition depth of 5.
    // However, when we do that, we do *not* necessarily get 32 hulls as a result. This is because, during
    // the recursive descent of the binary tree, one or more of the leaf nodes may have no concavity and
    // will not be split. So, in this way, even with a decomposition depth of 5, you can produce fewer than
    // 32 hulls. So, in this case, we would set the decomposition depth to 6 (producing up to as high as 64 convex hulls).
    // Then, the merge step which combines over-described hulls down to the user requested amount, we will end up
    // getting exactly 32 convex hulls as a result.
    // We could just allow the artist to directly control the decomposition depth directly, but this would be a bit
    // too complex and the preference is simply to let them specify how many hulls they want and derive the solution
    // from that.
depth++;
    // Breadth-first subdivision: one pass per level, splitting every part whose
    // concavity exceeds both the target and its volume-error estimate.
    while (sub++ < depth && inputParts.Size() > 0 && !m_cancel)
    {
        msg.str("");
        msg << "Subdivision level " << sub;
        m_operation = msg.str();
        if (params.m_logger)
        {
            msg.str("");
            msg << "\t Subdivision level " << sub << std::endl;
            params.m_logger->Log(msg.str().c_str());
        }
        double maxConcavity = 0.0;
        const size_t nInputParts = inputParts.Size();
        Update(m_stageProgress, 0.0, params);
        for (size_t p = 0; p < nInputParts && !m_cancel; ++p)
        {
            const double progress0 = p * 100.0 / nInputParts;
            const double progress1 = (p + 0.75) * 100.0 / nInputParts;
            const double progress2 = (p + 1.00) * 100.0 / nInputParts;
            Update(m_stageProgress, progress0, params);
            // Take ownership of the part; the slot is nulled so only this scope
            // is responsible for deleting it.
            PrimitiveSet* pset = inputParts[p];
            inputParts[p] = 0;
            double volume = pset->ComputeVolume();
            pset->ComputeBB();
            pset->ComputePrincipalAxes();
            if (params.m_pca)
            {
                pset->AlignToPrincipalAxes();
            }
            pset->ComputeConvexHull(pset->GetConvexHull());
            double volumeCH = fabs(pset->GetConvexHull().ComputeVolume());
            if (firstIteration)
            {
                // Root hull volume: the normalization base for all concavities.
                m_volumeCH0 = volumeCH;
            }
            double concavity = ComputeConcavity(volume, volumeCH, m_volumeCH0);
            double error = 1.01 * pset->ComputeMaxVolumeError() / m_volumeCH0;
            if (firstIteration)
            {
                firstIteration = false;
            }
            if (params.m_logger)
            {
                msg.str("");
                msg << "\t -> Part[" << p << "] C = " << concavity << ", E = " << error << ", VS = " << pset->GetNPrimitivesOnSurf() << ", VI = " << pset->GetNPrimitivesInsideSurf() << std::endl;
                params.m_logger->Log(msg.str().c_str());
            }
            if (concavity > params.m_concavity && concavity > error)
            {
                Vec3<double> preferredCuttingDirection;
                double w = ComputePreferredCuttingDirection(pset, preferredCuttingDirection);
                planes.Resize(0);
                if (params.m_mode == 0)
                {
                    VoxelSet* vset = (VoxelSet*)pset;
                    ComputeAxesAlignedClippingPlanes(*vset, params.m_planeDownsampling, planes);
                }
                else
                {
                    TetrahedronSet* tset = (TetrahedronSet*)pset;
                    ComputeAxesAlignedClippingPlanes(*tset, params.m_planeDownsampling, planes);
                }
                if (params.m_logger)
                {
                    msg.str("");
                    msg << "\t\t [Regular sampling] Number of clipping planes " << planes.Size() << std::endl;
                    params.m_logger->Log(msg.str().c_str());
                }
                Plane bestPlane;
                double minConcavity = MAX_DOUBLE;
                // Coarse pass over the downsampled plane set.
                ComputeBestClippingPlane(pset, volume, planes, preferredCuttingDirection, w, concavity * params.m_alpha, concavity * params.m_beta, params.m_convexhullDownsampling, progress0, progress1, bestPlane, minConcavity, params);
                if (!m_cancel && (params.m_planeDownsampling > 1 || params.m_convexhullDownsampling > 1))
                {
                    // Fine pass: re-sample densely around the coarse winner.
                    planesRef.Resize(0);
                    if (params.m_mode == 0)
                    {
                        VoxelSet* vset = (VoxelSet*)pset;
                        RefineAxesAlignedClippingPlanes(*vset, bestPlane, params.m_planeDownsampling, planesRef);
                    }
                    else
                    {
                        TetrahedronSet* tset = (TetrahedronSet*)pset;
                        RefineAxesAlignedClippingPlanes(*tset, bestPlane, params.m_planeDownsampling, planesRef);
                    }
                    if (params.m_logger)
                    {
                        msg.str("");
                        msg << "\t\t [Refining] Number of clipping planes " << planesRef.Size() << std::endl;
                        params.m_logger->Log(msg.str().c_str());
                    }
                    ComputeBestClippingPlane(pset, volume, planesRef, preferredCuttingDirection, w, concavity * params.m_alpha, concavity * params.m_beta, 1, // convexhullDownsampling = 1
                                             progress1, progress2, bestPlane, minConcavity, params);
                }
                if (GetCancel())
                {
                    delete pset; // clean up
                    break;
                }
                else
                {
                    if (maxConcavity < minConcavity)
                    {
                        maxConcavity = minConcavity;
                    }
                    // Split the part; the children go to the next level ('temp').
                    PrimitiveSet* bestLeft = pset->Create();
                    PrimitiveSet* bestRight = pset->Create();
                    temp.PushBack(bestLeft);
                    temp.PushBack(bestRight);
                    pset->Clip(bestPlane, bestRight, bestLeft);
                    if (params.m_pca)
                    {
                        bestRight->RevertAlignToPrincipalAxes();
                        bestLeft->RevertAlignToPrincipalAxes();
                    }
                    delete pset;
                }
            }
            else
            {
                // Convex enough: keep the part as a final piece.
                if (params.m_pca)
                {
                    pset->RevertAlignToPrincipalAxes();
                }
                parts.PushBack(pset);
            }
        }
        Update(95.0 * (1.0 - maxConcavity) / (1.0 - params.m_concavity), 100.0, params);
        if (GetCancel())
        {
            const size_t nTempParts = temp.Size();
            for (size_t p = 0; p < nTempParts; ++p)
            {
                delete temp[p];
            }
            temp.Resize(0);
        }
        else
        {
            inputParts = temp;
            temp.Resize(0);
        }
    }
    // Any parts left after the depth limit are kept as-is.
    const size_t nInputParts = inputParts.Size();
    for
(size_t p = 0; p < nInputParts; ++p)
    {
        parts.PushBack(inputParts[p]);
    }
    if (GetCancel())
    {
        const size_t nParts = parts.Size();
        for (size_t p = 0; p < nParts; ++p)
        {
            delete parts[p];
        }
        return;
    }
    m_overallProgress = 90.0;
    Update(m_stageProgress, 100.0, params);
    msg.str("");
    msg << "Generate convex-hulls";
    m_operation = msg.str();
    size_t nConvexHulls = parts.Size();
    if (params.m_logger)
    {
        msg.str("");
        msg << "+ Generate " << nConvexHulls << " convex-hulls " << std::endl;
        params.m_logger->Log(msg.str().c_str());
    }
    Update(m_stageProgress, 0.0, params);
    m_convexHulls.Resize(0);
    for (size_t p = 0; p < nConvexHulls && !m_cancel; ++p)
    {
        Update(m_stageProgress, p * 100.0 / nConvexHulls, params);
        m_convexHulls.PushBack(new Mesh);
        parts[p]->ComputeConvexHull(*m_convexHulls[p]);
        size_t nv = m_convexHulls[p]->GetNPoints();
        double x, y, z;
        // Transform hull points from the normalized/PCA frame back into the
        // original mesh frame: p' = m_rot * p + m_barycenter.
        for (size_t i = 0; i < nv; ++i)
        {
            Vec3<double>& pt = m_convexHulls[p]->GetPoint(i);
            x = pt[0];
            y = pt[1];
            z = pt[2];
            pt[0] = m_rot[0][0] * x + m_rot[0][1] * y + m_rot[0][2] * z + m_barycenter[0];
            pt[1] = m_rot[1][0] * x + m_rot[1][1] * y + m_rot[1][2] * z + m_barycenter[1];
            pt[2] = m_rot[2][0] * x + m_rot[2][1] * y + m_rot[2][2] * z + m_barycenter[2];
        }
    }
    const size_t nParts = parts.Size();
    for (size_t p = 0; p < nParts; ++p)
    {
        delete parts[p];
        parts[p] = 0;
    }
    parts.Resize(0);
    if (GetCancel())
    {
        const size_t nConvexHulls = m_convexHulls.Size();
        for (size_t p = 0; p < nConvexHulls; ++p)
        {
            delete m_convexHulls[p];
        }
        m_convexHulls.Clear();
        return;
    }
    m_overallProgress = 95.0;
    Update(100.0, 100.0, params);
    m_timer.Toc();
    if (params.m_logger)
    {
        msg.str("");
        msg << "\t time " << m_timer.GetElapsedTime() / 1000.0 << "s" << std::endl;
        params.m_logger->Log(msg.str().c_str());
    }
}
// Appends every vertex of 'mesh' to 'pts'.
void AddPoints(const Mesh* const mesh, SArray<Vec3<double> >& pts)
{
    const int32_t n = (int32_t)mesh->GetNPoints();
    for (int32_t i = 0; i < n; ++i)
    {
        pts.PushBack(mesh->GetPoint(i));
    }
}
// Computes the convex hull of the union of ch1 and ch2 into combinedCH,
// using 'pts' as a reusable scratch buffer. Faces from btConvexHullComputer
// are fan-triangulated around each face's first edge.
void ComputeConvexHull(const Mesh* const ch1, const Mesh* const ch2, SArray<Vec3<double> >& pts, Mesh* const combinedCH)
{
    pts.Resize(0);
    AddPoints(ch1, pts);
    AddPoints(ch2, pts);
    btConvexHullComputer ch;
    ch.compute((double*)pts.Data(), 3 * sizeof(double), (int32_t)pts.Size(), -1.0, -1.0);
    combinedCH->ResizePoints(0);
    combinedCH->ResizeTriangles(0);
    for (int32_t v = 0; v < ch.vertices.size(); v++)
    {
        combinedCH->AddPoint(Vec3<double>(ch.vertices[v].getX(), ch.vertices[v].getY(), ch.vertices[v].getZ()));
    }
    const int32_t nt = ch.faces.size();
    for (int32_t t = 0; t < nt; ++t)
    {
        const btConvexHullComputer::Edge* sourceEdge = &(ch.edges[ch.faces[t]]);
        int32_t a = sourceEdge->getSourceVertex();
        int32_t b = sourceEdge->getTargetVertex();
        const btConvexHullComputer::Edge* edge = sourceEdge->getNextEdgeOfFace();
        int32_t c = edge->getTargetVertex();
        while (c != a)
        {
            combinedCH->AddTriangle(Vec3<int32_t>(a, b, c));
            edge = edge->getNextEdgeOfFace();
            b = c;
            c = edge->getTargetVertex();
        }
    }
}
// Greedily merges the generated hulls down to params.m_maxConvexHulls using a
// lower-triangular cost matrix of pairwise merged-hull concavities: repeatedly
// merge the cheapest pair, then recompute that row/column against the new hull.
void VHACD::MergeConvexHulls(const Parameters& params)
{
    if (GetCancel())
    {
        return;
    }
    m_timer.Tic();
    m_stage = "Merge Convex Hulls";
    std::ostringstream msg;
    if (params.m_logger)
    {
        msg << "+ " << m_stage << std::endl;
        params.m_logger->Log(msg.str().c_str());
    }
    // Get the current number of convex hulls
    size_t nConvexHulls = m_convexHulls.Size();
    // Iteration counter
    int32_t iteration = 0;
    // While we have more than at least one convex hull and the user has not asked us to cancel the operation
    if (nConvexHulls > 1 && !m_cancel)
    {
        // Get the gamma error threshold for when to exit
        SArray<Vec3<double> > pts;
        Mesh combinedCH;
        // Populate the cost matrix (strict lower triangle, row-major:
        // entry (p1, p2) with p2 < p1 lives at p1*(p1-1)/2 + p2).
        size_t idx = 0;
        SArray<float> costMatrix;
        costMatrix.Resize(((nConvexHulls * nConvexHulls) - nConvexHulls) >> 1);
        for (size_t p1 = 1; p1 < nConvexHulls; ++p1)
        {
            const float volume1 = m_convexHulls[p1]->ComputeVolume();
            for (size_t p2 = 0; p2 < p1; ++p2)
            {
                ComputeConvexHull(m_convexHulls[p1], m_convexHulls[p2], pts, &combinedCH);
                costMatrix[idx++] = ComputeConcavity(volume1 + m_convexHulls[p2]->ComputeVolume(), combinedCH.ComputeVolume(), m_volumeCH0);
            }
        }
        // Until we cant merge below the maximum cost
        size_t costSize = m_convexHulls.Size();
        while (!m_cancel)
        {
            msg.str("");
            msg << "Iteration " << iteration++;
            m_operation = msg.str();
            // Search for lowest cost
            float bestCost = (std::numeric_limits<float>::max)();
            const size_t addr = FindMinimumElement(costMatrix.Data(), &bestCost, 0, costMatrix.Size());
            if ((costSize - 1) < params.m_maxConvexHulls)
            {
                break;
            }
            // Invert the triangular index: addr -> (p1, p2) with p2 < p1.
            const size_t addrI = (static_cast<int32_t>(sqrt(1 + (8 * addr))) - 1) >> 1;
            const size_t p1 = addrI + 1;
            const size_t p2 = addr - ((addrI * (addrI + 1)) >> 1);
            // NOTE(review): p1/p2 are size_t, so the >= 0 asserts are vacuous.
            assert(p1 >= 0);
            assert(p2 >= 0);
            assert(p1 < costSize);
            assert(p2 < costSize);
            if (params.m_logger)
            {
                msg.str("");
                msg << "\t\t Merging (" << p1 << ", " << p2 << ") " << bestCost << std::endl << std::endl;
                params.m_logger->Log(msg.str().c_str());
            }
            // Make the lowest cost row and column into a new hull
            Mesh* cch = new Mesh;
            ComputeConvexHull(m_convexHulls[p1], m_convexHulls[p2], pts, cch);
            delete m_convexHulls[p2];
            m_convexHulls[p2] = cch;
            delete m_convexHulls[p1];
            // Swap-and-pop: the last hull moves into slot p1.
            std::swap(m_convexHulls[p1], m_convexHulls[m_convexHulls.Size() - 1]);
            m_convexHulls.PopBack();
            costSize = costSize - 1;
            // Calculate costs versus the new hull
            size_t rowIdx = ((p2 - 1) * p2) >> 1;
            const float volume1 = m_convexHulls[p2]->ComputeVolume();
            for (size_t i = 0; (i < p2) && (!m_cancel); ++i)
            {
                ComputeConvexHull(m_convexHulls[p2], m_convexHulls[i], pts, &combinedCH);
                costMatrix[rowIdx++] = ComputeConcavity(volume1 + m_convexHulls[i]->ComputeVolume(), combinedCH.ComputeVolume(), m_volumeCH0);
            }
            rowIdx += p2;
            for (size_t i = p2 + 1; (i < costSize) && (!m_cancel); ++i)
            {
                ComputeConvexHull(m_convexHulls[p2], m_convexHulls[i], pts, &combinedCH);
                costMatrix[rowIdx] = ComputeConcavity(volume1 + m_convexHulls[i]->ComputeVolume(), combinedCH.ComputeVolume(), m_volumeCH0);
                rowIdx += i;
                assert(rowIdx >= 0);
            }
            // Move the top column in to replace its space
            const size_t erase_idx = ((costSize - 1) * costSize) >> 1;
            if (p1 <
costSize) { rowIdx = (addrI * p1) >> 1; size_t top_row = erase_idx; for (size_t i = 0; i < p1; ++i) { if (i != p2) { costMatrix[rowIdx] = costMatrix[top_row]; } ++rowIdx; ++top_row; } ++top_row; rowIdx += p1; for (size_t i = p1 + 1; i < (costSize + 1); ++i) { costMatrix[rowIdx] = costMatrix[top_row++]; rowIdx += i; assert(rowIdx >= 0); } } costMatrix.Resize(erase_idx); } } m_overallProgress = 99.0; Update(100.0, 100.0, params); m_timer.Toc(); if (params.m_logger) { msg.str(""); msg << "\t time " << m_timer.GetElapsedTime() / 1000.0 << "s" << std::endl; params.m_logger->Log(msg.str().c_str()); } } void VHACD::SimplifyConvexHull(Mesh* const ch, const size_t nvertices, const double minVolume) { if (nvertices <= 4) { return; } ICHull icHull; if (mRaycastMesh) { // We project these points onto the original source mesh to increase precision // The voxelization process drops floating point precision so returned data points are not exactly lying on the // surface of the original source mesh. // The first step is we need to compute the bounding box of the mesh we are trying to build a convex hull for. // From this bounding box, we compute the length of the diagonal to get a relative size and center for point projection uint32_t nPoints = ch->GetNPoints(); Vec3<double> *inputPoints = ch->GetPointsBuffer(); Vec3<double> bmin(inputPoints[0]); Vec3<double> bmax(inputPoints[1]); for (uint32_t i = 1; i < nPoints; i++) { const Vec3<double> &p = inputPoints[i]; p.UpdateMinMax(bmin, bmax); } Vec3<double> center; double diagonalLength = center.GetCenter(bmin, bmax); // Get the center of the bounding box // This is the error threshold for determining if we should use the raycast result data point vs. the voxelized result. double pointDistanceThreshold = diagonalLength * 0.05; // If a new point is within 1/100th the diagonal length of the bounding volume we do not add it. 
To do so would create a // thin sliver in the resulting convex hull double snapDistanceThreshold = diagonalLength * 0.01; double snapDistanceThresholdSquared = snapDistanceThreshold*snapDistanceThreshold; // Allocate buffer for projected vertices Vec3<double> *outputPoints = new Vec3<double>[nPoints]; uint32_t outCount = 0; for (uint32_t i = 0; i < nPoints; i++) { Vec3<double> &inputPoint = inputPoints[i]; Vec3<double> &outputPoint = outputPoints[outCount]; // Compute the direction vector from the center of this mesh to the vertex Vec3<double> dir = inputPoint - center; // Normalize the direction vector. dir.Normalize(); // Multiply times the diagonal length of the mesh dir *= diagonalLength; // Add the center back in again to get the destination point dir += center; // By default the output point is equal to the input point outputPoint = inputPoint; double pointDistance; if (mRaycastMesh->raycast(center.GetData(), dir.GetData(), inputPoint.GetData(), outputPoint.GetData(),&pointDistance) ) { // If the nearest intersection point is too far away, we keep the original source data point. // Not all points lie directly on the original mesh surface if (pointDistance > pointDistanceThreshold) { outputPoint = inputPoint; } } // Ok, before we add this point, we do not want to create points which are extremely close to each other. // This will result in tiny sliver triangles which are really bad for collision detection. bool foundNearbyPoint = false; for (uint32_t j = 0; j < outCount; j++) { // If this new point is extremely close to an existing point, we do not add it! 
double squaredDistance = outputPoints[j].GetDistanceSquared(outputPoint); if (squaredDistance < snapDistanceThresholdSquared ) { foundNearbyPoint = true; break; } } if (!foundNearbyPoint) { outCount++; } } icHull.AddPoints(outputPoints, outCount); delete[]outputPoints; } else { icHull.AddPoints(ch->GetPointsBuffer(), ch->GetNPoints()); } icHull.Process((uint32_t)nvertices, minVolume); TMMesh& mesh = icHull.GetMesh(); const size_t nT = mesh.GetNTriangles(); const size_t nV = mesh.GetNVertices(); ch->ResizePoints(nV); ch->ResizeTriangles(nT); mesh.GetIFS(ch->GetPointsBuffer(), ch->GetTrianglesBuffer()); } void VHACD::SimplifyConvexHulls(const Parameters& params) { if (m_cancel || params.m_maxNumVerticesPerCH < 4) { return; } m_timer.Tic(); m_stage = "Simplify convex-hulls"; m_operation = "Simplify convex-hulls"; std::ostringstream msg; const size_t nConvexHulls = m_convexHulls.Size(); if (params.m_logger) { msg << "+ Simplify " << nConvexHulls << " convex-hulls " << std::endl; params.m_logger->Log(msg.str().c_str()); } Update(0.0, 0.0, params); for (size_t i = 0; i < nConvexHulls && !m_cancel; ++i) { if (params.m_logger) { msg.str(""); msg << "\t\t Simplify CH[" << std::setfill('0') << std::setw(5) << i << "] " << m_convexHulls[i]->GetNPoints() << " V, " << m_convexHulls[i]->GetNTriangles() << " T" << std::endl; params.m_logger->Log(msg.str().c_str()); } SimplifyConvexHull(m_convexHulls[i], params.m_maxNumVerticesPerCH, m_volumeCH0 * params.m_minVolumePerCH); } m_overallProgress = 100.0; Update(100.0, 100.0, params); m_timer.Toc(); if (params.m_logger) { msg.str(""); msg << "\t time " << m_timer.GetElapsedTime() / 1000.0 << "s" << std::endl; params.m_logger->Log(msg.str().c_str()); } } bool VHACD::ComputeCenterOfMass(double centerOfMass[3]) const { bool ret = false; centerOfMass[0] = 0; centerOfMass[1] = 0; centerOfMass[2] = 0; // Get number of convex hulls in the result uint32_t hullCount = GetNConvexHulls(); if (hullCount) // if we have results { ret = true; double 
totalVolume = 0; // Initialize the center of mass to zero centerOfMass[0] = 0; centerOfMass[1] = 0; centerOfMass[2] = 0; // Compute the total volume of all convex hulls for (uint32_t i = 0; i < hullCount; i++) { ConvexHull ch; GetConvexHull(i, ch); totalVolume += ch.m_volume; } // compute the reciprocal of the total volume double recipVolume = 1.0 / totalVolume; // Add in the weighted by volume average of the center point of each convex hull for (uint32_t i = 0; i < hullCount; i++) { ConvexHull ch; GetConvexHull(i, ch); double ratio = ch.m_volume*recipVolume; centerOfMass[0] += ch.m_center[0] * ratio; centerOfMass[1] += ch.m_center[1] * ratio; centerOfMass[2] += ch.m_center[2] * ratio; } } return ret; } #pragma warning(disable:4189 4101) // Will analyze the HACD results and compute the constraints solutions. // It will analyze the point at which any two convex hulls touch each other and // return the total number of constraint pairs found uint32_t VHACD::ComputeConstraints(void) { mConstraints.clear(); // erase any previous constraint results uint32_t hullCount = GetNConvexHulls(); // get the number of convex hulls in the results if (hullCount == 0) return 0; #if DEBUG_VISUALIZE_CONSTRAINTS gRenderDebug->pushRenderState(); gRenderDebug->setCurrentDisplayTime(10); #endif // We voxelize the convex hull class HullData { public: HullData(void) { FLOAT_MATH::fm_initMinMax(mBmin, mBmax); } ~HullData(void) { FLOAT_MATH::fm_releaseVertexIndex(mVertexIndex); FLOAT_MATH::fm_releaseTesselate(mTesselate); delete[]mIndices; } void computeResolution(void) { mDiagonalDistance = FLOAT_MATH::fm_distance(mBmin, mBmax); mTessellateDistance = mDiagonalDistance / 20; mNearestPointDistance = mDiagonalDistance / 20.0f; mPointResolution = mDiagonalDistance / 100; mVertexIndex = FLOAT_MATH::fm_createVertexIndex(mPointResolution, false); mTesselate = FLOAT_MATH::fm_createTesselate(); } void computeTesselation(void) { mTesselationIndices = mTesselate->tesselate(mVertexIndex, 
mSourceTriangleCount, mIndices, mTessellateDistance, 6, mTessellateTriangleCount); uint32_t vcount = mVertexIndex->getVcount(); } bool getNearestVert(const double sourcePoint[3], double nearest[3], const HullData &other, double nearestThreshold) { bool ret = false; double nt2 = nearestThreshold*nearestThreshold; uint32_t vcount = other.mVertexIndex->getVcount(); for (uint32_t i = 0; i < vcount; i++) { const double *p = other.mVertexIndex->getVertexDouble(i); double d2 = FLOAT_MATH::fm_distanceSquared(sourcePoint, p); if (d2 < nt2) { nearest[0] = p[0]; nearest[1] = p[1]; nearest[2] = p[2]; nt2 = d2; ret = true; } } return ret; } void findMatchingPoints(const HullData &other) { uint32_t vcount = mVertexIndex->getVcount(); for (uint32_t i = 0; i < vcount; i++) { const double *sourcePoint = mVertexIndex->getVertexDouble(i); double nearestPoint[3]; if (getNearestVert(sourcePoint, nearestPoint, other, mNearestPointDistance)) { #if DEBUG_VISUALIZE_CONSTRAINTS float fp1[3]; float fp2[3]; FLOAT_MATH::fm_doubleToFloat3(sourcePoint, fp1); FLOAT_MATH::fm_doubleToFloat3(nearestPoint, fp2); gRenderDebug->debugRay(fp1, fp2); #endif } } } double mBmin[3]; double mBmax[3]; double mDiagonalDistance; double mTessellateDistance; double mPointResolution; double mNearestPointDistance; uint32_t mSourceTriangleCount{ 0 }; uint32_t mTessellateTriangleCount{ 0 }; uint32_t *mIndices{ nullptr }; FLOAT_MATH::fm_VertexIndex *mVertexIndex{ nullptr }; FLOAT_MATH::fm_Tesselate *mTesselate{ nullptr }; const uint32_t *mTesselationIndices{ nullptr }; }; HullData *hullData = new HullData[hullCount]; for (uint32_t i = 0; i < hullCount; i++) { HullData &hd = hullData[i]; ConvexHull ch; GetConvexHull(i, ch); // Compute the bounding volume of this convex hull for (uint32_t j = 0; j < ch.m_nPoints; j++) { const double *p = &ch.m_points[j * 3]; FLOAT_MATH::fm_minmax(p, hd.mBmin, hd.mBmax); } hd.computeResolution(); // Compute the tessellation resolution uint32_t tcount = ch.m_nTriangles; 
hd.mSourceTriangleCount = tcount; hd.mIndices = new uint32_t[tcount * 3]; for (uint32_t j = 0; j < tcount; j++) { uint32_t i1 = ch.m_triangles[j * 3 + 0]; uint32_t i2 = ch.m_triangles[j * 3 + 1]; uint32_t i3 = ch.m_triangles[j * 3 + 2]; const double *p1 = &ch.m_points[i1 * 3]; const double *p2 = &ch.m_points[i2 * 3]; const double *p3 = &ch.m_points[i3 * 3]; bool newPos; hd.mIndices[j * 3 + 0] = hd.mVertexIndex->getIndex(p1, newPos); hd.mIndices[j * 3 + 1] = hd.mVertexIndex->getIndex(p2, newPos); hd.mIndices[j * 3 + 2] = hd.mVertexIndex->getIndex(p3, newPos); } hd.computeTesselation(); } for (uint32_t i = 0; i < hullCount; i++) { HullData &hd = hullData[i]; // Slightly inflate the bounding box around each convex hull for intersection tests // during the constraint building phase FLOAT_MATH::fm_inflateMinMax(hd.mBmin, hd.mBmax, 0.05f); } // Look for every possible pair of convex hulls as possible constraints for (uint32_t i = 0; i < hullCount; i++) { HullData &hd1 = hullData[i]; for (uint32_t j = i + 1; j < hullCount; j++) { HullData &hd2 = hullData[j]; if (FLOAT_MATH::fm_intersectAABB(hd1.mBmin, hd1.mBmax, hd2.mBmin, hd2.mBmax)) { // ok. if two convex hulls intersect, we are going to find the <n> number of nearest // matching points between them. hd1.findMatchingPoints(hd2); } } } #if DEBUG_VISUALIZE_CONSTRAINTS gRenderDebug->popRenderState(); #endif return uint32_t(mConstraints.size()); } // Returns a pointer to the constraint index; null if the index is not valid or // the user did not previously call 'ComputeConstraints' const VHACD::IVHACD::Constraint *VHACD::GetConstraint(uint32_t index) const { const Constraint *ret = nullptr; if (index < mConstraints.size()) { ret = &mConstraints[index]; } return ret; } } // end of VHACD namespace
69,088
C++
37.727018
756
0.521046
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/VHACD/src/btConvexHullComputer.cpp
/* Copyright (c) 2011 Ole Kniemeyer, MAXON, www.maxon.net This software is provided 'as-is', without any express or implied warranty. In no event will the authors be held liable for any damages arising from the use of this software. Permission is granted to anyone to use this software for any purpose, including commercial applications, and to alter it and redistribute it freely, subject to the following restrictions: 1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required. 2. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software. 3. This notice may not be removed or altered from any source distribution. */ #include <string.h> #include "btAlignedObjectArray.h" #include "btConvexHullComputer.h" #include "btMinMax.h" #include "btVector3.h" #ifdef __GNUC__ #include <stdint.h> #elif defined(_MSC_VER) typedef __int32 int32_t; typedef __int64 int64_t; typedef unsigned __int32 uint32_t; typedef unsigned __int64 uint64_t; #else typedef int32_t int32_t; typedef long long int32_t int64_t; typedef uint32_t uint32_t; typedef unsigned long long int32_t uint64_t; #endif #ifdef _MSC_VER #pragma warning(disable:4458) #endif //The definition of USE_X86_64_ASM is moved into the build system. 
You can enable it manually by commenting out the following lines //#if (defined(__GNUC__) && defined(__x86_64__) && !defined(__ICL)) // || (defined(__ICL) && defined(_M_X64)) bug in Intel compiler, disable inline assembly // #define USE_X86_64_ASM //#endif //#define DEBUG_CONVEX_HULL //#define SHOW_ITERATIONS #if defined(DEBUG_CONVEX_HULL) || defined(SHOW_ITERATIONS) #include <stdio.h> #endif // Convex hull implementation based on Preparata and Hong // Ole Kniemeyer, MAXON Computer GmbH class btConvexHullInternal { public: class Point64 { public: int64_t x; int64_t y; int64_t z; Point64(int64_t x, int64_t y, int64_t z) : x(x) , y(y) , z(z) { } bool isZero() { return (x == 0) && (y == 0) && (z == 0); } int64_t dot(const Point64& b) const { return x * b.x + y * b.y + z * b.z; } }; class Point32 { public: int32_t x; int32_t y; int32_t z; int32_t index; Point32() { } Point32(int32_t x, int32_t y, int32_t z) : x(x) , y(y) , z(z) , index(-1) { } bool operator==(const Point32& b) const { return (x == b.x) && (y == b.y) && (z == b.z); } bool operator!=(const Point32& b) const { return (x != b.x) || (y != b.y) || (z != b.z); } bool isZero() { return (x == 0) && (y == 0) && (z == 0); } Point64 cross(const Point32& b) const { return Point64(y * b.z - z * b.y, z * b.x - x * b.z, x * b.y - y * b.x); } Point64 cross(const Point64& b) const { return Point64(y * b.z - z * b.y, z * b.x - x * b.z, x * b.y - y * b.x); } int64_t dot(const Point32& b) const { return x * b.x + y * b.y + z * b.z; } int64_t dot(const Point64& b) const { return x * b.x + y * b.y + z * b.z; } Point32 operator+(const Point32& b) const { return Point32(x + b.x, y + b.y, z + b.z); } Point32 operator-(const Point32& b) const { return Point32(x - b.x, y - b.y, z - b.z); } }; class Int128 { public: uint64_t low; uint64_t high; Int128() { } Int128(uint64_t low, uint64_t high) : low(low) , high(high) { } Int128(uint64_t low) : low(low) , high(0) { } Int128(int64_t value) : low(value) , high((value >= 0) ? 
0 : (uint64_t)-1LL) { } static Int128 mul(int64_t a, int64_t b); static Int128 mul(uint64_t a, uint64_t b); Int128 operator-() const { return Int128((uint64_t) - (int64_t)low, ~high + (low == 0)); } Int128 operator+(const Int128& b) const { #ifdef USE_X86_64_ASM Int128 result; __asm__("addq %[bl], %[rl]\n\t" "adcq %[bh], %[rh]\n\t" : [rl] "=r"(result.low), [rh] "=r"(result.high) : "0"(low), "1"(high), [bl] "g"(b.low), [bh] "g"(b.high) : "cc"); return result; #else uint64_t lo = low + b.low; return Int128(lo, high + b.high + (lo < low)); #endif } Int128 operator-(const Int128& b) const { #ifdef USE_X86_64_ASM Int128 result; __asm__("subq %[bl], %[rl]\n\t" "sbbq %[bh], %[rh]\n\t" : [rl] "=r"(result.low), [rh] "=r"(result.high) : "0"(low), "1"(high), [bl] "g"(b.low), [bh] "g"(b.high) : "cc"); return result; #else return *this + -b; #endif } Int128& operator+=(const Int128& b) { #ifdef USE_X86_64_ASM __asm__("addq %[bl], %[rl]\n\t" "adcq %[bh], %[rh]\n\t" : [rl] "=r"(low), [rh] "=r"(high) : "0"(low), "1"(high), [bl] "g"(b.low), [bh] "g"(b.high) : "cc"); #else uint64_t lo = low + b.low; if (lo < low) { ++high; } low = lo; high += b.high; #endif return *this; } Int128& operator++() { if (++low == 0) { ++high; } return *this; } Int128 operator*(int64_t b) const; btScalar toScalar() const { return ((int64_t)high >= 0) ? btScalar(high) * (btScalar(0x100000000LL) * btScalar(0x100000000LL)) + btScalar(low) : -(-*this).toScalar(); } int32_t getSign() const { return ((int64_t)high < 0) ? -1 : (high || low) ? 
1 : 0; } bool operator<(const Int128& b) const { return (high < b.high) || ((high == b.high) && (low < b.low)); } int32_t ucmp(const Int128& b) const { if (high < b.high) { return -1; } if (high > b.high) { return 1; } if (low < b.low) { return -1; } if (low > b.low) { return 1; } return 0; } }; class Rational64 { private: uint64_t m_numerator; uint64_t m_denominator; int32_t sign; public: Rational64(int64_t numerator, int64_t denominator) { if (numerator > 0) { sign = 1; m_numerator = (uint64_t)numerator; } else if (numerator < 0) { sign = -1; m_numerator = (uint64_t)-numerator; } else { sign = 0; m_numerator = 0; } if (denominator > 0) { m_denominator = (uint64_t)denominator; } else if (denominator < 0) { sign = -sign; m_denominator = (uint64_t)-denominator; } else { m_denominator = 0; } } bool isNegativeInfinity() const { return (sign < 0) && (m_denominator == 0); } bool isNaN() const { return (sign == 0) && (m_denominator == 0); } int32_t compare(const Rational64& b) const; btScalar toScalar() const { return sign * ((m_denominator == 0) ? SIMD_INFINITY : (btScalar)m_numerator / m_denominator); } }; class Rational128 { private: Int128 numerator; Int128 denominator; int32_t sign; bool isInt64; public: Rational128(int64_t value) { if (value > 0) { sign = 1; this->numerator = value; } else if (value < 0) { sign = -1; this->numerator = -value; } else { sign = 0; this->numerator = (uint64_t)0; } this->denominator = (uint64_t)1; isInt64 = true; } Rational128(const Int128& numerator, const Int128& denominator) { sign = numerator.getSign(); if (sign >= 0) { this->numerator = numerator; } else { this->numerator = -numerator; } int32_t dsign = denominator.getSign(); if (dsign >= 0) { this->denominator = denominator; } else { sign = -sign; this->denominator = -denominator; } isInt64 = false; } int32_t compare(const Rational128& b) const; int32_t compare(int64_t b) const; btScalar toScalar() const { return sign * ((denominator.getSign() == 0) ? 
SIMD_INFINITY : numerator.toScalar() / denominator.toScalar()); } }; class PointR128 { public: Int128 x; Int128 y; Int128 z; Int128 denominator; PointR128() { } PointR128(Int128 x, Int128 y, Int128 z, Int128 denominator) : x(x) , y(y) , z(z) , denominator(denominator) { } btScalar xvalue() const { return x.toScalar() / denominator.toScalar(); } btScalar yvalue() const { return y.toScalar() / denominator.toScalar(); } btScalar zvalue() const { return z.toScalar() / denominator.toScalar(); } }; class Edge; class Face; class Vertex { public: Vertex* next; Vertex* prev; Edge* edges; Face* firstNearbyFace; Face* lastNearbyFace; PointR128 point128; Point32 point; int32_t copy; Vertex() : next(NULL) , prev(NULL) , edges(NULL) , firstNearbyFace(NULL) , lastNearbyFace(NULL) , copy(-1) { } #ifdef DEBUG_CONVEX_HULL void print() { printf("V%d (%d, %d, %d)", point.index, point.x, point.y, point.z); } void printGraph(); #endif Point32 operator-(const Vertex& b) const { return point - b.point; } Rational128 dot(const Point64& b) const { return (point.index >= 0) ? Rational128(point.dot(b)) : Rational128(point128.x * b.x + point128.y * b.y + point128.z * b.z, point128.denominator); } btScalar xvalue() const { return (point.index >= 0) ? btScalar(point.x) : point128.xvalue(); } btScalar yvalue() const { return (point.index >= 0) ? btScalar(point.y) : point128.yvalue(); } btScalar zvalue() const { return (point.index >= 0) ? 
btScalar(point.z) : point128.zvalue(); } void receiveNearbyFaces(Vertex* src) { if (lastNearbyFace) { lastNearbyFace->nextWithSameNearbyVertex = src->firstNearbyFace; } else { firstNearbyFace = src->firstNearbyFace; } if (src->lastNearbyFace) { lastNearbyFace = src->lastNearbyFace; } for (Face* f = src->firstNearbyFace; f; f = f->nextWithSameNearbyVertex) { btAssert(f->nearbyVertex == src); f->nearbyVertex = this; } src->firstNearbyFace = NULL; src->lastNearbyFace = NULL; } }; class Edge { public: Edge* next; Edge* prev; Edge* reverse; Vertex* target; Face* face; int32_t copy; ~Edge() { next = NULL; prev = NULL; reverse = NULL; target = NULL; face = NULL; } void link(Edge* n) { btAssert(reverse->target == n->reverse->target); next = n; n->prev = this; } #ifdef DEBUG_CONVEX_HULL void print() { printf("E%p : %d -> %d, n=%p p=%p (0 %d\t%d\t%d) -> (%d %d %d)", this, reverse->target->point.index, target->point.index, next, prev, reverse->target->point.x, reverse->target->point.y, reverse->target->point.z, target->point.x, target->point.y, target->point.z); } #endif }; class Face { public: Face* next; Vertex* nearbyVertex; Face* nextWithSameNearbyVertex; Point32 origin; Point32 dir0; Point32 dir1; Face() : next(NULL) , nearbyVertex(NULL) , nextWithSameNearbyVertex(NULL) { } void init(Vertex* a, Vertex* b, Vertex* c) { nearbyVertex = a; origin = a->point; dir0 = *b - *a; dir1 = *c - *a; if (a->lastNearbyFace) { a->lastNearbyFace->nextWithSameNearbyVertex = this; } else { a->firstNearbyFace = this; } a->lastNearbyFace = this; } Point64 getNormal() { return dir0.cross(dir1); } }; template <typename UWord, typename UHWord> class DMul { private: static uint32_t high(uint64_t value) { return (uint32_t)(value >> 32); } static uint32_t low(uint64_t value) { return (uint32_t)value; } static uint64_t mul(uint32_t a, uint32_t b) { return (uint64_t)a * (uint64_t)b; } static void shlHalf(uint64_t& value) { value <<= 32; } static uint64_t high(Int128 value) { return value.high; } 
    static uint64_t low(Int128 value)
    {
        return value.low;
    }

    static Int128 mul(uint64_t a, uint64_t b)
    {
        return Int128::mul(a, b);
    }

    // Shift an Int128 left by one half-word (64 bits).
    static void shlHalf(Int128& value)
    {
        value.high = value.low;
        value.low = 0;
    }

public:
    // resHigh:resLow = a * b. Combines the four half-width partial products
    // p00/p01/p10/p11 with explicit carry propagation.
    static void mul(UWord a, UWord b, UWord& resLow, UWord& resHigh)
    {
        UWord p00 = mul(low(a), low(b));
        UWord p01 = mul(low(a), high(b));
        UWord p10 = mul(high(a), low(b));
        UWord p11 = mul(high(a), high(b));
        UWord p0110 = UWord(low(p01)) + UWord(low(p10));
        p11 += high(p01);
        p11 += high(p10);
        p11 += high(p0110);
        shlHalf(p0110);
        p00 += p0110;
        if (p00 < p0110)  // carry out of the low word
        {
            ++p11;
        }
        resLow = p00;
        resHigh = p11;
    }
};

private:
// Extremal vertices of a sub-hull's 2D (x, y) projection, tracked while
// merging sub-hulls in the divide-and-conquer computation.
class IntermediateHull
{
public:
    Vertex* minXy;
    Vertex* maxXy;
    Vertex* minYx;
    Vertex* maxYx;

    IntermediateHull()
        : minXy(NULL)
        , maxXy(NULL)
        , minYx(NULL)
        , maxYx(NULL)
    {
    }

    void print();
};

enum Orientation
{
    NONE,
    CLOCKWISE,
    COUNTER_CLOCKWISE
};

// One fixed-size slab of pool objects; slabs are chained through "next".
template <typename T>
class PoolArray
{
private:
    T* array;
    int32_t size;

public:
    PoolArray<T>* next;

    PoolArray(int32_t size)
        : size(size)
        , next(NULL)
    {
        array = (T*)btAlignedAlloc(sizeof(T) * size, 16);
    }

    ~PoolArray()
    {
        btAlignedFree(array);
    }

    // Thread the raw slots into a free list (via T::next) and return its head.
    T* init()
    {
        T* o = array;
        for (int32_t i = 0; i < size; i++, o++)
        {
            o->next = (i + 1 < size) ? o + 1 : NULL;
        }
        return array;
    }
};

// Free-list object pool built on PoolArray slabs. reset() recycles every slab
// without releasing memory, so repeated hull computations reuse allocations.
template <typename T>
class Pool
{
private:
    PoolArray<T>* arrays;     // all slabs ever allocated
    PoolArray<T>* nextArray;  // next recycled slab to hand out after reset()
    T* freeObjects;           // free list threaded through T::next
    int32_t arraySize;        // slots per new slab

public:
    Pool()
        : arrays(NULL)
        , nextArray(NULL)
        , freeObjects(NULL)
        , arraySize(256)
    {
    }

    ~Pool()
    {
        while (arrays)
        {
            PoolArray<T>* p = arrays;
            arrays = p->next;
            p->~PoolArray<T>();
            btAlignedFree(p);
        }
    }

    void reset()
    {
        nextArray = arrays;
        freeObjects = NULL;
    }

    void setArraySize(int32_t arraySize)
    {
        this->arraySize = arraySize;
    }

    // Pop from the free list, refilling it from a recycled or freshly
    // allocated slab when empty; placement-new constructs the object.
    T* newObject()
    {
        T* o = freeObjects;
        if (!o)
        {
            PoolArray<T>* p = nextArray;
            if (p)
            {
                nextArray = p->next;
            }
            else
            {
                p = new (btAlignedAlloc(sizeof(PoolArray<T>), 16)) PoolArray<T>(arraySize);
                p->next = arrays;
                arrays = p;
            }
            o = p->init();
        }
        freeObjects = o->next;
        return new (o) T();
    };

    // Destroy in place and push back onto the free list.
    void freeObject(T* object)
    {
        object->~T();
        object->next = freeObjects;
        freeObjects = object;
    }
};

btVector3 scaling;  // world -> integer-grid quantization scale chosen in compute()
btVector3 center;   // quantization center (midpoint of the input AABB)
Pool<Vertex> vertexPool;
Pool<Edge> edgePool;
Pool<Face> facePool;
btAlignedObjectArray<Vertex*> originalVertices;
int32_t mergeStamp;  // decreasing stamp marking edges/vertices touched by the current merge pass
int32_t minAxis;     // axis permutation picked in compute() (z=min extent, y=max extent)
int32_t medAxis;
int32_t maxAxis;
int32_t usedEdgePairs;
int32_t maxUsedEdgePairs;  // statistics: peak simultaneous edge pairs

static Orientation getOrientation(const Edge* prev, const Edge* next, const Point32& s, const Point32& t);
Edge* findMaxAngle(bool ccw, const Vertex* start, const Point32& s, const Point64& rxs, const Point64& sxrxs, Rational64& minCot);
void findEdgeForCoplanarFaces(Vertex* c0, Vertex* c1, Edge*& e0, Edge*& e1, Vertex* stop0, Vertex* stop1);

Edge* newEdgePair(Vertex* from, Vertex* to);

// Unlink both half-edges of "edge" from their source-vertex fans (repairing
// each vertex's edge-list head) and return both to the pool.
void removeEdgePair(Edge* edge)
{
    Edge* n = edge->next;
    Edge* r = edge->reverse;
    btAssert(edge->target && r->target);
    if (n != edge)
    {
        n->prev = edge->prev;
        edge->prev->next = n;
        r->target->edges = n;
    }
    else
    {
        // edge was the only outgoing edge of its source vertex.
        r->target->edges = NULL;
    }
    n = r->next;
    if (n != r)
    {
        n->prev = r->prev;
        r->prev->next = n;
        edge->target->edges = n;
    }
    else
    {
        edge->target->edges = NULL;
    }
    edgePool.freeObject(edge);
    edgePool.freeObject(r);
    usedEdgePairs--;
}

void
computeInternal(int32_t start, int32_t end, IntermediateHull& result);
bool mergeProjection(IntermediateHull& h0, IntermediateHull& h1, Vertex*& c0, Vertex*& c1);
void merge(IntermediateHull& h0, IntermediateHull& h1);
btVector3 toBtVector(const Point32& v);
btVector3 getBtNormal(Face* face);
bool shiftFace(Face* face, btScalar amount, btAlignedObjectArray<Vertex*> stack);

public:
Vertex* vertexList;  // entry vertex of the computed hull; the full graph is reachable from here

void compute(const void* coords, bool doubleCoords, int32_t stride, int32_t count);
btVector3 getCoordinates(const Vertex* v);
btScalar shrink(btScalar amount, btScalar clampAmount);
};

// Signed 128 x 64 -> 128-bit multiply: operate on magnitudes, reapply sign.
// Any overflow beyond 128 bits in the high-word contribution is discarded.
btConvexHullInternal::Int128 btConvexHullInternal::Int128::operator*(int64_t b) const
{
    bool negative = (int64_t)high < 0;
    Int128 a = negative ? -*this : *this;
    if (b < 0)
    {
        negative = !negative;
        b = -b;
    }
    Int128 result = mul(a.low, (uint64_t)b);
    result.high += a.high * (uint64_t)b;
    return negative ? -result : result;
}

// Signed 64 x 64 -> full 128-bit product.
btConvexHullInternal::Int128 btConvexHullInternal::Int128::mul(int64_t a, int64_t b)
{
    Int128 result;
#ifdef USE_X86_64_ASM
    // Single imulq yields the full product in rdx:rax.
    __asm__("imulq %[b]"
            : "=a"(result.low), "=d"(result.high)
            : "0"(a), [b] "r"(b)
            : "cc");
    return result;
#else
    // Portable path: multiply magnitudes with the double-width helper,
    // then negate if exactly one operand was negative.
    bool negative = a < 0;
    if (negative)
    {
        a = -a;
    }
    if (b < 0)
    {
        negative = !negative;
        b = -b;
    }
    DMul<uint64_t, uint32_t>::mul((uint64_t)a, (uint64_t)b, result.low, result.high);
    return negative ? -result : result;
#endif
}

// Unsigned 64 x 64 -> full 128-bit product.
btConvexHullInternal::Int128 btConvexHullInternal::Int128::mul(uint64_t a, uint64_t b)
{
    Int128 result;
#ifdef USE_X86_64_ASM
    __asm__("mulq %[b]"
            : "=a"(result.low), "=d"(result.high)
            : "0"(a), [b] "r"(b)
            : "cc");
#else
    DMul<uint64_t, uint32_t>::mul(a, b, result.low, result.high);
#endif
    return result;
}

// Exact three-way comparison of two 64-bit rationals: returns <0, 0 or >0.
// The cross products numerator*b.denominator need 128 bits of precision.
int32_t btConvexHullInternal::Rational64::compare(const Rational64& b) const
{
    if (sign != b.sign)
    {
        return sign - b.sign;
    }
    else if (sign == 0)
    {
        return 0;
    }

    // Conceptually:
    // return (numerator * b.denominator > b.numerator * denominator) ? sign : (numerator * b.denominator < b.numerator * denominator) ? -sign : 0;
#ifdef USE_X86_64_ASM
    int32_t result;
    int64_t tmp;
    int64_t dummy;
    __asm__("mulq %[bn]\n\t"
            "movq %%rax, %[tmp]\n\t"
            "movq %%rdx, %%rbx\n\t"
            "movq %[tn], %%rax\n\t"
            "mulq %[bd]\n\t"
            "subq %[tmp], %%rax\n\t"
            "sbbq %%rbx, %%rdx\n\t"  // rdx:rax contains 128-bit-difference "numerator*b.denominator - b.numerator*denominator"
            "setnsb %%bh\n\t"        // bh=1 if difference is non-negative, bh=0 otherwise
            "orq %%rdx, %%rax\n\t"
            "setnzb %%bl\n\t"  // bl=1 if difference if non-zero, bl=0 if it is zero
            "decb %%bh\n\t"    // now bx=0x0000 if difference is zero, 0xff01 if it is negative, 0x0001 if it is positive (i.e., same sign as difference)
            "shll $16, %%ebx\n\t"  // ebx has same sign as difference
            : "=&b"(result), [tmp] "=&r"(tmp), "=a"(dummy)
            : "a"(denominator), [bn] "g"(b.numerator), [tn] "g"(numerator), [bd] "g"(b.denominator)
            : "%rdx", "cc");
    return result ? result ^ sign  // if sign is +1, only bit 0 of result is inverted, which does not change the sign of result (and cannot result in zero)
                                   // if sign is -1, all bits of result are inverted, which changes the sign of result (and again cannot result in zero)
                  : 0;
#else
    // NOTE(review): this branch names the members m_numerator/m_denominator
    // while the asm branch above uses numerator/denominator — only one of the
    // two can match the (out-of-view) Rational64 declaration; verify whichever
    // branch your configuration compiles.
    return sign * Int128::mul(m_numerator, b.m_denominator).ucmp(Int128::mul(m_denominator, b.m_numerator));
#endif
}

// Exact comparison of two 128-bit rationals. If this value fits in 64 bits
// (isInt64), delegate to the cheaper int64 overload; otherwise compare the
// 256-bit cross products produced by DMul<Int128, uint64_t>.
int32_t btConvexHullInternal::Rational128::compare(const Rational128& b) const
{
    if (sign != b.sign)
    {
        return sign - b.sign;
    }
    else if (sign == 0)
    {
        return 0;
    }
    if (isInt64)
    {
        return -b.compare(sign * (int64_t)numerator.low);
    }

    Int128 nbdLow, nbdHigh, dbnLow, dbnHigh;
    DMul<Int128, uint64_t>::mul(numerator, b.denominator, nbdLow, nbdHigh);
    DMul<Int128, uint64_t>::mul(denominator, b.numerator, dbnLow, dbnHigh);

    // Compare high 128 bits first, then low; scale by the common sign.
    int32_t cmp = nbdHigh.ucmp(dbnHigh);
    if (cmp)
    {
        return cmp * sign;
    }
    return nbdLow.ucmp(dbnLow) * sign;
}

// Compare this rational against a plain 64-bit integer b.
int32_t btConvexHullInternal::Rational128::compare(int64_t b) const
{
    if (isInt64)
    {
        int64_t a = sign * (int64_t)numerator.low;
        return (a > b) ? 1 : (a < b) ?
-1 : 0; } if (b > 0) { if (sign <= 0) { return -1; } } else if (b < 0) { if (sign >= 0) { return 1; } b = -b; } else { return sign; } return numerator.ucmp(denominator * b) * sign; } btConvexHullInternal::Edge* btConvexHullInternal::newEdgePair(Vertex* from, Vertex* to) { btAssert(from && to); Edge* e = edgePool.newObject(); Edge* r = edgePool.newObject(); e->reverse = r; r->reverse = e; e->copy = mergeStamp; r->copy = mergeStamp; e->target = to; r->target = from; e->face = NULL; r->face = NULL; usedEdgePairs++; if (usedEdgePairs > maxUsedEdgePairs) { maxUsedEdgePairs = usedEdgePairs; } return e; } bool btConvexHullInternal::mergeProjection(IntermediateHull& h0, IntermediateHull& h1, Vertex*& c0, Vertex*& c1) { Vertex* v0 = h0.maxYx; Vertex* v1 = h1.minYx; if ((v0->point.x == v1->point.x) && (v0->point.y == v1->point.y)) { btAssert(v0->point.z < v1->point.z); Vertex* v1p = v1->prev; if (v1p == v1) { c0 = v0; if (v1->edges) { btAssert(v1->edges->next == v1->edges); v1 = v1->edges->target; btAssert(v1->edges->next == v1->edges); } c1 = v1; return false; } Vertex* v1n = v1->next; v1p->next = v1n; v1n->prev = v1p; if (v1 == h1.minXy) { if ((v1n->point.x < v1p->point.x) || ((v1n->point.x == v1p->point.x) && (v1n->point.y < v1p->point.y))) { h1.minXy = v1n; } else { h1.minXy = v1p; } } if (v1 == h1.maxXy) { if ((v1n->point.x > v1p->point.x) || ((v1n->point.x == v1p->point.x) && (v1n->point.y > v1p->point.y))) { h1.maxXy = v1n; } else { h1.maxXy = v1p; } } } v0 = h0.maxXy; v1 = h1.maxXy; Vertex* v00 = NULL; Vertex* v10 = NULL; int32_t sign = 1; for (int32_t side = 0; side <= 1; side++) { int32_t dx = (v1->point.x - v0->point.x) * sign; if (dx > 0) { while (true) { int32_t dy = v1->point.y - v0->point.y; Vertex* w0 = side ? 
v0->next : v0->prev; if (w0 != v0) { int32_t dx0 = (w0->point.x - v0->point.x) * sign; int32_t dy0 = w0->point.y - v0->point.y; if ((dy0 <= 0) && ((dx0 == 0) || ((dx0 < 0) && (dy0 * dx <= dy * dx0)))) { v0 = w0; dx = (v1->point.x - v0->point.x) * sign; continue; } } Vertex* w1 = side ? v1->next : v1->prev; if (w1 != v1) { int32_t dx1 = (w1->point.x - v1->point.x) * sign; int32_t dy1 = w1->point.y - v1->point.y; int32_t dxn = (w1->point.x - v0->point.x) * sign; if ((dxn > 0) && (dy1 < 0) && ((dx1 == 0) || ((dx1 < 0) && (dy1 * dx < dy * dx1)))) { v1 = w1; dx = dxn; continue; } } break; } } else if (dx < 0) { while (true) { int32_t dy = v1->point.y - v0->point.y; Vertex* w1 = side ? v1->prev : v1->next; if (w1 != v1) { int32_t dx1 = (w1->point.x - v1->point.x) * sign; int32_t dy1 = w1->point.y - v1->point.y; if ((dy1 >= 0) && ((dx1 == 0) || ((dx1 < 0) && (dy1 * dx <= dy * dx1)))) { v1 = w1; dx = (v1->point.x - v0->point.x) * sign; continue; } } Vertex* w0 = side ? v0->prev : v0->next; if (w0 != v0) { int32_t dx0 = (w0->point.x - v0->point.x) * sign; int32_t dy0 = w0->point.y - v0->point.y; int32_t dxn = (v1->point.x - w0->point.x) * sign; if ((dxn < 0) && (dy0 > 0) && ((dx0 == 0) || ((dx0 < 0) && (dy0 * dx < dy * dx0)))) { v0 = w0; dx = dxn; continue; } } break; } } else { int32_t x = v0->point.x; int32_t y0 = v0->point.y; Vertex* w0 = v0; Vertex* t; while (((t = side ? w0->next : w0->prev) != v0) && (t->point.x == x) && (t->point.y <= y0)) { w0 = t; y0 = t->point.y; } v0 = w0; int32_t y1 = v1->point.y; Vertex* w1 = v1; while (((t = side ? 
w1->prev : w1->next) != v1) && (t->point.x == x) && (t->point.y >= y1)) { w1 = t; y1 = t->point.y; } v1 = w1; } if (side == 0) { v00 = v0; v10 = v1; v0 = h0.minXy; v1 = h1.minXy; sign = -1; } } v0->prev = v1; v1->next = v0; v00->next = v10; v10->prev = v00; if (h1.minXy->point.x < h0.minXy->point.x) { h0.minXy = h1.minXy; } if (h1.maxXy->point.x >= h0.maxXy->point.x) { h0.maxXy = h1.maxXy; } h0.maxYx = h1.maxYx; c0 = v00; c1 = v10; return true; } void btConvexHullInternal::computeInternal(int32_t start, int32_t end, IntermediateHull& result) { int32_t n = end - start; switch (n) { case 0: result.minXy = NULL; result.maxXy = NULL; result.minYx = NULL; result.maxYx = NULL; return; case 2: { Vertex* v = originalVertices[start]; Vertex* w = v + 1; if (v->point != w->point) { int32_t dx = v->point.x - w->point.x; int32_t dy = v->point.y - w->point.y; if ((dx == 0) && (dy == 0)) { if (v->point.z > w->point.z) { Vertex* t = w; w = v; v = t; } btAssert(v->point.z < w->point.z); v->next = v; v->prev = v; result.minXy = v; result.maxXy = v; result.minYx = v; result.maxYx = v; } else { v->next = w; v->prev = w; w->next = v; w->prev = v; if ((dx < 0) || ((dx == 0) && (dy < 0))) { result.minXy = v; result.maxXy = w; } else { result.minXy = w; result.maxXy = v; } if ((dy < 0) || ((dy == 0) && (dx < 0))) { result.minYx = v; result.maxYx = w; } else { result.minYx = w; result.maxYx = v; } } Edge* e = newEdgePair(v, w); e->link(e); v->edges = e; e = e->reverse; e->link(e); w->edges = e; return; } #if defined(__GNUC__) goto fallthrough; // Needed to silence gcc #endif } #if defined(__GNUC__) fallthrough: // Needed to silence gcc #endif // lint -fallthrough case 1: { Vertex* v = originalVertices[start]; v->edges = NULL; v->next = v; v->prev = v; result.minXy = v; result.maxXy = v; result.minYx = v; result.maxYx = v; return; } } int32_t split0 = start + n / 2; Point32 p = originalVertices[split0 - 1]->point; int32_t split1 = split0; while ((split1 < end) && 
(originalVertices[split1]->point == p)) { split1++; } computeInternal(start, split0, result); IntermediateHull hull1; computeInternal(split1, end, hull1); #ifdef DEBUG_CONVEX_HULL printf("\n\nMerge\n"); result.print(); hull1.print(); #endif merge(result, hull1); #ifdef DEBUG_CONVEX_HULL printf("\n Result\n"); result.print(); #endif } #ifdef DEBUG_CONVEX_HULL void btConvexHullInternal::IntermediateHull::print() { printf(" Hull\n"); for (Vertex* v = minXy; v;) { printf(" "); v->print(); if (v == maxXy) { printf(" maxXy"); } if (v == minYx) { printf(" minYx"); } if (v == maxYx) { printf(" maxYx"); } if (v->next->prev != v) { printf(" Inconsistency"); } printf("\n"); v = v->next; if (v == minXy) { break; } } if (minXy) { minXy->copy = (minXy->copy == -1) ? -2 : -1; minXy->printGraph(); } } void btConvexHullInternal::Vertex::printGraph() { print(); printf("\nEdges\n"); Edge* e = edges; if (e) { do { e->print(); printf("\n"); e = e->next; } while (e != edges); do { Vertex* v = e->target; if (v->copy != copy) { v->copy = copy; v->printGraph(); } e = e->next; } while (e != edges); } } #endif btConvexHullInternal::Orientation btConvexHullInternal::getOrientation(const Edge* prev, const Edge* next, const Point32& s, const Point32& t) { btAssert(prev->reverse->target == next->reverse->target); if (prev->next == next) { if (prev->prev == next) { Point64 n = t.cross(s); Point64 m = (*prev->target - *next->reverse->target).cross(*next->target - *next->reverse->target); btAssert(!m.isZero()); int64_t dot = n.dot(m); btAssert(dot != 0); return (dot > 0) ? 
COUNTER_CLOCKWISE : CLOCKWISE; } return COUNTER_CLOCKWISE; } else if (prev->prev == next) { return CLOCKWISE; } else { return NONE; } } btConvexHullInternal::Edge* btConvexHullInternal::findMaxAngle(bool ccw, const Vertex* start, const Point32& s, const Point64& rxs, const Point64& sxrxs, Rational64& minCot) { Edge* minEdge = NULL; #ifdef DEBUG_CONVEX_HULL printf("find max edge for %d\n", start->point.index); #endif Edge* e = start->edges; if (e) { do { if (e->copy > mergeStamp) { Point32 t = *e->target - *start; Rational64 cot(t.dot(sxrxs), t.dot(rxs)); #ifdef DEBUG_CONVEX_HULL printf(" Angle is %f (%d) for ", (float)btAtan(cot.toScalar()), (int32_t)cot.isNaN()); e->print(); #endif if (cot.isNaN()) { btAssert(ccw ? (t.dot(s) < 0) : (t.dot(s) > 0)); } else { int32_t cmp; if (minEdge == NULL) { minCot = cot; minEdge = e; } else if ((cmp = cot.compare(minCot)) < 0) { minCot = cot; minEdge = e; } else if ((cmp == 0) && (ccw == (getOrientation(minEdge, e, s, t) == COUNTER_CLOCKWISE))) { minEdge = e; } } #ifdef DEBUG_CONVEX_HULL printf("\n"); #endif } e = e->next; } while (e != start->edges); } return minEdge; } void btConvexHullInternal::findEdgeForCoplanarFaces(Vertex* c0, Vertex* c1, Edge*& e0, Edge*& e1, Vertex* stop0, Vertex* stop1) { Edge* start0 = e0; Edge* start1 = e1; Point32 et0 = start0 ? start0->target->point : c0->point; Point32 et1 = start1 ? start1->target->point : c1->point; Point32 s = c1->point - c0->point; Point64 normal = ((start0 ? start0 : start1)->target->point - c0->point).cross(s); int64_t dist = c0->point.dot(normal); btAssert(!start1 || (start1->target->point.dot(normal) == dist)); Point64 perp = s.cross(normal); btAssert(!perp.isZero()); #ifdef DEBUG_CONVEX_HULL printf(" Advancing %d %d (%p %p, %d %d)\n", c0->point.index, c1->point.index, start0, start1, start0 ? start0->target->point.index : -1, start1 ? 
start1->target->point.index : -1); #endif int64_t maxDot0 = et0.dot(perp); if (e0) { while (e0->target != stop0) { Edge* e = e0->reverse->prev; if (e->target->point.dot(normal) < dist) { break; } btAssert(e->target->point.dot(normal) == dist); if (e->copy == mergeStamp) { break; } int64_t dot = e->target->point.dot(perp); if (dot <= maxDot0) { break; } maxDot0 = dot; e0 = e; et0 = e->target->point; } } int64_t maxDot1 = et1.dot(perp); if (e1) { while (e1->target != stop1) { Edge* e = e1->reverse->next; if (e->target->point.dot(normal) < dist) { break; } btAssert(e->target->point.dot(normal) == dist); if (e->copy == mergeStamp) { break; } int64_t dot = e->target->point.dot(perp); if (dot <= maxDot1) { break; } maxDot1 = dot; e1 = e; et1 = e->target->point; } } #ifdef DEBUG_CONVEX_HULL printf(" Starting at %d %d\n", et0.index, et1.index); #endif int64_t dx = maxDot1 - maxDot0; if (dx > 0) { while (true) { int64_t dy = (et1 - et0).dot(s); if (e0 && (e0->target != stop0)) { Edge* f0 = e0->next->reverse; if (f0->copy > mergeStamp) { int64_t dx0 = (f0->target->point - et0).dot(perp); int64_t dy0 = (f0->target->point - et0).dot(s); if ((dx0 == 0) ? (dy0 < 0) : ((dx0 < 0) && (Rational64(dy0, dx0).compare(Rational64(dy, dx)) >= 0))) { et0 = f0->target->point; dx = (et1 - et0).dot(perp); e0 = (e0 == start0) ? NULL : f0; continue; } } } if (e1 && (e1->target != stop1)) { Edge* f1 = e1->reverse->next; if (f1->copy > mergeStamp) { Point32 d1 = f1->target->point - et1; if (d1.dot(normal) == 0) { int64_t dx1 = d1.dot(perp); int64_t dy1 = d1.dot(s); int64_t dxn = (f1->target->point - et0).dot(perp); if ((dxn > 0) && ((dx1 == 0) ? 
(dy1 < 0) : ((dx1 < 0) && (Rational64(dy1, dx1).compare(Rational64(dy, dx)) > 0)))) { e1 = f1; et1 = e1->target->point; dx = dxn; continue; } } else { btAssert((e1 == start1) && (d1.dot(normal) < 0)); } } } break; } } else if (dx < 0) { while (true) { int64_t dy = (et1 - et0).dot(s); if (e1 && (e1->target != stop1)) { Edge* f1 = e1->prev->reverse; if (f1->copy > mergeStamp) { int64_t dx1 = (f1->target->point - et1).dot(perp); int64_t dy1 = (f1->target->point - et1).dot(s); if ((dx1 == 0) ? (dy1 > 0) : ((dx1 < 0) && (Rational64(dy1, dx1).compare(Rational64(dy, dx)) <= 0))) { et1 = f1->target->point; dx = (et1 - et0).dot(perp); e1 = (e1 == start1) ? NULL : f1; continue; } } } if (e0 && (e0->target != stop0)) { Edge* f0 = e0->reverse->prev; if (f0->copy > mergeStamp) { Point32 d0 = f0->target->point - et0; if (d0.dot(normal) == 0) { int64_t dx0 = d0.dot(perp); int64_t dy0 = d0.dot(s); int64_t dxn = (et1 - f0->target->point).dot(perp); if ((dxn < 0) && ((dx0 == 0) ? (dy0 > 0) : ((dx0 < 0) && (Rational64(dy0, dx0).compare(Rational64(dy, dx)) < 0)))) { e0 = f0; et0 = e0->target->point; dx = dxn; continue; } } else { btAssert((e0 == start0) && (d0.dot(normal) < 0)); } } } break; } } #ifdef DEBUG_CONVEX_HULL printf(" Advanced edges to %d %d\n", et0.index, et1.index); #endif } void btConvexHullInternal::merge(IntermediateHull& h0, IntermediateHull& h1) { if (!h1.maxXy) { return; } if (!h0.maxXy) { h0 = h1; return; } mergeStamp--; Vertex* c0 = NULL; Edge* toPrev0 = NULL; Edge* firstNew0 = NULL; Edge* pendingHead0 = NULL; Edge* pendingTail0 = NULL; Vertex* c1 = NULL; Edge* toPrev1 = NULL; Edge* firstNew1 = NULL; Edge* pendingHead1 = NULL; Edge* pendingTail1 = NULL; Point32 prevPoint; if (mergeProjection(h0, h1, c0, c1)) { Point32 s = *c1 - *c0; Point64 normal = Point32(0, 0, -1).cross(s); Point64 t = s.cross(normal); btAssert(!t.isZero()); Edge* e = c0->edges; Edge* start0 = NULL; if (e) { do { int64_t dot = (*e->target - *c0).dot(normal); btAssert(dot <= 0); if ((dot == 0) 
&& ((*e->target - *c0).dot(t) > 0)) { if (!start0 || (getOrientation(start0, e, s, Point32(0, 0, -1)) == CLOCKWISE)) { start0 = e; } } e = e->next; } while (e != c0->edges); } e = c1->edges; Edge* start1 = NULL; if (e) { do { int64_t dot = (*e->target - *c1).dot(normal); btAssert(dot <= 0); if ((dot == 0) && ((*e->target - *c1).dot(t) > 0)) { if (!start1 || (getOrientation(start1, e, s, Point32(0, 0, -1)) == COUNTER_CLOCKWISE)) { start1 = e; } } e = e->next; } while (e != c1->edges); } if (start0 || start1) { findEdgeForCoplanarFaces(c0, c1, start0, start1, NULL, NULL); if (start0) { c0 = start0->target; } if (start1) { c1 = start1->target; } } prevPoint = c1->point; prevPoint.z++; } else { prevPoint = c1->point; prevPoint.x++; } Vertex* first0 = c0; Vertex* first1 = c1; bool firstRun = true; while (true) { Point32 s = *c1 - *c0; Point32 r = prevPoint - c0->point; Point64 rxs = r.cross(s); Point64 sxrxs = s.cross(rxs); #ifdef DEBUG_CONVEX_HULL printf("\n Checking %d %d\n", c0->point.index, c1->point.index); #endif Rational64 minCot0(0, 0); Edge* min0 = findMaxAngle(false, c0, s, rxs, sxrxs, minCot0); Rational64 minCot1(0, 0); Edge* min1 = findMaxAngle(true, c1, s, rxs, sxrxs, minCot1); if (!min0 && !min1) { Edge* e = newEdgePair(c0, c1); e->link(e); c0->edges = e; e = e->reverse; e->link(e); c1->edges = e; return; } else { int32_t cmp = !min0 ? 1 : !min1 ? -1 : minCot0.compare(minCot1); #ifdef DEBUG_CONVEX_HULL printf(" -> Result %d\n", cmp); #endif if (firstRun || ((cmp >= 0) ? !minCot1.isNegativeInfinity() : !minCot0.isNegativeInfinity())) { Edge* e = newEdgePair(c0, c1); if (pendingTail0) { pendingTail0->prev = e; } else { pendingHead0 = e; } e->next = pendingTail0; pendingTail0 = e; e = e->reverse; if (pendingTail1) { pendingTail1->next = e; } else { pendingHead1 = e; } e->prev = pendingTail1; pendingTail1 = e; } Edge* e0 = min0; Edge* e1 = min1; #ifdef DEBUG_CONVEX_HULL printf(" Found min edges to %d %d\n", e0 ? e0->target->point.index : -1, e1 ? 
e1->target->point.index : -1); #endif if (cmp == 0) { findEdgeForCoplanarFaces(c0, c1, e0, e1, NULL, NULL); } if ((cmp >= 0) && e1) { if (toPrev1) { for (Edge *e = toPrev1->next, *n = NULL; e != min1; e = n) { n = e->next; removeEdgePair(e); } } if (pendingTail1) { if (toPrev1) { toPrev1->link(pendingHead1); } else { min1->prev->link(pendingHead1); firstNew1 = pendingHead1; } pendingTail1->link(min1); pendingHead1 = NULL; pendingTail1 = NULL; } else if (!toPrev1) { firstNew1 = min1; } prevPoint = c1->point; c1 = e1->target; toPrev1 = e1->reverse; } if ((cmp <= 0) && e0) { if (toPrev0) { for (Edge *e = toPrev0->prev, *n = NULL; e != min0; e = n) { n = e->prev; removeEdgePair(e); } } if (pendingTail0) { if (toPrev0) { pendingHead0->link(toPrev0); } else { pendingHead0->link(min0->next); firstNew0 = pendingHead0; } min0->link(pendingTail0); pendingHead0 = NULL; pendingTail0 = NULL; } else if (!toPrev0) { firstNew0 = min0; } prevPoint = c0->point; c0 = e0->target; toPrev0 = e0->reverse; } } if ((c0 == first0) && (c1 == first1)) { if (toPrev0 == NULL) { pendingHead0->link(pendingTail0); c0->edges = pendingTail0; } else { for (Edge *e = toPrev0->prev, *n = NULL; e != firstNew0; e = n) { n = e->prev; removeEdgePair(e); } if (pendingTail0) { pendingHead0->link(toPrev0); firstNew0->link(pendingTail0); } } if (toPrev1 == NULL) { pendingTail1->link(pendingHead1); c1->edges = pendingTail1; } else { for (Edge *e = toPrev1->next, *n = NULL; e != firstNew1; e = n) { n = e->next; removeEdgePair(e); } if (pendingTail1) { toPrev1->link(pendingHead1); pendingTail1->link(firstNew1); } } return; } firstRun = false; } } static bool pointCmp(const btConvexHullInternal::Point32& p, const btConvexHullInternal::Point32& q) { return (p.y < q.y) || ((p.y == q.y) && ((p.x < q.x) || ((p.x == q.x) && (p.z < q.z)))); } void btConvexHullInternal::compute(const void* coords, bool doubleCoords, int32_t stride, int32_t count) { btVector3 min(btScalar(1e30), btScalar(1e30), btScalar(1e30)), 
max(btScalar(-1e30), btScalar(-1e30), btScalar(-1e30)); const char* ptr = (const char*)coords; if (doubleCoords) { for (int32_t i = 0; i < count; i++) { const double* v = (const double*)ptr; btVector3 p((btScalar)v[0], (btScalar)v[1], (btScalar)v[2]); ptr += stride; min.setMin(p); max.setMax(p); } } else { for (int32_t i = 0; i < count; i++) { const float* v = (const float*)ptr; btVector3 p(v[0], v[1], v[2]); ptr += stride; min.setMin(p); max.setMax(p); } } btVector3 s = max - min; maxAxis = s.maxAxis(); minAxis = s.minAxis(); if (minAxis == maxAxis) { minAxis = (maxAxis + 1) % 3; } medAxis = 3 - maxAxis - minAxis; s /= btScalar(10216); if (((medAxis + 1) % 3) != maxAxis) { s *= -1; } scaling = s; if (s[0] != 0) { s[0] = btScalar(1) / s[0]; } if (s[1] != 0) { s[1] = btScalar(1) / s[1]; } if (s[2] != 0) { s[2] = btScalar(1) / s[2]; } center = (min + max) * btScalar(0.5); btAlignedObjectArray<Point32> points; points.resize(count); ptr = (const char*)coords; if (doubleCoords) { for (int32_t i = 0; i < count; i++) { const double* v = (const double*)ptr; btVector3 p((btScalar)v[0], (btScalar)v[1], (btScalar)v[2]); ptr += stride; p = (p - center) * s; points[i].x = (int32_t)p[medAxis]; points[i].y = (int32_t)p[maxAxis]; points[i].z = (int32_t)p[minAxis]; points[i].index = i; } } else { for (int32_t i = 0; i < count; i++) { const float* v = (const float*)ptr; btVector3 p(v[0], v[1], v[2]); ptr += stride; p = (p - center) * s; points[i].x = (int32_t)p[medAxis]; points[i].y = (int32_t)p[maxAxis]; points[i].z = (int32_t)p[minAxis]; points[i].index = i; } } points.quickSort(pointCmp); vertexPool.reset(); vertexPool.setArraySize(count); originalVertices.resize(count); for (int32_t i = 0; i < count; i++) { Vertex* v = vertexPool.newObject(); v->edges = NULL; v->point = points[i]; v->copy = -1; originalVertices[i] = v; } points.clear(); edgePool.reset(); edgePool.setArraySize(6 * count); usedEdgePairs = 0; maxUsedEdgePairs = 0; mergeStamp = -3; IntermediateHull hull; 
computeInternal(0, count, hull); vertexList = hull.minXy; #ifdef DEBUG_CONVEX_HULL printf("max. edges %d (3v = %d)", maxUsedEdgePairs, 3 * count); #endif } btVector3 btConvexHullInternal::toBtVector(const Point32& v) { btVector3 p; p[medAxis] = btScalar(v.x); p[maxAxis] = btScalar(v.y); p[minAxis] = btScalar(v.z); return p * scaling; } btVector3 btConvexHullInternal::getBtNormal(Face* face) { return toBtVector(face->dir0).cross(toBtVector(face->dir1)).normalized(); } btVector3 btConvexHullInternal::getCoordinates(const Vertex* v) { btVector3 p; p[medAxis] = v->xvalue(); p[maxAxis] = v->yvalue(); p[minAxis] = v->zvalue(); return p * scaling + center; } btScalar btConvexHullInternal::shrink(btScalar amount, btScalar clampAmount) { if (!vertexList) { return 0; } int32_t stamp = --mergeStamp; btAlignedObjectArray<Vertex*> stack; vertexList->copy = stamp; stack.push_back(vertexList); btAlignedObjectArray<Face*> faces; Point32 ref = vertexList->point; Int128 hullCenterX(0, 0); Int128 hullCenterY(0, 0); Int128 hullCenterZ(0, 0); Int128 volume(0, 0); while (stack.size() > 0) { Vertex* v = stack[stack.size() - 1]; stack.pop_back(); Edge* e = v->edges; if (e) { do { if (e->target->copy != stamp) { e->target->copy = stamp; stack.push_back(e->target); } if (e->copy != stamp) { Face* face = facePool.newObject(); face->init(e->target, e->reverse->prev->target, v); faces.push_back(face); Edge* f = e; Vertex* a = NULL; Vertex* b = NULL; do { if (a && b) { int64_t vol = (v->point - ref).dot((a->point - ref).cross(b->point - ref)); btAssert(vol >= 0); Point32 c = v->point + a->point + b->point + ref; hullCenterX += vol * c.x; hullCenterY += vol * c.y; hullCenterZ += vol * c.z; volume += vol; } btAssert(f->copy != stamp); f->copy = stamp; f->face = face; a = b; b = f->target; f = f->reverse->prev; } while (f != e); } e = e->next; } while (e != v->edges); } } if (volume.getSign() <= 0) { return 0; } btVector3 hullCenter; hullCenter[medAxis] = hullCenterX.toScalar(); 
hullCenter[maxAxis] = hullCenterY.toScalar(); hullCenter[minAxis] = hullCenterZ.toScalar(); hullCenter /= 4 * volume.toScalar(); hullCenter *= scaling; int32_t faceCount = faces.size(); if (clampAmount > 0) { btScalar minDist = SIMD_INFINITY; for (int32_t i = 0; i < faceCount; i++) { btVector3 normal = getBtNormal(faces[i]); btScalar dist = normal.dot(toBtVector(faces[i]->origin) - hullCenter); if (dist < minDist) { minDist = dist; } } if (minDist <= 0) { return 0; } amount = btMin(amount, minDist * clampAmount); } uint32_t seed = 243703; for (int32_t i = 0; i < faceCount; i++, seed = 1664525 * seed + 1013904223) { btSwap(faces[i], faces[seed % faceCount]); } for (int32_t i = 0; i < faceCount; i++) { if (!shiftFace(faces[i], amount, stack)) { return -amount; } } return amount; } bool btConvexHullInternal::shiftFace(Face* face, btScalar amount, btAlignedObjectArray<Vertex*> stack) { btVector3 origShift = getBtNormal(face) * -amount; if (scaling[0] != 0) { origShift[0] /= scaling[0]; } if (scaling[1] != 0) { origShift[1] /= scaling[1]; } if (scaling[2] != 0) { origShift[2] /= scaling[2]; } Point32 shift((int32_t)origShift[medAxis], (int32_t)origShift[maxAxis], (int32_t)origShift[minAxis]); if (shift.isZero()) { return true; } Point64 normal = face->getNormal(); #ifdef DEBUG_CONVEX_HULL printf("\nShrinking face (%d %d %d) (%d %d %d) (%d %d %d) by (%d %d %d)\n", face->origin.x, face->origin.y, face->origin.z, face->dir0.x, face->dir0.y, face->dir0.z, face->dir1.x, face->dir1.y, face->dir1.z, shift.x, shift.y, shift.z); #endif int64_t origDot = face->origin.dot(normal); Point32 shiftedOrigin = face->origin + shift; int64_t shiftedDot = shiftedOrigin.dot(normal); btAssert(shiftedDot <= origDot); if (shiftedDot >= origDot) { return false; } Edge* intersection = NULL; Edge* startEdge = face->nearbyVertex->edges; #ifdef DEBUG_CONVEX_HULL printf("Start edge is "); startEdge->print(); printf(", normal is (%lld %lld %lld), shifted dot is %lld\n", normal.x, normal.y, normal.z, 
shiftedDot); #endif Rational128 optDot = face->nearbyVertex->dot(normal); int32_t cmp = optDot.compare(shiftedDot); #ifdef SHOW_ITERATIONS int32_t n = 0; #endif if (cmp >= 0) { Edge* e = startEdge; do { #ifdef SHOW_ITERATIONS n++; #endif Rational128 dot = e->target->dot(normal); btAssert(dot.compare(origDot) <= 0); #ifdef DEBUG_CONVEX_HULL printf("Moving downwards, edge is "); e->print(); printf(", dot is %f (%f %lld)\n", (float)dot.toScalar(), (float)optDot.toScalar(), shiftedDot); #endif if (dot.compare(optDot) < 0) { int32_t c = dot.compare(shiftedDot); optDot = dot; e = e->reverse; startEdge = e; if (c < 0) { intersection = e; break; } cmp = c; } e = e->prev; } while (e != startEdge); if (!intersection) { return false; } } else { Edge* e = startEdge; do { #ifdef SHOW_ITERATIONS n++; #endif Rational128 dot = e->target->dot(normal); btAssert(dot.compare(origDot) <= 0); #ifdef DEBUG_CONVEX_HULL printf("Moving upwards, edge is "); e->print(); printf(", dot is %f (%f %lld)\n", (float)dot.toScalar(), (float)optDot.toScalar(), shiftedDot); #endif if (dot.compare(optDot) > 0) { cmp = dot.compare(shiftedDot); if (cmp >= 0) { intersection = e; break; } optDot = dot; e = e->reverse; startEdge = e; } e = e->prev; } while (e != startEdge); if (!intersection) { return true; } } #ifdef SHOW_ITERATIONS printf("Needed %d iterations to find initial intersection\n", n); #endif if (cmp == 0) { Edge* e = intersection->reverse->next; #ifdef SHOW_ITERATIONS n = 0; #endif while (e->target->dot(normal).compare(shiftedDot) <= 0) { #ifdef SHOW_ITERATIONS n++; #endif e = e->next; if (e == intersection->reverse) { return true; } #ifdef DEBUG_CONVEX_HULL printf("Checking for outwards edge, current edge is "); e->print(); printf("\n"); #endif } #ifdef SHOW_ITERATIONS printf("Needed %d iterations to check for complete containment\n", n); #endif } Edge* firstIntersection = NULL; Edge* faceEdge = NULL; Edge* firstFaceEdge = NULL; #ifdef SHOW_ITERATIONS int32_t m = 0; #endif while (true) { 
#ifdef SHOW_ITERATIONS m++; #endif #ifdef DEBUG_CONVEX_HULL printf("Intersecting edge is "); intersection->print(); printf("\n"); #endif if (cmp == 0) { Edge* e = intersection->reverse->next; startEdge = e; #ifdef SHOW_ITERATIONS n = 0; #endif while (true) { #ifdef SHOW_ITERATIONS n++; #endif if (e->target->dot(normal).compare(shiftedDot) >= 0) { break; } intersection = e->reverse; e = e->next; if (e == startEdge) { return true; } } #ifdef SHOW_ITERATIONS printf("Needed %d iterations to advance intersection\n", n); #endif } #ifdef DEBUG_CONVEX_HULL printf("Advanced intersecting edge to "); intersection->print(); printf(", cmp = %d\n", cmp); #endif if (!firstIntersection) { firstIntersection = intersection; } else if (intersection == firstIntersection) { break; } int32_t prevCmp = cmp; Edge* prevIntersection = intersection; Edge* prevFaceEdge = faceEdge; Edge* e = intersection->reverse; #ifdef SHOW_ITERATIONS n = 0; #endif while (true) { #ifdef SHOW_ITERATIONS n++; #endif e = e->reverse->prev; btAssert(e != intersection->reverse); cmp = e->target->dot(normal).compare(shiftedDot); #ifdef DEBUG_CONVEX_HULL printf("Testing edge "); e->print(); printf(" -> cmp = %d\n", cmp); #endif if (cmp >= 0) { intersection = e; break; } } #ifdef SHOW_ITERATIONS printf("Needed %d iterations to find other intersection of face\n", n); #endif if (cmp > 0) { Vertex* removed = intersection->target; e = intersection->reverse; if (e->prev == e) { removed->edges = NULL; } else { removed->edges = e->prev; e->prev->link(e->next); e->link(e); } #ifdef DEBUG_CONVEX_HULL printf("1: Removed part contains (%d %d %d)\n", removed->point.x, removed->point.y, removed->point.z); #endif Point64 n0 = intersection->face->getNormal(); Point64 n1 = intersection->reverse->face->getNormal(); int64_t m00 = face->dir0.dot(n0); int64_t m01 = face->dir1.dot(n0); int64_t m10 = face->dir0.dot(n1); int64_t m11 = face->dir1.dot(n1); int64_t r0 = (intersection->face->origin - shiftedOrigin).dot(n0); int64_t r1 = 
(intersection->reverse->face->origin - shiftedOrigin).dot(n1); Int128 det = Int128::mul(m00, m11) - Int128::mul(m01, m10); btAssert(det.getSign() != 0); Vertex* v = vertexPool.newObject(); v->point.index = -1; v->copy = -1; v->point128 = PointR128(Int128::mul(face->dir0.x * r0, m11) - Int128::mul(face->dir0.x * r1, m01) + Int128::mul(face->dir1.x * r1, m00) - Int128::mul(face->dir1.x * r0, m10) + det * shiftedOrigin.x, Int128::mul(face->dir0.y * r0, m11) - Int128::mul(face->dir0.y * r1, m01) + Int128::mul(face->dir1.y * r1, m00) - Int128::mul(face->dir1.y * r0, m10) + det * shiftedOrigin.y, Int128::mul(face->dir0.z * r0, m11) - Int128::mul(face->dir0.z * r1, m01) + Int128::mul(face->dir1.z * r1, m00) - Int128::mul(face->dir1.z * r0, m10) + det * shiftedOrigin.z, det); v->point.x = (int32_t)v->point128.xvalue(); v->point.y = (int32_t)v->point128.yvalue(); v->point.z = (int32_t)v->point128.zvalue(); intersection->target = v; v->edges = e; stack.push_back(v); stack.push_back(removed); stack.push_back(NULL); } if (cmp || prevCmp || (prevIntersection->reverse->next->target != intersection->target)) { faceEdge = newEdgePair(prevIntersection->target, intersection->target); if (prevCmp == 0) { faceEdge->link(prevIntersection->reverse->next); } if ((prevCmp == 0) || prevFaceEdge) { prevIntersection->reverse->link(faceEdge); } if (cmp == 0) { intersection->reverse->prev->link(faceEdge->reverse); } faceEdge->reverse->link(intersection->reverse); } else { faceEdge = prevIntersection->reverse->next; } if (prevFaceEdge) { if (prevCmp > 0) { faceEdge->link(prevFaceEdge->reverse); } else if (faceEdge != prevFaceEdge->reverse) { stack.push_back(prevFaceEdge->target); while (faceEdge->next != prevFaceEdge->reverse) { Vertex* removed = faceEdge->next->target; removeEdgePair(faceEdge->next); stack.push_back(removed); #ifdef DEBUG_CONVEX_HULL printf("2: Removed part contains (%d %d %d)\n", removed->point.x, removed->point.y, removed->point.z); #endif } stack.push_back(NULL); } } 
faceEdge->face = face; faceEdge->reverse->face = intersection->face; if (!firstFaceEdge) { firstFaceEdge = faceEdge; } } #ifdef SHOW_ITERATIONS printf("Needed %d iterations to process all intersections\n", m); #endif if (cmp > 0) { firstFaceEdge->reverse->target = faceEdge->target; firstIntersection->reverse->link(firstFaceEdge); firstFaceEdge->link(faceEdge->reverse); } else if (firstFaceEdge != faceEdge->reverse) { stack.push_back(faceEdge->target); while (firstFaceEdge->next != faceEdge->reverse) { Vertex* removed = firstFaceEdge->next->target; removeEdgePair(firstFaceEdge->next); stack.push_back(removed); #ifdef DEBUG_CONVEX_HULL printf("3: Removed part contains (%d %d %d)\n", removed->point.x, removed->point.y, removed->point.z); #endif } stack.push_back(NULL); } btAssert(stack.size() > 0); vertexList = stack[0]; #ifdef DEBUG_CONVEX_HULL printf("Removing part\n"); #endif #ifdef SHOW_ITERATIONS n = 0; #endif int32_t pos = 0; while (pos < stack.size()) { int32_t end = stack.size(); while (pos < end) { Vertex* kept = stack[pos++]; #ifdef DEBUG_CONVEX_HULL kept->print(); #endif bool deeper = false; Vertex* removed; while ((removed = stack[pos++]) != NULL) { #ifdef SHOW_ITERATIONS n++; #endif kept->receiveNearbyFaces(removed); while (removed->edges) { if (!deeper) { deeper = true; stack.push_back(kept); } stack.push_back(removed->edges->target); removeEdgePair(removed->edges); } } if (deeper) { stack.push_back(NULL); } } } #ifdef SHOW_ITERATIONS printf("Needed %d iterations to remove part\n", n); #endif stack.resize(0); face->origin = shiftedOrigin; return true; } static int32_t getVertexCopy(btConvexHullInternal::Vertex* vertex, btAlignedObjectArray<btConvexHullInternal::Vertex*>& vertices) { int32_t index = vertex->copy; if (index < 0) { index = vertices.size(); vertex->copy = index; vertices.push_back(vertex); #ifdef DEBUG_CONVEX_HULL printf("Vertex %d gets index *%d\n", vertex->point.index, index); #endif } return index; } btScalar 
btConvexHullComputer::compute(const void* coords, bool doubleCoords, int32_t stride, int32_t count, btScalar shrink, btScalar shrinkClamp) { if (count <= 0) { vertices.clear(); edges.clear(); faces.clear(); return 0; } btConvexHullInternal hull; hull.compute(coords, doubleCoords, stride, count); btScalar shift = 0; if ((shrink > 0) && ((shift = hull.shrink(shrink, shrinkClamp)) < 0)) { vertices.clear(); edges.clear(); faces.clear(); return shift; } vertices.resize(0); edges.resize(0); faces.resize(0); btAlignedObjectArray<btConvexHullInternal::Vertex*> oldVertices; getVertexCopy(hull.vertexList, oldVertices); int32_t copied = 0; while (copied < oldVertices.size()) { btConvexHullInternal::Vertex* v = oldVertices[copied]; vertices.push_back(hull.getCoordinates(v)); btConvexHullInternal::Edge* firstEdge = v->edges; if (firstEdge) { int32_t firstCopy = -1; int32_t prevCopy = -1; btConvexHullInternal::Edge* e = firstEdge; do { if (e->copy < 0) { int32_t s = edges.size(); edges.push_back(Edge()); edges.push_back(Edge()); Edge* c = &edges[s]; Edge* r = &edges[s + 1]; e->copy = s; e->reverse->copy = s + 1; c->reverse = 1; r->reverse = -1; c->targetVertex = getVertexCopy(e->target, oldVertices); r->targetVertex = copied; #ifdef DEBUG_CONVEX_HULL printf(" CREATE: Vertex *%d has edge to *%d\n", copied, c->getTargetVertex()); #endif } if (prevCopy >= 0) { edges[e->copy].next = prevCopy - e->copy; } else { firstCopy = e->copy; } prevCopy = e->copy; e = e->next; } while (e != firstEdge); edges[firstCopy].next = prevCopy - firstCopy; } copied++; } for (int32_t i = 0; i < copied; i++) { btConvexHullInternal::Vertex* v = oldVertices[i]; btConvexHullInternal::Edge* firstEdge = v->edges; if (firstEdge) { btConvexHullInternal::Edge* e = firstEdge; do { if (e->copy >= 0) { #ifdef DEBUG_CONVEX_HULL printf("Vertex *%d has edge to *%d\n", i, edges[e->copy].getTargetVertex()); #endif faces.push_back(e->copy); btConvexHullInternal::Edge* f = e; do { #ifdef DEBUG_CONVEX_HULL printf(" Face 
*%d\n", edges[f->copy].getTargetVertex()); #endif f->copy = -1; f = f->reverse->prev; } while (f != e); } e = e->next; } while (e != firstEdge); } } return shift; }
71,274
C++
27.670555
243
0.455187
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/VHACD/src/vhacdManifoldMesh.cpp
/* Copyright (c) 2011 Khaled Mamou (kmamou at gmail dot com) All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. The names of the contributors may not be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include "vhacdManifoldMesh.h" namespace VHACD { TMMVertex::TMMVertex(void) { Initialize(); } void TMMVertex::Initialize() { m_name = 0; m_id = 0; m_duplicate = 0; m_onHull = false; m_tag = false; } TMMVertex::~TMMVertex(void) { } TMMEdge::TMMEdge(void) { Initialize(); } void TMMEdge::Initialize() { m_id = 0; m_triangles[0] = m_triangles[1] = m_newFace = 0; m_vertices[0] = m_vertices[1] = 0; } TMMEdge::~TMMEdge(void) { } void TMMTriangle::Initialize() { m_id = 0; for (int32_t i = 0; i < 3; i++) { m_edges[i] = 0; m_vertices[0] = 0; } m_visible = false; } TMMTriangle::TMMTriangle(void) { Initialize(); } TMMTriangle::~TMMTriangle(void) { } TMMesh::TMMesh() { } TMMesh::~TMMesh(void) { } void TMMesh::GetIFS(Vec3<double>* const points, Vec3<int32_t>* const triangles) { size_t nV = m_vertices.GetSize(); size_t nT = m_triangles.GetSize(); for (size_t v = 0; v < nV; v++) { points[v] = m_vertices.GetData().m_pos; m_vertices.GetData().m_id = v; m_vertices.Next(); } for (size_t f = 0; f < nT; f++) { TMMTriangle& currentTriangle = m_triangles.GetData(); triangles[f].X() = static_cast<int32_t>(currentTriangle.m_vertices[0]->GetData().m_id); triangles[f].Y() = static_cast<int32_t>(currentTriangle.m_vertices[1]->GetData().m_id); triangles[f].Z() = static_cast<int32_t>(currentTriangle.m_vertices[2]->GetData().m_id); m_triangles.Next(); } } void TMMesh::Clear() { m_vertices.Clear(); m_edges.Clear(); m_triangles.Clear(); } void TMMesh::Copy(TMMesh& mesh) { Clear(); // updating the id's size_t nV = mesh.m_vertices.GetSize(); size_t nE = mesh.m_edges.GetSize(); size_t nT = mesh.m_triangles.GetSize(); for (size_t v = 0; v < nV; v++) { mesh.m_vertices.GetData().m_id = v; mesh.m_vertices.Next(); } for (size_t e = 0; e < nE; e++) { mesh.m_edges.GetData().m_id = e; mesh.m_edges.Next(); } for (size_t f = 0; f < nT; f++) { mesh.m_triangles.GetData().m_id = f; mesh.m_triangles.Next(); } // copying data m_vertices = mesh.m_vertices; m_edges = mesh.m_edges; m_triangles = mesh.m_triangles; // 
generate mapping CircularListElement<TMMVertex>** vertexMap = new CircularListElement<TMMVertex>*[nV]; CircularListElement<TMMEdge>** edgeMap = new CircularListElement<TMMEdge>*[nE]; CircularListElement<TMMTriangle>** triangleMap = new CircularListElement<TMMTriangle>*[nT]; for (size_t v = 0; v < nV; v++) { vertexMap[v] = m_vertices.GetHead(); m_vertices.Next(); } for (size_t e = 0; e < nE; e++) { edgeMap[e] = m_edges.GetHead(); m_edges.Next(); } for (size_t f = 0; f < nT; f++) { triangleMap[f] = m_triangles.GetHead(); m_triangles.Next(); } // updating pointers for (size_t v = 0; v < nV; v++) { if (vertexMap[v]->GetData().m_duplicate) { vertexMap[v]->GetData().m_duplicate = edgeMap[vertexMap[v]->GetData().m_duplicate->GetData().m_id]; } } for (size_t e = 0; e < nE; e++) { if (edgeMap[e]->GetData().m_newFace) { edgeMap[e]->GetData().m_newFace = triangleMap[edgeMap[e]->GetData().m_newFace->GetData().m_id]; } if (nT > 0) { for (int32_t f = 0; f < 2; f++) { if (edgeMap[e]->GetData().m_triangles[f]) { edgeMap[e]->GetData().m_triangles[f] = triangleMap[edgeMap[e]->GetData().m_triangles[f]->GetData().m_id]; } } } for (int32_t v = 0; v < 2; v++) { if (edgeMap[e]->GetData().m_vertices[v]) { edgeMap[e]->GetData().m_vertices[v] = vertexMap[edgeMap[e]->GetData().m_vertices[v]->GetData().m_id]; } } } for (size_t f = 0; f < nT; f++) { if (nE > 0) { for (int32_t e = 0; e < 3; e++) { if (triangleMap[f]->GetData().m_edges[e]) { triangleMap[f]->GetData().m_edges[e] = edgeMap[triangleMap[f]->GetData().m_edges[e]->GetData().m_id]; } } } for (int32_t v = 0; v < 3; v++) { if (triangleMap[f]->GetData().m_vertices[v]) { triangleMap[f]->GetData().m_vertices[v] = vertexMap[triangleMap[f]->GetData().m_vertices[v]->GetData().m_id]; } } } delete[] vertexMap; delete[] edgeMap; delete[] triangleMap; } bool TMMesh::CheckConsistancy() { size_t nE = m_edges.GetSize(); size_t nT = m_triangles.GetSize(); for (size_t e = 0; e < nE; e++) { for (int32_t f = 0; f < 2; f++) { if 
(!m_edges.GetHead()->GetData().m_triangles[f]) { return false; } } m_edges.Next(); } for (size_t f = 0; f < nT; f++) { for (int32_t e = 0; e < 3; e++) { int32_t found = 0; for (int32_t k = 0; k < 2; k++) { if (m_triangles.GetHead()->GetData().m_edges[e]->GetData().m_triangles[k] == m_triangles.GetHead()) { found++; } } if (found != 1) { return false; } } m_triangles.Next(); } return true; } }
6,863
C++
32.980198
756
0.576716
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/VHACD/src/vhacdICHull.cpp
/* Copyright (c) 2011 Khaled Mamou (kmamou at gmail dot com) All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. The names of the contributors may not be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include "vhacdICHull.h" #include <limits> #ifdef _MSC_VER #pragma warning(disable:4456 4706) #endif namespace VHACD { const double ICHull::sc_eps = 1.0e-15; const int32_t ICHull::sc_dummyIndex = std::numeric_limits<int32_t>::max(); ICHull::ICHull() { m_isFlat = false; } bool ICHull::AddPoints(const Vec3<double>* points, size_t nPoints) { if (!points) { return false; } CircularListElement<TMMVertex>* vertex = NULL; for (size_t i = 0; i < nPoints; i++) { vertex = m_mesh.AddVertex(); vertex->GetData().m_pos.X() = points[i].X(); vertex->GetData().m_pos.Y() = points[i].Y(); vertex->GetData().m_pos.Z() = points[i].Z(); vertex->GetData().m_name = static_cast<int32_t>(i); } return true; } bool ICHull::AddPoint(const Vec3<double>& point, int32_t id) { if (AddPoints(&point, 1)) { m_mesh.m_vertices.GetData().m_name = id; return true; } return false; } ICHullError ICHull::Process() { uint32_t addedPoints = 0; if (m_mesh.GetNVertices() < 3) { return ICHullErrorNotEnoughPoints; } if (m_mesh.GetNVertices() == 3) { m_isFlat = true; CircularListElement<TMMTriangle>* t1 = m_mesh.AddTriangle(); CircularListElement<TMMTriangle>* t2 = m_mesh.AddTriangle(); CircularListElement<TMMVertex>* v0 = m_mesh.m_vertices.GetHead(); CircularListElement<TMMVertex>* v1 = v0->GetNext(); CircularListElement<TMMVertex>* v2 = v1->GetNext(); // Compute the normal to the plane Vec3<double> p0 = v0->GetData().m_pos; Vec3<double> p1 = v1->GetData().m_pos; Vec3<double> p2 = v2->GetData().m_pos; m_normal = (p1 - p0) ^ (p2 - p0); m_normal.Normalize(); t1->GetData().m_vertices[0] = v0; t1->GetData().m_vertices[1] = v1; t1->GetData().m_vertices[2] = v2; t2->GetData().m_vertices[0] = v1; t2->GetData().m_vertices[1] = v2; t2->GetData().m_vertices[2] = v2; return ICHullErrorOK; } if (m_isFlat) { m_mesh.m_edges.Clear(); m_mesh.m_triangles.Clear(); m_isFlat = false; } if (m_mesh.GetNTriangles() == 0) // we have to create the first polyhedron { ICHullError res = DoubleTriangle(); if (res != ICHullErrorOK) { return 
res; } else { addedPoints += 3; } } CircularList<TMMVertex>& vertices = m_mesh.GetVertices(); // go to the first added and not processed vertex while (!(vertices.GetHead()->GetPrev()->GetData().m_tag)) { vertices.Prev(); } while (!vertices.GetData().m_tag) // not processed { vertices.GetData().m_tag = true; if (ProcessPoint()) { addedPoints++; CleanUp(addedPoints); vertices.Next(); if (!GetMesh().CheckConsistancy()) { size_t nV = m_mesh.GetNVertices(); CircularList<TMMVertex>& vertices = m_mesh.GetVertices(); for (size_t v = 0; v < nV; ++v) { if (vertices.GetData().m_name == sc_dummyIndex) { vertices.Delete(); break; } vertices.Next(); } return ICHullErrorInconsistent; } } } if (m_isFlat) { SArray<CircularListElement<TMMTriangle>*> trianglesToDuplicate; size_t nT = m_mesh.GetNTriangles(); for (size_t f = 0; f < nT; f++) { TMMTriangle& currentTriangle = m_mesh.m_triangles.GetHead()->GetData(); if (currentTriangle.m_vertices[0]->GetData().m_name == sc_dummyIndex || currentTriangle.m_vertices[1]->GetData().m_name == sc_dummyIndex || currentTriangle.m_vertices[2]->GetData().m_name == sc_dummyIndex) { m_trianglesToDelete.PushBack(m_mesh.m_triangles.GetHead()); for (int32_t k = 0; k < 3; k++) { for (int32_t h = 0; h < 2; h++) { if (currentTriangle.m_edges[k]->GetData().m_triangles[h] == m_mesh.m_triangles.GetHead()) { currentTriangle.m_edges[k]->GetData().m_triangles[h] = 0; break; } } } } else { trianglesToDuplicate.PushBack(m_mesh.m_triangles.GetHead()); } m_mesh.m_triangles.Next(); } size_t nE = m_mesh.GetNEdges(); for (size_t e = 0; e < nE; e++) { TMMEdge& currentEdge = m_mesh.m_edges.GetHead()->GetData(); if (currentEdge.m_triangles[0] == 0 && currentEdge.m_triangles[1] == 0) { m_edgesToDelete.PushBack(m_mesh.m_edges.GetHead()); } m_mesh.m_edges.Next(); } size_t nV = m_mesh.GetNVertices(); CircularList<TMMVertex>& vertices = m_mesh.GetVertices(); for (size_t v = 0; v < nV; ++v) { if (vertices.GetData().m_name == sc_dummyIndex) { vertices.Delete(); } else { 
vertices.GetData().m_tag = false; vertices.Next(); } } CleanEdges(); CleanTriangles(); CircularListElement<TMMTriangle>* newTriangle; for (size_t t = 0; t < trianglesToDuplicate.Size(); t++) { newTriangle = m_mesh.AddTriangle(); newTriangle->GetData().m_vertices[0] = trianglesToDuplicate[t]->GetData().m_vertices[1]; newTriangle->GetData().m_vertices[1] = trianglesToDuplicate[t]->GetData().m_vertices[0]; newTriangle->GetData().m_vertices[2] = trianglesToDuplicate[t]->GetData().m_vertices[2]; } } return ICHullErrorOK; } ICHullError ICHull::Process(const uint32_t nPointsCH, const double minVolume) { uint32_t addedPoints = 0; if (nPointsCH < 3 || m_mesh.GetNVertices() < 3) { return ICHullErrorNotEnoughPoints; } if (m_mesh.GetNVertices() == 3) { m_isFlat = true; CircularListElement<TMMTriangle>* t1 = m_mesh.AddTriangle(); CircularListElement<TMMTriangle>* t2 = m_mesh.AddTriangle(); CircularListElement<TMMVertex>* v0 = m_mesh.m_vertices.GetHead(); CircularListElement<TMMVertex>* v1 = v0->GetNext(); CircularListElement<TMMVertex>* v2 = v1->GetNext(); // Compute the normal to the plane Vec3<double> p0 = v0->GetData().m_pos; Vec3<double> p1 = v1->GetData().m_pos; Vec3<double> p2 = v2->GetData().m_pos; m_normal = (p1 - p0) ^ (p2 - p0); m_normal.Normalize(); t1->GetData().m_vertices[0] = v0; t1->GetData().m_vertices[1] = v1; t1->GetData().m_vertices[2] = v2; t2->GetData().m_vertices[0] = v1; t2->GetData().m_vertices[1] = v0; t2->GetData().m_vertices[2] = v2; return ICHullErrorOK; } if (m_isFlat) { m_mesh.m_triangles.Clear(); m_mesh.m_edges.Clear(); m_isFlat = false; } if (m_mesh.GetNTriangles() == 0) // we have to create the first polyhedron { ICHullError res = DoubleTriangle(); if (res != ICHullErrorOK) { return res; } else { addedPoints += 3; } } CircularList<TMMVertex>& vertices = m_mesh.GetVertices(); while (!vertices.GetData().m_tag && addedPoints < nPointsCH) // not processed { if (!FindMaxVolumePoint((addedPoints > 4) ? 
minVolume : 0.0)) { break; } vertices.GetData().m_tag = true; if (ProcessPoint()) { addedPoints++; CleanUp(addedPoints); if (!GetMesh().CheckConsistancy()) { size_t nV = m_mesh.GetNVertices(); CircularList<TMMVertex>& vertices = m_mesh.GetVertices(); for (size_t v = 0; v < nV; ++v) { if (vertices.GetData().m_name == sc_dummyIndex) { vertices.Delete(); break; } vertices.Next(); } return ICHullErrorInconsistent; } vertices.Next(); } } // delete remaining points while (!vertices.GetData().m_tag) { vertices.Delete(); } if (m_isFlat) { SArray<CircularListElement<TMMTriangle>*> trianglesToDuplicate; size_t nT = m_mesh.GetNTriangles(); for (size_t f = 0; f < nT; f++) { TMMTriangle& currentTriangle = m_mesh.m_triangles.GetHead()->GetData(); if (currentTriangle.m_vertices[0]->GetData().m_name == sc_dummyIndex || currentTriangle.m_vertices[1]->GetData().m_name == sc_dummyIndex || currentTriangle.m_vertices[2]->GetData().m_name == sc_dummyIndex) { m_trianglesToDelete.PushBack(m_mesh.m_triangles.GetHead()); for (int32_t k = 0; k < 3; k++) { for (int32_t h = 0; h < 2; h++) { if (currentTriangle.m_edges[k]->GetData().m_triangles[h] == m_mesh.m_triangles.GetHead()) { currentTriangle.m_edges[k]->GetData().m_triangles[h] = 0; break; } } } } else { trianglesToDuplicate.PushBack(m_mesh.m_triangles.GetHead()); } m_mesh.m_triangles.Next(); } size_t nE = m_mesh.GetNEdges(); for (size_t e = 0; e < nE; e++) { TMMEdge& currentEdge = m_mesh.m_edges.GetHead()->GetData(); if (currentEdge.m_triangles[0] == 0 && currentEdge.m_triangles[1] == 0) { m_edgesToDelete.PushBack(m_mesh.m_edges.GetHead()); } m_mesh.m_edges.Next(); } size_t nV = m_mesh.GetNVertices(); CircularList<TMMVertex>& vertices = m_mesh.GetVertices(); for (size_t v = 0; v < nV; ++v) { if (vertices.GetData().m_name == sc_dummyIndex) { vertices.Delete(); } else { vertices.GetData().m_tag = false; vertices.Next(); } } CleanEdges(); CleanTriangles(); CircularListElement<TMMTriangle>* newTriangle; for (size_t t = 0; t < 
trianglesToDuplicate.Size(); t++) { newTriangle = m_mesh.AddTriangle(); newTriangle->GetData().m_vertices[0] = trianglesToDuplicate[t]->GetData().m_vertices[1]; newTriangle->GetData().m_vertices[1] = trianglesToDuplicate[t]->GetData().m_vertices[0]; newTriangle->GetData().m_vertices[2] = trianglesToDuplicate[t]->GetData().m_vertices[2]; } } return ICHullErrorOK; } bool ICHull::FindMaxVolumePoint(const double minVolume) { CircularList<TMMVertex>& vertices = m_mesh.GetVertices(); CircularListElement<TMMVertex>* vMaxVolume = 0; CircularListElement<TMMVertex>* vHeadPrev = vertices.GetHead()->GetPrev(); double maxVolume = minVolume; double volume = 0.0; while (!vertices.GetData().m_tag) // not processed { if (ComputePointVolume(volume, false)) { if (maxVolume < volume) { maxVolume = volume; vMaxVolume = vertices.GetHead(); } vertices.Next(); } } CircularListElement<TMMVertex>* vHead = vHeadPrev->GetNext(); vertices.GetHead() = vHead; if (!vMaxVolume) { return false; } if (vMaxVolume != vHead) { Vec3<double> pos = vHead->GetData().m_pos; int32_t id = vHead->GetData().m_name; vHead->GetData().m_pos = vMaxVolume->GetData().m_pos; vHead->GetData().m_name = vMaxVolume->GetData().m_name; vMaxVolume->GetData().m_pos = pos; vHead->GetData().m_name = id; } return true; } ICHullError ICHull::DoubleTriangle() { // find three non colinear points m_isFlat = false; CircularList<TMMVertex>& vertices = m_mesh.GetVertices(); CircularListElement<TMMVertex>* v0 = vertices.GetHead(); while (Colinear(v0->GetData().m_pos, v0->GetNext()->GetData().m_pos, v0->GetNext()->GetNext()->GetData().m_pos)) { if ((v0 = v0->GetNext()) == vertices.GetHead()) { return ICHullErrorCoplanarPoints; } } CircularListElement<TMMVertex>* v1 = v0->GetNext(); CircularListElement<TMMVertex>* v2 = v1->GetNext(); // mark points as processed v0->GetData().m_tag = v1->GetData().m_tag = v2->GetData().m_tag = true; // create two triangles CircularListElement<TMMTriangle>* f0 = MakeFace(v0, v1, v2, 0); MakeFace(v2, v1, v0, 
f0); // find a fourth non-coplanar point to form tetrahedron CircularListElement<TMMVertex>* v3 = v2->GetNext(); vertices.GetHead() = v3; double vol = ComputeVolume4(v0->GetData().m_pos, v1->GetData().m_pos, v2->GetData().m_pos, v3->GetData().m_pos); while (fabs(vol) < sc_eps && !v3->GetNext()->GetData().m_tag) { v3 = v3->GetNext(); vol = ComputeVolume4(v0->GetData().m_pos, v1->GetData().m_pos, v2->GetData().m_pos, v3->GetData().m_pos); } if (fabs(vol) < sc_eps) { // compute the barycenter Vec3<double> bary(0.0, 0.0, 0.0); CircularListElement<TMMVertex>* vBary = v0; do { bary += vBary->GetData().m_pos; } while ((vBary = vBary->GetNext()) != v0); bary /= static_cast<double>(vertices.GetSize()); // Compute the normal to the plane Vec3<double> p0 = v0->GetData().m_pos; Vec3<double> p1 = v1->GetData().m_pos; Vec3<double> p2 = v2->GetData().m_pos; m_normal = (p1 - p0) ^ (p2 - p0); m_normal.Normalize(); // add dummy vertex placed at (bary + normal) vertices.GetHead() = v2; Vec3<double> newPt = bary + m_normal; AddPoint(newPt, sc_dummyIndex); m_isFlat = true; return ICHullErrorOK; } else if (v3 != vertices.GetHead()) { TMMVertex temp; temp.m_name = v3->GetData().m_name; temp.m_pos = v3->GetData().m_pos; v3->GetData().m_name = vertices.GetHead()->GetData().m_name; v3->GetData().m_pos = vertices.GetHead()->GetData().m_pos; vertices.GetHead()->GetData().m_name = temp.m_name; vertices.GetHead()->GetData().m_pos = temp.m_pos; } return ICHullErrorOK; } CircularListElement<TMMTriangle>* ICHull::MakeFace(CircularListElement<TMMVertex>* v0, CircularListElement<TMMVertex>* v1, CircularListElement<TMMVertex>* v2, CircularListElement<TMMTriangle>* fold) { CircularListElement<TMMEdge>* e0; CircularListElement<TMMEdge>* e1; CircularListElement<TMMEdge>* e2; int32_t index = 0; if (!fold) // if first face to be created { e0 = m_mesh.AddEdge(); // create the three edges e1 = m_mesh.AddEdge(); e2 = m_mesh.AddEdge(); } else // otherwise re-use existing edges (in reverse order) { e0 = 
fold->GetData().m_edges[2]; e1 = fold->GetData().m_edges[1]; e2 = fold->GetData().m_edges[0]; index = 1; } e0->GetData().m_vertices[0] = v0; e0->GetData().m_vertices[1] = v1; e1->GetData().m_vertices[0] = v1; e1->GetData().m_vertices[1] = v2; e2->GetData().m_vertices[0] = v2; e2->GetData().m_vertices[1] = v0; // create the new face CircularListElement<TMMTriangle>* f = m_mesh.AddTriangle(); f->GetData().m_edges[0] = e0; f->GetData().m_edges[1] = e1; f->GetData().m_edges[2] = e2; f->GetData().m_vertices[0] = v0; f->GetData().m_vertices[1] = v1; f->GetData().m_vertices[2] = v2; // link edges to face f e0->GetData().m_triangles[index] = e1->GetData().m_triangles[index] = e2->GetData().m_triangles[index] = f; return f; } CircularListElement<TMMTriangle>* ICHull::MakeConeFace(CircularListElement<TMMEdge>* e, CircularListElement<TMMVertex>* p) { // create two new edges if they don't already exist CircularListElement<TMMEdge>* newEdges[2]; for (int32_t i = 0; i < 2; ++i) { if (!(newEdges[i] = e->GetData().m_vertices[i]->GetData().m_duplicate)) { // if the edge doesn't exits add it and mark the vertex as duplicated newEdges[i] = m_mesh.AddEdge(); newEdges[i]->GetData().m_vertices[0] = e->GetData().m_vertices[i]; newEdges[i]->GetData().m_vertices[1] = p; e->GetData().m_vertices[i]->GetData().m_duplicate = newEdges[i]; } } // make the new face CircularListElement<TMMTriangle>* newFace = m_mesh.AddTriangle(); newFace->GetData().m_edges[0] = e; newFace->GetData().m_edges[1] = newEdges[0]; newFace->GetData().m_edges[2] = newEdges[1]; MakeCCW(newFace, e, p); for (int32_t i = 0; i < 2; ++i) { for (int32_t j = 0; j < 2; ++j) { if (!newEdges[i]->GetData().m_triangles[j]) { newEdges[i]->GetData().m_triangles[j] = newFace; break; } } } return newFace; } bool ICHull::ComputePointVolume(double& totalVolume, bool markVisibleFaces) { // mark visible faces CircularListElement<TMMTriangle>* fHead = m_mesh.GetTriangles().GetHead(); CircularListElement<TMMTriangle>* f = fHead; 
CircularList<TMMVertex>& vertices = m_mesh.GetVertices(); CircularListElement<TMMVertex>* vertex0 = vertices.GetHead(); bool visible = false; Vec3<double> pos0 = Vec3<double>(vertex0->GetData().m_pos.X(), vertex0->GetData().m_pos.Y(), vertex0->GetData().m_pos.Z()); double vol = 0.0; totalVolume = 0.0; Vec3<double> ver0, ver1, ver2; do { ver0.X() = f->GetData().m_vertices[0]->GetData().m_pos.X(); ver0.Y() = f->GetData().m_vertices[0]->GetData().m_pos.Y(); ver0.Z() = f->GetData().m_vertices[0]->GetData().m_pos.Z(); ver1.X() = f->GetData().m_vertices[1]->GetData().m_pos.X(); ver1.Y() = f->GetData().m_vertices[1]->GetData().m_pos.Y(); ver1.Z() = f->GetData().m_vertices[1]->GetData().m_pos.Z(); ver2.X() = f->GetData().m_vertices[2]->GetData().m_pos.X(); ver2.Y() = f->GetData().m_vertices[2]->GetData().m_pos.Y(); ver2.Z() = f->GetData().m_vertices[2]->GetData().m_pos.Z(); vol = ComputeVolume4(ver0, ver1, ver2, pos0); if (vol < -sc_eps) { vol = fabs(vol); totalVolume += vol; if (markVisibleFaces) { f->GetData().m_visible = true; m_trianglesToDelete.PushBack(f); } visible = true; } f = f->GetNext(); } while (f != fHead); if (m_trianglesToDelete.Size() == m_mesh.m_triangles.GetSize()) { for (size_t i = 0; i < m_trianglesToDelete.Size(); i++) { m_trianglesToDelete[i]->GetData().m_visible = false; } visible = false; } // if no faces visible from p then p is inside the hull if (!visible && markVisibleFaces) { vertices.Delete(); m_trianglesToDelete.Resize(0); return false; } return true; } bool ICHull::ProcessPoint() { double totalVolume = 0.0; if (!ComputePointVolume(totalVolume, true)) { return false; } // Mark edges in interior of visible region for deletion. 
// Create a new face based on each border edge CircularListElement<TMMVertex>* v0 = m_mesh.GetVertices().GetHead(); CircularListElement<TMMEdge>* eHead = m_mesh.GetEdges().GetHead(); CircularListElement<TMMEdge>* e = eHead; CircularListElement<TMMEdge>* tmp = 0; int32_t nvisible = 0; m_edgesToDelete.Resize(0); m_edgesToUpdate.Resize(0); do { tmp = e->GetNext(); nvisible = 0; for (int32_t k = 0; k < 2; k++) { if (e->GetData().m_triangles[k]->GetData().m_visible) { nvisible++; } } if (nvisible == 2) { m_edgesToDelete.PushBack(e); } else if (nvisible == 1) { e->GetData().m_newFace = MakeConeFace(e, v0); m_edgesToUpdate.PushBack(e); } e = tmp; } while (e != eHead); return true; } bool ICHull::MakeCCW(CircularListElement<TMMTriangle>* f, CircularListElement<TMMEdge>* e, CircularListElement<TMMVertex>* v) { // the visible face adjacent to e CircularListElement<TMMTriangle>* fv; if (e->GetData().m_triangles[0]->GetData().m_visible) { fv = e->GetData().m_triangles[0]; } else { fv = e->GetData().m_triangles[1]; } // set vertex[0] and vertex[1] to have the same orientation as the corresponding vertices of fv. 
int32_t i; // index of e->m_vertices[0] in fv CircularListElement<TMMVertex>* v0 = e->GetData().m_vertices[0]; CircularListElement<TMMVertex>* v1 = e->GetData().m_vertices[1]; for (i = 0; fv->GetData().m_vertices[i] != v0; i++) ; if (fv->GetData().m_vertices[(i + 1) % 3] != e->GetData().m_vertices[1]) { f->GetData().m_vertices[0] = v1; f->GetData().m_vertices[1] = v0; } else { f->GetData().m_vertices[0] = v0; f->GetData().m_vertices[1] = v1; // swap edges CircularListElement<TMMEdge>* tmp = f->GetData().m_edges[0]; f->GetData().m_edges[0] = f->GetData().m_edges[1]; f->GetData().m_edges[1] = tmp; } f->GetData().m_vertices[2] = v; return true; } bool ICHull::CleanUp(uint32_t& addedPoints) { bool r0 = CleanEdges(); bool r1 = CleanTriangles(); bool r2 = CleanVertices(addedPoints); return r0 && r1 && r2; } bool ICHull::CleanEdges() { // integrate the new faces into the data structure CircularListElement<TMMEdge>* e; const size_t ne_update = m_edgesToUpdate.Size(); for (size_t i = 0; i < ne_update; ++i) { e = m_edgesToUpdate[i]; if (e->GetData().m_newFace) { if (e->GetData().m_triangles[0]->GetData().m_visible) { e->GetData().m_triangles[0] = e->GetData().m_newFace; } else { e->GetData().m_triangles[1] = e->GetData().m_newFace; } e->GetData().m_newFace = 0; } } // delete edges maked for deletion CircularList<TMMEdge>& edges = m_mesh.GetEdges(); const size_t ne_delete = m_edgesToDelete.Size(); for (size_t i = 0; i < ne_delete; ++i) { edges.Delete(m_edgesToDelete[i]); } m_edgesToDelete.Resize(0); m_edgesToUpdate.Resize(0); return true; } bool ICHull::CleanTriangles() { CircularList<TMMTriangle>& triangles = m_mesh.GetTriangles(); const size_t nt_delete = m_trianglesToDelete.Size(); for (size_t i = 0; i < nt_delete; ++i) { triangles.Delete(m_trianglesToDelete[i]); } m_trianglesToDelete.Resize(0); return true; } bool ICHull::CleanVertices(uint32_t& addedPoints) { // mark all vertices incident to some undeleted edge as on the hull CircularList<TMMEdge>& edges = 
m_mesh.GetEdges(); CircularListElement<TMMEdge>* e = edges.GetHead(); size_t nE = edges.GetSize(); for (size_t i = 0; i < nE; i++) { e->GetData().m_vertices[0]->GetData().m_onHull = true; e->GetData().m_vertices[1]->GetData().m_onHull = true; e = e->GetNext(); } // delete all the vertices that have been processed but are not on the hull CircularList<TMMVertex>& vertices = m_mesh.GetVertices(); CircularListElement<TMMVertex>* vHead = vertices.GetHead(); CircularListElement<TMMVertex>* v = vHead; v = v->GetPrev(); do { if (v->GetData().m_tag && !v->GetData().m_onHull) { CircularListElement<TMMVertex>* tmp = v->GetPrev(); vertices.Delete(v); v = tmp; addedPoints--; } else { v->GetData().m_duplicate = 0; v->GetData().m_onHull = false; v = v->GetPrev(); } } while (v->GetData().m_tag && v != vHead); return true; } void ICHull::Clear() { m_mesh.Clear(); m_edgesToDelete.Resize(0); m_edgesToUpdate.Resize(0); m_trianglesToDelete.Resize(0); m_isFlat = false; } const ICHull& ICHull::operator=(ICHull& rhs) { if (&rhs != this) { m_mesh.Copy(rhs.m_mesh); m_edgesToDelete = rhs.m_edgesToDelete; m_edgesToUpdate = rhs.m_edgesToUpdate; m_trianglesToDelete = rhs.m_trianglesToDelete; m_isFlat = rhs.m_isFlat; } return (*this); } bool ICHull::IsInside(const Vec3<double>& pt0, const double eps) { const Vec3<double> pt(pt0.X(), pt0.Y(), pt0.Z()); if (m_isFlat) { size_t nT = m_mesh.m_triangles.GetSize(); Vec3<double> ver0, ver1, ver2, a, b, c; double u, v; for (size_t t = 0; t < nT; t++) { ver0.X() = m_mesh.m_triangles.GetHead()->GetData().m_vertices[0]->GetData().m_pos.X(); ver0.Y() = m_mesh.m_triangles.GetHead()->GetData().m_vertices[0]->GetData().m_pos.Y(); ver0.Z() = m_mesh.m_triangles.GetHead()->GetData().m_vertices[0]->GetData().m_pos.Z(); ver1.X() = m_mesh.m_triangles.GetHead()->GetData().m_vertices[1]->GetData().m_pos.X(); ver1.Y() = m_mesh.m_triangles.GetHead()->GetData().m_vertices[1]->GetData().m_pos.Y(); ver1.Z() = 
m_mesh.m_triangles.GetHead()->GetData().m_vertices[1]->GetData().m_pos.Z(); ver2.X() = m_mesh.m_triangles.GetHead()->GetData().m_vertices[2]->GetData().m_pos.X(); ver2.Y() = m_mesh.m_triangles.GetHead()->GetData().m_vertices[2]->GetData().m_pos.Y(); ver2.Z() = m_mesh.m_triangles.GetHead()->GetData().m_vertices[2]->GetData().m_pos.Z(); a = ver1 - ver0; b = ver2 - ver0; c = pt - ver0; u = c * a; v = c * b; if (u >= 0.0 && u <= 1.0 && v >= 0.0 && u + v <= 1.0) { return true; } m_mesh.m_triangles.Next(); } return false; } else { size_t nT = m_mesh.m_triangles.GetSize(); Vec3<double> ver0, ver1, ver2; double vol; for (size_t t = 0; t < nT; t++) { ver0.X() = m_mesh.m_triangles.GetHead()->GetData().m_vertices[0]->GetData().m_pos.X(); ver0.Y() = m_mesh.m_triangles.GetHead()->GetData().m_vertices[0]->GetData().m_pos.Y(); ver0.Z() = m_mesh.m_triangles.GetHead()->GetData().m_vertices[0]->GetData().m_pos.Z(); ver1.X() = m_mesh.m_triangles.GetHead()->GetData().m_vertices[1]->GetData().m_pos.X(); ver1.Y() = m_mesh.m_triangles.GetHead()->GetData().m_vertices[1]->GetData().m_pos.Y(); ver1.Z() = m_mesh.m_triangles.GetHead()->GetData().m_vertices[1]->GetData().m_pos.Z(); ver2.X() = m_mesh.m_triangles.GetHead()->GetData().m_vertices[2]->GetData().m_pos.X(); ver2.Y() = m_mesh.m_triangles.GetHead()->GetData().m_vertices[2]->GetData().m_pos.Y(); ver2.Z() = m_mesh.m_triangles.GetHead()->GetData().m_vertices[2]->GetData().m_pos.Z(); vol = ComputeVolume4(ver0, ver1, ver2, pt); if (vol < eps) { return false; } m_mesh.m_triangles.Next(); } return true; } } }
28,913
C++
38.5
756
0.562896
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/VHACD/src/vhacdRaycastMesh.cpp
#include "vhacdRaycastMesh.h" #include <math.h> #include <assert.h> namespace RAYCAST_MESH { /* a = b - c */ #define vector(a,b,c) \ (a)[0] = (b)[0] - (c)[0]; \ (a)[1] = (b)[1] - (c)[1]; \ (a)[2] = (b)[2] - (c)[2]; #define innerProduct(v,q) \ ((v)[0] * (q)[0] + \ (v)[1] * (q)[1] + \ (v)[2] * (q)[2]) #define crossProduct(a,b,c) \ (a)[0] = (b)[1] * (c)[2] - (c)[1] * (b)[2]; \ (a)[1] = (b)[2] * (c)[0] - (c)[2] * (b)[0]; \ (a)[2] = (b)[0] * (c)[1] - (c)[0] * (b)[1]; static inline bool rayIntersectsTriangle(const double *p,const double *d,const double *v0,const double *v1,const double *v2,double &t) { double e1[3],e2[3],h[3],s[3],q[3]; double a,f,u,v; vector(e1,v1,v0); vector(e2,v2,v0); crossProduct(h,d,e2); a = innerProduct(e1,h); if (a > -0.00001 && a < 0.00001) return(false); f = 1/a; vector(s,p,v0); u = f * (innerProduct(s,h)); if (u < 0.0 || u > 1.0) return(false); crossProduct(q,s,e1); v = f * innerProduct(d,q); if (v < 0.0 || u + v > 1.0) return(false); // at this stage we can compute t to find out where // the intersection point is on the line t = f * innerProduct(e2,q); if (t > 0) // ray intersection return(true); else // this means that there is a line intersection // but not a ray intersection return (false); } static double getPointDistance(const double *p1, const double *p2) { double dx = p1[0] - p2[0]; double dy = p1[1] - p2[1]; double dz = p1[2] - p2[2]; return sqrt(dx*dx + dy*dy + dz*dz); } class MyRaycastMesh : public VHACD::RaycastMesh { public: template <class T> MyRaycastMesh(uint32_t vcount, const T *vertices, uint32_t tcount, const uint32_t *indices) { mVcount = vcount; mVertices = new double[mVcount * 3]; for (uint32_t i = 0; i < mVcount; i++) { mVertices[i * 3 + 0] = vertices[0]; mVertices[i * 3 + 1] = vertices[1]; mVertices[i * 3 + 2] = vertices[2]; vertices += 3; } mTcount = tcount; mIndices = new uint32_t[mTcount * 3]; for (uint32_t i = 0; i < mTcount; i++) { mIndices[i * 3 + 0] = indices[0]; mIndices[i * 3 + 1] = indices[1]; mIndices[i * 3 + 
2] = indices[2]; indices += 3; } } ~MyRaycastMesh(void) { delete[]mVertices; delete[]mIndices; } virtual void release(void) { delete this; } virtual bool raycast(const double *from, // The starting point of the raycast const double *to, // The ending point of the raycast const double *closestToPoint, // The point to match the nearest hit location (can just be the 'from' location of no specific point) double *hitLocation, // The point where the ray hit nearest to the 'closestToPoint' location double *hitDistance) final // The distance the ray traveled to the hit location { bool ret = false; double dir[3]; dir[0] = to[0] - from[0]; dir[1] = to[1] - from[1]; dir[2] = to[2] - from[2]; double distance = sqrt( dir[0]*dir[0] + dir[1]*dir[1]+dir[2]*dir[2] ); if ( distance < 0.0000000001f ) return false; double recipDistance = 1.0f / distance; dir[0]*=recipDistance; dir[1]*=recipDistance; dir[2]*=recipDistance; const uint32_t *indices = mIndices; const double *vertices = mVertices; double nearestDistance = distance; for (uint32_t tri=0; tri<mTcount; tri++) { uint32_t i1 = indices[tri*3+0]; uint32_t i2 = indices[tri*3+1]; uint32_t i3 = indices[tri*3+2]; const double *p1 = &vertices[i1*3]; const double *p2 = &vertices[i2*3]; const double *p3 = &vertices[i3*3]; double t; if ( rayIntersectsTriangle(from,dir,p1,p2,p3,t)) { double hitPos[3]; hitPos[0] = from[0] + dir[0] * t; hitPos[1] = from[1] + dir[1] * t; hitPos[2] = from[2] + dir[2] * t; double pointDistance = getPointDistance(hitPos, closestToPoint); if (pointDistance < nearestDistance ) { nearestDistance = pointDistance; if ( hitLocation ) { hitLocation[0] = hitPos[0]; hitLocation[1] = hitPos[1]; hitLocation[2] = hitPos[2]; } if ( hitDistance ) { *hitDistance = pointDistance; } ret = true; } } } return ret; } uint32_t mVcount; double *mVertices; uint32_t mTcount; uint32_t *mIndices; }; }; using namespace RAYCAST_MESH; namespace VHACD { RaycastMesh * RaycastMesh::createRaycastMesh(uint32_t vcount, // The number of vertices 
in the source triangle mesh const double *vertices, // The array of vertex positions in the format x1,y1,z1..x2,y2,z2.. etc. uint32_t tcount, // The number of triangles in the source triangle mesh const uint32_t *indices) // The triangle indices in the format of i1,i2,i3 ... i4,i5,i6, ... { MyRaycastMesh *m = new MyRaycastMesh(vcount, vertices, tcount, indices); return static_cast<RaycastMesh *>(m); } RaycastMesh * RaycastMesh::createRaycastMesh(uint32_t vcount, // The number of vertices in the source triangle mesh const float *vertices, // The array of vertex positions in the format x1,y1,z1..x2,y2,z2.. etc. uint32_t tcount, // The number of triangles in the source triangle mesh const uint32_t *indices) // The triangle indices in the format of i1,i2,i3 ... i4,i5,i6, ... { MyRaycastMesh *m = new MyRaycastMesh(vcount, vertices, tcount, indices); return static_cast<RaycastMesh *>(m); } } // end of VHACD namespace
6,352
C++
29.543269
141
0.515113
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/VHACD/src/vhacdMesh.cpp
/* Copyright (c) 2011 Khaled Mamou (kmamou at gmail dot com) All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. The names of the contributors may not be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #define _CRT_SECURE_NO_WARNINGS #include "btConvexHullComputer.h" #include "vhacdMesh.h" #include <fstream> #include <iosfwd> #include <iostream> #include <stdio.h> #include <stdlib.h> #include <string> namespace VHACD { Mesh::Mesh() { m_diag = 1.0; } Mesh::~Mesh() { } Vec3<double>& Mesh::ComputeCenter(void) { const size_t nV = GetNPoints(); if (nV) { m_minBB = GetPoint(0); m_maxBB = GetPoint(0); for (size_t v = 1; v < nV; v++) { Vec3<double> p = GetPoint(v); if (p.X() < m_minBB.X()) { m_minBB.X() = p.X(); } if (p.Y() < m_minBB.Y()) { m_minBB.Y() = p.Y(); } if (p.Z() < m_minBB.Z()) { m_minBB.Z() = p.Z(); } if (p.X() > m_maxBB.X()) { m_maxBB.X() = p.X(); } if (p.Y() > m_maxBB.Y()) { m_maxBB.Y() = p.Y(); } if (p.Z() > m_maxBB.Z()) { m_maxBB.Z() = p.Z(); } } m_center.X() = (m_maxBB.X() - m_minBB.X())*0.5 + m_minBB.X(); m_center.Y() = (m_maxBB.Y() - m_minBB.Y())*0.5 + m_minBB.Y(); m_center.Z() = (m_maxBB.Z() - m_minBB.Z())*0.5 + m_minBB.Z(); } return m_center; } double Mesh::ComputeVolume() const { const size_t nV = GetNPoints(); const size_t nT = GetNTriangles(); if (nV == 0 || nT == 0) { return 0.0; } Vec3<double> bary(0.0, 0.0, 0.0); for (size_t v = 0; v < nV; v++) { bary += GetPoint(v); } bary /= static_cast<double>(nV); Vec3<double> ver0, ver1, ver2; double totalVolume = 0.0; for (int32_t t = 0; t < int32_t(nT); t++) { const Vec3<int32_t>& tri = GetTriangle(t); ver0 = GetPoint(tri[0]); ver1 = GetPoint(tri[1]); ver2 = GetPoint(tri[2]); totalVolume += ComputeVolume4(ver0, ver1, ver2, bary); } return totalVolume / 6.0; } void Mesh::ComputeConvexHull(const double* const pts, const size_t nPts) { ResizePoints(0); ResizeTriangles(0); btConvexHullComputer ch; ch.compute(pts, 3 * sizeof(double), (int32_t)nPts, -1.0, -1.0); for (int32_t v = 0; v < ch.vertices.size(); v++) { AddPoint(Vec3<double>(ch.vertices[v].getX(), ch.vertices[v].getY(), ch.vertices[v].getZ())); } const int32_t nt = ch.faces.size(); for (int32_t t = 0; t < nt; ++t) { const btConvexHullComputer::Edge* 
sourceEdge = &(ch.edges[ch.faces[t]]); int32_t a = sourceEdge->getSourceVertex(); int32_t b = sourceEdge->getTargetVertex(); const btConvexHullComputer::Edge* edge = sourceEdge->getNextEdgeOfFace(); int32_t c = edge->getTargetVertex(); while (c != a) { AddTriangle(Vec3<int32_t>(a, b, c)); edge = edge->getNextEdgeOfFace(); b = c; c = edge->getTargetVertex(); } } } void Mesh::Clip(const Plane& plane, SArray<Vec3<double> >& positivePart, SArray<Vec3<double> >& negativePart) const { const size_t nV = GetNPoints(); if (nV == 0) { return; } double d; for (size_t v = 0; v < nV; v++) { const Vec3<double>& pt = GetPoint(v); d = plane.m_a * pt[0] + plane.m_b * pt[1] + plane.m_c * pt[2] + plane.m_d; if (d > 0.0) { positivePart.PushBack(pt); } else if (d < 0.0) { negativePart.PushBack(pt); } else { positivePart.PushBack(pt); negativePart.PushBack(pt); } } } bool Mesh::IsInside(const Vec3<double>& pt) const { const size_t nV = GetNPoints(); const size_t nT = GetNTriangles(); if (nV == 0 || nT == 0) { return false; } Vec3<double> ver0, ver1, ver2; double volume; for (int32_t t = 0; t < int32_t(nT); t++) { const Vec3<int32_t>& tri = GetTriangle(t); ver0 = GetPoint(tri[0]); ver1 = GetPoint(tri[1]); ver2 = GetPoint(tri[2]); volume = ComputeVolume4(ver0, ver1, ver2, pt); if (volume < 0.0) { return false; } } return true; } double Mesh::ComputeDiagBB() { const size_t nPoints = GetNPoints(); if (nPoints == 0) return 0.0; Vec3<double> minBB = m_points[0]; Vec3<double> maxBB = m_points[0]; double x, y, z; for (size_t v = 1; v < nPoints; v++) { x = m_points[v][0]; y = m_points[v][1]; z = m_points[v][2]; if (x < minBB[0]) minBB[0] = x; else if (x > maxBB[0]) maxBB[0] = x; if (y < minBB[1]) minBB[1] = y; else if (y > maxBB[1]) maxBB[1] = y; if (z < minBB[2]) minBB[2] = z; else if (z > maxBB[2]) maxBB[2] = z; } return (m_diag = (maxBB - minBB).GetNorm()); } #ifdef VHACD_DEBUG_MESH bool Mesh::SaveVRML2(const std::string& fileName) const { std::ofstream fout(fileName.c_str()); if 
(fout.is_open()) { const Material material; if (SaveVRML2(fout, material)) { fout.close(); return true; } return false; } return false; } bool Mesh::SaveVRML2(std::ofstream& fout, const Material& material) const { if (fout.is_open()) { fout.setf(std::ios::fixed, std::ios::floatfield); fout.setf(std::ios::showpoint); fout.precision(6); size_t nV = m_points.Size(); size_t nT = m_triangles.Size(); fout << "#VRML V2.0 utf8" << std::endl; fout << "" << std::endl; fout << "# Vertices: " << nV << std::endl; fout << "# Triangles: " << nT << std::endl; fout << "" << std::endl; fout << "Group {" << std::endl; fout << " children [" << std::endl; fout << " Shape {" << std::endl; fout << " appearance Appearance {" << std::endl; fout << " material Material {" << std::endl; fout << " diffuseColor " << material.m_diffuseColor[0] << " " << material.m_diffuseColor[1] << " " << material.m_diffuseColor[2] << std::endl; fout << " ambientIntensity " << material.m_ambientIntensity << std::endl; fout << " specularColor " << material.m_specularColor[0] << " " << material.m_specularColor[1] << " " << material.m_specularColor[2] << std::endl; fout << " emissiveColor " << material.m_emissiveColor[0] << " " << material.m_emissiveColor[1] << " " << material.m_emissiveColor[2] << std::endl; fout << " shininess " << material.m_shininess << std::endl; fout << " transparency " << material.m_transparency << std::endl; fout << " }" << std::endl; fout << " }" << std::endl; fout << " geometry IndexedFaceSet {" << std::endl; fout << " ccw TRUE" << std::endl; fout << " solid TRUE" << std::endl; fout << " convex TRUE" << std::endl; if (nV > 0) { fout << " coord DEF co Coordinate {" << std::endl; fout << " point [" << std::endl; for (size_t v = 0; v < nV; v++) { fout << " " << m_points[v][0] << " " << m_points[v][1] << " " << m_points[v][2] << "," << std::endl; } fout << " ]" << std::endl; fout << " }" << std::endl; } if (nT > 0) { fout << " coordIndex [ " << std::endl; for (size_t f = 0; f < nT; f++) { 
fout << " " << m_triangles[f][0] << ", " << m_triangles[f][1] << ", " << m_triangles[f][2] << ", -1," << std::endl; } fout << " ]" << std::endl; } fout << " }" << std::endl; fout << " }" << std::endl; fout << " ]" << std::endl; fout << "}" << std::endl; return true; } return false; } bool Mesh::SaveOFF(const std::string& fileName) const { std::ofstream fout(fileName.c_str()); if (fout.is_open()) { size_t nV = m_points.Size(); size_t nT = m_triangles.Size(); fout << "OFF" << std::endl; fout << nV << " " << nT << " " << 0 << std::endl; for (size_t v = 0; v < nV; v++) { fout << m_points[v][0] << " " << m_points[v][1] << " " << m_points[v][2] << std::endl; } for (size_t f = 0; f < nT; f++) { fout << "3 " << m_triangles[f][0] << " " << m_triangles[f][1] << " " << m_triangles[f][2] << std::endl; } fout.close(); return true; } return false; } bool Mesh::LoadOFF(const std::string& fileName, bool invert) { FILE* fid = fopen(fileName.c_str(), "r"); if (fid) { const std::string strOFF("OFF"); char temp[1024]; fscanf(fid, "%s", temp); if (std::string(temp) != strOFF) { fclose(fid); return false; } else { int32_t nv = 0; int32_t nf = 0; int32_t ne = 0; fscanf(fid, "%i", &nv); fscanf(fid, "%i", &nf); fscanf(fid, "%i", &ne); m_points.Resize(nv); m_triangles.Resize(nf); Vec3<double> coord; float x, y, z; for (int32_t p = 0; p < nv; p++) { fscanf(fid, "%f", &x); fscanf(fid, "%f", &y); fscanf(fid, "%f", &z); m_points[p][0] = x; m_points[p][1] = y; m_points[p][2] = z; } int32_t i, j, k, s; for (int32_t t = 0; t < nf; ++t) { fscanf(fid, "%i", &s); if (s == 3) { fscanf(fid, "%i", &i); fscanf(fid, "%i", &j); fscanf(fid, "%i", &k); m_triangles[t][0] = i; if (invert) { m_triangles[t][1] = k; m_triangles[t][2] = j; } else { m_triangles[t][1] = j; m_triangles[t][2] = k; } } else // Fix me: support only triangular meshes { for (int32_t h = 0; h < s; ++h) fscanf(fid, "%i", &s); } } fclose(fid); } } else { return false; } return true; } #endif // VHACD_DEBUG_MESH }
12,636
C++
33.433242
756
0.4651
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/VHACD/src/btAlignedAllocator.cpp
/* Bullet Continuous Collision Detection and Physics Library Copyright (c) 2003-2006 Erwin Coumans http://continuousphysics.com/Bullet/ This software is provided 'as-is', without any express or implied warranty. In no event will the authors be held liable for any damages arising from the use of this software. Permission is granted to anyone to use this software for any purpose, including commercial applications, and to alter it and redistribute it freely, subject to the following restrictions: 1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required. 2. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software. 3. This notice may not be removed or altered from any source distribution. */ #include "btAlignedAllocator.h" #ifdef _MSC_VER #pragma warning(disable:4311 4302) #endif int32_t gNumAlignedAllocs = 0; int32_t gNumAlignedFree = 0; int32_t gTotalBytesAlignedAllocs = 0; //detect memory leaks static void* btAllocDefault(size_t size) { return malloc(size); } static void btFreeDefault(void* ptr) { free(ptr); } static btAllocFunc* sAllocFunc = btAllocDefault; static btFreeFunc* sFreeFunc = btFreeDefault; #if defined(BT_HAS_ALIGNED_ALLOCATOR) #include <malloc.h> static void* btAlignedAllocDefault(size_t size, int32_t alignment) { return _aligned_malloc(size, (size_t)alignment); } static void btAlignedFreeDefault(void* ptr) { _aligned_free(ptr); } #elif defined(__CELLOS_LV2__) #include <stdlib.h> static inline void* btAlignedAllocDefault(size_t size, int32_t alignment) { return memalign(alignment, size); } static inline void btAlignedFreeDefault(void* ptr) { free(ptr); } #else static inline void* btAlignedAllocDefault(size_t size, int32_t alignment) { void* ret; char* real; unsigned long offset; real = (char*)sAllocFunc(size + 
sizeof(void*) + (alignment - 1)); if (real) { offset = (alignment - (unsigned long)(real + sizeof(void*))) & (alignment - 1); ret = (void*)((real + sizeof(void*)) + offset); *((void**)(ret)-1) = (void*)(real); } else { ret = (void*)(real); } return (ret); } static inline void btAlignedFreeDefault(void* ptr) { void* real; if (ptr) { real = *((void**)(ptr)-1); sFreeFunc(real); } } #endif static btAlignedAllocFunc* sAlignedAllocFunc = btAlignedAllocDefault; static btAlignedFreeFunc* sAlignedFreeFunc = btAlignedFreeDefault; void btAlignedAllocSetCustomAligned(btAlignedAllocFunc* allocFunc, btAlignedFreeFunc* freeFunc) { sAlignedAllocFunc = allocFunc ? allocFunc : btAlignedAllocDefault; sAlignedFreeFunc = freeFunc ? freeFunc : btAlignedFreeDefault; } void btAlignedAllocSetCustom(btAllocFunc* allocFunc, btFreeFunc* freeFunc) { sAllocFunc = allocFunc ? allocFunc : btAllocDefault; sFreeFunc = freeFunc ? freeFunc : btFreeDefault; } #ifdef BT_DEBUG_MEMORY_ALLOCATIONS //this generic allocator provides the total allocated number of bytes #include <stdio.h> void* btAlignedAllocInternal(size_t size, int32_t alignment, int32_t line, char* filename) { void* ret; char* real; unsigned long offset; gTotalBytesAlignedAllocs += size; gNumAlignedAllocs++; real = (char*)sAllocFunc(size + 2 * sizeof(void*) + (alignment - 1)); if (real) { offset = (alignment - (unsigned long)(real + 2 * sizeof(void*))) & (alignment - 1); ret = (void*)((real + 2 * sizeof(void*)) + offset); *((void**)(ret)-1) = (void*)(real); *((int32_t*)(ret)-2) = size; } else { ret = (void*)(real); //?? 
} printf("allocation#%d at address %x, from %s,line %d, size %d\n", gNumAlignedAllocs, real, filename, line, size); int32_t* ptr = (int32_t*)ret; *ptr = 12; return (ret); } void btAlignedFreeInternal(void* ptr, int32_t line, char* filename) { void* real; gNumAlignedFree++; if (ptr) { real = *((void**)(ptr)-1); int32_t size = *((int32_t*)(ptr)-2); gTotalBytesAlignedAllocs -= size; printf("free #%d at address %x, from %s,line %d, size %d\n", gNumAlignedFree, real, filename, line, size); sFreeFunc(real); } else { printf("NULL ptr\n"); } } #else //BT_DEBUG_MEMORY_ALLOCATIONS void* btAlignedAllocInternal(size_t size, int32_t alignment) { gNumAlignedAllocs++; void* ptr; ptr = sAlignedAllocFunc(size, alignment); // printf("btAlignedAllocInternal %d, %x\n",size,ptr); return ptr; } void btAlignedFreeInternal(void* ptr) { if (!ptr) { return; } gNumAlignedFree++; // printf("btAlignedFreeInternal %x\n",ptr); sAlignedFreeFunc(ptr); } #endif //BT_DEBUG_MEMORY_ALLOCATIONS
4,932
C++
26.254144
243
0.681671
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/VHACD/src/VHACD-ASYNC.cpp
#include "../public/VHACD.h" #include <stdlib.h> #include <string.h> #include <stdarg.h> #include <thread> #include <atomic> #include <mutex> #include <string> #include <float.h> #define ENABLE_ASYNC 1 #define HACD_ALLOC(x) malloc(x) #define HACD_FREE(x) free(x) #define HACD_ASSERT(x) assert(x) namespace VHACD { class MyHACD_API : public VHACD::IVHACD, public VHACD::IVHACD::IUserCallback, VHACD::IVHACD::IUserLogger { public: MyHACD_API(void) { mVHACD = VHACD::CreateVHACD(); } virtual ~MyHACD_API(void) { releaseHACD(); Cancel(); mVHACD->Release(); } virtual bool Compute(const double* const _points, const uint32_t countPoints, const uint32_t* const _triangles, const uint32_t countTriangles, const Parameters& _desc) final { #if ENABLE_ASYNC Cancel(); // if we previously had a solution running; cancel it. releaseHACD(); // We need to copy the input vertices and triangles into our own buffers so we can operate // on them safely from the background thread. mVertices = (double *)HACD_ALLOC(sizeof(double)*countPoints * 3); mIndices = (uint32_t *)HACD_ALLOC(sizeof(uint32_t)*countTriangles * 3); memcpy(mVertices, _points, sizeof(double)*countPoints * 3); memcpy(mIndices, _triangles, sizeof(uint32_t)*countTriangles * 3); mRunning = true; mThread = new std::thread([this, countPoints, countTriangles, _desc]() { ComputeNow(mVertices, countPoints, mIndices, countTriangles, _desc); mRunning = false; }); #else releaseHACD(); ComputeNow(_points, countPoints, _triangles, countTriangles, _desc); #endif return true; } bool ComputeNow(const double* const points, const uint32_t countPoints, const uint32_t* const triangles, const uint32_t countTriangles, const Parameters& _desc) { uint32_t ret = 0; mHullCount = 0; mCallback = _desc.m_callback; mLogger = _desc.m_logger; IVHACD::Parameters desc = _desc; // Set our intercepting callback interfaces if non-null desc.m_callback = desc.m_callback ? this : nullptr; desc.m_logger = desc.m_logger ? 
this : nullptr; if ( countPoints ) { bool ok = mVHACD->Compute(points, countPoints, triangles, countTriangles, desc); if (ok) { ret = mVHACD->GetNConvexHulls(); mHulls = new IVHACD::ConvexHull[ret]; for (uint32_t i = 0; i < ret; i++) { VHACD::IVHACD::ConvexHull vhull; mVHACD->GetConvexHull(i, vhull); VHACD::IVHACD::ConvexHull h; h.m_nPoints = vhull.m_nPoints; h.m_points = (double *)HACD_ALLOC(sizeof(double) * 3 * h.m_nPoints); memcpy(h.m_points, vhull.m_points, sizeof(double) * 3 * h.m_nPoints); h.m_nTriangles = vhull.m_nTriangles; h.m_triangles = (uint32_t *)HACD_ALLOC(sizeof(uint32_t) * 3 * h.m_nTriangles); memcpy(h.m_triangles, vhull.m_triangles, sizeof(uint32_t) * 3 * h.m_nTriangles); h.m_volume = vhull.m_volume; h.m_center[0] = vhull.m_center[0]; h.m_center[1] = vhull.m_center[1]; h.m_center[2] = vhull.m_center[2]; mHulls[i] = h; if (mCancel) { ret = 0; break; } } } } mHullCount = ret; return ret ? true : false; } void releaseHull(VHACD::IVHACD::ConvexHull &h) { HACD_FREE((void *)h.m_triangles); HACD_FREE((void *)h.m_points); h.m_triangles = nullptr; h.m_points = nullptr; } virtual void GetConvexHull(const uint32_t index, VHACD::IVHACD::ConvexHull& ch) const final { if ( index < mHullCount ) { ch = mHulls[index]; } } void releaseHACD(void) // release memory associated with the last HACD request { for (uint32_t i=0; i<mHullCount; i++) { releaseHull(mHulls[i]); } delete[]mHulls; mHulls = nullptr; mHullCount = 0; HACD_FREE(mVertices); mVertices = nullptr; HACD_FREE(mIndices); mIndices = nullptr; } virtual void release(void) // release the HACD_API interface { delete this; } virtual uint32_t getHullCount(void) { return mHullCount; } virtual void Cancel() final { if (mRunning) { mVHACD->Cancel(); // Set the cancel signal to the base VHACD } if (mThread) { mThread->join(); // Wait for the thread to fully exit before we delete the instance delete mThread; mThread = nullptr; Log("Convex Decomposition thread canceled\n"); } mCancel = false; // clear the cancel 
semaphore } virtual bool Compute(const float* const points, const uint32_t countPoints, const uint32_t* const triangles, const uint32_t countTriangles, const Parameters& params) final { double *vertices = (double *)HACD_ALLOC(sizeof(double)*countPoints * 3); const float *source = points; double *dest = vertices; for (uint32_t i = 0; i < countPoints; i++) { dest[0] = source[0]; dest[1] = source[1]; dest[2] = source[2]; dest += 3; source += 3; } bool ret = Compute(vertices, countPoints, triangles, countTriangles, params); HACD_FREE(vertices); return ret; } virtual uint32_t GetNConvexHulls() const final { processPendingMessages(); return mHullCount; } virtual void Clean(void) final // release internally allocated memory { Cancel(); releaseHACD(); mVHACD->Clean(); } virtual void Release(void) final // release IVHACD { delete this; } virtual bool OCLInit(void* const oclDevice, IVHACD::IUserLogger* const logger = 0) final { return mVHACD->OCLInit(oclDevice, logger); } virtual bool OCLRelease(IVHACD::IUserLogger* const logger = 0) final { return mVHACD->OCLRelease(logger); } virtual void Update(const double overallProgress, const double stageProgress, const double operationProgress, const char* const stage, const char* const operation) final { mMessageMutex.lock(); mHaveUpdateMessage = true; mOverallProgress = overallProgress; mStageProgress = stageProgress; mOperationProgress = operationProgress; mStage = std::string(stage); mOperation = std::string(operation); mMessageMutex.unlock(); } virtual void Log(const char* const msg) final { mMessageMutex.lock(); mHaveLogMessage = true; mMessage = std::string(msg); mMessageMutex.unlock(); } virtual bool IsReady(void) const final { processPendingMessages(); return !mRunning; } // As a convenience for the calling application we only send it update and log messages from it's own main // thread. This reduces the complexity burden on the caller by making sure it only has to deal with log // messages in it's main application thread. 
void processPendingMessages(void) const { // If we have a new update message and the user has specified a callback we send the message and clear the semaphore if (mHaveUpdateMessage && mCallback) { mMessageMutex.lock(); mCallback->Update(mOverallProgress, mStageProgress, mOperationProgress, mStage.c_str(), mOperation.c_str()); mHaveUpdateMessage = false; mMessageMutex.unlock(); } // If we have a new log message and the user has specified a callback we send the message and clear the semaphore if (mHaveLogMessage && mLogger) { mMessageMutex.lock(); mLogger->Log(mMessage.c_str()); mHaveLogMessage = false; mMessageMutex.unlock(); } } // Will compute the center of mass of the convex hull decomposition results and return it // in 'centerOfMass'. Returns false if the center of mass could not be computed. virtual bool ComputeCenterOfMass(double centerOfMass[3]) const { bool ret = false; centerOfMass[0] = 0; centerOfMass[1] = 0; centerOfMass[2] = 0; if (mVHACD && IsReady() ) { ret = mVHACD->ComputeCenterOfMass(centerOfMass); } return ret; } // Will analyze the HACD results and compute the constraints solutions. 
// It will analyze the point at which any two convex hulls touch each other and // return the total number of constraint pairs found virtual uint32_t ComputeConstraints(void) final { uint32_t ret = 0; if (mVHACD && IsReady()) { ret = mVHACD->ComputeConstraints(); } return ret; } virtual const Constraint *GetConstraint(uint32_t index) const final { const Constraint * ret = nullptr; if (mVHACD && IsReady()) { ret = mVHACD->GetConstraint(index); } return ret; } private: double *mVertices{ nullptr }; uint32_t *mIndices{ nullptr }; std::atomic< uint32_t> mHullCount{ 0 }; VHACD::IVHACD::ConvexHull *mHulls{ nullptr }; VHACD::IVHACD::IUserCallback *mCallback{ nullptr }; VHACD::IVHACD::IUserLogger *mLogger{ nullptr }; VHACD::IVHACD *mVHACD{ nullptr }; std::thread *mThread{ nullptr }; std::atomic< bool > mRunning{ false }; std::atomic<bool> mCancel{ false }; // Thread safe caching mechanism for messages and update status. // This is so that caller always gets messages in his own thread // Member variables are marked as 'mutable' since the message dispatch function // is called from const query methods. mutable std::mutex mMessageMutex; mutable std::atomic< bool > mHaveUpdateMessage{ false }; mutable std::atomic< bool > mHaveLogMessage{ false }; mutable double mOverallProgress{ 0 }; mutable double mStageProgress{ 0 }; mutable double mOperationProgress{ 0 }; mutable std::string mStage; mutable std::string mOperation; mutable std::string mMessage; }; IVHACD* CreateVHACD_ASYNC(void) { MyHACD_API *m = new MyHACD_API; return static_cast<IVHACD *>(m); } }; // end of VHACD namespace
11,328
C++
30.382271
124
0.565148
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/VHACD/src/FloatMath.cpp
#include <stdio.h> #include <stdlib.h> #include <string.h> #include <assert.h> #include <math.h> #include <float.h> #include "FloatMath.h" #include <vector> #include <malloc.h> #define REAL float #include "FloatMath.inl" #undef REAL #define REAL double #include "FloatMath.inl"
282
C++
13.894736
24
0.716312
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/VHACD/inc/btMinMax.h
/* Copyright (c) 2003-2006 Gino van den Bergen / Erwin Coumans http://continuousphysics.com/Bullet/ This software is provided 'as-is', without any express or implied warranty. In no event will the authors be held liable for any damages arising from the use of this software. Permission is granted to anyone to use this software for any purpose, including commercial applications, and to alter it and redistribute it freely, subject to the following restrictions: 1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required. 2. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software. 3. This notice may not be removed or altered from any source distribution. */ #ifndef BT_GEN_MINMAX_H #define BT_GEN_MINMAX_H #include "btScalar.h" template <class T> SIMD_FORCE_INLINE const T& btMin(const T& a, const T& b) { return a < b ? a : b; } template <class T> SIMD_FORCE_INLINE const T& btMax(const T& a, const T& b) { return a > b ? a : b; } template <class T> SIMD_FORCE_INLINE const T& btClamped(const T& a, const T& lb, const T& ub) { return a < lb ? lb : (ub < a ? ub : a); } template <class T> SIMD_FORCE_INLINE void btSetMin(T& a, const T& b) { if (b < a) { a = b; } } template <class T> SIMD_FORCE_INLINE void btSetMax(T& a, const T& b) { if (a < b) { a = b; } } template <class T> SIMD_FORCE_INLINE void btClamp(T& a, const T& lb, const T& ub) { if (a < lb) { a = lb; } else if (ub < a) { a = ub; } } #endif //BT_GEN_MINMAX_H
1,763
C
25.727272
243
0.680091
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/VHACD/inc/vhacdMutex.h
/*! ** ** Copyright (c) 2009 by John W. Ratcliff mailto:[email protected] ** ** Portions of this source has been released with the PhysXViewer application, as well as ** Rocket, CreateDynamics, ODF, and as a number of sample code snippets. ** ** If you find this code useful or you are feeling particularily generous I would ** ask that you please go to http://www.amillionpixels.us and make a donation ** to Troy DeMolay. ** ** DeMolay is a youth group for young men between the ages of 12 and 21. ** It teaches strong moral principles, as well as leadership skills and ** public speaking. The donations page uses the 'pay for pixels' paradigm ** where, in this case, a pixel is only a single penny. Donations can be ** made for as small as $4 or as high as a $100 block. Each person who donates ** will get a link to their own site as well as acknowledgement on the ** donations blog located here http://www.amillionpixels.blogspot.com/ ** ** If you wish to contact me you can use the following methods: ** ** Skype ID: jratcliff63367 ** Yahoo: jratcliff63367 ** AOL: jratcliff1961 ** email: [email protected] ** ** ** The MIT license: ** ** Permission is hereby granted, free of charge, to any person obtaining a copy ** of this software and associated documentation files (the "Software"), to deal ** in the Software without restriction, including without limitation the rights ** to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ** copies of the Software, and to permit persons to whom the Software is furnished ** to do so, subject to the following conditions: ** ** The above copyright notice and this permission notice shall be included in all ** copies or substantial portions of the Software. ** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE ** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, ** WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ** CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #pragma once #ifndef VHACD_MUTEX_H #define VHACD_MUTEX_H #if defined(WIN32) #ifndef _WIN32_WINNT #define _WIN32_WINNT 0x400 #endif #include <windows.h> #pragma comment(lib, "winmm.lib") #endif #if defined(__linux__) //#include <sys/time.h> #include <errno.h> #include <time.h> #include <unistd.h> #define __stdcall #endif #if defined(__APPLE__) || defined(__linux__) #include <pthread.h> #endif #if defined(__APPLE__) #define PTHREAD_MUTEX_RECURSIVE_NP PTHREAD_MUTEX_RECURSIVE #endif #define VHACD_DEBUG //#define VHACD_NDEBUG #ifdef VHACD_NDEBUG #define VHACD_VERIFY(x) (x) #else #define VHACD_VERIFY(x) assert((x)) #endif namespace VHACD { class Mutex { public: Mutex(void) { #if defined(WIN32) || defined(_XBOX) InitializeCriticalSection(&m_mutex); #elif defined(__APPLE__) || defined(__linux__) pthread_mutexattr_t mutexAttr; // Mutex Attribute VHACD_VERIFY(pthread_mutexattr_init(&mutexAttr) == 0); VHACD_VERIFY(pthread_mutexattr_settype(&mutexAttr, PTHREAD_MUTEX_RECURSIVE_NP) == 0); VHACD_VERIFY(pthread_mutex_init(&m_mutex, &mutexAttr) == 0); VHACD_VERIFY(pthread_mutexattr_destroy(&mutexAttr) == 0); #endif } ~Mutex(void) { #if defined(WIN32) || defined(_XBOX) DeleteCriticalSection(&m_mutex); #elif defined(__APPLE__) || defined(__linux__) VHACD_VERIFY(pthread_mutex_destroy(&m_mutex) == 0); #endif } void Lock(void) { #if defined(WIN32) || defined(_XBOX) EnterCriticalSection(&m_mutex); #elif defined(__APPLE__) || defined(__linux__) VHACD_VERIFY(pthread_mutex_lock(&m_mutex) == 0); #endif } bool TryLock(void) { #if defined(WIN32) || defined(_XBOX) bool bRet = false; //assert(("TryEnterCriticalSection seems to not work on XP???", 0)); bRet = TryEnterCriticalSection(&m_mutex) ? 
true : false; return bRet; #elif defined(__APPLE__) || defined(__linux__) int32_t result = pthread_mutex_trylock(&m_mutex); return (result == 0); #endif } void Unlock(void) { #if defined(WIN32) || defined(_XBOX) LeaveCriticalSection(&m_mutex); #elif defined(__APPLE__) || defined(__linux__) VHACD_VERIFY(pthread_mutex_unlock(&m_mutex) == 0); #endif } private: #if defined(WIN32) || defined(_XBOX) CRITICAL_SECTION m_mutex; #elif defined(__APPLE__) || defined(__linux__) pthread_mutex_t m_mutex; #endif }; } #endif // VHACD_MUTEX_H
4,677
C
30.395973
93
0.693179
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/VHACD/inc/btScalar.h
/* Copyright (c) 2003-2009 Erwin Coumans http://bullet.googlecode.com This software is provided 'as-is', without any express or implied warranty. In no event will the authors be held liable for any damages arising from the use of this software. Permission is granted to anyone to use this software for any purpose, including commercial applications, and to alter it and redistribute it freely, subject to the following restrictions: 1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required. 2. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software. 3. This notice may not be removed or altered from any source distribution. */ #ifndef BT_SCALAR_H #define BT_SCALAR_H #ifdef BT_MANAGED_CODE //Aligned data types not supported in managed code #pragma unmanaged #endif #include <float.h> #include <math.h> #include <stdlib.h> //size_t for MSVC 6.0 #include <stdint.h> /* SVN $Revision$ on $Date$ from http://bullet.googlecode.com*/ #define BT_BULLET_VERSION 279 inline int32_t btGetVersion() { return BT_BULLET_VERSION; } #if defined(DEBUG) || defined(_DEBUG) #define BT_DEBUG #endif #ifdef _WIN32 #if defined(__MINGW32__) || defined(__CYGWIN__) || (defined(_MSC_VER) && _MSC_VER < 1300) #define SIMD_FORCE_INLINE inline #define ATTRIBUTE_ALIGNED16(a) a #define ATTRIBUTE_ALIGNED64(a) a #define ATTRIBUTE_ALIGNED128(a) a #else //#define BT_HAS_ALIGNED_ALLOCATOR #pragma warning(disable : 4324) // disable padding warning // #pragma warning(disable:4530) // Disable the exception disable but used in MSCV Stl warning. 
// #pragma warning(disable:4996) //Turn off warnings about deprecated C routines // #pragma warning(disable:4786) // Disable the "debug name too long" warning #define SIMD_FORCE_INLINE __forceinline #define ATTRIBUTE_ALIGNED16(a) __declspec(align(16)) a #define ATTRIBUTE_ALIGNED64(a) __declspec(align(64)) a #define ATTRIBUTE_ALIGNED128(a) __declspec(align(128)) a #ifdef _XBOX #define BT_USE_VMX128 #include <ppcintrinsics.h> #define BT_HAVE_NATIVE_FSEL #define btFsel(a, b, c) __fsel((a), (b), (c)) #else #if (defined(_WIN32) && (_MSC_VER) && _MSC_VER >= 1400) && (!defined(BT_USE_DOUBLE_PRECISION)) #define BT_USE_SSE #include <emmintrin.h> #endif #endif //_XBOX #endif //__MINGW32__ #include <assert.h> #ifdef BT_DEBUG #define btAssert assert #else #define btAssert(x) #endif //btFullAssert is optional, slows down a lot #define btFullAssert(x) #define btLikely(_c) _c #define btUnlikely(_c) _c #else #if defined(__CELLOS_LV2__) #define SIMD_FORCE_INLINE inline __attribute__((always_inline)) #define ATTRIBUTE_ALIGNED16(a) a __attribute__((aligned(16))) #define ATTRIBUTE_ALIGNED64(a) a __attribute__((aligned(64))) #define ATTRIBUTE_ALIGNED128(a) a __attribute__((aligned(128))) #ifndef assert #include <assert.h> #endif #ifdef BT_DEBUG #ifdef __SPU__ #include <spu_printf.h> #define printf spu_printf #define btAssert(x) \ { \ if (!(x)) { \ printf("Assert " __FILE__ ":%u (" #x ")\n", __LINE__); \ spu_hcmpeq(0, 0); \ } \ } #else #define btAssert assert #endif #else #define btAssert(x) #endif //btFullAssert is optional, slows down a lot #define btFullAssert(x) #define btLikely(_c) _c #define btUnlikely(_c) _c #else #ifdef USE_LIBSPE2 #define SIMD_FORCE_INLINE __inline #define ATTRIBUTE_ALIGNED16(a) a __attribute__((aligned(16))) #define ATTRIBUTE_ALIGNED64(a) a __attribute__((aligned(64))) #define ATTRIBUTE_ALIGNED128(a) a __attribute__((aligned(128))) #ifndef assert #include <assert.h> #endif #ifdef BT_DEBUG #define btAssert assert #else #define btAssert(x) #endif //btFullAssert 
is optional, slows down a lot #define btFullAssert(x) #define btLikely(_c) __builtin_expect((_c), 1) #define btUnlikely(_c) __builtin_expect((_c), 0) #else //non-windows systems #if (defined(__APPLE__) && defined(__i386__) && (!defined(BT_USE_DOUBLE_PRECISION))) #define BT_USE_SSE #include <emmintrin.h> #define SIMD_FORCE_INLINE inline ///@todo: check out alignment methods for other platforms/compilers #define ATTRIBUTE_ALIGNED16(a) a __attribute__((aligned(16))) #define ATTRIBUTE_ALIGNED64(a) a __attribute__((aligned(64))) #define ATTRIBUTE_ALIGNED128(a) a __attribute__((aligned(128))) #ifndef assert #include <assert.h> #endif #if defined(DEBUG) || defined(_DEBUG) #define btAssert assert #else #define btAssert(x) #endif //btFullAssert is optional, slows down a lot #define btFullAssert(x) #define btLikely(_c) _c #define btUnlikely(_c) _c #else #define SIMD_FORCE_INLINE inline ///@todo: check out alignment methods for other platforms/compilers ///#define ATTRIBUTE_ALIGNED16(a) a __attribute__ ((aligned (16))) ///#define ATTRIBUTE_ALIGNED64(a) a __attribute__ ((aligned (64))) ///#define ATTRIBUTE_ALIGNED128(a) a __attribute__ ((aligned (128))) #define ATTRIBUTE_ALIGNED16(a) a #define ATTRIBUTE_ALIGNED64(a) a #define ATTRIBUTE_ALIGNED128(a) a #ifndef assert #include <assert.h> #endif #if defined(DEBUG) || defined(_DEBUG) #define btAssert assert #else #define btAssert(x) #endif //btFullAssert is optional, slows down a lot #define btFullAssert(x) #define btLikely(_c) _c #define btUnlikely(_c) _c #endif //__APPLE__ #endif // LIBSPE2 #endif //__CELLOS_LV2__ #endif ///The btScalar type abstracts floating point numbers, to easily switch between double and single floating point precision. 
#if defined(BT_USE_DOUBLE_PRECISION) typedef double btScalar; //this number could be bigger in double precision #define BT_LARGE_FLOAT 1e30 #else typedef float btScalar; //keep BT_LARGE_FLOAT*BT_LARGE_FLOAT < FLT_MAX #define BT_LARGE_FLOAT 1e18f #endif #define BT_DECLARE_ALIGNED_ALLOCATOR() \ SIMD_FORCE_INLINE void* operator new(size_t sizeInBytes) { return btAlignedAlloc(sizeInBytes, 16); } \ SIMD_FORCE_INLINE void operator delete(void* ptr) { btAlignedFree(ptr); } \ SIMD_FORCE_INLINE void* operator new(size_t, void* ptr) { return ptr; } \ SIMD_FORCE_INLINE void operator delete(void*, void*) {} \ SIMD_FORCE_INLINE void* operator new[](size_t sizeInBytes) { return btAlignedAlloc(sizeInBytes, 16); } \ SIMD_FORCE_INLINE void operator delete[](void* ptr) { btAlignedFree(ptr); } \ SIMD_FORCE_INLINE void* operator new[](size_t, void* ptr) { return ptr; } \ SIMD_FORCE_INLINE void operator delete[](void*, void*) {} #if defined(BT_USE_DOUBLE_PRECISION) || defined(BT_FORCE_DOUBLE_FUNCTIONS) SIMD_FORCE_INLINE btScalar btSqrt(btScalar x) { return sqrt(x); } SIMD_FORCE_INLINE btScalar btFabs(btScalar x) { return fabs(x); } SIMD_FORCE_INLINE btScalar btCos(btScalar x) { return cos(x); } SIMD_FORCE_INLINE btScalar btSin(btScalar x) { return sin(x); } SIMD_FORCE_INLINE btScalar btTan(btScalar x) { return tan(x); } SIMD_FORCE_INLINE btScalar btAcos(btScalar x) { if (x < btScalar(-1)) x = btScalar(-1); if (x > btScalar(1)) x = btScalar(1); return acos(x); } SIMD_FORCE_INLINE btScalar btAsin(btScalar x) { if (x < btScalar(-1)) x = btScalar(-1); if (x > btScalar(1)) x = btScalar(1); return asin(x); } SIMD_FORCE_INLINE btScalar btAtan(btScalar x) { return atan(x); } SIMD_FORCE_INLINE btScalar btAtan2(btScalar x, btScalar y) { return atan2(x, y); } SIMD_FORCE_INLINE btScalar btExp(btScalar x) { return exp(x); } SIMD_FORCE_INLINE btScalar btLog(btScalar x) { return log(x); } SIMD_FORCE_INLINE btScalar btPow(btScalar x, btScalar y) { return pow(x, y); } SIMD_FORCE_INLINE btScalar 
btFmod(btScalar x, btScalar y) { return fmod(x, y); } #else SIMD_FORCE_INLINE btScalar btSqrt(btScalar y) { #ifdef USE_APPROXIMATION double x, z, tempf; unsigned long* tfptr = ((unsigned long*)&tempf) + 1; tempf = y; *tfptr = (0xbfcdd90a - *tfptr) >> 1; /* estimate of 1/sqrt(y) */ x = tempf; z = y * btScalar(0.5); x = (btScalar(1.5) * x) - (x * x) * (x * z); /* iteration formula */ x = (btScalar(1.5) * x) - (x * x) * (x * z); x = (btScalar(1.5) * x) - (x * x) * (x * z); x = (btScalar(1.5) * x) - (x * x) * (x * z); x = (btScalar(1.5) * x) - (x * x) * (x * z); return x * y; #else return sqrtf(y); #endif } SIMD_FORCE_INLINE btScalar btFabs(btScalar x) { return fabsf(x); } SIMD_FORCE_INLINE btScalar btCos(btScalar x) { return cosf(x); } SIMD_FORCE_INLINE btScalar btSin(btScalar x) { return sinf(x); } SIMD_FORCE_INLINE btScalar btTan(btScalar x) { return tanf(x); } SIMD_FORCE_INLINE btScalar btAcos(btScalar x) { if (x < btScalar(-1)) x = btScalar(-1); if (x > btScalar(1)) x = btScalar(1); return acosf(x); } SIMD_FORCE_INLINE btScalar btAsin(btScalar x) { if (x < btScalar(-1)) x = btScalar(-1); if (x > btScalar(1)) x = btScalar(1); return asinf(x); } SIMD_FORCE_INLINE btScalar btAtan(btScalar x) { return atanf(x); } SIMD_FORCE_INLINE btScalar btAtan2(btScalar x, btScalar y) { return atan2f(x, y); } SIMD_FORCE_INLINE btScalar btExp(btScalar x) { return expf(x); } SIMD_FORCE_INLINE btScalar btLog(btScalar x) { return logf(x); } SIMD_FORCE_INLINE btScalar btPow(btScalar x, btScalar y) { return powf(x, y); } SIMD_FORCE_INLINE btScalar btFmod(btScalar x, btScalar y) { return fmodf(x, y); } #endif #define SIMD_2_PI btScalar(6.283185307179586232) #define SIMD_PI (SIMD_2_PI * btScalar(0.5)) #define SIMD_HALF_PI (SIMD_2_PI * btScalar(0.25)) #define SIMD_RADS_PER_DEG (SIMD_2_PI / btScalar(360.0)) #define SIMD_DEGS_PER_RAD (btScalar(360.0) / SIMD_2_PI) #define SIMDSQRT12 btScalar(0.7071067811865475244008443621048490) #define btRecipSqrt(x) ((btScalar)(btScalar(1.0) / 
btSqrt(btScalar(x)))) /* reciprocal square root */ #ifdef BT_USE_DOUBLE_PRECISION #define SIMD_EPSILON DBL_EPSILON #define SIMD_INFINITY DBL_MAX #else #define SIMD_EPSILON FLT_EPSILON #define SIMD_INFINITY FLT_MAX #endif SIMD_FORCE_INLINE btScalar btAtan2Fast(btScalar y, btScalar x) { btScalar coeff_1 = SIMD_PI / 4.0f; btScalar coeff_2 = 3.0f * coeff_1; btScalar abs_y = btFabs(y); btScalar angle; if (x >= 0.0f) { btScalar r = (x - abs_y) / (x + abs_y); angle = coeff_1 - coeff_1 * r; } else { btScalar r = (x + abs_y) / (abs_y - x); angle = coeff_2 - coeff_1 * r; } return (y < 0.0f) ? -angle : angle; } SIMD_FORCE_INLINE bool btFuzzyZero(btScalar x) { return btFabs(x) < SIMD_EPSILON; } SIMD_FORCE_INLINE bool btEqual(btScalar a, btScalar eps) { return (((a) <= eps) && !((a) < -eps)); } SIMD_FORCE_INLINE bool btGreaterEqual(btScalar a, btScalar eps) { return (!((a) <= eps)); } SIMD_FORCE_INLINE int32_t btIsNegative(btScalar x) { return x < btScalar(0.0) ? 1 : 0; } SIMD_FORCE_INLINE btScalar btRadians(btScalar x) { return x * SIMD_RADS_PER_DEG; } SIMD_FORCE_INLINE btScalar btDegrees(btScalar x) { return x * SIMD_DEGS_PER_RAD; } #define BT_DECLARE_HANDLE(name) \ typedef struct name##__ { \ int32_t unused; \ } * name #ifndef btFsel SIMD_FORCE_INLINE btScalar btFsel(btScalar a, btScalar b, btScalar c) { return a >= 0 ? b : c; } #endif #define btFsels(a, b, c) (btScalar) btFsel(a, b, c) SIMD_FORCE_INLINE bool btMachineIsLittleEndian() { long int i = 1; const char* p = (const char*)&i; if (p[0] == 1) // Lowest address contains the least significant byte return true; else return false; } ///btSelect avoids branches, which makes performance much better for consoles like Playstation 3 and XBox 360 ///Thanks Phil Knight. 
See also http://www.cellperformance.com/articles/2006/04/more_techniques_for_eliminatin_1.html SIMD_FORCE_INLINE unsigned btSelect(unsigned condition, unsigned valueIfConditionNonZero, unsigned valueIfConditionZero) { // Set testNz to 0xFFFFFFFF if condition is nonzero, 0x00000000 if condition is zero // Rely on positive value or'ed with its negative having sign bit on // and zero value or'ed with its negative (which is still zero) having sign bit off // Use arithmetic shift right, shifting the sign bit through all 32 bits unsigned testNz = (unsigned)(((int32_t)condition | -(int32_t)condition) >> 31); unsigned testEqz = ~testNz; return ((valueIfConditionNonZero & testNz) | (valueIfConditionZero & testEqz)); } SIMD_FORCE_INLINE int32_t btSelect(unsigned condition, int32_t valueIfConditionNonZero, int32_t valueIfConditionZero) { unsigned testNz = (unsigned)(((int32_t)condition | -(int32_t)condition) >> 31); unsigned testEqz = ~testNz; return static_cast<int32_t>((valueIfConditionNonZero & testNz) | (valueIfConditionZero & testEqz)); } SIMD_FORCE_INLINE float btSelect(unsigned condition, float valueIfConditionNonZero, float valueIfConditionZero) { #ifdef BT_HAVE_NATIVE_FSEL return (float)btFsel((btScalar)condition - btScalar(1.0f), valueIfConditionNonZero, valueIfConditionZero); #else return (condition != 0) ? 
valueIfConditionNonZero : valueIfConditionZero; #endif } template <typename T> SIMD_FORCE_INLINE void btSwap(T& a, T& b) { T tmp = a; a = b; b = tmp; } //PCK: endian swapping functions SIMD_FORCE_INLINE unsigned btSwapEndian(unsigned val) { return (((val & 0xff000000) >> 24) | ((val & 0x00ff0000) >> 8) | ((val & 0x0000ff00) << 8) | ((val & 0x000000ff) << 24)); } SIMD_FORCE_INLINE unsigned short btSwapEndian(unsigned short val) { return static_cast<unsigned short>(((val & 0xff00) >> 8) | ((val & 0x00ff) << 8)); } SIMD_FORCE_INLINE unsigned btSwapEndian(int32_t val) { return btSwapEndian((unsigned)val); } SIMD_FORCE_INLINE unsigned short btSwapEndian(short val) { return btSwapEndian((unsigned short)val); } ///btSwapFloat uses using char pointers to swap the endianness ////btSwapFloat/btSwapDouble will NOT return a float, because the machine might 'correct' invalid floating point values ///Not all values of sign/exponent/mantissa are valid floating point numbers according to IEEE 754. ///When a floating point unit is faced with an invalid value, it may actually change the value, or worse, throw an exception. ///In most systems, running user mode code, you wouldn't get an exception, but instead the hardware/os/runtime will 'fix' the number for you. 
///so instead of returning a float/double, we return integer/long long integer SIMD_FORCE_INLINE uint32_t btSwapEndianFloat(float d) { uint32_t a = 0; unsigned char* dst = (unsigned char*)&a; unsigned char* src = (unsigned char*)&d; dst[0] = src[3]; dst[1] = src[2]; dst[2] = src[1]; dst[3] = src[0]; return a; } // unswap using char pointers SIMD_FORCE_INLINE float btUnswapEndianFloat(uint32_t a) { float d = 0.0f; unsigned char* src = (unsigned char*)&a; unsigned char* dst = (unsigned char*)&d; dst[0] = src[3]; dst[1] = src[2]; dst[2] = src[1]; dst[3] = src[0]; return d; } // swap using char pointers SIMD_FORCE_INLINE void btSwapEndianDouble(double d, unsigned char* dst) { unsigned char* src = (unsigned char*)&d; dst[0] = src[7]; dst[1] = src[6]; dst[2] = src[5]; dst[3] = src[4]; dst[4] = src[3]; dst[5] = src[2]; dst[6] = src[1]; dst[7] = src[0]; } // unswap using char pointers SIMD_FORCE_INLINE double btUnswapEndianDouble(const unsigned char* src) { double d = 0.0; unsigned char* dst = (unsigned char*)&d; dst[0] = src[7]; dst[1] = src[6]; dst[2] = src[5]; dst[3] = src[4]; dst[4] = src[3]; dst[5] = src[2]; dst[6] = src[1]; dst[7] = src[0]; return d; } // returns normalized value in range [-SIMD_PI, SIMD_PI] SIMD_FORCE_INLINE btScalar btNormalizeAngle(btScalar angleInRadians) { angleInRadians = btFmod(angleInRadians, SIMD_2_PI); if (angleInRadians < -SIMD_PI) { return angleInRadians + SIMD_2_PI; } else if (angleInRadians > SIMD_PI) { return angleInRadians - SIMD_2_PI; } else { return angleInRadians; } } ///rudimentary class to provide type info struct btTypedObject { btTypedObject(int32_t objectType) : m_objectType(objectType) { } int32_t m_objectType; inline int32_t getObjectType() const { return m_objectType; } }; #endif //BT_SCALAR_H
16,847
C
30.550562
243
0.662136
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/VHACD/inc/vhacdRaycastMesh.h
#ifndef RAYCAST_MESH_H #define RAYCAST_MESH_H #include <stdint.h> namespace VHACD { // Very simple brute force raycast against a triangle mesh. Tests every triangle; no hierachy. // Does a deep copy, always does calculations with full double float precision class RaycastMesh { public: static RaycastMesh * createRaycastMesh(uint32_t vcount, // The number of vertices in the source triangle mesh const double *vertices, // The array of vertex positions in the format x1,y1,z1..x2,y2,z2.. etc. uint32_t tcount, // The number of triangles in the source triangle mesh const uint32_t *indices); // The triangle indices in the format of i1,i2,i3 ... i4,i5,i6, ... static RaycastMesh * createRaycastMesh(uint32_t vcount, // The number of vertices in the source triangle mesh const float *vertices, // The array of vertex positions in the format x1,y1,z1..x2,y2,z2.. etc. uint32_t tcount, // The number of triangles in the source triangle mesh const uint32_t *indices); // The triangle indices in the format of i1,i2,i3 ... i4,i5,i6, ... virtual bool raycast(const double *from, // The starting point of the raycast const double *to, // The ending point of the raycast const double *closestToPoint, // The point to match the nearest hit location (can just be the 'from' location of no specific point) double *hitLocation, // The point where the ray hit nearest to the 'closestToPoint' location double *hitDistance) = 0; // The distance the ray traveled to the hit location virtual void release(void) = 0; protected: virtual ~RaycastMesh(void) { }; }; } // end of VHACD namespace #endif
1,853
C
45.349999
145
0.641662
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/VHACD/inc/vhacdMesh.h
/* Copyright (c) 2011 Khaled Mamou (kmamou at gmail dot com) All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. The names of the contributors may not be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #pragma once #ifndef VHACD_MESH_H #define VHACD_MESH_H #include "vhacdSArray.h" #include "vhacdVector.h" #define VHACD_DEBUG_MESH namespace VHACD { enum AXIS { AXIS_X = 0, AXIS_Y = 1, AXIS_Z = 2 }; struct Plane { double m_a; double m_b; double m_c; double m_d; AXIS m_axis; short m_index; }; #ifdef VHACD_DEBUG_MESH struct Material { Vec3<double> m_diffuseColor; double m_ambientIntensity; Vec3<double> m_specularColor; Vec3<double> m_emissiveColor; double m_shininess; double m_transparency; Material(void) { m_diffuseColor.X() = 0.5; m_diffuseColor.Y() = 0.5; m_diffuseColor.Z() = 0.5; m_specularColor.X() = 0.5; m_specularColor.Y() = 0.5; m_specularColor.Z() = 0.5; m_ambientIntensity = 0.4; m_emissiveColor.X() = 0.0; m_emissiveColor.Y() = 0.0; m_emissiveColor.Z() = 0.0; m_shininess = 0.4; m_transparency = 0.0; }; }; #endif // VHACD_DEBUG_MESH //! Triangular mesh data structure class Mesh { public: void AddPoint(const Vec3<double>& pt) { m_points.PushBack(pt); }; void SetPoint(size_t index, const Vec3<double>& pt) { m_points[index] = pt; }; const Vec3<double>& GetPoint(size_t index) const { return m_points[index]; }; Vec3<double>& GetPoint(size_t index) { return m_points[index]; }; size_t GetNPoints() const { return m_points.Size(); }; double* GetPoints() { return (double*)m_points.Data(); } // ugly const double* const GetPoints() const { return (double*)m_points.Data(); } // ugly const Vec3<double>* const GetPointsBuffer() const { return m_points.Data(); } // Vec3<double>* const GetPointsBuffer() { return m_points.Data(); } // void AddTriangle(const Vec3<int32_t>& tri) { m_triangles.PushBack(tri); }; void SetTriangle(size_t index, const Vec3<int32_t>& tri) { m_triangles[index] = tri; }; const Vec3<int32_t>& GetTriangle(size_t index) const { return m_triangles[index]; }; Vec3<int32_t>& GetTriangle(size_t index) { return m_triangles[index]; }; size_t GetNTriangles() const { return m_triangles.Size(); }; int32_t* GetTriangles() { return (int32_t*)m_triangles.Data(); } 
// ugly const int32_t* const GetTriangles() const { return (int32_t*)m_triangles.Data(); } // ugly const Vec3<int32_t>* const GetTrianglesBuffer() const { return m_triangles.Data(); } Vec3<int32_t>* const GetTrianglesBuffer() { return m_triangles.Data(); } const Vec3<double>& GetCenter() const { return m_center; } const Vec3<double>& GetMinBB() const { return m_minBB; } const Vec3<double>& GetMaxBB() const { return m_maxBB; } void ClearPoints() { m_points.Clear(); } void ClearTriangles() { m_triangles.Clear(); } void Clear() { ClearPoints(); ClearTriangles(); } void ResizePoints(size_t nPts) { m_points.Resize(nPts); } void ResizeTriangles(size_t nTri) { m_triangles.Resize(nTri); } void CopyPoints(SArray<Vec3<double> >& points) const { points = m_points; } double GetDiagBB() const { return m_diag; } double ComputeVolume() const; void ComputeConvexHull(const double* const pts, const size_t nPts); void Clip(const Plane& plane, SArray<Vec3<double> >& positivePart, SArray<Vec3<double> >& negativePart) const; bool IsInside(const Vec3<double>& pt) const; double ComputeDiagBB(); Vec3<double> &ComputeCenter(void); #ifdef VHACD_DEBUG_MESH bool LoadOFF(const std::string& fileName, bool invert); bool SaveVRML2(const std::string& fileName) const; bool SaveVRML2(std::ofstream& fout, const Material& material) const; bool SaveOFF(const std::string& fileName) const; #endif // VHACD_DEBUG_MESH //! Constructor. Mesh(); //! Destructor. ~Mesh(void); private: SArray<Vec3<double> > m_points; SArray<Vec3<int32_t> > m_triangles; Vec3<double> m_minBB; Vec3<double> m_maxBB; Vec3<double> m_center; double m_diag; }; } #endif
5,520
C
41.46923
756
0.683152
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/VHACD/inc/btVector3.h
/* Copyright (c) 2003-2006 Gino van den Bergen / Erwin Coumans http://continuousphysics.com/Bullet/ This software is provided 'as-is', without any express or implied warranty. In no event will the authors be held liable for any damages arising from the use of this software. Permission is granted to anyone to use this software for any purpose, including commercial applications, and to alter it and redistribute it freely, subject to the following restrictions: 1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required. 2. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software. 3. This notice may not be removed or altered from any source distribution. */ #ifndef BT_VECTOR3_H #define BT_VECTOR3_H #include "btMinMax.h" #include "btScalar.h" #ifdef BT_USE_DOUBLE_PRECISION #define btVector3Data btVector3DoubleData #define btVector3DataName "btVector3DoubleData" #else #define btVector3Data btVector3FloatData #define btVector3DataName "btVector3FloatData" #endif //BT_USE_DOUBLE_PRECISION /**@brief btVector3 can be used to represent 3D points and vectors. * It has an un-used w component to suit 16-byte alignment when btVector3 is stored in containers. This extra component can be used by derived classes (Quaternion?) 
or by user * Ideally, this class should be replaced by a platform optimized SIMD version that keeps the data in registers */ ATTRIBUTE_ALIGNED16(class) btVector3 { public: #if defined(__SPU__) && defined(__CELLOS_LV2__) btScalar m_floats[4]; public: SIMD_FORCE_INLINE const vec_float4& get128() const { return *((const vec_float4*)&m_floats[0]); } public: #else //__CELLOS_LV2__ __SPU__ #ifdef BT_USE_SSE // _WIN32 union { __m128 mVec128; btScalar m_floats[4]; }; SIMD_FORCE_INLINE __m128 get128() const { return mVec128; } SIMD_FORCE_INLINE void set128(__m128 v128) { mVec128 = v128; } #else btScalar m_floats[4]; #endif #endif //__CELLOS_LV2__ __SPU__ public: /**@brief No initialization constructor */ SIMD_FORCE_INLINE btVector3() {} /**@brief Constructor from scalars * @param x X value * @param y Y value * @param z Z value */ SIMD_FORCE_INLINE btVector3(const btScalar& x, const btScalar& y, const btScalar& z) { m_floats[0] = x; m_floats[1] = y; m_floats[2] = z; m_floats[3] = btScalar(0.); } /**@brief Add a vector to this one * @param The vector to add to this one */ SIMD_FORCE_INLINE btVector3& operator+=(const btVector3& v) { m_floats[0] += v.m_floats[0]; m_floats[1] += v.m_floats[1]; m_floats[2] += v.m_floats[2]; return *this; } /**@brief Subtract a vector from this one * @param The vector to subtract */ SIMD_FORCE_INLINE btVector3& operator-=(const btVector3& v) { m_floats[0] -= v.m_floats[0]; m_floats[1] -= v.m_floats[1]; m_floats[2] -= v.m_floats[2]; return *this; } /**@brief Scale the vector * @param s Scale factor */ SIMD_FORCE_INLINE btVector3& operator*=(const btScalar& s) { m_floats[0] *= s; m_floats[1] *= s; m_floats[2] *= s; return *this; } /**@brief Inversely scale the vector * @param s Scale factor to divide by */ SIMD_FORCE_INLINE btVector3& operator/=(const btScalar& s) { btFullAssert(s != btScalar(0.0)); return * this *= btScalar(1.0) / s; } /**@brief Return the dot product * @param v The other vector in the dot product */ SIMD_FORCE_INLINE btScalar 
dot(const btVector3& v) const { return m_floats[0] * v.m_floats[0] + m_floats[1] * v.m_floats[1] + m_floats[2] * v.m_floats[2]; } /**@brief Return the length of the vector squared */ SIMD_FORCE_INLINE btScalar length2() const { return dot(*this); } /**@brief Return the length of the vector */ SIMD_FORCE_INLINE btScalar length() const { return btSqrt(length2()); } /**@brief Return the distance squared between the ends of this and another vector * This is symantically treating the vector like a point */ SIMD_FORCE_INLINE btScalar distance2(const btVector3& v) const; /**@brief Return the distance between the ends of this and another vector * This is symantically treating the vector like a point */ SIMD_FORCE_INLINE btScalar distance(const btVector3& v) const; SIMD_FORCE_INLINE btVector3& safeNormalize() { btVector3 absVec = this->absolute(); int32_t maxIndex = absVec.maxAxis(); if (absVec[maxIndex] > 0) { *this /= absVec[maxIndex]; return * this /= length(); } setValue(1, 0, 0); return *this; } /**@brief Normalize this vector * x^2 + y^2 + z^2 = 1 */ SIMD_FORCE_INLINE btVector3& normalize() { return * this /= length(); } /**@brief Return a normalized version of this vector */ SIMD_FORCE_INLINE btVector3 normalized() const; /**@brief Return a rotated version of this vector * @param wAxis The axis to rotate about * @param angle The angle to rotate by */ SIMD_FORCE_INLINE btVector3 rotate(const btVector3& wAxis, const btScalar angle) const; /**@brief Return the angle between this and another vector * @param v The other vector */ SIMD_FORCE_INLINE btScalar angle(const btVector3& v) const { btScalar s = btSqrt(length2() * v.length2()); btFullAssert(s != btScalar(0.0)); return btAcos(dot(v) / s); } /**@brief Return a vector will the absolute values of each element */ SIMD_FORCE_INLINE btVector3 absolute() const { return btVector3( btFabs(m_floats[0]), btFabs(m_floats[1]), btFabs(m_floats[2])); } /**@brief Return the cross product between this and another vector * @param v 
The other vector */ SIMD_FORCE_INLINE btVector3 cross(const btVector3& v) const { return btVector3( m_floats[1] * v.m_floats[2] - m_floats[2] * v.m_floats[1], m_floats[2] * v.m_floats[0] - m_floats[0] * v.m_floats[2], m_floats[0] * v.m_floats[1] - m_floats[1] * v.m_floats[0]); } SIMD_FORCE_INLINE btScalar triple(const btVector3& v1, const btVector3& v2) const { return m_floats[0] * (v1.m_floats[1] * v2.m_floats[2] - v1.m_floats[2] * v2.m_floats[1]) + m_floats[1] * (v1.m_floats[2] * v2.m_floats[0] - v1.m_floats[0] * v2.m_floats[2]) + m_floats[2] * (v1.m_floats[0] * v2.m_floats[1] - v1.m_floats[1] * v2.m_floats[0]); } /**@brief Return the axis with the smallest value * Note return values are 0,1,2 for x, y, or z */ SIMD_FORCE_INLINE int32_t minAxis() const { return m_floats[0] < m_floats[1] ? (m_floats[0] < m_floats[2] ? 0 : 2) : (m_floats[1] < m_floats[2] ? 1 : 2); } /**@brief Return the axis with the largest value * Note return values are 0,1,2 for x, y, or z */ SIMD_FORCE_INLINE int32_t maxAxis() const { return m_floats[0] < m_floats[1] ? (m_floats[1] < m_floats[2] ? 2 : 1) : (m_floats[0] < m_floats[2] ? 
2 : 0); } SIMD_FORCE_INLINE int32_t furthestAxis() const { return absolute().minAxis(); } SIMD_FORCE_INLINE int32_t closestAxis() const { return absolute().maxAxis(); } SIMD_FORCE_INLINE void setInterpolate3(const btVector3& v0, const btVector3& v1, btScalar rt) { btScalar s = btScalar(1.0) - rt; m_floats[0] = s * v0.m_floats[0] + rt * v1.m_floats[0]; m_floats[1] = s * v0.m_floats[1] + rt * v1.m_floats[1]; m_floats[2] = s * v0.m_floats[2] + rt * v1.m_floats[2]; //don't do the unused w component // m_co[3] = s * v0[3] + rt * v1[3]; } /**@brief Return the linear interpolation between this and another vector * @param v The other vector * @param t The ration of this to v (t = 0 => return this, t=1 => return other) */ SIMD_FORCE_INLINE btVector3 lerp(const btVector3& v, const btScalar& t) const { return btVector3(m_floats[0] + (v.m_floats[0] - m_floats[0]) * t, m_floats[1] + (v.m_floats[1] - m_floats[1]) * t, m_floats[2] + (v.m_floats[2] - m_floats[2]) * t); } /**@brief Elementwise multiply this vector by the other * @param v The other vector */ SIMD_FORCE_INLINE btVector3& operator*=(const btVector3& v) { m_floats[0] *= v.m_floats[0]; m_floats[1] *= v.m_floats[1]; m_floats[2] *= v.m_floats[2]; return *this; } /**@brief Return the x value */ SIMD_FORCE_INLINE const btScalar& getX() const { return m_floats[0]; } /**@brief Return the y value */ SIMD_FORCE_INLINE const btScalar& getY() const { return m_floats[1]; } /**@brief Return the z value */ SIMD_FORCE_INLINE const btScalar& getZ() const { return m_floats[2]; } /**@brief Set the x value */ SIMD_FORCE_INLINE void setX(btScalar x) { m_floats[0] = x; }; /**@brief Set the y value */ SIMD_FORCE_INLINE void setY(btScalar y) { m_floats[1] = y; }; /**@brief Set the z value */ SIMD_FORCE_INLINE void setZ(btScalar z) { m_floats[2] = z; }; /**@brief Set the w value */ SIMD_FORCE_INLINE void setW(btScalar w) { m_floats[3] = w; }; /**@brief Return the x value */ SIMD_FORCE_INLINE const btScalar& x() const { return m_floats[0]; } 
/**@brief Return the y value */ SIMD_FORCE_INLINE const btScalar& y() const { return m_floats[1]; } /**@brief Return the z value */ SIMD_FORCE_INLINE const btScalar& z() const { return m_floats[2]; } /**@brief Return the w value */ SIMD_FORCE_INLINE const btScalar& w() const { return m_floats[3]; } //SIMD_FORCE_INLINE btScalar& operator[](int32_t i) { return (&m_floats[0])[i]; } //SIMD_FORCE_INLINE const btScalar& operator[](int32_t i) const { return (&m_floats[0])[i]; } ///operator btScalar*() replaces operator[], using implicit conversion. We added operator != and operator == to avoid pointer comparisons. SIMD_FORCE_INLINE operator btScalar*() { return &m_floats[0]; } SIMD_FORCE_INLINE operator const btScalar*() const { return &m_floats[0]; } SIMD_FORCE_INLINE bool operator==(const btVector3& other) const { return ((m_floats[3] == other.m_floats[3]) && (m_floats[2] == other.m_floats[2]) && (m_floats[1] == other.m_floats[1]) && (m_floats[0] == other.m_floats[0])); } SIMD_FORCE_INLINE bool operator!=(const btVector3& other) const { return !(*this == other); } /**@brief Set each element to the max of the current values and the values of another btVector3 * @param other The other btVector3 to compare with */ SIMD_FORCE_INLINE void setMax(const btVector3& other) { btSetMax(m_floats[0], other.m_floats[0]); btSetMax(m_floats[1], other.m_floats[1]); btSetMax(m_floats[2], other.m_floats[2]); btSetMax(m_floats[3], other.w()); } /**@brief Set each element to the min of the current values and the values of another btVector3 * @param other The other btVector3 to compare with */ SIMD_FORCE_INLINE void setMin(const btVector3& other) { btSetMin(m_floats[0], other.m_floats[0]); btSetMin(m_floats[1], other.m_floats[1]); btSetMin(m_floats[2], other.m_floats[2]); btSetMin(m_floats[3], other.w()); } SIMD_FORCE_INLINE void setValue(const btScalar& x, const btScalar& y, const btScalar& z) { m_floats[0] = x; m_floats[1] = y; m_floats[2] = z; m_floats[3] = btScalar(0.); } void 
getSkewSymmetricMatrix(btVector3 * v0, btVector3 * v1, btVector3 * v2) const { v0->setValue(0., -z(), y()); v1->setValue(z(), 0., -x()); v2->setValue(-y(), x(), 0.); } void setZero() { setValue(btScalar(0.), btScalar(0.), btScalar(0.)); } SIMD_FORCE_INLINE bool isZero() const { return m_floats[0] == btScalar(0) && m_floats[1] == btScalar(0) && m_floats[2] == btScalar(0); } SIMD_FORCE_INLINE bool fuzzyZero() const { return length2() < SIMD_EPSILON; } SIMD_FORCE_INLINE void serialize(struct btVector3Data & dataOut) const; SIMD_FORCE_INLINE void deSerialize(const struct btVector3Data& dataIn); SIMD_FORCE_INLINE void serializeFloat(struct btVector3FloatData & dataOut) const; SIMD_FORCE_INLINE void deSerializeFloat(const struct btVector3FloatData& dataIn); SIMD_FORCE_INLINE void serializeDouble(struct btVector3DoubleData & dataOut) const; SIMD_FORCE_INLINE void deSerializeDouble(const struct btVector3DoubleData& dataIn); }; /**@brief Return the sum of two vectors (Point symantics)*/ SIMD_FORCE_INLINE btVector3 operator+(const btVector3& v1, const btVector3& v2) { return btVector3(v1.m_floats[0] + v2.m_floats[0], v1.m_floats[1] + v2.m_floats[1], v1.m_floats[2] + v2.m_floats[2]); } /**@brief Return the elementwise product of two vectors */ SIMD_FORCE_INLINE btVector3 operator*(const btVector3& v1, const btVector3& v2) { return btVector3(v1.m_floats[0] * v2.m_floats[0], v1.m_floats[1] * v2.m_floats[1], v1.m_floats[2] * v2.m_floats[2]); } /**@brief Return the difference between two vectors */ SIMD_FORCE_INLINE btVector3 operator-(const btVector3& v1, const btVector3& v2) { return btVector3(v1.m_floats[0] - v2.m_floats[0], v1.m_floats[1] - v2.m_floats[1], v1.m_floats[2] - v2.m_floats[2]); } /**@brief Return the negative of the vector */ SIMD_FORCE_INLINE btVector3 operator-(const btVector3& v) { return btVector3(-v.m_floats[0], -v.m_floats[1], -v.m_floats[2]); } /**@brief Return the vector scaled by s */ SIMD_FORCE_INLINE btVector3 operator*(const btVector3& v, const 
btScalar& s) { return btVector3(v.m_floats[0] * s, v.m_floats[1] * s, v.m_floats[2] * s); } /**@brief Return the vector scaled by s */ SIMD_FORCE_INLINE btVector3 operator*(const btScalar& s, const btVector3& v) { return v * s; } /**@brief Return the vector inversely scaled by s */ SIMD_FORCE_INLINE btVector3 operator/(const btVector3& v, const btScalar& s) { btFullAssert(s != btScalar(0.0)); return v * (btScalar(1.0) / s); } /**@brief Return the vector inversely scaled by s */ SIMD_FORCE_INLINE btVector3 operator/(const btVector3& v1, const btVector3& v2) { return btVector3(v1.m_floats[0] / v2.m_floats[0], v1.m_floats[1] / v2.m_floats[1], v1.m_floats[2] / v2.m_floats[2]); } /**@brief Return the dot product between two vectors */ SIMD_FORCE_INLINE btScalar btDot(const btVector3& v1, const btVector3& v2) { return v1.dot(v2); } /**@brief Return the distance squared between two vectors */ SIMD_FORCE_INLINE btScalar btDistance2(const btVector3& v1, const btVector3& v2) { return v1.distance2(v2); } /**@brief Return the distance between two vectors */ SIMD_FORCE_INLINE btScalar btDistance(const btVector3& v1, const btVector3& v2) { return v1.distance(v2); } /**@brief Return the angle between two vectors */ SIMD_FORCE_INLINE btScalar btAngle(const btVector3& v1, const btVector3& v2) { return v1.angle(v2); } /**@brief Return the cross product of two vectors */ SIMD_FORCE_INLINE btVector3 btCross(const btVector3& v1, const btVector3& v2) { return v1.cross(v2); } SIMD_FORCE_INLINE btScalar btTriple(const btVector3& v1, const btVector3& v2, const btVector3& v3) { return v1.triple(v2, v3); } /**@brief Return the linear interpolation between two vectors * @param v1 One vector * @param v2 The other vector * @param t The ration of this to v (t = 0 => return v1, t=1 => return v2) */ SIMD_FORCE_INLINE btVector3 lerp(const btVector3& v1, const btVector3& v2, const btScalar& t) { return v1.lerp(v2, t); } SIMD_FORCE_INLINE btScalar btVector3::distance2(const btVector3& v) const { 
return (v - *this).length2(); } SIMD_FORCE_INLINE btScalar btVector3::distance(const btVector3& v) const { return (v - *this).length(); } SIMD_FORCE_INLINE btVector3 btVector3::normalized() const { return *this / length(); } SIMD_FORCE_INLINE btVector3 btVector3::rotate(const btVector3& wAxis, const btScalar angle) const { // wAxis must be a unit lenght vector btVector3 o = wAxis * wAxis.dot(*this); btVector3 x = *this - o; btVector3 y; y = wAxis.cross(*this); return (o + x * btCos(angle) + y * btSin(angle)); } class btVector4 : public btVector3 { public: SIMD_FORCE_INLINE btVector4() {} SIMD_FORCE_INLINE btVector4(const btScalar& x, const btScalar& y, const btScalar& z, const btScalar& w) : btVector3(x, y, z) { m_floats[3] = w; } SIMD_FORCE_INLINE btVector4 absolute4() const { return btVector4( btFabs(m_floats[0]), btFabs(m_floats[1]), btFabs(m_floats[2]), btFabs(m_floats[3])); } btScalar getW() const { return m_floats[3]; } SIMD_FORCE_INLINE int32_t maxAxis4() const { int32_t maxIndex = -1; btScalar maxVal = btScalar(-BT_LARGE_FLOAT); if (m_floats[0] > maxVal) { maxIndex = 0; maxVal = m_floats[0]; } if (m_floats[1] > maxVal) { maxIndex = 1; maxVal = m_floats[1]; } if (m_floats[2] > maxVal) { maxIndex = 2; maxVal = m_floats[2]; } if (m_floats[3] > maxVal) { maxIndex = 3; } return maxIndex; } SIMD_FORCE_INLINE int32_t minAxis4() const { int32_t minIndex = -1; btScalar minVal = btScalar(BT_LARGE_FLOAT); if (m_floats[0] < minVal) { minIndex = 0; minVal = m_floats[0]; } if (m_floats[1] < minVal) { minIndex = 1; minVal = m_floats[1]; } if (m_floats[2] < minVal) { minIndex = 2; minVal = m_floats[2]; } if (m_floats[3] < minVal) { minIndex = 3; } return minIndex; } SIMD_FORCE_INLINE int32_t closestAxis4() const { return absolute4().maxAxis4(); } /**@brief Set x,y,z and zero w * @param x Value of x * @param y Value of y * @param z Value of z */ /* void getValue(btScalar *m) const { m[0] = m_floats[0]; m[1] = m_floats[1]; m[2] =m_floats[2]; } */ /**@brief Set the values * 
@param x Value of x * @param y Value of y * @param z Value of z * @param w Value of w */ SIMD_FORCE_INLINE void setValue(const btScalar& x, const btScalar& y, const btScalar& z, const btScalar& w) { m_floats[0] = x; m_floats[1] = y; m_floats[2] = z; m_floats[3] = w; } }; ///btSwapVector3Endian swaps vector endianness, useful for network and cross-platform serialization SIMD_FORCE_INLINE void btSwapScalarEndian(const btScalar& sourceVal, btScalar& destVal) { #ifdef BT_USE_DOUBLE_PRECISION unsigned char* dest = (unsigned char*)&destVal; unsigned char* src = (unsigned char*)&sourceVal; dest[0] = src[7]; dest[1] = src[6]; dest[2] = src[5]; dest[3] = src[4]; dest[4] = src[3]; dest[5] = src[2]; dest[6] = src[1]; dest[7] = src[0]; #else unsigned char* dest = (unsigned char*)&destVal; unsigned char* src = (unsigned char*)&sourceVal; dest[0] = src[3]; dest[1] = src[2]; dest[2] = src[1]; dest[3] = src[0]; #endif //BT_USE_DOUBLE_PRECISION } ///btSwapVector3Endian swaps vector endianness, useful for network and cross-platform serialization SIMD_FORCE_INLINE void btSwapVector3Endian(const btVector3& sourceVec, btVector3& destVec) { for (int32_t i = 0; i < 4; i++) { btSwapScalarEndian(sourceVec[i], destVec[i]); } } ///btUnSwapVector3Endian swaps vector endianness, useful for network and cross-platform serialization SIMD_FORCE_INLINE void btUnSwapVector3Endian(btVector3& vector) { btVector3 swappedVec; for (int32_t i = 0; i < 4; i++) { btSwapScalarEndian(vector[i], swappedVec[i]); } vector = swappedVec; } template <class T> SIMD_FORCE_INLINE void btPlaneSpace1(const T& n, T& p, T& q) { if (btFabs(n[2]) > SIMDSQRT12) { // choose p in y-z plane btScalar a = n[1] * n[1] + n[2] * n[2]; btScalar k = btRecipSqrt(a); p[0] = 0; p[1] = -n[2] * k; p[2] = n[1] * k; // set q = n x p q[0] = a * k; q[1] = -n[0] * p[2]; q[2] = n[0] * p[1]; } else { // choose p in x-y plane btScalar a = n[0] * n[0] + n[1] * n[1]; btScalar k = btRecipSqrt(a); p[0] = -n[1] * k; p[1] = n[0] * k; p[2] = 0; // set q 
= n x p q[0] = -n[2] * p[1]; q[1] = n[2] * p[0]; q[2] = a * k; } } struct btVector3FloatData { float m_floats[4]; }; struct btVector3DoubleData { double m_floats[4]; }; SIMD_FORCE_INLINE void btVector3::serializeFloat(struct btVector3FloatData& dataOut) const { ///could also do a memcpy, check if it is worth it for (int32_t i = 0; i < 4; i++) dataOut.m_floats[i] = float(m_floats[i]); } SIMD_FORCE_INLINE void btVector3::deSerializeFloat(const struct btVector3FloatData& dataIn) { for (int32_t i = 0; i < 4; i++) m_floats[i] = btScalar(dataIn.m_floats[i]); } SIMD_FORCE_INLINE void btVector3::serializeDouble(struct btVector3DoubleData& dataOut) const { ///could also do a memcpy, check if it is worth it for (int32_t i = 0; i < 4; i++) dataOut.m_floats[i] = double(m_floats[i]); } SIMD_FORCE_INLINE void btVector3::deSerializeDouble(const struct btVector3DoubleData& dataIn) { for (int32_t i = 0; i < 4; i++) m_floats[i] = btScalar(dataIn.m_floats[i]); } SIMD_FORCE_INLINE void btVector3::serialize(struct btVector3Data& dataOut) const { ///could also do a memcpy, check if it is worth it for (int32_t i = 0; i < 4; i++) dataOut.m_floats[i] = m_floats[i]; } SIMD_FORCE_INLINE void btVector3::deSerialize(const struct btVector3Data& dataIn) { for (int32_t i = 0; i < 4; i++) m_floats[i] = dataIn.m_floats[i]; } #endif //BT_VECTOR3_H
22,579
C
30.536313
265
0.613845
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/VHACD/inc/vhacdVHACD.h
/* Copyright (c) 2011 Khaled Mamou (kmamou at gmail dot com) All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. The names of the contributors may not be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #pragma once #ifndef VHACD_VHACD_H #define VHACD_VHACD_H #ifdef OPENCL_FOUND #ifdef __MACH__ #include <OpenCL/cl.h> #else #include <CL/cl.h> #endif #endif //OPENCL_FOUND #include "vhacdMutex.h" #include "vhacdVolume.h" #include "vhacdRaycastMesh.h" #include <vector> typedef std::vector< VHACD::IVHACD::Constraint > ConstraintVector; #define USE_THREAD 1 #define OCL_MIN_NUM_PRIMITIVES 4096 #define CH_APP_MIN_NUM_PRIMITIVES 64000 namespace VHACD { class VHACD : public IVHACD { public: //! Constructor. 
VHACD() { #if USE_THREAD == 1 && _OPENMP m_ompNumProcessors = 2 * omp_get_num_procs(); omp_set_num_threads(m_ompNumProcessors); #else //USE_THREAD == 1 && _OPENMP m_ompNumProcessors = 1; #endif //USE_THREAD == 1 && _OPENMP #ifdef CL_VERSION_1_1 m_oclWorkGroupSize = 0; m_oclDevice = 0; m_oclQueue = 0; m_oclKernelComputePartialVolumes = 0; m_oclKernelComputeSum = 0; #endif //CL_VERSION_1_1 Init(); } //! Destructor. ~VHACD(void) { } uint32_t GetNConvexHulls() const { return (uint32_t)m_convexHulls.Size(); } void Cancel() { SetCancel(true); } void GetConvexHull(const uint32_t index, ConvexHull& ch) const { Mesh* mesh = m_convexHulls[index]; ch.m_nPoints = (uint32_t)mesh->GetNPoints(); ch.m_nTriangles = (uint32_t)mesh->GetNTriangles(); ch.m_points = mesh->GetPoints(); ch.m_triangles = (uint32_t *)mesh->GetTriangles(); ch.m_volume = mesh->ComputeVolume(); Vec3<double> &center = mesh->ComputeCenter(); ch.m_center[0] = center.X(); ch.m_center[1] = center.Y(); ch.m_center[2] = center.Z(); } void Clean(void) { if (mRaycastMesh) { mRaycastMesh->release(); mRaycastMesh = nullptr; } delete m_volume; delete m_pset; size_t nCH = m_convexHulls.Size(); for (size_t p = 0; p < nCH; ++p) { delete m_convexHulls[p]; } m_convexHulls.Clear(); Init(); } void Release(void) { delete this; } bool Compute(const float* const points, const uint32_t nPoints, const uint32_t* const triangles, const uint32_t nTriangles, const Parameters& params); bool Compute(const double* const points, const uint32_t nPoints, const uint32_t* const triangles, const uint32_t nTriangles, const Parameters& params); bool OCLInit(void* const oclDevice, IUserLogger* const logger = 0); bool OCLRelease(IUserLogger* const logger = 0); virtual bool ComputeCenterOfMass(double centerOfMass[3]) const; // Will analyze the HACD results and compute the constraints solutions. 
// It will analyze the point at which any two convex hulls touch each other and // return the total number of constraint pairs found virtual uint32_t ComputeConstraints(void); // Returns a pointer to the constraint index; null if the index is not valid or // the user did not previously call 'ComputeConstraints' virtual const Constraint *GetConstraint(uint32_t index) const; private: void SetCancel(bool cancel) { m_cancelMutex.Lock(); m_cancel = cancel; m_cancelMutex.Unlock(); } bool GetCancel() { m_cancelMutex.Lock(); bool cancel = m_cancel; m_cancelMutex.Unlock(); return cancel; } void Update(const double stageProgress, const double operationProgress, const Parameters& params) { m_stageProgress = stageProgress; m_operationProgress = operationProgress; if (params.m_callback) { params.m_callback->Update(m_overallProgress, m_stageProgress, m_operationProgress, m_stage.c_str(), m_operation.c_str()); } } void Init() { if (mRaycastMesh) { mRaycastMesh->release(); mRaycastMesh = nullptr; } memset(m_rot, 0, sizeof(double) * 9); m_dim = 64; m_volume = 0; m_volumeCH0 = 0.0; m_pset = 0; m_overallProgress = 0.0; m_stageProgress = 0.0; m_operationProgress = 0.0; m_stage = ""; m_operation = ""; m_barycenter[0] = m_barycenter[1] = m_barycenter[2] = 0.0; m_rot[0][0] = m_rot[1][1] = m_rot[2][2] = 1.0; SetCancel(false); } void ComputePrimitiveSet(const Parameters& params); void ComputeACD(const Parameters& params); void MergeConvexHulls(const Parameters& params); void SimplifyConvexHull(Mesh* const ch, const size_t nvertices, const double minVolume); void SimplifyConvexHulls(const Parameters& params); void ComputeBestClippingPlane(const PrimitiveSet* inputPSet, const double volume, const SArray<Plane>& planes, const Vec3<double>& preferredCuttingDirection, const double w, const double alpha, const double beta, const int32_t convexhullDownsampling, const double progress0, const double progress1, Plane& bestPlane, double& minConcavity, const Parameters& params); template <class T> 
void AlignMesh(const T* const points, const uint32_t stridePoints, const uint32_t nPoints, const int32_t* const triangles, const uint32_t strideTriangles, const uint32_t nTriangles, const Parameters& params) { if (GetCancel() || !params.m_pca) { return; } m_timer.Tic(); m_stage = "Align mesh"; m_operation = "Voxelization"; std::ostringstream msg; if (params.m_logger) { msg << "+ " << m_stage << std::endl; params.m_logger->Log(msg.str().c_str()); } Update(0.0, 0.0, params); if (GetCancel()) { return; } m_dim = (size_t)(pow((double)params.m_resolution, 1.0 / 3.0) + 0.5); Volume volume; volume.Voxelize(points, stridePoints, nPoints, triangles, strideTriangles, nTriangles, m_dim, m_barycenter, m_rot); size_t n = volume.GetNPrimitivesOnSurf() + volume.GetNPrimitivesInsideSurf(); Update(50.0, 100.0, params); if (params.m_logger) { msg.str(""); msg << "\t dim = " << m_dim << "\t-> " << n << " voxels" << std::endl; params.m_logger->Log(msg.str().c_str()); } if (GetCancel()) { return; } m_operation = "PCA"; Update(50.0, 0.0, params); volume.AlignToPrincipalAxes(m_rot); m_overallProgress = 1.0; Update(100.0, 100.0, params); m_timer.Toc(); if (params.m_logger) { msg.str(""); msg << "\t time " << m_timer.GetElapsedTime() / 1000.0 << "s" << std::endl; params.m_logger->Log(msg.str().c_str()); } } template <class T> void VoxelizeMesh(const T* const points, const uint32_t stridePoints, const uint32_t nPoints, const int32_t* const triangles, const uint32_t strideTriangles, const uint32_t nTriangles, const Parameters& params) { if (GetCancel()) { return; } m_timer.Tic(); m_stage = "Voxelization"; std::ostringstream msg; if (params.m_logger) { msg << "+ " << m_stage << std::endl; params.m_logger->Log(msg.str().c_str()); } delete m_volume; m_volume = 0; int32_t iteration = 0; const int32_t maxIteration = 5; double progress = 0.0; while (iteration++ < maxIteration && !m_cancel) { msg.str(""); msg << "Iteration " << iteration; m_operation = msg.str(); progress = iteration * 100.0 / 
maxIteration; Update(progress, 0.0, params); m_volume = new Volume; m_volume->Voxelize(points, stridePoints, nPoints, triangles, strideTriangles, nTriangles, m_dim, m_barycenter, m_rot); Update(progress, 100.0, params); size_t n = m_volume->GetNPrimitivesOnSurf() + m_volume->GetNPrimitivesInsideSurf(); if (params.m_logger) { msg.str(""); msg << "\t dim = " << m_dim << "\t-> " << n << " voxels" << std::endl; params.m_logger->Log(msg.str().c_str()); } double a = pow((double)(params.m_resolution) / n, 0.33); size_t dim_next = (size_t)(m_dim * a + 0.5); if (n < params.m_resolution && iteration < maxIteration && m_volume->GetNPrimitivesOnSurf() < params.m_resolution / 8 && m_dim != dim_next) { delete m_volume; m_volume = 0; m_dim = dim_next; } else { break; } } m_overallProgress = 10.0; Update(100.0, 100.0, params); m_timer.Toc(); if (params.m_logger) { msg.str(""); msg << "\t time " << m_timer.GetElapsedTime() / 1000.0 << "s" << std::endl; params.m_logger->Log(msg.str().c_str()); } } template <class T> bool ComputeACD(const T* const points, const uint32_t nPoints, const uint32_t* const triangles, const uint32_t nTriangles, const Parameters& params) { Init(); if (params.m_projectHullVertices) { mRaycastMesh = RaycastMesh::createRaycastMesh(nPoints, points, nTriangles, (const uint32_t *)triangles); } if (params.m_oclAcceleration) { // build kernels } AlignMesh(points, 3, nPoints, (int32_t *)triangles, 3, nTriangles, params); VoxelizeMesh(points, 3, nPoints, (int32_t *)triangles, 3, nTriangles, params); ComputePrimitiveSet(params); ComputeACD(params); MergeConvexHulls(params); SimplifyConvexHulls(params); if (params.m_oclAcceleration) { // Release kernels } if (GetCancel()) { Clean(); return false; } return true; } private: RaycastMesh *mRaycastMesh{ nullptr }; SArray<Mesh*> m_convexHulls; std::string m_stage; std::string m_operation; double m_overallProgress; double m_stageProgress; double m_operationProgress; double m_rot[3][3]; double m_volumeCH0; Vec3<double> 
m_barycenter; Timer m_timer; size_t m_dim; Volume* m_volume; PrimitiveSet* m_pset; Mutex m_cancelMutex; bool m_cancel; int32_t m_ompNumProcessors; #ifdef CL_VERSION_1_1 cl_device_id* m_oclDevice; cl_context m_oclContext; cl_program m_oclProgram; cl_command_queue* m_oclQueue; cl_kernel* m_oclKernelComputePartialVolumes; cl_kernel* m_oclKernelComputeSum; size_t m_oclWorkGroupSize; #endif //CL_VERSION_1_1 ConstraintVector mConstraints; }; } #endif // VHACD_VHACD_H
12,732
C
32.158854
755
0.589695
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/VHACD/inc/vhacdVolume.h
/* Copyright (c) 2011 Khaled Mamou (kmamou at gmail dot com) All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. The names of the contributors may not be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #pragma once #ifndef VHACD_VOLUME_H #define VHACD_VOLUME_H #include "vhacdMesh.h" #include "vhacdVector.h" #include <assert.h> #ifdef _MSC_VER #pragma warning(push) #pragma warning(disable:4456 4701) #endif namespace VHACD { enum VOXEL_VALUE { PRIMITIVE_UNDEFINED = 0, PRIMITIVE_OUTSIDE_SURFACE = 1, PRIMITIVE_INSIDE_SURFACE = 2, PRIMITIVE_ON_SURFACE = 3 }; struct Voxel { public: short m_coord[3]; short m_data; }; class PrimitiveSet { public: virtual ~PrimitiveSet(){}; virtual PrimitiveSet* Create() const = 0; virtual const size_t GetNPrimitives() const = 0; virtual const size_t GetNPrimitivesOnSurf() const = 0; virtual const size_t GetNPrimitivesInsideSurf() const = 0; virtual const double GetEigenValue(AXIS axis) const = 0; virtual const double ComputeMaxVolumeError() const = 0; virtual const double ComputeVolume() const = 0; virtual void Clip(const Plane& plane, PrimitiveSet* const positivePart, PrimitiveSet* const negativePart) const = 0; virtual void Intersect(const Plane& plane, SArray<Vec3<double> >* const positivePts, SArray<Vec3<double> >* const negativePts, const size_t sampling) const = 0; virtual void ComputeExteriorPoints(const Plane& plane, const Mesh& mesh, SArray<Vec3<double> >* const exteriorPts) const = 0; virtual void ComputeClippedVolumes(const Plane& plane, double& positiveVolume, double& negativeVolume) const = 0; virtual void SelectOnSurface(PrimitiveSet* const onSurfP) const = 0; virtual void ComputeConvexHull(Mesh& meshCH, const size_t sampling = 1) const = 0; virtual void ComputeBB() = 0; virtual void ComputePrincipalAxes() = 0; virtual void AlignToPrincipalAxes() = 0; virtual void RevertAlignToPrincipalAxes() = 0; virtual void Convert(Mesh& mesh, const VOXEL_VALUE value) const = 0; const Mesh& GetConvexHull() const { return m_convexHull; }; Mesh& GetConvexHull() { return m_convexHull; }; private: Mesh m_convexHull; }; //! class VoxelSet : public PrimitiveSet { friend class Volume; public: //! Destructor. ~VoxelSet(void); //! Constructor. 
VoxelSet(); const size_t GetNPrimitives() const { return m_voxels.Size(); } const size_t GetNPrimitivesOnSurf() const { return m_numVoxelsOnSurface; } const size_t GetNPrimitivesInsideSurf() const { return m_numVoxelsInsideSurface; } const double GetEigenValue(AXIS axis) const { return m_D[axis][axis]; } const double ComputeVolume() const { return m_unitVolume * m_voxels.Size(); } const double ComputeMaxVolumeError() const { return m_unitVolume * m_numVoxelsOnSurface; } const Vec3<short>& GetMinBBVoxels() const { return m_minBBVoxels; } const Vec3<short>& GetMaxBBVoxels() const { return m_maxBBVoxels; } const Vec3<double>& GetMinBB() const { return m_minBB; } const double& GetScale() const { return m_scale; } const double& GetUnitVolume() const { return m_unitVolume; } Vec3<double> GetPoint(Vec3<short> voxel) const { return Vec3<double>(voxel[0] * m_scale + m_minBB[0], voxel[1] * m_scale + m_minBB[1], voxel[2] * m_scale + m_minBB[2]); } Vec3<double> GetPoint(const Voxel& voxel) const { return Vec3<double>(voxel.m_coord[0] * m_scale + m_minBB[0], voxel.m_coord[1] * m_scale + m_minBB[1], voxel.m_coord[2] * m_scale + m_minBB[2]); } Vec3<double> GetPoint(Vec3<double> voxel) const { return Vec3<double>(voxel[0] * m_scale + m_minBB[0], voxel[1] * m_scale + m_minBB[1], voxel[2] * m_scale + m_minBB[2]); } void GetPoints(const Voxel& voxel, Vec3<double>* const pts) const; void ComputeConvexHull(Mesh& meshCH, const size_t sampling = 1) const; void Clip(const Plane& plane, PrimitiveSet* const positivePart, PrimitiveSet* const negativePart) const; void Intersect(const Plane& plane, SArray<Vec3<double> >* const positivePts, SArray<Vec3<double> >* const negativePts, const size_t sampling) const; void ComputeExteriorPoints(const Plane& plane, const Mesh& mesh, SArray<Vec3<double> >* const exteriorPts) const; void ComputeClippedVolumes(const Plane& plane, double& positiveVolume, double& negativeVolume) const; void SelectOnSurface(PrimitiveSet* const onSurfP) const; void 
ComputeBB(); void Convert(Mesh& mesh, const VOXEL_VALUE value) const; void ComputePrincipalAxes(); PrimitiveSet* Create() const { return new VoxelSet(); } void AlignToPrincipalAxes(){}; void RevertAlignToPrincipalAxes(){}; Voxel* const GetVoxels() { return m_voxels.Data(); } const Voxel* const GetVoxels() const { return m_voxels.Data(); } private: size_t m_numVoxelsOnSurface; size_t m_numVoxelsInsideSurface; Vec3<double> m_minBB; double m_scale; SArray<Voxel, 8> m_voxels; double m_unitVolume; Vec3<double> m_minBBPts; Vec3<double> m_maxBBPts; Vec3<short> m_minBBVoxels; Vec3<short> m_maxBBVoxels; Vec3<short> m_barycenter; double m_Q[3][3]; double m_D[3][3]; Vec3<double> m_barycenterPCA; }; struct Tetrahedron { public: Vec3<double> m_pts[4]; unsigned char m_data; }; //! class TetrahedronSet : public PrimitiveSet { friend class Volume; public: //! Destructor. ~TetrahedronSet(void); //! Constructor. TetrahedronSet(); const size_t GetNPrimitives() const { return m_tetrahedra.Size(); } const size_t GetNPrimitivesOnSurf() const { return m_numTetrahedraOnSurface; } const size_t GetNPrimitivesInsideSurf() const { return m_numTetrahedraInsideSurface; } const Vec3<double>& GetMinBB() const { return m_minBB; } const Vec3<double>& GetMaxBB() const { return m_maxBB; } const Vec3<double>& GetBarycenter() const { return m_barycenter; } const double GetEigenValue(AXIS axis) const { return m_D[axis][axis]; } const double GetSacle() const { return m_scale; } const double ComputeVolume() const; const double ComputeMaxVolumeError() const; void ComputeConvexHull(Mesh& meshCH, const size_t sampling = 1) const; void ComputePrincipalAxes(); void AlignToPrincipalAxes(); void RevertAlignToPrincipalAxes(); void Clip(const Plane& plane, PrimitiveSet* const positivePart, PrimitiveSet* const negativePart) const; void Intersect(const Plane& plane, SArray<Vec3<double> >* const positivePts, SArray<Vec3<double> >* const negativePts, const size_t sampling) const; void ComputeExteriorPoints(const 
Plane& plane, const Mesh& mesh, SArray<Vec3<double> >* const exteriorPts) const; void ComputeClippedVolumes(const Plane& plane, double& positiveVolume, double& negativeVolume) const; void SelectOnSurface(PrimitiveSet* const onSurfP) const; void ComputeBB(); void Convert(Mesh& mesh, const VOXEL_VALUE value) const; inline bool Add(Tetrahedron& tetrahedron); PrimitiveSet* Create() const { return new TetrahedronSet(); } static const double EPS; private: void AddClippedTetrahedra(const Vec3<double> (&pts)[10], const int32_t nPts); size_t m_numTetrahedraOnSurface; size_t m_numTetrahedraInsideSurface; double m_scale; Vec3<double> m_minBB; Vec3<double> m_maxBB; Vec3<double> m_barycenter; SArray<Tetrahedron, 8> m_tetrahedra; double m_Q[3][3]; double m_D[3][3]; }; //! class Volume { public: //! Destructor. ~Volume(void); //! Constructor. Volume(); //! Voxelize template <class T> void Voxelize(const T* const points, const uint32_t stridePoints, const uint32_t nPoints, const int32_t* const triangles, const uint32_t strideTriangles, const uint32_t nTriangles, const size_t dim, const Vec3<double>& barycenter, const double (&rot)[3][3]); unsigned char& GetVoxel(const size_t i, const size_t j, const size_t k) { assert(i < m_dim[0] || i >= 0); assert(j < m_dim[0] || j >= 0); assert(k < m_dim[0] || k >= 0); return m_data[i + j * m_dim[0] + k * m_dim[0] * m_dim[1]]; } const unsigned char& GetVoxel(const size_t i, const size_t j, const size_t k) const { assert(i < m_dim[0] || i >= 0); assert(j < m_dim[0] || j >= 0); assert(k < m_dim[0] || k >= 0); return m_data[i + j * m_dim[0] + k * m_dim[0] * m_dim[1]]; } const size_t GetNPrimitivesOnSurf() const { return m_numVoxelsOnSurface; } const size_t GetNPrimitivesInsideSurf() const { return m_numVoxelsInsideSurface; } void Convert(Mesh& mesh, const VOXEL_VALUE value) const; void Convert(VoxelSet& vset) const; void Convert(TetrahedronSet& tset) const; void AlignToPrincipalAxes(double (&rot)[3][3]) const; private: void 
FillOutsideSurface(const size_t i0, const size_t j0, const size_t k0, const size_t i1, const size_t j1, const size_t k1); void FillInsideSurface(); template <class T> void ComputeBB(const T* const points, const uint32_t stridePoints, const uint32_t nPoints, const Vec3<double>& barycenter, const double (&rot)[3][3]); void Allocate(); void Free(); Vec3<double> m_minBB; Vec3<double> m_maxBB; double m_scale; size_t m_dim[3]; //>! dim size_t m_numVoxelsOnSurface; size_t m_numVoxelsInsideSurface; size_t m_numVoxelsOutsideSurface; unsigned char* m_data; }; int32_t TriBoxOverlap(const Vec3<double>& boxcenter, const Vec3<double>& boxhalfsize, const Vec3<double>& triver0, const Vec3<double>& triver1, const Vec3<double>& triver2); template <class T> inline void ComputeAlignedPoint(const T* const points, const uint32_t idx, const Vec3<double>& barycenter, const double (&rot)[3][3], Vec3<double>& pt){}; template <> inline void ComputeAlignedPoint<float>(const float* const points, const uint32_t idx, const Vec3<double>& barycenter, const double (&rot)[3][3], Vec3<double>& pt) { double x = points[idx + 0] - barycenter[0]; double y = points[idx + 1] - barycenter[1]; double z = points[idx + 2] - barycenter[2]; pt[0] = rot[0][0] * x + rot[1][0] * y + rot[2][0] * z; pt[1] = rot[0][1] * x + rot[1][1] * y + rot[2][1] * z; pt[2] = rot[0][2] * x + rot[1][2] * y + rot[2][2] * z; } template <> inline void ComputeAlignedPoint<double>(const double* const points, const uint32_t idx, const Vec3<double>& barycenter, const double (&rot)[3][3], Vec3<double>& pt) { double x = points[idx + 0] - barycenter[0]; double y = points[idx + 1] - barycenter[1]; double z = points[idx + 2] - barycenter[2]; pt[0] = rot[0][0] * x + rot[1][0] * y + rot[2][0] * z; pt[1] = rot[0][1] * x + rot[1][1] * y + rot[2][1] * z; pt[2] = rot[0][2] * x + rot[1][2] * y + rot[2][2] * z; } template <class T> void Volume::ComputeBB(const T* const points, const uint32_t stridePoints, const uint32_t nPoints, const Vec3<double>& 
barycenter, const double (&rot)[3][3]) { Vec3<double> pt; ComputeAlignedPoint(points, 0, barycenter, rot, pt); m_maxBB = pt; m_minBB = pt; for (uint32_t v = 1; v < nPoints; ++v) { ComputeAlignedPoint(points, v * stridePoints, barycenter, rot, pt); for (int32_t i = 0; i < 3; ++i) { if (pt[i] < m_minBB[i]) m_minBB[i] = pt[i]; else if (pt[i] > m_maxBB[i]) m_maxBB[i] = pt[i]; } } } template <class T> void Volume::Voxelize(const T* const points, const uint32_t stridePoints, const uint32_t nPoints, const int32_t* const triangles, const uint32_t strideTriangles, const uint32_t nTriangles, const size_t dim, const Vec3<double>& barycenter, const double (&rot)[3][3]) { if (nPoints == 0) { return; } ComputeBB(points, stridePoints, nPoints, barycenter, rot); double d[3] = { m_maxBB[0] - m_minBB[0], m_maxBB[1] - m_minBB[1], m_maxBB[2] - m_minBB[2] }; double r; if (d[0] > d[1] && d[0] > d[2]) { r = d[0]; m_dim[0] = dim; m_dim[1] = 2 + static_cast<size_t>(dim * d[1] / d[0]); m_dim[2] = 2 + static_cast<size_t>(dim * d[2] / d[0]); } else if (d[1] > d[0] && d[1] > d[2]) { r = d[1]; m_dim[1] = dim; m_dim[0] = 2 + static_cast<size_t>(dim * d[0] / d[1]); m_dim[2] = 2 + static_cast<size_t>(dim * d[2] / d[1]); } else { r = d[2]; m_dim[2] = dim; m_dim[0] = 2 + static_cast<size_t>(dim * d[0] / d[2]); m_dim[1] = 2 + static_cast<size_t>(dim * d[1] / d[2]); } m_scale = r / (dim - 1); double invScale = (dim - 1) / r; Allocate(); m_numVoxelsOnSurface = 0; m_numVoxelsInsideSurface = 0; m_numVoxelsOutsideSurface = 0; Vec3<double> p[3]; size_t i, j, k; size_t i0, j0, k0; size_t i1, j1, k1; Vec3<double> boxcenter; Vec3<double> pt; const Vec3<double> boxhalfsize(0.5, 0.5, 0.5); for (size_t t = 0, ti = 0; t < nTriangles; ++t, ti += strideTriangles) { Vec3<int32_t> tri(triangles[ti + 0], triangles[ti + 1], triangles[ti + 2]); for (int32_t c = 0; c < 3; ++c) { ComputeAlignedPoint(points, tri[c] * stridePoints, barycenter, rot, pt); p[c][0] = (pt[0] - m_minBB[0]) * invScale; p[c][1] = (pt[1] - 
m_minBB[1]) * invScale; p[c][2] = (pt[2] - m_minBB[2]) * invScale; i = static_cast<size_t>(p[c][0] + 0.5); j = static_cast<size_t>(p[c][1] + 0.5); k = static_cast<size_t>(p[c][2] + 0.5); assert(i < m_dim[0] && i >= 0 && j < m_dim[1] && j >= 0 && k < m_dim[2] && k >= 0); if (c == 0) { i0 = i1 = i; j0 = j1 = j; k0 = k1 = k; } else { if (i < i0) i0 = i; if (j < j0) j0 = j; if (k < k0) k0 = k; if (i > i1) i1 = i; if (j > j1) j1 = j; if (k > k1) k1 = k; } } if (i0 > 0) --i0; if (j0 > 0) --j0; if (k0 > 0) --k0; if (i1 < m_dim[0]) ++i1; if (j1 < m_dim[1]) ++j1; if (k1 < m_dim[2]) ++k1; for (size_t i = i0; i < i1; ++i) { boxcenter[0] = (double)i; for (size_t j = j0; j < j1; ++j) { boxcenter[1] = (double)j; for (size_t k = k0; k < k1; ++k) { boxcenter[2] = (double)k; int32_t res = TriBoxOverlap(boxcenter, boxhalfsize, p[0], p[1], p[2]); unsigned char& value = GetVoxel(i, j, k); if (res == 1 && value == PRIMITIVE_UNDEFINED) { value = PRIMITIVE_ON_SURFACE; ++m_numVoxelsOnSurface; } } } } } FillOutsideSurface(0, 0, 0, m_dim[0], m_dim[1], 1); FillOutsideSurface(0, 0, m_dim[2] - 1, m_dim[0], m_dim[1], m_dim[2]); FillOutsideSurface(0, 0, 0, m_dim[0], 1, m_dim[2]); FillOutsideSurface(0, m_dim[1] - 1, 0, m_dim[0], m_dim[1], m_dim[2]); FillOutsideSurface(0, 0, 0, 1, m_dim[1], m_dim[2]); FillOutsideSurface(m_dim[0] - 1, 0, 0, m_dim[0], m_dim[1], m_dim[2]); FillInsideSurface(); } } #ifdef _MSC_VER #pragma warning(pop) #endif #endif // VHACD_VOLUME_H
17,055
C
38.573086
756
0.612899
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/VHACD/inc/vhacdTimer.h
/* Copyright (c) 2011 Khaled Mamou (kmamou at gmail dot com) All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. The names of the contributors may not be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/
#pragma once
#ifndef VHACD_TIMER_H
#define VHACD_TIMER_H

#ifdef _WIN32
#ifndef WIN32_LEAN_AND_MEAN
#define WIN32_LEAN_AND_MEAN // Exclude rarely-used stuff from Windows headers
#endif
#include <windows.h>
#elif __MACH__
#include <mach/clock.h>
#include <mach/mach.h>
#else
#include <sys/time.h>
#include <time.h>
#endif
namespace VHACD {
#ifdef _WIN32
// Simple stopwatch: call Tic() to start, Toc() to stop, then GetElapsedTime()
// for the interval in milliseconds. Windows flavor, backed by
// QueryPerformanceCounter / QueryPerformanceFrequency.
class Timer {
public:
    Timer(void)
    {
        m_start.QuadPart = 0;
        m_stop.QuadPart = 0;
        // Ticks-per-second is fixed at boot; query it once in the constructor.
        QueryPerformanceFrequency(&m_freq);
    };
    ~Timer(void){};
    void Tic() { QueryPerformanceCounter(&m_start); }
    void Toc() { QueryPerformanceCounter(&m_stop); }
    double GetElapsedTime() // in ms
    {
        LARGE_INTEGER delta;
        delta.QuadPart = m_stop.QuadPart - m_start.QuadPart;
        return (1000.0 * delta.QuadPart) / (double)m_freq.QuadPart;
    }

private:
    LARGE_INTEGER m_start; // counter value at Tic()
    LARGE_INTEGER m_stop;  // counter value at Toc()
    LARGE_INTEGER m_freq;  // counter ticks per second
};
#elif __MACH__
// macOS flavor of the same stopwatch, backed by a Mach calendar clock service.
class Timer {
public:
    Timer(void)
    {
        // NOTE(review): zeroing *this with memset is only safe while every member
        // stays trivial (clock_serv_t and mach_timespec_t are plain data here).
        memset(this, 0, sizeof(Timer));
        host_get_clock_service(mach_host_self(), CALENDAR_CLOCK, &m_cclock);
    };
    ~Timer(void)
    {
        // Release the clock port acquired in the constructor.
        mach_port_deallocate(mach_task_self(), m_cclock);
    };
    void Tic() { clock_get_time(m_cclock, &m_start); }
    void Toc() { clock_get_time(m_cclock, &m_stop); }
    double GetElapsedTime() // in ms
    {
        return 1000.0 * (m_stop.tv_sec - m_start.tv_sec + (1.0E-9) * (m_stop.tv_nsec - m_start.tv_nsec));
    }

private:
    clock_serv_t m_cclock;
    mach_timespec_t m_start;
    mach_timespec_t m_stop;
};
#else
// POSIX flavor of the same stopwatch, backed by clock_gettime(CLOCK_REALTIME).
// NOTE(review): CLOCK_REALTIME can jump if the wall clock is adjusted; a
// monotonic clock would be steadier — confirm before relying on it for profiling.
class Timer {
public:
    Timer(void) { memset(this, 0, sizeof(Timer)); };
    ~Timer(void){};
    void Tic() { clock_gettime(CLOCK_REALTIME, &m_start); }
    void Toc() { clock_gettime(CLOCK_REALTIME, &m_stop); }
    double GetElapsedTime() // in ms
    {
        return 1000.0 * (m_stop.tv_sec - m_start.tv_sec + (1.0E-9) * (m_stop.tv_nsec - m_start.tv_nsec));
    }

private:
    struct timespec m_start;
    struct timespec m_stop;
};
#endif
}
#endif // VHACD_TIMER_H
3,644
C
28.877049
756
0.671515
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/VHACD/inc/vhacdManifoldMesh.h
/* Copyright (c) 2011 Khaled Mamou (kmamou at gmail dot com) All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. The names of the contributors may not be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #pragma once #ifndef VHACD_MANIFOLD_MESH_H #define VHACD_MANIFOLD_MESH_H #include "vhacdCircularList.h" #include "vhacdSArray.h" #include "vhacdVector.h" namespace VHACD { class TMMTriangle; class TMMEdge; class TMMesh; class ICHull; //! Vertex data structure used in a triangular manifold mesh (TMM). 
class TMMVertex {
public:
    void Initialize();
    TMMVertex(void);
    ~TMMVertex(void);

private:
    Vec3<double> m_pos;                       // vertex position
    int32_t m_name;                           // external vertex id
    size_t m_id;                              // internal sequential id
    CircularListElement<TMMEdge>* m_duplicate; // pointer to incident cone edge (or NULL)
    bool m_onHull;
    bool m_tag;
    TMMVertex(const TMMVertex& rhs);          // copying disabled (declared, not defined)

    friend class ICHull;
    friend class TMMesh;
    friend class TMMTriangle;
    friend class TMMEdge;
};

//! Edge data structure used in a triangular manifold mesh (TMM).
class TMMEdge {
public:
    void Initialize();
    TMMEdge(void);
    ~TMMEdge(void);

private:
    size_t m_id;
    CircularListElement<TMMTriangle>* m_triangles[2]; // the two incident faces
    CircularListElement<TMMVertex>* m_vertices[2];    // the two endpoints
    CircularListElement<TMMTriangle>* m_newFace;      // replacement face during hull updates
    TMMEdge(const TMMEdge& rhs);                      // copying disabled (declared, not defined)

    friend class ICHull;
    friend class TMMTriangle;
    friend class TMMVertex;
    friend class TMMesh;
};

//! Triangle data structure used in a triangular manifold mesh (TMM).
class TMMTriangle {
public:
    void Initialize();
    TMMTriangle(void);
    ~TMMTriangle(void);

private:
    size_t m_id;
    CircularListElement<TMMEdge>* m_edges[3];
    CircularListElement<TMMVertex>* m_vertices[3];
    bool m_visible; // marked visible from the point being added during hull construction
    TMMTriangle(const TMMTriangle& rhs); // copying disabled (declared, not defined)

    friend class ICHull;
    friend class TMMesh;
    friend class TMMVertex;
    friend class TMMEdge;
};

//! Triangular manifold mesh data structure: vertices, edges and triangles kept
//! in circular lists so elements can be added/removed in O(1) during incremental
//! convex-hull construction.
class TMMesh {
public:
    //! Returns the number of vertices
    inline size_t GetNVertices() const { return m_vertices.GetSize(); }
    //! Returns the number of edges
    inline size_t GetNEdges() const { return m_edges.GetSize(); }
    //! Returns the number of triangles
    inline size_t GetNTriangles() const { return m_triangles.GetSize(); }
    //! Returns the vertices circular list
    inline const CircularList<TMMVertex>& GetVertices() const { return m_vertices; }
    //! Returns the edges circular list
    inline const CircularList<TMMEdge>& GetEdges() const { return m_edges; }
    //! Returns the triangles circular list
    inline const CircularList<TMMTriangle>& GetTriangles() const { return m_triangles; }
    //! Returns the vertices circular list
    inline CircularList<TMMVertex>& GetVertices() { return m_vertices; }
    //! Returns the edges circular list
    inline CircularList<TMMEdge>& GetEdges() { return m_edges; }
    //! Returns the triangles circular list
    inline CircularList<TMMTriangle>& GetTriangles() { return m_triangles; }
    //! Add a vertex to the mesh
    CircularListElement<TMMVertex>* AddVertex() { return m_vertices.Add(); }
    //! Add an edge to the mesh
    CircularListElement<TMMEdge>* AddEdge() { return m_edges.Add(); }
    //! Add a triangle to the mesh
    CircularListElement<TMMTriangle>* AddTriangle() { return m_triangles.Add(); }
    //! Print mesh information
    void Print();
    //! Export as indexed face set (points + triangle index triples).
    void GetIFS(Vec3<double>* const points, Vec3<int32_t>* const triangles);
    //! Remove all vertices, edges and triangles.
    void Clear();
    //! Deep-copy this mesh into `mesh`.
    void Copy(TMMesh& mesh);
    //! Verify manifold invariants. ("Consistancy" is a historical typo kept for API compatibility.)
    bool CheckConsistancy();
    //! Normalize / denormalize vertex positions.
    bool Normalize();
    bool Denormalize();
    //! Constructor
    TMMesh();
    //! Destructor
    virtual ~TMMesh(void);

private:
    CircularList<TMMVertex> m_vertices;
    CircularList<TMMEdge> m_edges;
    CircularList<TMMTriangle> m_triangles;

    // not defined
    TMMesh(const TMMesh& rhs);

    friend class ICHull;
};
}
#endif // VHACD_MANIFOLD_MESH_H
5,225
C
35.802817
756
0.716938
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/VHACD/inc/btAlignedAllocator.h
/*
Bullet Continuous Collision Detection and Physics Library
Copyright (c) 2003-2006 Erwin Coumans  http://continuousphysics.com/Bullet/

This software is provided 'as-is', without any express or implied warranty.
In no event will the authors be held liable for any damages arising from the use of this software.
Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it freely,
subject to the following restrictions:

1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.
*/

#ifndef BT_ALIGNED_ALLOCATOR
#define BT_ALIGNED_ALLOCATOR

///we probably replace this with our own aligned memory allocator
///so we replace _aligned_malloc and _aligned_free with our own
///that is better portable and more predictable

#include "btScalar.h"

//#define BT_DEBUG_MEMORY_ALLOCATIONS 1
#ifdef BT_DEBUG_MEMORY_ALLOCATIONS
// Debug build: route every allocation through an instrumented entry point that
// records the call site (__LINE__/__FILE__).
#define btAlignedAlloc(a, b) \
    btAlignedAllocInternal(a, b, __LINE__, __FILE__)

#define btAlignedFree(ptr) \
    btAlignedFreeInternal(ptr, __LINE__, __FILE__)

void* btAlignedAllocInternal(size_t size, int32_t alignment, int32_t line, char* filename);

void btAlignedFreeInternal(void* ptr, int32_t line, char* filename);

#else
void* btAlignedAllocInternal(size_t size, int32_t alignment);
void btAlignedFreeInternal(void* ptr);

#define btAlignedAlloc(size, alignment) btAlignedAllocInternal(size, alignment)
#define btAlignedFree(ptr) btAlignedFreeInternal(ptr)

#endif
typedef int32_t size_type;

// Function-type hooks so applications can plug in their own (aligned and
// unaligned) allocators via the setters below.
typedef void*(btAlignedAllocFunc)(size_t size, int32_t alignment);
typedef void(btAlignedFreeFunc)(void* memblock);
typedef void*(btAllocFunc)(size_t size);
typedef void(btFreeFunc)(void* memblock);

///The developer can let all Bullet memory allocations go through a custom memory allocator, using btAlignedAllocSetCustom
void btAlignedAllocSetCustom(btAllocFunc* allocFunc, btFreeFunc* freeFunc);
///If the developer has already an custom aligned allocator, then btAlignedAllocSetCustomAligned can be used. The default aligned allocator pre-allocates extra memory using the non-aligned allocator, and instruments it.
void btAlignedAllocSetCustomAligned(btAlignedAllocFunc* allocFunc, btAlignedFreeFunc* freeFunc);

///The btAlignedAllocator is a portable class for aligned memory allocations.
///Default implementations for unaligned and aligned allocations can be overridden by a custom allocator using btAlignedAllocSetCustom and btAlignedAllocSetCustomAligned.
template <typename T, unsigned Alignment>
class btAlignedAllocator {
    typedef btAlignedAllocator<T, Alignment> self_type;

public:
    //just going down a list:
    btAlignedAllocator() {}
    /*
    btAlignedAllocator( const self_type & ) {}
    */
    template <typename Other>
    btAlignedAllocator(const btAlignedAllocator<Other, Alignment>&) {}

    // STL-allocator-style member typedefs.
    typedef const T* const_pointer;
    typedef const T& const_reference;
    typedef T* pointer;
    typedef T& reference;
    typedef T value_type;

    pointer address(reference ref) const { return &ref; }
    const_pointer address(const_reference ref) const { return &ref; }
    pointer allocate(size_type n, const_pointer* hint = 0)
    {
        (void)hint; // hint is unused; kept for allocator-interface compatibility
        return reinterpret_cast<pointer>(btAlignedAlloc(sizeof(value_type) * n, Alignment));
    }
    void construct(pointer ptr, const value_type& value) { new (ptr) value_type(value); }
    void deallocate(pointer ptr)
    {
        btAlignedFree(reinterpret_cast<void*>(ptr));
    }
    void destroy(pointer ptr) { ptr->~value_type(); }

    template <typename O>
    struct rebind {
        typedef btAlignedAllocator<O, Alignment> other;
    };
    template <typename O>
    self_type& operator=(const btAlignedAllocator<O, Alignment>&) { return *this; }
    // All instances are interchangeable: the allocator holds no state.
    friend bool operator==(const self_type&, const self_type&) { return true; }
};

#endif //BT_ALIGNED_ALLOCATOR
4,253
C
39.514285
243
0.758288
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/VHACD/inc/btConvexHullComputer.h
/*
Copyright (c) 2011 Ole Kniemeyer, MAXON, www.maxon.net

This software is provided 'as-is', without any express or implied warranty.
In no event will the authors be held liable for any damages arising from the use of this software.
Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it freely,
subject to the following restrictions:

1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.
*/

#ifndef BT_CONVEX_HULL_COMPUTER_H
#define BT_CONVEX_HULL_COMPUTER_H

#include "btAlignedObjectArray.h"
#include "btVector3.h"

/// Convex hull implementation based on Preparata and Hong
/// See http://code.google.com/p/bullet/issues/detail?id=275
/// Ole Kniemeyer, MAXON Computer GmbH
class btConvexHullComputer {
private:
    // Shared implementation for the two public overloads; doubleCoords selects
    // whether "coords" points at floats or doubles.
    btScalar compute(const void* coords, bool doubleCoords, int32_t stride, int32_t count, btScalar shrink, btScalar shrinkClamp);

public:
    // Half-edge of the output hull. Edges are stored contiguously in the
    // "edges" array; "next" and "reverse" are offsets relative to this edge,
    // hence the pointer arithmetic (this + next / this + reverse) below.
    class Edge {
    private:
        int32_t next;         // relative offset to the next edge around the source vertex
        int32_t reverse;      // relative offset to the oppositely-directed twin edge
        int32_t targetVertex; // index into "vertices"

        friend class btConvexHullComputer;

    public:
        int32_t getSourceVertex() const
        {
            // The twin edge points back at this edge's source.
            return (this + reverse)->targetVertex;
        }

        int32_t getTargetVertex() const
        {
            return targetVertex;
        }

        const Edge* getNextEdgeOfVertex() const // clockwise list of all edges of a vertex
        {
            return this + next;
        }

        const Edge* getNextEdgeOfFace() const // counter-clockwise list of all edges of a face
        {
            return (this + reverse)->getNextEdgeOfVertex();
        }

        const Edge* getReverseEdge() const
        {
            return this + reverse;
        }
    };

    // Vertices of the output hull
    btAlignedObjectArray<btVector3> vertices;

    // Edges of the output hull
    btAlignedObjectArray<Edge> edges;

    // Faces of the convex hull. Each entry is an index into the "edges" array pointing to an edge of the face. Faces are planar n-gons
    btAlignedObjectArray<int32_t> faces;

    /*
    Compute convex hull of "count" vertices stored in "coords".
    "stride" is the difference in bytes between the addresses of consecutive vertices.
    If "shrink" is positive, the convex hull is shrunken by that amount
    (each face is moved by "shrink" length units towards the center along its normal).
    If "shrinkClamp" is positive, "shrink" is clamped to not exceed
    "shrinkClamp * innerRadius", where "innerRadius" is the minimum distance of a
    face to the center of the convex hull.

    The returned value is the amount by which the hull has been shrunken.
    If it is negative, the amount was so large that the resulting convex hull is empty.

    The output convex hull can be found in the member variables "vertices", "edges", "faces".
    */
    btScalar compute(const float* coords, int32_t stride, int32_t count, btScalar shrink, btScalar shrinkClamp)
    {
        return compute(coords, false, stride, count, shrink, shrinkClamp);
    }

    // same as above, but double precision
    btScalar compute(const double* coords, int32_t stride, int32_t count, btScalar shrink, btScalar shrinkClamp)
    {
        return compute(coords, true, stride, count, shrink, shrinkClamp);
    }
};

#endif //BT_CONVEX_HULL_COMPUTER_H
3,745
C
37.224489
243
0.695861
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/VHACD/inc/vhacdSArray.h
/*
Copyright (c) 2011 Khaled Mamou (kmamou at gmail dot com)
All rights reserved.

Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:

1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.

2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.

3. The names of the contributors may not be used to endorse or promote products derived from this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#pragma once
#ifndef VHACD_SARRAY_H
#define VHACD_SARRAY_H
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#define SARRAY_DEFAULT_MIN_SIZE 16

namespace VHACD {
//! Small-buffer dynamic array: the first N elements live in an inline buffer;
//! once capacity exceeds N the contents spill to a heap block that doubles on
//! growth. Elements are moved with memcpy, so T must be trivially copyable.
template <typename T, size_t N = 64>
class SArray {
public:
    T& operator[](size_t i)
    {
        T* const storage = Data();
        return storage[i];
    }
    const T& operator[](size_t i) const
    {
        const T* const storage = Data();
        return storage[i];
    }
    //! Number of elements currently stored.
    size_t Size() const
    {
        return m_size;
    }
    //! Active storage: the inline buffer while capacity is still N, else the heap block.
    T* const Data()
    {
        return (m_maxSize == N) ? m_data0 : m_data;
    }
    const T* const Data() const
    {
        return (m_maxSize == N) ? m_data0 : m_data;
    }
    //! Drop all elements and fall back to the inline buffer.
    void Clear()
    {
        delete[] m_data;
        m_data = 0;
        m_maxSize = N;
        m_size = 0;
    }
    //! Remove the last element (no bounds check, like the original).
    void PopBack()
    {
        m_size -= 1;
    }
    //! Ensure capacity of at least `size`; existing elements are preserved.
    void Allocate(size_t size)
    {
        if (size <= m_maxSize)
            return;
        T* grown = new T[size];
        memcpy(grown, Data(), m_size * sizeof(T));
        delete[] m_data;
        m_data = grown;
        m_maxSize = size;
    }
    //! Set the element count, growing capacity as needed. New slots are uninitialized.
    void Resize(size_t size)
    {
        Allocate(size);
        m_size = size;
    }
    //! Append `value`, doubling capacity when the array is full.
    void PushBack(const T& value)
    {
        if (m_size == m_maxSize) {
            const size_t doubled = m_maxSize << 1;
            T* grown = new T[doubled];
            memcpy(grown, Data(), m_maxSize * sizeof(T));
            delete[] m_data;
            m_data = grown;
            m_maxSize = doubled;
        }
        Data()[m_size++] = value;
    }
    //! Linear search; on success `pos` is the index of the first match.
    bool Find(const T& value, size_t& pos)
    {
        const T* const storage = Data();
        for (pos = 0; pos < m_size; ++pos) {
            if (value == storage[pos])
                return true;
        }
        return false;
    }
    //! Append `value` only if it is not already present; true when added.
    bool Insert(const T& value)
    {
        size_t where;
        if (Find(value, where))
            return false;
        PushBack(value);
        return true;
    }
    //! Remove the first occurrence of `value`, shifting the tail left; true when removed.
    bool Erase(const T& value)
    {
        size_t where;
        if (!Find(value, where))
            return false;
        T* const storage = Data();
        for (size_t j = where + 1; j < m_size; ++j)
            storage[j - 1] = storage[j];
        --m_size;
        return true;
    }
    //! Copy assignment: grows only when the current capacity cannot hold rhs.
    void operator=(const SArray& rhs)
    {
        if (m_maxSize < rhs.m_size) {
            delete[] m_data;
            m_maxSize = rhs.m_maxSize;
            m_data = new T[m_maxSize];
        }
        m_size = rhs.m_size;
        memcpy(Data(), rhs.Data(), m_size * sizeof(T));
    }
    //! Reset to the empty, inline-buffer state (does not free; used by constructors).
    void Initialize()
    {
        m_data = 0;
        m_maxSize = N;
        m_size = 0;
    }
    SArray(const SArray& rhs)
    {
        Initialize();
        *this = rhs;
    }
    SArray()
    {
        Initialize();
    }
    ~SArray()
    {
        delete[] m_data;
    }

private:
    T m_data0[N];     // inline small-buffer storage
    T* m_data;        // heap storage once capacity exceeds N
    size_t m_size;
    size_t m_maxSize; // current capacity (== N while inline)
};
}
#endif
4,473
C
27.316456
756
0.565169
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/VHACD/inc/vhacdVector.h
/* Copyright (c) 2011 Khaled Mamou (kmamou at gmail dot com) All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. The names of the contributors may not be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #pragma once #ifndef VHACD_VECTOR_H #define VHACD_VECTOR_H #include <iostream> #include <math.h> namespace VHACD { //! Vector dim 3. 
template <typename T> class Vec3 { public: T& operator[](size_t i) { return m_data[i]; } const T& operator[](size_t i) const { return m_data[i]; } T& X(); T& Y(); T& Z(); const T& X() const; const T& Y() const; const T& Z() const; void Normalize(); T GetNorm() const; void operator=(const Vec3& rhs); void operator+=(const Vec3& rhs); void operator-=(const Vec3& rhs); void operator-=(T a); void operator+=(T a); void operator/=(T a); void operator*=(T a); Vec3 operator^(const Vec3& rhs) const; T operator*(const Vec3& rhs) const; Vec3 operator+(const Vec3& rhs) const; Vec3 operator-(const Vec3& rhs) const; Vec3 operator-() const; Vec3 operator*(T rhs) const; Vec3 operator/(T rhs) const; bool operator<(const Vec3& rhs) const; bool operator>(const Vec3& rhs) const; Vec3(); Vec3(T a); Vec3(T x, T y, T z); Vec3(const Vec3& rhs); /*virtual*/ ~Vec3(void); // Compute the center of this bounding box and return the diagonal length T GetCenter(const Vec3 &bmin, const Vec3 &bmax) { X() = (bmin.X() + bmax.X())*0.5; Y() = (bmin.Y() + bmax.Y())*0.5; Z() = (bmin.Z() + bmax.Z())*0.5; T dx = bmax.X() - bmin.X(); T dy = bmax.Y() - bmin.Y(); T dz = bmax.Z() - bmin.Z(); T diagonal = T(sqrt(dx*dx + dy*dy + dz*dz)); return diagonal; } // Update the min/max values relative to this point void UpdateMinMax(Vec3 &bmin,Vec3 &bmax) const { if (X() < bmin.X()) { bmin.X() = X(); } if (Y() < bmin.Y()) { bmin.Y() = Y(); } if (Z() < bmin.Z()) { bmin.Z() = Z(); } if (X() > bmax.X()) { bmax.X() = X(); } if (X() > bmax.X()) { bmax.X() = X(); } if (Y() > bmax.Y()) { bmax.Y() = Y(); } if (Z() > bmax.Z()) { bmax.Z() = Z(); } } // Returns the squared distance between these two points T GetDistanceSquared(const Vec3 &p) const { T dx = X() - p.X(); T dy = Y() - p.Y(); T dz = Z() - p.Z(); return dx*dx + dy*dy + dz*dz; } T GetDistance(const Vec3 &p) const { return sqrt(GetDistanceSquared(p)); } // Returns the raw vector data as a pointer T* GetData(void) { return m_data; } private: T m_data[3]; }; //! 
Vector dim 2. template <typename T> class Vec2 { public: T& operator[](size_t i) { return m_data[i]; } const T& operator[](size_t i) const { return m_data[i]; } T& X(); T& Y(); const T& X() const; const T& Y() const; void Normalize(); T GetNorm() const; void operator=(const Vec2& rhs); void operator+=(const Vec2& rhs); void operator-=(const Vec2& rhs); void operator-=(T a); void operator+=(T a); void operator/=(T a); void operator*=(T a); T operator^(const Vec2& rhs) const; T operator*(const Vec2& rhs) const; Vec2 operator+(const Vec2& rhs) const; Vec2 operator-(const Vec2& rhs) const; Vec2 operator-() const; Vec2 operator*(T rhs) const; Vec2 operator/(T rhs) const; Vec2(); Vec2(T a); Vec2(T x, T y); Vec2(const Vec2& rhs); /*virtual*/ ~Vec2(void); private: T m_data[2]; }; template <typename T> const bool Colinear(const Vec3<T>& a, const Vec3<T>& b, const Vec3<T>& c); template <typename T> const T ComputeVolume4(const Vec3<T>& a, const Vec3<T>& b, const Vec3<T>& c, const Vec3<T>& d); } #include "vhacdVector.inl" // template implementation #endif
5,364
C
30.934524
756
0.598993
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/VHACD/inc/FloatMath.h
#ifndef FLOAT_MATH_LIB_H #define FLOAT_MATH_LIB_H #include <float.h> #include <stdint.h> namespace FLOAT_MATH { enum FM_ClipState { FMCS_XMIN = (1<<0), FMCS_XMAX = (1<<1), FMCS_YMIN = (1<<2), FMCS_YMAX = (1<<3), FMCS_ZMIN = (1<<4), FMCS_ZMAX = (1<<5), }; enum FM_Axis { FM_XAXIS = (1<<0), FM_YAXIS = (1<<1), FM_ZAXIS = (1<<2) }; enum LineSegmentType { LS_START, LS_MIDDLE, LS_END }; const float FM_PI = 3.1415926535897932384626433832795028841971693993751f; const float FM_DEG_TO_RAD = ((2.0f * FM_PI) / 360.0f); const float FM_RAD_TO_DEG = (360.0f / (2.0f * FM_PI)); //***************** Float versions //*** //*** vectors are assumed to be 3 floats or 3 doubles representing X, Y, Z //*** quaternions are assumed to be 4 floats or 4 doubles representing X,Y,Z,W //*** matrices are assumed to be 16 floats or 16 doubles representing a standard D3D or OpenGL style 4x4 matrix //*** bounding volumes are expressed as two sets of 3 floats/double representing bmin(x,y,z) and bmax(x,y,z) //*** Plane equations are assumed to be 4 floats or 4 doubles representing Ax,By,Cz,D FM_Axis fm_getDominantAxis(const float normal[3]); FM_Axis fm_getDominantAxis(const double normal[3]); void fm_decomposeTransform(const float local_transform[16],float trans[3],float rot[4],float scale[3]); void fm_decomposeTransform(const double local_transform[16],double trans[3],double rot[4],double scale[3]); void fm_multiplyTransform(const float *pA,const float *pB,float *pM); void fm_multiplyTransform(const double *pA,const double *pB,double *pM); void fm_inverseTransform(const float matrix[16],float inverse_matrix[16]); void fm_inverseTransform(const double matrix[16],double inverse_matrix[16]); void fm_identity(float matrix[16]); // set 4x4 matrix to identity. void fm_identity(double matrix[16]); // set 4x4 matrix to identity. void fm_inverseRT(const float matrix[16], const float pos[3], float t[3]); // inverse rotate translate the point. 
void fm_inverseRT(const double matrix[16],const double pos[3],double t[3]); // inverse rotate translate the point. void fm_transform(const float matrix[16], const float pos[3], float t[3]); // rotate and translate this point. void fm_transform(const double matrix[16],const double pos[3],double t[3]); // rotate and translate this point. float fm_getDeterminant(const float matrix[16]); double fm_getDeterminant(const double matrix[16]); void fm_getSubMatrix(int32_t ki,int32_t kj,float pDst[16],const float matrix[16]); void fm_getSubMatrix(int32_t ki,int32_t kj,double pDst[16],const float matrix[16]); void fm_rotate(const float matrix[16],const float pos[3],float t[3]); // only rotate the point by a 4x4 matrix, don't translate. void fm_rotate(const double matri[16],const double pos[3],double t[3]); // only rotate the point by a 4x4 matrix, don't translate. void fm_eulerToMatrix(float ax,float ay,float az,float matrix[16]); // convert euler (in radians) to a dest 4x4 matrix (translation set to zero) void fm_eulerToMatrix(double ax,double ay,double az,double matrix[16]); // convert euler (in radians) to a dest 4x4 matrix (translation set to zero) void fm_getAABB(uint32_t vcount,const float *points,uint32_t pstride,float bmin[3],float bmax[3]); void fm_getAABB(uint32_t vcount,const double *points,uint32_t pstride,double bmin[3],double bmax[3]); void fm_getAABBCenter(const float bmin[3],const float bmax[3],float center[3]); void fm_getAABBCenter(const double bmin[3],const double bmax[3],double center[3]); void fm_transformAABB(const float bmin[3],const float bmax[3],const float matrix[16],float tbmin[3],float tbmax[3]); void fm_transformAABB(const double bmin[3],const double bmax[3],const double matrix[16],double tbmin[3],double tbmax[3]); void fm_eulerToQuat(float x,float y,float z,float quat[4]); // convert euler angles to quaternion. void fm_eulerToQuat(double x,double y,double z,double quat[4]); // convert euler angles to quaternion. 
void fm_quatToEuler(const float quat[4],float &ax,float &ay,float &az); void fm_quatToEuler(const double quat[4],double &ax,double &ay,double &az); void fm_eulerToQuat(const float euler[3],float quat[4]); // convert euler angles to quaternion. Angles must be radians not degrees! void fm_eulerToQuat(const double euler[3],double quat[4]); // convert euler angles to quaternion. void fm_scale(float x,float y,float z,float matrix[16]); // apply scale to the matrix. void fm_scale(double x,double y,double z,double matrix[16]); // apply scale to the matrix. void fm_eulerToQuatDX(float x,float y,float z,float quat[4]); // convert euler angles to quaternion using the fucked up DirectX method void fm_eulerToQuatDX(double x,double y,double z,double quat[4]); // convert euler angles to quaternion using the fucked up DirectX method void fm_eulerToMatrixDX(float x,float y,float z,float matrix[16]); // convert euler angles to quaternion using the fucked up DirectX method. void fm_eulerToMatrixDX(double x,double y,double z,double matrix[16]); // convert euler angles to quaternion using the fucked up DirectX method. void fm_quatToMatrix(const float quat[4],float matrix[16]); // convert quaterinion rotation to matrix, translation set to zero. void fm_quatToMatrix(const double quat[4],double matrix[16]); // convert quaterinion rotation to matrix, translation set to zero. void fm_quatRotate(const float quat[4],const float v[3],float r[3]); // rotate a vector directly by a quaternion. void fm_quatRotate(const double quat[4],const double v[3],double r[3]); // rotate a vector directly by a quaternion. 
void fm_getTranslation(const float matrix[16],float t[3]); void fm_getTranslation(const double matrix[16],double t[3]); void fm_setTranslation(const float *translation,float matrix[16]); void fm_setTranslation(const double *translation,double matrix[16]); void fm_multiplyQuat(const float *qa,const float *qb,float *quat); void fm_multiplyQuat(const double *qa,const double *qb,double *quat); void fm_matrixToQuat(const float matrix[16],float quat[4]); // convert the 3x3 portion of a 4x4 matrix into a quaterion as x,y,z,w void fm_matrixToQuat(const double matrix[16],double quat[4]); // convert the 3x3 portion of a 4x4 matrix into a quaterion as x,y,z,w float fm_sphereVolume(float radius); // return's the volume of a sphere of this radius (4/3 PI * R cubed ) double fm_sphereVolume(double radius); // return's the volume of a sphere of this radius (4/3 PI * R cubed ) float fm_cylinderVolume(float radius,float h); double fm_cylinderVolume(double radius,double h); float fm_capsuleVolume(float radius,float h); double fm_capsuleVolume(double radius,double h); float fm_distance(const float p1[3],const float p2[3]); double fm_distance(const double p1[3],const double p2[3]); float fm_distanceSquared(const float p1[3],const float p2[3]); double fm_distanceSquared(const double p1[3],const double p2[3]); float fm_distanceSquaredXZ(const float p1[3],const float p2[3]); double fm_distanceSquaredXZ(const double p1[3],const double p2[3]); float fm_computePlane(const float p1[3],const float p2[3],const float p3[3],float *n); // return D double fm_computePlane(const double p1[3],const double p2[3],const double p3[3],double *n); // return D float fm_distToPlane(const float plane[4],const float pos[3]); // computes the distance of this point from the plane. double fm_distToPlane(const double plane[4],const double pos[3]); // computes the distance of this point from the plane. 
float fm_dot(const float p1[3],const float p2[3]); double fm_dot(const double p1[3],const double p2[3]); void fm_cross(float cross[3],const float a[3],const float b[3]); void fm_cross(double cross[3],const double a[3],const double b[3]); void fm_computeNormalVector(float n[3],const float p1[3],const float p2[3]); // as P2-P1 normalized. void fm_computeNormalVector(double n[3],const double p1[3],const double p2[3]); // as P2-P1 normalized. bool fm_computeWindingOrder(const float p1[3],const float p2[3],const float p3[3]); // returns true if the triangle is clockwise. bool fm_computeWindingOrder(const double p1[3],const double p2[3],const double p3[3]); // returns true if the triangle is clockwise. float fm_normalize(float n[3]); // normalize this vector and return the distance double fm_normalize(double n[3]); // normalize this vector and return the distance float fm_normalizeQuat(float n[4]); // normalize this quat double fm_normalizeQuat(double n[4]); // normalize this quat void fm_matrixMultiply(const float A[16],const float B[16],float dest[16]); void fm_matrixMultiply(const double A[16],const double B[16],double dest[16]); void fm_composeTransform(const float position[3],const float quat[4],const float scale[3],float matrix[16]); void fm_composeTransform(const double position[3],const double quat[4],const double scale[3],double matrix[16]); float fm_computeArea(const float p1[3],const float p2[3],const float p3[3]); double fm_computeArea(const double p1[3],const double p2[3],const double p3[3]); void fm_lerp(const float p1[3],const float p2[3],float dest[3],float lerpValue); void fm_lerp(const double p1[3],const double p2[3],double dest[3],double lerpValue); bool fm_insideTriangleXZ(const float test[3],const float p1[3],const float p2[3],const float p3[3]); bool fm_insideTriangleXZ(const double test[3],const double p1[3],const double p2[3],const double p3[3]); bool fm_insideAABB(const float pos[3],const float bmin[3],const float bmax[3]); bool 
fm_insideAABB(const double pos[3],const double bmin[3],const double bmax[3]); bool fm_insideAABB(const float obmin[3],const float obmax[3],const float tbmin[3],const float tbmax[3]); // test if bounding box tbmin/tmbax is fully inside obmin/obmax bool fm_insideAABB(const double obmin[3],const double obmax[3],const double tbmin[3],const double tbmax[3]); // test if bounding box tbmin/tmbax is fully inside obmin/obmax uint32_t fm_clipTestPoint(const float bmin[3],const float bmax[3],const float pos[3]); uint32_t fm_clipTestPoint(const double bmin[3],const double bmax[3],const double pos[3]); uint32_t fm_clipTestPointXZ(const float bmin[3],const float bmax[3],const float pos[3]); // only tests X and Z, not Y uint32_t fm_clipTestPointXZ(const double bmin[3],const double bmax[3],const double pos[3]); // only tests X and Z, not Y uint32_t fm_clipTestAABB(const float bmin[3],const float bmax[3],const float p1[3],const float p2[3],const float p3[3],uint32_t &andCode); uint32_t fm_clipTestAABB(const double bmin[3],const double bmax[3],const double p1[3],const double p2[3],const double p3[3],uint32_t &andCode); bool fm_lineTestAABBXZ(const float p1[3],const float p2[3],const float bmin[3],const float bmax[3],float &time); bool fm_lineTestAABBXZ(const double p1[3],const double p2[3],const double bmin[3],const double bmax[3],double &time); bool fm_lineTestAABB(const float p1[3],const float p2[3],const float bmin[3],const float bmax[3],float &time); bool fm_lineTestAABB(const double p1[3],const double p2[3],const double bmin[3],const double bmax[3],double &time); void fm_initMinMax(const float p[3],float bmin[3],float bmax[3]); void fm_initMinMax(const double p[3],double bmin[3],double bmax[3]); void fm_initMinMax(float bmin[3],float bmax[3]); void fm_initMinMax(double bmin[3],double bmax[3]); void fm_minmax(const float p[3],float bmin[3],float bmax[3]); // accumulate to a min-max value void fm_minmax(const double p[3],double bmin[3],double bmax[3]); // accumulate to a min-max 
value // Computes the diagonal length of the bounding box and then inflates the bounding box on all sides // by the ratio provided. void fm_inflateMinMax(float bmin[3], float bmax[3], float ratio); void fm_inflateMinMax(double bmin[3], double bmax[3], double ratio); float fm_solveX(const float plane[4],float y,float z); // solve for X given this plane equation and the other two components. double fm_solveX(const double plane[4],double y,double z); // solve for X given this plane equation and the other two components. float fm_solveY(const float plane[4],float x,float z); // solve for Y given this plane equation and the other two components. double fm_solveY(const double plane[4],double x,double z); // solve for Y given this plane equation and the other two components. float fm_solveZ(const float plane[4],float x,float y); // solve for Z given this plane equation and the other two components. double fm_solveZ(const double plane[4],double x,double y); // solve for Z given this plane equation and the other two components. bool fm_computeBestFitPlane(uint32_t vcount, // number of input data points const float *points, // starting address of points array. uint32_t vstride, // stride between input points. const float *weights, // *optional point weighting values. uint32_t wstride, // weight stride for each vertex. float plane[4]); bool fm_computeBestFitPlane(uint32_t vcount, // number of input data points const double *points, // starting address of points array. uint32_t vstride, // stride between input points. const double *weights, // *optional point weighting values. uint32_t wstride, // weight stride for each vertex. double plane[4]); bool fm_computeCentroid(uint32_t vcount, // number of input data points const float *points, // starting address of points array. uint32_t vstride, // stride between input points. float *center); bool fm_computeCentroid(uint32_t vcount, // number of input data points const double *points, // starting address of points array. 
uint32_t vstride, // stride between input points. double *center); float fm_computeBestFitAABB(uint32_t vcount,const float *points,uint32_t pstride,float bmin[3],float bmax[3]); // returns the diagonal distance double fm_computeBestFitAABB(uint32_t vcount,const double *points,uint32_t pstride,double bmin[3],double bmax[3]); // returns the diagonal distance float fm_computeBestFitSphere(uint32_t vcount,const float *points,uint32_t pstride,float center[3]); double fm_computeBestFitSphere(uint32_t vcount,const double *points,uint32_t pstride,double center[3]); bool fm_lineSphereIntersect(const float center[3],float radius,const float p1[3],const float p2[3],float intersect[3]); bool fm_lineSphereIntersect(const double center[3],double radius,const double p1[3],const double p2[3],double intersect[3]); bool fm_intersectRayAABB(const float bmin[3],const float bmax[3],const float pos[3],const float dir[3],float intersect[3]); bool fm_intersectLineSegmentAABB(const float bmin[3],const float bmax[3],const float p1[3],const float p2[3],float intersect[3]); bool fm_lineIntersectsTriangle(const float rayStart[3],const float rayEnd[3],const float p1[3],const float p2[3],const float p3[3],float sect[3]); bool fm_lineIntersectsTriangle(const double rayStart[3],const double rayEnd[3],const double p1[3],const double p2[3],const double p3[3],double sect[3]); bool fm_rayIntersectsTriangle(const float origin[3],const float dir[3],const float v0[3],const float v1[3],const float v2[3],float &t); bool fm_rayIntersectsTriangle(const double origin[3],const double dir[3],const double v0[3],const double v1[3],const double v2[3],double &t); bool fm_raySphereIntersect(const float center[3],float radius,const float pos[3],const float dir[3],float distance,float intersect[3]); bool fm_raySphereIntersect(const double center[3],double radius,const double pos[3],const double dir[3],double distance,double intersect[3]); void fm_catmullRom(float out_vector[3],const float p1[3],const float p2[3],const 
float p3[3],const float *p4, const float s); void fm_catmullRom(double out_vector[3],const double p1[3],const double p2[3],const double p3[3],const double *p4, const double s); bool fm_intersectAABB(const float bmin1[3],const float bmax1[3],const float bmin2[3],const float bmax2[3]); bool fm_intersectAABB(const double bmin1[3],const double bmax1[3],const double bmin2[3],const double bmax2[3]); // computes the rotation quaternion to go from unit-vector v0 to unit-vector v1 void fm_rotationArc(const float v0[3],const float v1[3],float quat[4]); void fm_rotationArc(const double v0[3],const double v1[3],double quat[4]); float fm_distancePointLineSegment(const float Point[3],const float LineStart[3],const float LineEnd[3],float intersection[3],LineSegmentType &type,float epsilon); double fm_distancePointLineSegment(const double Point[3],const double LineStart[3],const double LineEnd[3],double intersection[3],LineSegmentType &type,double epsilon); bool fm_colinear(const double p1[3],const double p2[3],const double p3[3],double epsilon=0.999); // true if these three points in a row are co-linear bool fm_colinear(const float p1[3],const float p2[3],const float p3[3],float epsilon=0.999f); bool fm_colinear(const float a1[3],const float a2[3],const float b1[3],const float b2[3],float epsilon=0.999f); // true if these two line segments are co-linear. bool fm_colinear(const double a1[3],const double a2[3],const double b1[3],const double b2[3],double epsilon=0.999); // true if these two line segments are co-linear. 
enum IntersectResult { IR_DONT_INTERSECT, IR_DO_INTERSECT, IR_COINCIDENT, IR_PARALLEL, }; IntersectResult fm_intersectLineSegments2d(const float a1[3], const float a2[3], const float b1[3], const float b2[3], float intersectionPoint[3]); IntersectResult fm_intersectLineSegments2d(const double a1[3],const double a2[3],const double b1[3],const double b2[3],double intersectionPoint[3]); IntersectResult fm_intersectLineSegments2dTime(const float a1[3], const float a2[3], const float b1[3], const float b2[3],float &t1,float &t2); IntersectResult fm_intersectLineSegments2dTime(const double a1[3],const double a2[3],const double b1[3],const double b2[3],double &t1,double &t2); // Plane-Triangle splitting enum PlaneTriResult { PTR_ON_PLANE, PTR_FRONT, PTR_BACK, PTR_SPLIT, }; PlaneTriResult fm_planeTriIntersection(const float plane[4], // the plane equation in Ax+By+Cz+D format const float *triangle, // the source triangle. uint32_t tstride, // stride in bytes of the input and output *vertices* float epsilon, // the co-planer epsilon value. float *front, // the triangle in front of the uint32_t &fcount, // number of vertices in the 'front' triangle float *back, // the triangle in back of the plane uint32_t &bcount); // the number of vertices in the 'back' triangle. PlaneTriResult fm_planeTriIntersection(const double plane[4], // the plane equation in Ax+By+Cz+D format const double *triangle, // the source triangle. uint32_t tstride, // stride in bytes of the input and output *vertices* double epsilon, // the co-planer epsilon value. double *front, // the triangle in front of the uint32_t &fcount, // number of vertices in the 'front' triangle double *back, // the triangle in back of the plane uint32_t &bcount); // the number of vertices in the 'back' triangle. 
void fm_intersectPointPlane(const float p1[3],const float p2[3],float *split,const float plane[4]); void fm_intersectPointPlane(const double p1[3],const double p2[3],double *split,const double plane[4]); PlaneTriResult fm_getSidePlane(const float p[3],const float plane[4],float epsilon); PlaneTriResult fm_getSidePlane(const double p[3],const double plane[4],double epsilon); void fm_computeBestFitOBB(uint32_t vcount,const float *points,uint32_t pstride,float *sides,float matrix[16],bool bruteForce=true); void fm_computeBestFitOBB(uint32_t vcount,const double *points,uint32_t pstride,double *sides,double matrix[16],bool bruteForce=true); void fm_computeBestFitOBB(uint32_t vcount,const float *points,uint32_t pstride,float *sides,float pos[3],float quat[4],bool bruteForce=true); void fm_computeBestFitOBB(uint32_t vcount,const double *points,uint32_t pstride,double *sides,double pos[3],double quat[4],bool bruteForce=true); void fm_computeBestFitABB(uint32_t vcount,const float *points,uint32_t pstride,float *sides,float pos[3]); void fm_computeBestFitABB(uint32_t vcount,const double *points,uint32_t pstride,double *sides,double pos[3]); //** Note, if the returned capsule height is less than zero, then you must represent it is a sphere of size radius. void fm_computeBestFitCapsule(uint32_t vcount,const float *points,uint32_t pstride,float &radius,float &height,float matrix[16],bool bruteForce=true); void fm_computeBestFitCapsule(uint32_t vcount,const double *points,uint32_t pstride,float &radius,float &height,double matrix[16],bool bruteForce=true); void fm_planeToMatrix(const float plane[4],float matrix[16]); // convert a plane equation to a 4x4 rotation matrix. 
Reference vector is 0,1,0 void fm_planeToQuat(const float plane[4],float quat[4],float pos[3]); // convert a plane equation to a quaternion and translation void fm_planeToMatrix(const double plane[4],double matrix[16]); // convert a plane equation to a 4x4 rotation matrix void fm_planeToQuat(const double plane[4],double quat[4],double pos[3]); // convert a plane equation to a quaternion and translation inline void fm_doubleToFloat3(const double p[3],float t[3]) { t[0] = (float) p[0]; t[1] = (float)p[1]; t[2] = (float)p[2]; }; inline void fm_floatToDouble3(const float p[3],double t[3]) { t[0] = (double)p[0]; t[1] = (double)p[1]; t[2] = (double)p[2]; }; void fm_eulerMatrix(float ax,float ay,float az,float matrix[16]); // convert euler (in radians) to a dest 4x4 matrix (translation set to zero) void fm_eulerMatrix(double ax,double ay,double az,double matrix[16]); // convert euler (in radians) to a dest 4x4 matrix (translation set to zero) float fm_computeMeshVolume(const float *vertices,uint32_t tcount,const uint32_t *indices); double fm_computeMeshVolume(const double *vertices,uint32_t tcount,const uint32_t *indices); #define FM_DEFAULT_GRANULARITY 0.001f // 1 millimeter is the default granularity class fm_VertexIndex { public: virtual uint32_t getIndex(const float pos[3],bool &newPos) = 0; // get welded index for this float vector[3] virtual uint32_t getIndex(const double pos[3],bool &newPos) = 0; // get welded index for this double vector[3] virtual const float * getVerticesFloat(void) const = 0; virtual const double * getVerticesDouble(void) const = 0; virtual const float * getVertexFloat(uint32_t index) const = 0; virtual const double * getVertexDouble(uint32_t index) const = 0; virtual uint32_t getVcount(void) const = 0; virtual bool isDouble(void) const = 0; virtual bool saveAsObj(const char *fname,uint32_t tcount,uint32_t *indices) = 0; }; fm_VertexIndex * fm_createVertexIndex(double granularity,bool snapToGrid); // create an indexed vertex system for doubles 
fm_VertexIndex * fm_createVertexIndex(float granularity,bool snapToGrid); // create an indexed vertext system for floats void fm_releaseVertexIndex(fm_VertexIndex *vindex); class fm_Triangulate { public: virtual const double * triangulate3d(uint32_t pcount, const double *points, uint32_t vstride, uint32_t &tcount, bool consolidate, double epsilon) = 0; virtual const float * triangulate3d(uint32_t pcount, const float *points, uint32_t vstride, uint32_t &tcount, bool consolidate, float epsilon) = 0; }; fm_Triangulate * fm_createTriangulate(void); void fm_releaseTriangulate(fm_Triangulate *t); const float * fm_getPoint(const float *points,uint32_t pstride,uint32_t index); const double * fm_getPoint(const double *points,uint32_t pstride,uint32_t index); bool fm_insideTriangle(float Ax, float Ay,float Bx, float By,float Cx, float Cy,float Px, float Py); bool fm_insideTriangle(double Ax, double Ay,double Bx, double By,double Cx, double Cy,double Px, double Py); float fm_areaPolygon2d(uint32_t pcount,const float *points,uint32_t pstride); double fm_areaPolygon2d(uint32_t pcount,const double *points,uint32_t pstride); bool fm_pointInsidePolygon2d(uint32_t pcount,const float *points,uint32_t pstride,const float *point,uint32_t xindex=0,uint32_t yindex=1); bool fm_pointInsidePolygon2d(uint32_t pcount,const double *points,uint32_t pstride,const double *point,uint32_t xindex=0,uint32_t yindex=1); uint32_t fm_consolidatePolygon(uint32_t pcount,const float *points,uint32_t pstride,float *dest,float epsilon=0.999999f); // collapses co-linear edges. uint32_t fm_consolidatePolygon(uint32_t pcount,const double *points,uint32_t pstride,double *dest,double epsilon=0.999999); // collapses co-linear edges. 
bool fm_computeSplitPlane(uint32_t vcount,const double *vertices,uint32_t tcount,const uint32_t *indices,double *plane); bool fm_computeSplitPlane(uint32_t vcount,const float *vertices,uint32_t tcount,const uint32_t *indices,float *plane); void fm_nearestPointInTriangle(const float *pos,const float *p1,const float *p2,const float *p3,float *nearest); void fm_nearestPointInTriangle(const double *pos,const double *p1,const double *p2,const double *p3,double *nearest); float fm_areaTriangle(const float *p1,const float *p2,const float *p3); double fm_areaTriangle(const double *p1,const double *p2,const double *p3); void fm_subtract(const float *A,const float *B,float *diff); // compute A-B and store the result in 'diff' void fm_subtract(const double *A,const double *B,double *diff); // compute A-B and store the result in 'diff' void fm_multiply(float *A,float scaler); void fm_multiply(double *A,double scaler); void fm_add(const float *A,const float *B,float *sum); void fm_add(const double *A,const double *B,double *sum); void fm_copy3(const float *source,float *dest); void fm_copy3(const double *source,double *dest); // re-indexes an indexed triangle mesh but drops unused vertices. The output_indices can be the same pointer as the input indices. // the output_vertices can point to the input vertices if you desire. The output_vertices buffer should be at least the same size // is the input buffer. The routine returns the new vertex count after re-indexing. uint32_t fm_copyUniqueVertices(uint32_t vcount,const float *input_vertices,float *output_vertices,uint32_t tcount,const uint32_t *input_indices,uint32_t *output_indices); uint32_t fm_copyUniqueVertices(uint32_t vcount,const double *input_vertices,double *output_vertices,uint32_t tcount,const uint32_t *input_indices,uint32_t *output_indices); bool fm_isMeshCoplanar(uint32_t tcount,const uint32_t *indices,const float *vertices,bool doubleSided); // returns true if this collection of indexed triangles are co-planar! 
bool fm_isMeshCoplanar(uint32_t tcount,const uint32_t *indices,const double *vertices,bool doubleSided); // returns true if this collection of indexed triangles are co-planar! bool fm_samePlane(const float p1[4],const float p2[4],float normalEpsilon=0.01f,float dEpsilon=0.001f,bool doubleSided=false); // returns true if these two plane equations are identical within an epsilon bool fm_samePlane(const double p1[4],const double p2[4],double normalEpsilon=0.01,double dEpsilon=0.001,bool doubleSided=false); void fm_OBBtoAABB(const float obmin[3],const float obmax[3],const float matrix[16],float abmin[3],float abmax[3]); // a utility class that will tessellate a mesh. class fm_Tesselate { public: virtual const uint32_t * tesselate(fm_VertexIndex *vindex,uint32_t tcount,const uint32_t *indices,float longEdge,uint32_t maxDepth,uint32_t &outcount) = 0; }; fm_Tesselate * fm_createTesselate(void); void fm_releaseTesselate(fm_Tesselate *t); void fm_computeMeanNormals(uint32_t vcount, // the number of vertices const float *vertices, // the base address of the vertex position data. uint32_t vstride, // the stride between position data. float *normals, // the base address of the destination for mean vector normals uint32_t nstride, // the stride between normals uint32_t tcount, // the number of triangles const uint32_t *indices); // the triangle indices void fm_computeMeanNormals(uint32_t vcount, // the number of vertices const double *vertices, // the base address of the vertex position data. uint32_t vstride, // the stride between position data. 
double *normals, // the base address of the destination for mean vector normals uint32_t nstride, // the stride between normals uint32_t tcount, // the number of triangles const uint32_t *indices); // the triangle indices bool fm_isValidTriangle(const float *p1,const float *p2,const float *p3,float epsilon=0.00001f); bool fm_isValidTriangle(const double *p1,const double *p2,const double *p3,double epsilon=0.00001f); }; // end of namespace #endif
30,412
C
58.86811
206
0.705478
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/VHACD/inc/vhacdCircularList.h
/* Copyright (c) 2011 Khaled Mamou (kmamou at gmail dot com) All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. The names of the contributors may not be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #pragma once #ifndef VHACD_CIRCULAR_LIST_H #define VHACD_CIRCULAR_LIST_H #include <stdlib.h> namespace VHACD { //! CircularListElement class. template <typename T> class CircularListElement { public: T& GetData() { return m_data; } const T& GetData() const { return m_data; } CircularListElement<T>*& GetNext() { return m_next; } CircularListElement<T>*& GetPrev() { return m_prev; } const CircularListElement<T>*& GetNext() const { return m_next; } const CircularListElement<T>*& GetPrev() const { return m_prev; } //! 
Constructor CircularListElement(const T& data) { m_data = data; } CircularListElement(void) {} //! Destructor ~CircularListElement(void) {} private: T m_data; CircularListElement<T>* m_next; CircularListElement<T>* m_prev; CircularListElement(const CircularListElement& rhs); }; //! CircularList class. template <typename T> class CircularList { public: CircularListElement<T>*& GetHead() { return m_head; } const CircularListElement<T>* GetHead() const { return m_head; } bool IsEmpty() const { return (m_size == 0); } size_t GetSize() const { return m_size; } const T& GetData() const { return m_head->GetData(); } T& GetData() { return m_head->GetData(); } bool Delete(); bool Delete(CircularListElement<T>* element); CircularListElement<T>* Add(const T* data = 0); CircularListElement<T>* Add(const T& data); bool Next(); bool Prev(); void Clear() { while (Delete()) ; }; const CircularList& operator=(const CircularList& rhs); //! Constructor CircularList() { m_head = 0; m_size = 0; } CircularList(const CircularList& rhs); //! Destructor ~CircularList(void) { Clear(); }; private: CircularListElement<T>* m_head; //!< a pointer to the head of the circular list size_t m_size; //!< number of element in the circular list }; } #include "vhacdCircularList.inl" #endif // VHACD_CIRCULAR_LIST_H
3,512
C
43.468354
756
0.711845
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/VHACD/inc/btAlignedObjectArray.h
/* Bullet Continuous Collision Detection and Physics Library Copyright (c) 2003-2006 Erwin Coumans http://continuousphysics.com/Bullet/ This software is provided 'as-is', without any express or implied warranty. In no event will the authors be held liable for any damages arising from the use of this software. Permission is granted to anyone to use this software for any purpose, including commercial applications, and to alter it and redistribute it freely, subject to the following restrictions: 1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required. 2. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software. 3. This notice may not be removed or altered from any source distribution. */ #ifndef BT_OBJECT_ARRAY__ #define BT_OBJECT_ARRAY__ #include "btAlignedAllocator.h" #include "btScalar.h" // has definitions like SIMD_FORCE_INLINE ///If the platform doesn't support placement new, you can disable BT_USE_PLACEMENT_NEW ///then the btAlignedObjectArray doesn't support objects with virtual methods, and non-trivial constructors/destructors ///You can enable BT_USE_MEMCPY, then swapping elements in the array will use memcpy instead of operator= ///see discussion here: http://continuousphysics.com/Bullet/phpBB2/viewtopic.php?t=1231 and ///http://www.continuousphysics.com/Bullet/phpBB2/viewtopic.php?t=1240 #define BT_USE_PLACEMENT_NEW 1 //#define BT_USE_MEMCPY 1 //disable, because it is cumbersome to find out for each platform where memcpy is defined. It can be in <memory.h> or <string.h> or otherwise... 
#define BT_ALLOW_ARRAY_COPY_OPERATOR // enabling this can accidently perform deep copies of data if you are not careful #ifdef BT_USE_MEMCPY #include <memory.h> #include <string.h> #endif //BT_USE_MEMCPY #ifdef BT_USE_PLACEMENT_NEW #include <new> //for placement new #endif //BT_USE_PLACEMENT_NEW ///The btAlignedObjectArray template class uses a subset of the stl::vector interface for its methods ///It is developed to replace stl::vector to avoid portability issues, including STL alignment issues to add SIMD/SSE data template <typename T> //template <class T> class btAlignedObjectArray { btAlignedAllocator<T, 16> m_allocator; int32_t m_size; int32_t m_capacity; T* m_data; //PCK: added this line bool m_ownsMemory; #ifdef BT_ALLOW_ARRAY_COPY_OPERATOR public: SIMD_FORCE_INLINE btAlignedObjectArray<T>& operator=(const btAlignedObjectArray<T>& other) { copyFromArray(other); return *this; } #else //BT_ALLOW_ARRAY_COPY_OPERATOR private: SIMD_FORCE_INLINE btAlignedObjectArray<T>& operator=(const btAlignedObjectArray<T>& other); #endif //BT_ALLOW_ARRAY_COPY_OPERATOR protected: SIMD_FORCE_INLINE int32_t allocSize(int32_t size) { return (size ? 
size * 2 : 1); } SIMD_FORCE_INLINE void copy(int32_t start, int32_t end, T* dest) const { int32_t i; for (i = start; i < end; ++i) #ifdef BT_USE_PLACEMENT_NEW new (&dest[i]) T(m_data[i]); #else dest[i] = m_data[i]; #endif //BT_USE_PLACEMENT_NEW } SIMD_FORCE_INLINE void init() { //PCK: added this line m_ownsMemory = true; m_data = 0; m_size = 0; m_capacity = 0; } SIMD_FORCE_INLINE void destroy(int32_t first, int32_t last) { int32_t i; for (i = first; i < last; i++) { m_data[i].~T(); } } SIMD_FORCE_INLINE void* allocate(int32_t size) { if (size) return m_allocator.allocate(size); return 0; } SIMD_FORCE_INLINE void deallocate() { if (m_data) { //PCK: enclosed the deallocation in this block if (m_ownsMemory) { m_allocator.deallocate(m_data); } m_data = 0; } } public: btAlignedObjectArray() { init(); } ~btAlignedObjectArray() { clear(); } ///Generally it is best to avoid using the copy constructor of an btAlignedObjectArray, and use a (const) reference to the array instead. btAlignedObjectArray(const btAlignedObjectArray& otherArray) { init(); int32_t otherSize = otherArray.size(); resize(otherSize); otherArray.copy(0, otherSize, m_data); } /// return the number of elements in the array SIMD_FORCE_INLINE int32_t size() const { return m_size; } SIMD_FORCE_INLINE const T& at(int32_t n) const { btAssert(n >= 0); btAssert(n < size()); return m_data[n]; } SIMD_FORCE_INLINE T& at(int32_t n) { btAssert(n >= 0); btAssert(n < size()); return m_data[n]; } SIMD_FORCE_INLINE const T& operator[](int32_t n) const { btAssert(n >= 0); btAssert(n < size()); return m_data[n]; } SIMD_FORCE_INLINE T& operator[](int32_t n) { btAssert(n >= 0); btAssert(n < size()); return m_data[n]; } ///clear the array, deallocated memory. Generally it is better to use array.resize(0), to reduce performance overhead of run-time memory (de)allocations. 
SIMD_FORCE_INLINE void clear() { destroy(0, size()); deallocate(); init(); } SIMD_FORCE_INLINE void pop_back() { btAssert(m_size > 0); m_size--; m_data[m_size].~T(); } ///resize changes the number of elements in the array. If the new size is larger, the new elements will be constructed using the optional second argument. ///when the new number of elements is smaller, the destructor will be called, but memory will not be freed, to reduce performance overhead of run-time memory (de)allocations. SIMD_FORCE_INLINE void resize(int32_t newsize, const T& fillData = T()) { int32_t curSize = size(); if (newsize < curSize) { for (int32_t i = newsize; i < curSize; i++) { m_data[i].~T(); } } else { if (newsize > size()) { reserve(newsize); } #ifdef BT_USE_PLACEMENT_NEW for (int32_t i = curSize; i < newsize; i++) { new (&m_data[i]) T(fillData); } #endif //BT_USE_PLACEMENT_NEW } m_size = newsize; } SIMD_FORCE_INLINE T& expandNonInitializing() { int32_t sz = size(); if (sz == capacity()) { reserve(allocSize(size())); } m_size++; return m_data[sz]; } SIMD_FORCE_INLINE T& expand(const T& fillValue = T()) { int32_t sz = size(); if (sz == capacity()) { reserve(allocSize(size())); } m_size++; #ifdef BT_USE_PLACEMENT_NEW new (&m_data[sz]) T(fillValue); //use the in-place new (not really allocating heap memory) #endif return m_data[sz]; } SIMD_FORCE_INLINE void push_back(const T& _Val) { int32_t sz = size(); if (sz == capacity()) { reserve(allocSize(size())); } #ifdef BT_USE_PLACEMENT_NEW new (&m_data[m_size]) T(_Val); #else m_data[size()] = _Val; #endif //BT_USE_PLACEMENT_NEW m_size++; } /// return the pre-allocated (reserved) elements, this is at least as large as the total number of elements,see size() and reserve() SIMD_FORCE_INLINE int32_t capacity() const { return m_capacity; } SIMD_FORCE_INLINE void reserve(int32_t _Count) { // determine new minimum length of allocated storage if (capacity() < _Count) { // not enough room, reallocate T* s = (T*)allocate(_Count); copy(0, size(), 
s); destroy(0, size()); deallocate(); //PCK: added this line m_ownsMemory = true; m_data = s; m_capacity = _Count; } } class less { public: bool operator()(const T& a, const T& b) { return (a < b); } }; template <typename L> void quickSortInternal(const L& CompareFunc, int32_t lo, int32_t hi) { // lo is the lower index, hi is the upper index // of the region of array a that is to be sorted int32_t i = lo, j = hi; T x = m_data[(lo + hi) / 2]; // partition do { while (CompareFunc(m_data[i], x)) i++; while (CompareFunc(x, m_data[j])) j--; if (i <= j) { swap(i, j); i++; j--; } } while (i <= j); // recursion if (lo < j) quickSortInternal(CompareFunc, lo, j); if (i < hi) quickSortInternal(CompareFunc, i, hi); } template <typename L> void quickSort(const L& CompareFunc) { //don't sort 0 or 1 elements if (size() > 1) { quickSortInternal(CompareFunc, 0, size() - 1); } } ///heap sort from http://www.csse.monash.edu.au/~lloyd/tildeAlgDS/Sort/Heap/ template <typename L> void downHeap(T* pArr, int32_t k, int32_t n, const L& CompareFunc) { /* PRE: a[k+1..N] is a heap */ /* POST: a[k..N] is a heap */ T temp = pArr[k - 1]; /* k has child(s) */ while (k <= n / 2) { int32_t child = 2 * k; if ((child < n) && CompareFunc(pArr[child - 1], pArr[child])) { child++; } /* pick larger child */ if (CompareFunc(temp, pArr[child - 1])) { /* move child up */ pArr[k - 1] = pArr[child - 1]; k = child; } else { break; } } pArr[k - 1] = temp; } /*downHeap*/ void swap(int32_t index0, int32_t index1) { #ifdef BT_USE_MEMCPY char temp[sizeof(T)]; memcpy(temp, &m_data[index0], sizeof(T)); memcpy(&m_data[index0], &m_data[index1], sizeof(T)); memcpy(&m_data[index1], temp, sizeof(T)); #else T temp = m_data[index0]; m_data[index0] = m_data[index1]; m_data[index1] = temp; #endif //BT_USE_PLACEMENT_NEW } template <typename L> void heapSort(const L& CompareFunc) { /* sort a[0..N-1], N.B. 
0 to N-1 */ int32_t k; int32_t n = m_size; for (k = n / 2; k > 0; k--) { downHeap(m_data, k, n, CompareFunc); } /* a[1..N] is now a heap */ while (n >= 1) { swap(0, n - 1); /* largest of a[0..n-1] */ n = n - 1; /* restore a[1..i-1] heap */ downHeap(m_data, 1, n, CompareFunc); } } ///non-recursive binary search, assumes sorted array int32_t findBinarySearch(const T& key) const { int32_t first = 0; int32_t last = size() - 1; //assume sorted array while (first <= last) { int32_t mid = (first + last) / 2; // compute mid point. if (key > m_data[mid]) first = mid + 1; // repeat search in top half. else if (key < m_data[mid]) last = mid - 1; // repeat search in bottom half. else return mid; // found it. return position ///// } return size(); // failed to find key } int32_t findLinearSearch(const T& key) const { int32_t index = size(); int32_t i; for (i = 0; i < size(); i++) { if (m_data[i] == key) { index = i; break; } } return index; } void remove(const T& key) { int32_t findIndex = findLinearSearch(key); if (findIndex < size()) { swap(findIndex, size() - 1); pop_back(); } } //PCK: whole function void initializeFromBuffer(void* buffer, int32_t size, int32_t capacity) { clear(); m_ownsMemory = false; m_data = (T*)buffer; m_size = size; m_capacity = capacity; } void copyFromArray(const btAlignedObjectArray& otherArray) { int32_t otherSize = otherArray.size(); resize(otherSize); otherArray.copy(0, otherSize, m_data); } }; #endif //BT_OBJECT_ARRAY__
12,640
C
27.153675
243
0.55712
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/VHACD/inc/vhacdICHull.h
/* Copyright (c) 2011 Khaled Mamou (kmamou at gmail dot com) All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. The names of the contributors may not be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #pragma once #ifndef VHACD_ICHULL_H #define VHACD_ICHULL_H #include "vhacdManifoldMesh.h" #include "vhacdVector.h" namespace VHACD { //! Incremental Convex Hull algorithm (cf. http://cs.smith.edu/~orourke/books/ftp.html ). enum ICHullError { ICHullErrorOK = 0, ICHullErrorCoplanarPoints, ICHullErrorNoVolume, ICHullErrorInconsistent, ICHullErrorNotEnoughPoints }; class ICHull { public: static const double sc_eps; //! bool IsFlat() { return m_isFlat; } //! Returns the computed mesh TMMesh& GetMesh() { return m_mesh; } //! 
Add one point to the convex-hull bool AddPoint(const Vec3<double>& point) { return AddPoints(&point, 1); } //! Add one point to the convex-hull bool AddPoint(const Vec3<double>& point, int32_t id); //! Add points to the convex-hull bool AddPoints(const Vec3<double>* points, size_t nPoints); //! ICHullError Process(); //! ICHullError Process(const uint32_t nPointsCH, const double minVolume = 0.0); //! bool IsInside(const Vec3<double>& pt0, const double eps = 0.0); //! const ICHull& operator=(ICHull& rhs); //! Constructor ICHull(); //! Destructor ~ICHull(void){}; private: //! DoubleTriangle builds the initial double triangle. It first finds 3 noncollinear points and makes two faces out of them, in opposite order. It then finds a fourth point that is not coplanar with that face. The vertices are stored in the face structure in counterclockwise order so that the volume between the face and the point is negative. Lastly, the 3 newfaces to the fourth point are constructed and the data structures are cleaned up. ICHullError DoubleTriangle(); //! MakeFace creates a new face structure from three vertices (in ccw order). It returns a pointer to the face. CircularListElement<TMMTriangle>* MakeFace(CircularListElement<TMMVertex>* v0, CircularListElement<TMMVertex>* v1, CircularListElement<TMMVertex>* v2, CircularListElement<TMMTriangle>* fold); //! CircularListElement<TMMTriangle>* MakeConeFace(CircularListElement<TMMEdge>* e, CircularListElement<TMMVertex>* v); //! bool ProcessPoint(); //! bool ComputePointVolume(double& totalVolume, bool markVisibleFaces); //! bool FindMaxVolumePoint(const double minVolume = 0.0); //! bool CleanEdges(); //! bool CleanVertices(uint32_t& addedPoints); //! bool CleanTriangles(); //! bool CleanUp(uint32_t& addedPoints); //! 
bool MakeCCW(CircularListElement<TMMTriangle>* f, CircularListElement<TMMEdge>* e, CircularListElement<TMMVertex>* v); void Clear(); private: static const int32_t sc_dummyIndex; TMMesh m_mesh; SArray<CircularListElement<TMMEdge>*> m_edgesToDelete; SArray<CircularListElement<TMMEdge>*> m_edgesToUpdate; SArray<CircularListElement<TMMTriangle>*> m_trianglesToDelete; Vec3<double> m_normal; bool m_isFlat; ICHull(const ICHull& rhs); }; } #endif // VHACD_ICHULL_H
4,551
C
44.979798
756
0.728631
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/VHACD/public/VHACD.h
/* Copyright (c) 2011 Khaled Mamou (kmamou at gmail dot com) All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. The names of the contributors may not be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #pragma once #ifndef VHACD_H #define VHACD_H #define VHACD_VERSION_MAJOR 2 #define VHACD_VERSION_MINOR 3 // Changes for version 2.3 // // m_gamma : Has been removed. This used to control the error metric to merge convex hulls. Now it uses the 'm_maxConvexHulls' value instead. // m_maxConvexHulls : This is the maximum number of convex hulls to produce from the merge operation; replaces 'm_gamma'. // // Note that decomposition depth is no longer a user provided value. 
It is now derived from the // maximum number of hulls requested. // // As a convenience to the user, each convex hull produced now includes the volume of the hull as well as it's center. // // This version supports a convenience method to automatically make V-HACD run asynchronously in a background thread. // To get a fully asynchronous version, call 'CreateVHACD_ASYNC' instead of 'CreateVHACD'. You get the same interface however, // now when computing convex hulls, it is no longer a blocking operation. All callback messages are still returned // in the application's thread so you don't need to worry about mutex locks or anything in that case. // To tell if the operation is complete, the application should call 'IsReady'. This will return true if // the last approximation operation is complete and will dispatch any pending messages. // If you call 'Compute' while a previous operation was still running, it will automatically cancel the last request // and begin a new one. To cancel a currently running approximation just call 'Cancel'. 
#include <stdint.h> namespace VHACD { class IVHACD { public: class IUserCallback { public: virtual ~IUserCallback(){}; virtual void Update(const double overallProgress, const double stageProgress, const double operationProgress, const char* const stage, const char* const operation) = 0; }; class IUserLogger { public: virtual ~IUserLogger(){}; virtual void Log(const char* const msg) = 0; }; class ConvexHull { public: double* m_points; uint32_t* m_triangles; uint32_t m_nPoints; uint32_t m_nTriangles; double m_volume; double m_center[3]; }; class Parameters { public: Parameters(void) { Init(); } void Init(void) { m_resolution = 100000; m_concavity = 0.001; m_planeDownsampling = 4; m_convexhullDownsampling = 4; m_alpha = 0.05; m_beta = 0.05; m_pca = 0; m_mode = 0; // 0: voxel-based (recommended), 1: tetrahedron-based m_maxNumVerticesPerCH = 64; m_minVolumePerCH = 0.0001; m_callback = 0; m_logger = 0; m_convexhullApproximation = true; m_oclAcceleration = true; m_maxConvexHulls = 1024; m_projectHullVertices = true; // This will project the output convex hull vertices onto the original source mesh to increase the floating point accuracy of the results } double m_concavity; double m_alpha; double m_beta; double m_minVolumePerCH; IUserCallback* m_callback; IUserLogger* m_logger; uint32_t m_resolution; uint32_t m_maxNumVerticesPerCH; uint32_t m_planeDownsampling; uint32_t m_convexhullDownsampling; uint32_t m_pca; uint32_t m_mode; uint32_t m_convexhullApproximation; uint32_t m_oclAcceleration; uint32_t m_maxConvexHulls; bool m_projectHullVertices; }; class Constraint { public: uint32_t mHullA; // Convex Hull A index uint32_t mHullB; // Convex Hull B index double mConstraintPoint[3]; // The point of intersection between the two convex hulls }; virtual void Cancel() = 0; virtual bool Compute(const float* const points, const uint32_t countPoints, const uint32_t* const triangles, const uint32_t countTriangles, const Parameters& params) = 0; virtual bool Compute(const double* 
const points, const uint32_t countPoints, const uint32_t* const triangles, const uint32_t countTriangles, const Parameters& params) = 0; virtual uint32_t GetNConvexHulls() const = 0; virtual void GetConvexHull(const uint32_t index, ConvexHull& ch) const = 0; virtual void Clean(void) = 0; // release internally allocated memory virtual void Release(void) = 0; // release IVHACD virtual bool OCLInit(void* const oclDevice, IUserLogger* const logger = 0) = 0; virtual bool OCLRelease(IUserLogger* const logger = 0) = 0; // Will compute the center of mass of the convex hull decomposition results and return it // in 'centerOfMass'. Returns false if the center of mass could not be computed. virtual bool ComputeCenterOfMass(double centerOfMass[3]) const = 0; // Will analyze the HACD results and compute the constraints solutions. // It will analyze the point at which any two convex hulls touch each other and // return the total number of constraint pairs found virtual uint32_t ComputeConstraints(void) = 0; // Returns a pointer to the constraint index; null if the index is not valid or // the user did not previously call 'ComputeConstraints' virtual const Constraint *GetConstraint(uint32_t index) const = 0; // In synchronous mode (non-multi-threaded) the state is always 'ready' // In asynchronous mode, this returns true if the background thread is not still actively computing // a new solution. In an asynchronous config the 'IsReady' call will report any update or log // messages in the caller's current thread. virtual bool IsReady(void) const { return true; } protected: virtual ~IVHACD(void) {} }; IVHACD* CreateVHACD(void); IVHACD* CreateVHACD_ASYNC(void); } #endif // VHACD_H
7,574
C
43.298245
756
0.686163
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/assetutils/NvBlastExtAssetUtils.cpp
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//  * Redistributions of source code must retain the above copyright
//    notice, this list of conditions and the following disclaimer.
//  * Redistributions in binary form must reproduce the above copyright
//    notice, this list of conditions and the following disclaimer in the
//    documentation and/or other materials provided with the distribution.
//  * Neither the name of NVIDIA CORPORATION nor the names of its
//    contributors may be used to endorse or promote products derived
//    from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved.

#include "NvBlastExtAssetUtils.h"
#include "NvBlast.h"
#include "NvBlastIndexFns.h"
#include "NvBlastMemory.h"
#include "NvBlastGlobals.h"
#include "math.h"

using namespace Nv::Blast;


/**
Fill the chunk and bond descriptors from an asset.

The chunk descriptors are written in the asset's chunk order, with support flags
reconstructed by walking the asset's support graph.  Bond descriptors are copied
verbatim and then have their chunk index pairs restored from the graph adjacency.

\param[out] chunkDescsWritten   the number of chunk descriptors written to chunkDescs
\param[out] bondDescsWritten    the number of bond descriptors written to bondDescs
\param[out] chunkDescs          user-supplied buffer of NvBlastChunkDesc.  Size must be at least NvBlastAssetGetChunkCount(asset, logFn)
\param[out] bondDescs           user-supplied buffer of NvBlastBondDesc.  Size must be at least NvBlastAssetGetBondCount(asset, logFn)
\param[in]  asset               asset from which to extract descriptors
*/
static void fillChunkAndBondDescriptorsFromAsset
(
    uint32_t& chunkDescsWritten,
    uint32_t& bondDescsWritten,
    NvBlastChunkDesc* chunkDescs,
    NvBlastBondDesc* bondDescs,
    const NvBlastAsset* asset
)
{
    chunkDescsWritten = 0;
    bondDescsWritten = 0;

    // Chunk descs: copy geometry/hierarchy data straight from the asset's chunks
    const uint32_t assetChunkCount = NvBlastAssetGetChunkCount(asset, logLL);
    const NvBlastChunk* assetChunk = NvBlastAssetGetChunks(asset, logLL);
    for (uint32_t i = 0; i < assetChunkCount; ++i, ++assetChunk)
    {
        NvBlastChunkDesc& chunkDesc = chunkDescs[chunkDescsWritten++];
        memcpy(chunkDesc.centroid, assetChunk->centroid, sizeof(float) * 3);
        chunkDesc.volume = assetChunk->volume;
        chunkDesc.parentChunkDescIndex = assetChunk->parentChunkIndex;
        chunkDesc.flags = 0;    // To be filled in below
        chunkDesc.userData = assetChunk->userData;
    }

    // Bond descs: the NvBlastBond payload is copied as-is; chunkIndices are restored below
    const uint32_t assetBondCount = NvBlastAssetGetBondCount(asset, logLL);
    const NvBlastBond* assetBond = NvBlastAssetGetBonds(asset, logLL);
    for (uint32_t i = 0; i < assetBondCount; ++i, ++assetBond)
    {
        NvBlastBondDesc& bondDesc = bondDescs[bondDescsWritten++];
        memcpy(&bondDesc.bond, assetBond, sizeof(NvBlastBond));
    }

    // Walk the graph and restore connection descriptors
    const NvBlastSupportGraph graph = NvBlastAssetGetSupportGraph(asset, logLL);
    for (uint32_t i = 0; i < graph.nodeCount; ++i)
    {
        // NOTE(review): graph.chunkIndices holds uint32_t values; storing in int32_t
        // relies on invalidIndex being the all-ones bit pattern in either signedness —
        // works, but confirm before changing the index type here.
        const int32_t currentChunk = graph.chunkIndices[i];
        if (isInvalidIndex(currentChunk))
        {
            continue;   // The world node has no chunk; it contributes no support flag
        }
        chunkDescs[currentChunk].flags |= NvBlastChunkDesc::SupportFlag;    // Filling in chunk flags here

        // Each outgoing graph edge names a bond; restore both endpoint chunk indices.
        // Each bond is visited from both of its nodes, writing chunkIndices[0]/[1]
        // from each side in turn.
        for (uint32_t j = graph.adjacencyPartition[i]; j < graph.adjacencyPartition[i + 1]; ++j)
        {
            NvBlastBondDesc& bondDesc = bondDescs[graph.adjacentBondIndices[j]];
            bondDesc.chunkIndices[0] = currentChunk;
            const uint32_t adjacentChunkIndex = graph.chunkIndices[graph.adjacentNodeIndices[j]];
            bondDesc.chunkIndices[1] = adjacentChunkIndex;
        }
    }
}


/**
Scale a 3-vector v in-place.

\param[in,out]  v   The vector to scale.
\param[in]      s   The scale.  Represents the diagonal elements of a diagonal matrix.

The result will be v <- s*v.
*/
static inline void scale(NvcVec3& v, const NvcVec3& s)
{
    v.x *= s.x;
    v.y *= s.y;
    v.z *= s.z;
}


/**
Rotate a 3-vector v in-place using a rotation represented by a quaternion q.

\param[in,out]  v   The vector to rotate.
\param[in]      q   The quaternion representation the rotation.  The format of q is { x, y, z, w } where
                    (x,y,z) is the vector part and w is the scalar part.  The quaternion q MUST be normalized.
*/
static inline void rotate(NvcVec3& v, const NvcQuat& q)
{
    // Expanded form of v' = q * v * conj(q), avoiding explicit quaternion products
    const float vx = 2.0f * v.x;
    const float vy = 2.0f * v.y;
    const float vz = 2.0f * v.z;
    const float w2 = q.w * q.w - 0.5f;
    const float dot2 = (q.x * vx + q.y * vy + q.z * vz);
    v.x = vx * w2 + (q.y * vz - q.z * vy) * q.w + q.x * dot2;
    v.y = vy * w2 + (q.z * vx - q.x * vz) * q.w + q.y * dot2;
    v.z = vz * w2 + (q.x * vy - q.y * vx) * q.w + q.z * dot2;
}


/**
Translate a 3-vector v in-place.

\param[in,out]  v   The vector to translate.
\param[in]      t   The translation.

The result will be v <- v+t.
*/
static inline void translate(NvcVec3& v, const NvcVec3& t)
{
    v.x += t.x;
    v.y += t.y;
    v.z += t.z;
}


// Clone an asset, appending one "world" bond (second chunk index invalid) for each
// entry of externalBoundChunks.  Returns a newly created asset allocated with
// NVBLAST_ALLOC; the input asset is not modified.
NvBlastAsset* NvBlastExtAssetUtilsAddExternalBonds
(
    const NvBlastAsset* asset,
    const uint32_t* externalBoundChunks,
    uint32_t externalBoundChunkCount,
    const NvcVec3* bondDirections,
    const uint32_t* bondUserData
)
{
    const uint32_t chunkCount = NvBlastAssetGetChunkCount(asset, logLL);
    const uint32_t oldBondCount = NvBlastAssetGetBondCount(asset, logLL);
    const uint32_t newBondCount = oldBondCount + externalBoundChunkCount;

    // Temporary descriptor buffers: chunk count is unchanged, bonds grow by the external bond count
    NvBlastChunkDesc* chunkDescs = static_cast<NvBlastChunkDesc*>(NVBLAST_ALLOC(chunkCount * sizeof(NvBlastChunkDesc)));
    NvBlastBondDesc* bondDescs = static_cast<NvBlastBondDesc*>(NVBLAST_ALLOC(newBondCount * sizeof(NvBlastBondDesc)));

    // Create chunk descs
    uint32_t chunkDescsWritten;
    uint32_t bondDescsWritten;
    fillChunkAndBondDescriptorsFromAsset(chunkDescsWritten, bondDescsWritten, chunkDescs, bondDescs, asset);

    // Add world bonds
    uint32_t bondCount = oldBondCount;
    for (uint32_t i = 0; i < externalBoundChunkCount; i++)
    {
        NvBlastBondDesc& bondDesc = bondDescs[bondCount++];
        const uint32_t chunkIndex = externalBoundChunks[i];
        bondDesc.chunkIndices[0] = chunkIndex;
        bondDesc.chunkIndices[1] = invalidIndex<uint32_t>();    // Invalid index marks a bond to the world
        memcpy(&bondDesc.bond.normal, bondDirections + i, sizeof(float) * 3);
        bondDesc.bond.area = 1.0f;  // Should be set by user
        // Place the bond centroid at the bound chunk's centroid
        memcpy(&bondDesc.bond.centroid, chunkDescs[chunkIndex].centroid, sizeof(float) * 3);
        bondDesc.bond.userData = bondUserData != nullptr ? bondUserData[i] : 0;
    }

    // Create new asset
    NvBlastAssetDesc assetDesc;
    assetDesc.chunkCount = chunkCount;
    assetDesc.chunkDescs = chunkDescs;
    assetDesc.bondCount = bondCount;
    assetDesc.bondDescs = bondDescs;
    void* scratch = NVBLAST_ALLOC(NvBlastGetRequiredScratchForCreateAsset(&assetDesc, logLL));
    NvBlastAsset* newAsset = NvBlastCreateAsset(NVBLAST_ALLOC(NvBlastGetAssetMemorySize(&assetDesc, logLL)), &assetDesc, scratch, logLL);

    // Free buffers (the created asset owns its own memory; these temporaries are no longer needed)
    NVBLAST_FREE(scratch);
    NVBLAST_FREE(bondDescs);
    NVBLAST_FREE(chunkDescs);

    return newAsset;
}


// Build a descriptor for a single asset by "merging" a one-element component list
// with no transforms and no extra bonds.  The returned descriptor's arrays are
// NVBLAST_ALLOC'd and owned by the caller.
NvBlastAssetDesc NvBlastExtAssetUtilsCreateDesc(const NvBlastAsset* asset)
{
    return NvBlastExtAssetUtilsMergeAssets(&asset, nullptr, nullptr, nullptr, 1, nullptr, 0, nullptr, nullptr, 0);
}


// Merge several component assets into one descriptor, optionally applying a
// scale/rotation/translation per component and adding new bonds between
// components.  scales/rotations/translations may each be null (treated as
// identity).  chunkIndexOffsets (optional, size componentCount) receives each
// component's chunk index base.  If chunkReorderMap is provided and large
// enough, chunks are reordered into a valid descriptor order and the map
// records old->new indices.  The descriptor's arrays are owned by the caller.
NvBlastAssetDesc NvBlastExtAssetUtilsMergeAssets
(
    const NvBlastAsset** components,
    const NvcVec3* scales,
    const NvcQuat* rotations,
    const NvcVec3* translations,
    uint32_t componentCount,
    const NvBlastExtAssetUtilsBondDesc* newBondDescs,
    uint32_t newBondCount,
    uint32_t* chunkIndexOffsets,
    uint32_t* chunkReorderMap,
    uint32_t chunkReorderMapSize
)
{
    // Count the total number of chunks and bonds in the new asset
    uint32_t totalChunkCount = 0;
    uint32_t totalBondCount = newBondCount;
    for (uint32_t c = 0; c < componentCount; ++c)
    {
        totalChunkCount += NvBlastAssetGetChunkCount(components[c], logLL);
        totalBondCount += NvBlastAssetGetBondCount(components[c], logLL);
    }

    // Allocate space for chunk and bond descriptors
    NvBlastChunkDesc* chunkDescs = static_cast<NvBlastChunkDesc*>(NVBLAST_ALLOC(totalChunkCount * sizeof(NvBlastChunkDesc)));
    NvBlastBondDesc* bondDescs = static_cast<NvBlastBondDesc*>(NVBLAST_ALLOC(totalBondCount * sizeof(NvBlastBondDesc)));

    // Create a list of chunk index offsets per component
    uint32_t* offsetStackAlloc = static_cast<uint32_t*>(NvBlastAlloca(componentCount * sizeof(uint32_t)));
    if (chunkIndexOffsets == nullptr)
    {
        chunkIndexOffsets = offsetStackAlloc;   // Use local stack alloc if no array is provided
    }

    // Fill the chunk and bond descriptors from the components
    uint32_t chunkCount = 0;
    uint32_t bondCount = 0;
    for (uint32_t c = 0; c < componentCount; ++c)
    {
        chunkIndexOffsets[c] = chunkCount;
        uint32_t componentChunkCount;
        uint32_t componentBondCount;
        fillChunkAndBondDescriptorsFromAsset(componentChunkCount, componentBondCount, chunkDescs + chunkCount, bondDescs + bondCount, components[c]);

        // Fix chunks' parent indices (shift into the merged index space)
        for (uint32_t i = 0; i < componentChunkCount; ++i)
        {
            if (!isInvalidIndex(chunkDescs[chunkCount + i].parentChunkDescIndex))
            {
                chunkDescs[chunkCount + i].parentChunkDescIndex += chunkCount;
            }
        }

        // Fix bonds' chunk indices (shift into the merged index space, preserving world bonds)
        for (uint32_t i = 0; i < componentBondCount; ++i)
        {
            NvBlastBondDesc& bondDesc = bondDescs[bondCount + i];
            for (int j = 0; j < 2; ++j)
            {
                if (!isInvalidIndex(bondDesc.chunkIndices[j]))
                {
                    bondDesc.chunkIndices[j] += chunkCount;
                }
            }
        }

        // Transform geometric data
        if (scales != nullptr)
        {
            const NvcVec3& S = scales[c];
            // Cofactor (adjugate) diagonal of the scale matrix: normals transform by
            // the cofactor matrix, not by S itself
            NvcVec3 cofS = { S.y * S.z, S.z * S.x, S.x * S.y };
            float absDetS = S.x * S.y * S.z;
            const float sgnDetS = absDetS < 0.0f ? -1.0f : 1.0f;
            absDetS *= sgnDetS;
            for (uint32_t i = 0; i < componentChunkCount; ++i)
            {
                scale(reinterpret_cast<NvcVec3&>(chunkDescs[chunkCount + i].centroid), S);
                chunkDescs[chunkCount + i].volume *= absDetS;   // |det S| keeps volume positive
            }
            for (uint32_t i = 0; i < componentBondCount; ++i)
            {
                NvBlastBond& bond = bondDescs[bondCount + i].bond;
                scale(reinterpret_cast<NvcVec3&>(bond.normal), cofS);
                // The cofactor-scaled normal's length is the area scale factor; fold it
                // into the bond area, then renormalize the normal (flipping if det S < 0)
                float renorm = sqrtf(bond.normal[0] * bond.normal[0] + bond.normal[1] * bond.normal[1] + bond.normal[2] * bond.normal[2]);
                bond.area *= renorm;
                if (renorm != 0)
                {
                    renorm = sgnDetS / renorm;
                    bond.normal[0] *= renorm;
                    bond.normal[1] *= renorm;
                    bond.normal[2] *= renorm;
                }
                scale(reinterpret_cast<NvcVec3&>(bond.centroid), S);
            }
        }
        if (rotations != nullptr)
        {
            for (uint32_t i = 0; i < componentChunkCount; ++i)
            {
                rotate(reinterpret_cast<NvcVec3&>(chunkDescs[chunkCount + i].centroid), rotations[c]);
            }
            for (uint32_t i = 0; i < componentBondCount; ++i)
            {
                NvBlastBond& bond = bondDescs[bondCount + i].bond;
                rotate(reinterpret_cast<NvcVec3&>(bond.normal), rotations[c]);  // Normal can be transformed this way since we aren't scaling
                rotate(reinterpret_cast<NvcVec3&>(bond.centroid), rotations[c]);
            }
        }
        if (translations != nullptr)
        {
            for (uint32_t i = 0; i < componentChunkCount; ++i)
            {
                translate(reinterpret_cast<NvcVec3&>(chunkDescs[chunkCount + i].centroid), translations[c]);
            }
            for (uint32_t i = 0; i < componentBondCount; ++i)
            {
                translate(reinterpret_cast<NvcVec3&>(bondDescs[bondCount + i].bond.centroid), translations[c]);
            }
        }
        chunkCount += componentChunkCount;
        bondCount += componentBondCount;
    }

    // Fill the bond descriptors from the new bond descs, mapping component-local
    // chunk indices into the merged index space via chunkIndexOffsets
    for (uint32_t b = 0; b < newBondCount; ++b)
    {
        const NvBlastExtAssetUtilsBondDesc& newBondDesc = newBondDescs[b];
        NvBlastBondDesc& bondDesc = bondDescs[bondCount++];
        memcpy(&bondDesc.bond, &newBondDesc.bond, sizeof(NvBlastBond));
        bondDesc.chunkIndices[0] = !isInvalidIndex(newBondDesc.chunkIndices[0]) ? newBondDesc.chunkIndices[0] + chunkIndexOffsets[newBondDesc.componentIndices[0]] : invalidIndex<uint32_t>();
        bondDesc.chunkIndices[1] = !isInvalidIndex(newBondDesc.chunkIndices[1]) ? newBondDesc.chunkIndices[1] + chunkIndexOffsets[newBondDesc.componentIndices[1]] : invalidIndex<uint32_t>();
    }

    // Create new asset descriptor
    NvBlastAssetDesc assetDesc;
    assetDesc.chunkCount = chunkCount;
    assetDesc.chunkDescs = chunkDescs;
    assetDesc.bondCount = bondCount;
    assetDesc.bondDescs = bondDescs;

    // Massage the descriptors so that they are valid for asset creation
    void* scratch = NVBLAST_ALLOC(chunkCount * sizeof(NvBlastChunkDesc)); // Enough for NvBlastEnsureAssetExactSupportCoverage and NvBlastReorderAssetDescChunks
    NvBlastEnsureAssetExactSupportCoverage(chunkDescs, chunkCount, scratch, logLL);
    if (chunkReorderMapSize < chunkCount)
    {
        if (chunkReorderMap != nullptr)
        {
            // Chunk reorder map is not large enough.  Fill it with invalid indices and don't use it.
            memset(chunkReorderMap, 0xFF, chunkReorderMapSize * sizeof(uint32_t));
            NVBLAST_LOG_WARNING("NvBlastExtAssetUtilsMergeAssets: insufficient chunkReorderMap array passed in. NvBlastReorderAssetDescChunks will not be used.");
        }
        chunkReorderMap = nullptr;  // Don't use
    }
    if (chunkReorderMap != nullptr)
    {
        NvBlastReorderAssetDescChunks(chunkDescs, chunkCount, bondDescs, bondCount, chunkReorderMap, true, scratch, logLL);
    }
    NVBLAST_FREE(scratch);

    return assetDesc;
}


/**
Multiply a 3-vector v in-place by a scalar.

\param[in,out]  v       The vector to multiply.
\param[in]      value   The scalar multiplier.
*/
static inline void multiply(NvcVec3& v, float value)
{
    v.x *= value;
    v.y *= value;
    v.z *= value;
}


/**
Get Vec3 length
*/
static inline float length(const NvcVec3& p)
{
    return sqrtf(p.x * p.x + p.y * p.y + p.z * p.z);
}


/**
Transform a point in-place: scale, rotate, then translate

\param[in,out]  p   The point to transform.
\param[in]      S   The diagonal elements of a diagonal scale matrix.
\param[in]      R   A quaternion representing the rotation.  Must be normalized.
\param[in]      T   The translation vector.
*/
static inline void transform(NvcVec3& p, const NvcVec3& S, const NvcQuat& R, const NvcVec3& T)
{
    scale(p, S);
    rotate(p, R);
    translate(p, T);
}


/**
Transform a vector in-place: scale, then rotate (no translation — for directions, not points)

\param[in,out]  v   The vector to transform.
\param[in]      S   The diagonal elements of a diagonal scale matrix.
\param[in]      R   A quaternion representing the rotation.  Must be normalized.
*/
static inline void transform(NvcVec3& v, const NvcVec3& S, const NvcQuat& R)
{
    scale(v, S);
    rotate(v, R);
}


// Apply an affine transform (scale, then rotate, then translate) to an asset's
// geometric data in-place: chunk centroids/volumes and bond centroids/normals/areas.
// Any of scaling/rotation/translation may be null (treated as identity).
void NvBlastExtAssetTransformInPlace(NvBlastAsset* asset, const NvcVec3* scaling, const NvcQuat* rotation, const NvcVec3* translation)
{
    // Local copies of scaling (S), rotation (R), and translation (T)
    NvcVec3 S = { 1, 1, 1 };
    NvcQuat R = { 0, 0, 0, 1 };
    NvcVec3 T = { 0, 0, 0 };

    NvcVec3 cofS = { 1, 1, 1 };     // Cofactor diagonal of S; normals transform by this, not S
    float absDetS = 1;              // |det S|, the volume scale factor
    float sgnDetS = 1;              // Sign of det S; a negative determinant flips orientation
    {
        if (rotation)
        {
            R = *rotation;
        }
        if (scaling)
        {
            S = *scaling;
            cofS.x = S.y * S.z;
            cofS.y = S.z * S.x;
            cofS.z = S.x * S.y;
            absDetS = S.x * S.y * S.z;
            sgnDetS = absDetS < 0.0f ? -1.0f : 1.0f;
            absDetS *= sgnDetS;
        }
        if (translation)
        {
            T = *translation;
        }
    }

    // Chunk descs
    const uint32_t assetChunkCount = NvBlastAssetGetChunkCount(asset, logLL);
    NvBlastChunk* assetChunk = const_cast<NvBlastChunk*>(NvBlastAssetGetChunks(asset, logLL));
    for (uint32_t i = 0; i < assetChunkCount; ++i, ++assetChunk)
    {
        transform(reinterpret_cast<NvcVec3&>(assetChunk->centroid), S, R, T);
        assetChunk->volume *= absDetS;  // Use |detS| to keep the volume positive
    }

    // Bond descs
    const uint32_t assetBondCount = NvBlastAssetGetBondCount(asset, logLL);
    NvBlastBond* assetBond = const_cast<NvBlastBond*>(NvBlastAssetGetBonds(asset, logLL));
    for (uint32_t i = 0; i < assetBondCount; ++i, ++assetBond)
    {
        transform(reinterpret_cast<NvcVec3&>(assetBond->centroid), S, R, T);
        NvcVec3& normal = reinterpret_cast<NvcVec3&>(assetBond->normal);
        transform(normal, cofS, R);
        // The transformed normal's length is the area scale factor; fold it into the
        // bond area and renormalize, flipping the normal if the determinant is negative
        const float l = length(normal);
        assetBond->area *= l;
        multiply(normal, l > 0.f ? sgnDetS / l : 1.f);
    }
}
18,148
C++
36.114519
190
0.648556
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/stress/NvBlastExtStressSolver.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. 
#include "NvBlastExtStressSolver.h" #include "NvBlast.h" #include "NvBlastGlobals.h" #include "NvBlastArray.h" #include "NvBlastHashMap.h" #include "NvBlastHashSet.h" #include "NvBlastAssert.h" #include "NvBlastIndexFns.h" #include "NsFPU.h" #include "NvBlastNvSharedHelpers.h" #include "NvCMath.h" #include "stress.h" #include "buffer.h" #include "simd/simd_device_query.h" #include <algorithm> #define USE_SCALAR_IMPL 0 #define WARM_START 1 #define GRAPH_INTERGRIRY_CHECK 0 #if GRAPH_INTERGRIRY_CHECK #include <set> #endif namespace Nv { namespace Blast { using namespace nvidia; static_assert(sizeof(NvVec3) == sizeof(NvcVec3), "sizeof(NvVec3) must equal sizeof(NvcVec3)."); static_assert(offsetof(NvVec3, x) == offsetof(NvcVec3, x) && offsetof(NvVec3, y) == offsetof(NvcVec3, y) && offsetof(NvVec3, z) == offsetof(NvcVec3, z), "Elements of NvVec3 and NvcVec3 must have the same struct offset."); /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // Conjugate Gradient Solver /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// class ConjugateGradientImpulseSolver { public: ConjugateGradientImpulseSolver(uint32_t nodeCount, uint32_t maxBondCount) { m_bonds.reserve(maxBondCount); m_impulses.reserve(maxBondCount); reset(nodeCount); } void getBondImpulses(uint32_t bond, NvVec3& impulseLinear, NvVec3& impulseAngular) const { NVBLAST_ASSERT(bond < m_impulses.size()); const AngLin6& f = m_impulses[bond]; *(NvcVec3*)&impulseAngular = f.ang; *(NvcVec3*)&impulseLinear = f.lin; } void getBondNodes(uint32_t bond, uint32_t& node0, uint32_t& node1) const { NVBLAST_ASSERT(bond < m_bonds.size()); const SolverBond& b = m_bonds[bond]; node0 = b.nodes[0]; node1 = b.nodes[1]; } uint32_t getBondCount() const { return m_bonds.size(); } uint32_t getNodeCount() const { return m_nodes.size(); } void setNodeMassInfo(uint32_t node, const NvVec3& CoM, float mass, float 
inertia) { NVBLAST_ASSERT(node < m_nodes.size()); SolverNodeS& n = m_nodes[node]; n.CoM = { CoM.x, CoM.y, CoM.z }; n.mass = std::max(mass, 0.0f); // No negative masses, but 0 is meaningful (== infinite) n.inertia = std::max(inertia, 0.0f); // Ditto for inertia m_forceColdStart = true; } void initialize() { StressProcessor::DataParams params; params.centerBonds = true; params.equalizeMasses = true; m_stressProcessor.prepare(m_nodes.begin(), m_nodes.size(), m_bonds.begin(), m_bonds.size(), params); } void setNodeVelocities(uint32_t node, const NvVec3& velocityLinear, const NvVec3& velocityAngular) { NVBLAST_ASSERT(node < m_velocities.size()); AngLin6& v = m_velocities[node]; v.ang = { velocityAngular.x, velocityAngular.y, velocityAngular.z }; v.lin = { velocityLinear.x, velocityLinear.y, velocityLinear.z }; m_inputsChanged = true; } uint32_t addBond(uint32_t node0, uint32_t node1, const NvVec3& bondCentroid) { SolverBond b; b.nodes[0] = node0; b.nodes[1] = node1; b.centroid = { bondCentroid.x, bondCentroid.y, bondCentroid.z }; m_bonds.pushBack(b); m_impulses.push_back({{0,0,0},{0,0,0}}); m_forceColdStart = true; return m_bonds.size() - 1; } void replaceWithLast(uint32_t bondIndex) { m_bonds.replaceWithLast(bondIndex); if ((size_t)bondIndex + 2 < m_impulses.size()) { m_impulses[bondIndex] = m_impulses.back(); m_impulses.resize(m_impulses.size() - 1); } m_stressProcessor.removeBond(bondIndex); } void reset(uint32_t nodeCount) { m_nodes.resize(nodeCount); memset(m_nodes.begin(), 0, sizeof(SolverNodeS)*nodeCount); m_velocities.resize(nodeCount); memset(m_velocities.data(), 0, sizeof(AngLin6)*nodeCount); clearBonds(); m_error_sq = {FLT_MAX, FLT_MAX}; m_converged = false; m_forceColdStart = true; m_inputsChanged = true; } void clearBonds() { m_bonds.clear(); m_impulses.resize(0); m_forceColdStart = true; } void solve(uint32_t iterationCount, bool warmStart = true) { StressProcessor::SolverParams params; params.maxIter = iterationCount; params.tolerance = 0.001f; 
params.warmStart = warmStart && !m_forceColdStart; m_converged = (m_stressProcessor.solve(m_impulses.data(), m_velocities.data(), params, &m_error_sq) >= 0); m_forceColdStart = false; m_inputsChanged = false; } bool calcError(float& linear, float& angular) const { linear = sqrtf(m_error_sq.lin); angular = sqrtf(m_error_sq.ang); return m_converged; } private: Array<SolverNodeS>::type m_nodes; Array<SolverBond>::type m_bonds; StressProcessor m_stressProcessor; POD_Buffer<AngLin6> m_velocities; POD_Buffer<AngLin6> m_impulses; AngLin6ErrorSq m_error_sq; bool m_converged; bool m_forceColdStart; bool m_inputsChanged; }; /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // Graph Processor /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// #if GRAPH_INTERGRIRY_CHECK #define CHECK_GRAPH_INTEGRITY checkGraphIntegrity() #else #define CHECK_GRAPH_INTEGRITY ((void)0) #endif class SupportGraphProcessor { public: struct BondData { uint32_t node0; uint32_t node1; uint32_t blastBondIndex; // linear stresses float stressNormal; // negative values represent compression pressure, positive represent tension float stressShear; // The normal used to compute stress values // Can be different than the bond normal if graph reduction is used // and multiple bonds are grouped together nvidia::NvVec3 normal; // Centroid used to compute node offsets, instead of assuming the bond is halfway between node positions. 
// This also allows the bonds to the world node to be drawn nvidia::NvVec3 centroid; }; struct NodeData { float mass; float volume; NvVec3 localPos; NvVec3 localVel; uint32_t solverNode; uint32_t neighborsCount; }; struct SolverNodeData { uint32_t supportNodesCount; NvVec3 localPos; union { float mass; int32_t indexShift; }; float volume; }; struct SolverBondData { InlineArray<uint32_t, 8>::type blastBondIndices; }; SupportGraphProcessor(uint32_t nodeCount, uint32_t maxBondCount) : m_solver(nodeCount, maxBondCount), m_nodesDirty(true), m_bondsDirty(true) { m_nodesData.resize(nodeCount); m_bondsData.reserve(maxBondCount); m_solverNodesData.resize(nodeCount); m_solverBondsData.reserve(maxBondCount); m_solverBondsMap.reserve(maxBondCount); m_blastBondIndexMap.resize(maxBondCount); memset(m_blastBondIndexMap.begin(), 0xFF, m_blastBondIndexMap.size() * sizeof(uint32_t)); resetVelocities(); } const NodeData& getNodeData(uint32_t node) const { return m_nodesData[node]; } const BondData& getBondData(uint32_t bond) const { return m_bondsData[bond]; } const SolverNodeData& getSolverNodeData(uint32_t node) const { return m_solverNodesData[node]; } const SolverBondData& getSolverBondData(uint32_t bond) const { return m_solverBondsData[bond]; } void getSolverInternalBondImpulses(uint32_t bond, NvVec3& impulseLinear, NvVec3& impulseAngular) const { m_solver.getBondImpulses(bond, impulseLinear, impulseAngular); } void getSolverInternalBondNodes(uint32_t bond, uint32_t& node0, uint32_t& node1) const { m_solver.getBondNodes(bond, node0, node1); } uint32_t getBondCount() const { return m_bondsData.size(); } uint32_t getNodeCount() const { return m_nodesData.size();; } uint32_t getSolverBondCount() const { return m_solverBondsData.size(); } uint32_t getSolverNodeCount() const { return m_solverNodesData.size();; } uint32_t getOverstressedBondCount() const { return m_overstressedBondCount; } void calcSolverBondStresses( uint32_t bondIdx, float bondArea, float nodeDist, const 
nvidia::NvVec3& bondNormal,
        float& stressNormal, float& stressShear) const
    {
        // Bonds at/above the unbreakable area limit carry no reportable stress.
        if (!canTakeDamage(bondArea))
        {
            stressNormal = stressShear = 0.0f;
            return;
        }

        // impulseLinear in the direction of the bond normal is stressNormal, perpendicular is stressShear
        // ignore impulseAngular for now, not sure how to account for that
        // convert to pressure to factor out area
        NvVec3 impulseLinear, impulseAngular;
        getSolverInternalBondImpulses(bondIdx, impulseLinear, impulseAngular);
        const float normalComponentLinear = impulseLinear.dot(bondNormal);
        stressNormal = normalComponentLinear / bondArea;
        const float impulseLinearMagSqr = impulseLinear.magnitudeSquared();
        // NOTE(review): the difference below can go slightly negative from FP rounding,
        // which would make sqrtf() return NaN; callers assert !isnan - confirm whether
        // a max(0, ...) clamp is needed here.
        stressShear = sqrtf(impulseLinearMagSqr - normalComponentLinear * normalComponentLinear) / bondArea;

        // impulseAngular in the direction of the bond normal is twist, perpendicular is bend
        // take abs() of the dot product because only the magnitude of the twist matters, not direction
        // NOTE(review): unqualified abs() on a float relies on the <cmath> float overload
        // being visible at this point; fabsf() would be unambiguous - confirm against the build.
        const float normalComponentAngular = abs(impulseAngular.dot(bondNormal));
        const float twist = normalComponentAngular / bondArea;
        const float impulseAngularMagSqr = impulseAngular.magnitudeSquared();
        const float bend = sqrtf(impulseAngularMagSqr - normalComponentAngular * normalComponentAngular) / bondArea;

        // interpret angular pressure as a composition of linear pressures
        // dividing by nodeDist for scaling
        const float twistContribution = twist * 2.0f / nodeDist;
        stressShear += twistContribution;
        const float bendContribution = bend * 2.0f / nodeDist;
        // bend adds to the dominant normal-direction stress, keeping its sign
        stressNormal += copysignf(bendContribution, stressNormal);
    }

    // Map a stress value onto [0, 1]: [0, 0.5) below the elastic limit,
    // [0.5, 1.0] between the elastic and fatal limits.
    float mapStressToRange(float stress, float elasticLimit, float fatalLimit) const
    {
        if (stress < elasticLimit)
        {
            return 0.5f * stress / elasticLimit;
        }
        else
        {
            return fatalLimit > elasticLimit ?
0.5f + 0.5f * (stress - elasticLimit) / (fatalLimit - elasticLimit) :
                1.0f;
        }
    }

    // Return the highest mapped stress percentage among the mode's stress channels
    // for the given solver bond, or a negative value if all its blast bonds are broken.
    float getSolverBondStressPct(uint32_t bondIdx, const float* bondHealths, const ExtStressSolverSettings& settings, ExtStressSolver::DebugRenderMode mode) const
    {
        // sum up the stress of all underlying bonds involved in this stress solver bond
        float compressionStress, tensionStress, shearStress;
        float stress = -1.0f;
        const auto& blastBondIndices = m_solverBondsData[bondIdx].blastBondIndices;
        for (const auto blastBondIndex : blastBondIndices)
        {
            // only consider the stress values on bonds that are intact
            if (bondHealths[blastBondIndex] > 0.0f && getBondStress(blastBondIndex, compressionStress, tensionStress, shearStress))
            {
                if (mode == ExtStressSolver::STRESS_PCT_COMPRESSION || mode == ExtStressSolver::STRESS_PCT_MAX)
                {
                    compressionStress = mapStressToRange(compressionStress, settings.compressionElasticLimit, settings.compressionFatalLimit);
                    stress = std::max(compressionStress, stress);
                }
                if (mode == ExtStressSolver::STRESS_PCT_TENSION || mode == ExtStressSolver::STRESS_PCT_MAX)
                {
                    tensionStress = mapStressToRange(tensionStress, settings.tensionElasticLimit, settings.tensionFatalLimit);
                    stress = std::max(tensionStress, stress);
                }
                if (mode == ExtStressSolver::STRESS_PCT_SHEAR || mode == ExtStressSolver::STRESS_PCT_MAX)
                {
                    shearStress = mapStressToRange(shearStress, settings.shearElasticLimit, settings.shearFatalLimit);
                    stress = std::max(shearStress, stress);
                }

                // all bonds in the group share the same stress values, no need to keep iterating
                break;
            }
        }

        // return a value < 0.0f if all bonds are broken
        return stress;
    }

    // Set mass/volume/position for a support node; marks the node graph dirty.
    void setNodeInfo(uint32_t node, float mass, float volume, NvVec3 localPos)
    {
        m_nodesData[node].mass = mass;
        m_nodesData[node].volume = volume;
        m_nodesData[node].localPos = localPos;
        m_nodesDirty = true;
    }

    void setNodeNeighborsCount(uint32_t node, uint32_t neighborsCount)
    {
        // neighbors count is expected to be the number of nodes on 1 island/actor.
m_nodesData[node].neighborsCount = neighborsCount;

        // check for too huge aggregates (happens after island's split)
        if (!m_nodesDirty)
        {
            m_nodesDirty |= (m_solverNodesData[m_nodesData[node].solverNode].supportNodesCount > neighborsCount / 2);
        }
    }

    // Accumulate a force (or acceleration) on a node; consumed by the next solve().
    void addNodeForce(uint32_t node, const NvVec3& force, ExtForceMode::Enum mode)
    {
        const float mass = m_nodesData[node].mass;
        // zero-mass nodes are static (world) nodes; forces don't move them
        if (mass > 0)
        {
            // NOTE - passing in acceleration as velocity. The impulse solver's output will be interpreted as force.
            m_nodesData[node].localVel += (mode == ExtForceMode::FORCE) ? force/mass : force;
        }
    }

    // Track a blast bond between two support nodes (no-op if already tracked).
    void addBond(uint32_t node0, uint32_t node1, uint32_t blastBondIndex)
    {
        if (isInvalidIndex(m_blastBondIndexMap[blastBondIndex]))
        {
            const BondData data = {
                node0,
                node1,
                blastBondIndex,
                0.0f
            };
            m_bondsData.pushBack(data);
            m_blastBondIndexMap[blastBondIndex] = m_bondsData.size() - 1;
        }
    }

    // Stop tracking a blast bond.  External bonds are unhooked from their solver bond
    // in place; internal ones (both endpoints in the same solver node) force a resync.
    void removeBondIfExists(uint32_t blastBondIndex)
    {
        const uint32_t bondIndex = m_blastBondIndexMap[blastBondIndex];

        if (!isInvalidIndex(bondIndex))
        {
            const BondData& bond = m_bondsData[bondIndex];
            const uint32_t solverNode0 = m_nodesData[bond.node0].solverNode;
            const uint32_t solverNode1 = m_nodesData[bond.node1].solverNode;
            bool isBondInternal = (solverNode0 == solverNode1);

            if (isBondInternal)
            {
                // internal bond sadly requires graph resync (it never happens on reduction level '0')
                m_nodesDirty = true;
            }
            else if (!m_nodesDirty)
            {
                // otherwise it's external bond, we can remove it manually and keep graph synced
                // we don't need to spend time there if (m_nodesDirty == true), graph will be resynced anyways
                BondKey solverBondKey(solverNode0, solverNode1);
                auto entry = m_solverBondsMap.find(solverBondKey);
                if (entry)
                {
                    const uint32_t solverBondIndex = entry->second;
                    auto& blastBondIndices = m_solverBondsData[solverBondIndex].blastBondIndices;
                    blastBondIndices.findAndReplaceWithLast(blastBondIndex);
                    if (blastBondIndices.empty())
                    {
                        // all bonds associated with this solver bond were removed, so let's remove solver bond
m_solverBondsData.replaceWithLast(solverBondIndex);
                        m_solver.replaceWithLast(solverBondIndex);
                        if (m_solver.getBondCount() > 0)
                        {
                            // update 'previously last' solver bond mapping
                            // NOTE(review): if the removed solver bond was itself the last one,
                            // solverBondIndex now equals getBondCount() and getBondNodes() would
                            // read past the end - confirm replaceWithLast() semantics cover this.
                            uint32_t node0, node1;
                            m_solver.getBondNodes(solverBondIndex, node0, node1);
                            m_solverBondsMap[BondKey(node0, node1)] = solverBondIndex;
                        }
                        m_solverBondsMap.erase(solverBondKey);
                    }
                }
                CHECK_GRAPH_INTEGRITY;
            }

            // remove bond from graph processor's list
            m_blastBondIndexMap[blastBondIndex] = invalidIndex<uint32_t>();
            m_bondsData.replaceWithLast(bondIndex);
            // re-point the map entry of the bond that was swapped into 'bondIndex' (if any)
            // NOTE(review): when the removed bond was the last element, m_bondsData[bondIndex]
            // reads one past the new size before the ternary discards the result - confirm
            // the Array keeps its storage so this stays benign.
            m_blastBondIndexMap[m_bondsData[bondIndex].blastBondIndex] = m_bondsData.size() > bondIndex ? bondIndex : invalidIndex<uint32_t>();
        }
    }

    // Set aggregation level; marks nodes dirty so the reduced graph is rebuilt on next solve.
    void setGraphReductionLevel(uint32_t level)
    {
        m_graphReductionLevel = level;
        m_nodesDirty = true;
    }

    uint32_t getGraphReductionLevel() const
    {
        return m_graphReductionLevel;
    }

    // Sync the reduced graph, push accumulated node velocities into the solver,
    // run it, then translate solver impulses into per-bond stresses.
    void solve(const ExtStressSolverSettings& settings, const float* bondHealth, const NvBlastBond* bonds, bool warmStart = true)
    {
        sync(bonds);

        for (const NodeData& node : m_nodesData)
        {
            m_solver.setNodeVelocities(node.solverNode, node.localVel, NvVec3(NvZero));
        }

        m_solver.solve(settings.maxSolverIterationsPerFrame, warmStart);

        resetVelocities();

        updateBondStress(settings, bondHealth, bonds);
    }

    bool calcError(float& linear, float& angular) const
    {
        return m_solver.calcError(linear, angular);
    }

    // Split a bond's signed normal stress into compression/tension and report shear.
    // Returns false if the blast bond is no longer tracked.
    bool getBondStress(uint32_t blastBondIndex, float& compression, float& tension, float& shear) const
    {
        const uint32_t bondIndex = m_blastBondIndexMap[blastBondIndex];
        if (isInvalidIndex(bondIndex))
        {
            return false;
        }
        // compression and tension are mutually exclusive since they operate in opposite directions
        // they both measure stress parallel to the bond normal direction
        // compression is the force resisting two nodes being pushed together (it pushes them apart)
        // tension is the force resisting two nodes being pulled apart (it pulls them together)
        if (m_bondsData[bondIndex].stressNormal <= 0.0f)
        {
            compression = -m_bondsData[bondIndex].stressNormal;
tension = 0.0f;
        }
        else
        {
            compression = 0.0f;
            tension = m_bondsData[bondIndex].stressNormal;
        }
        // shear is independent and can co-exist with compression and tension
        shear = m_bondsData[bondIndex].stressShear;  // the force perpendicular to the bond normal direction
        return true;
    }

    // Convert from Blast bond index to internal stress solver bond index
    // Will be InvalidIndex if the internal bond was removed from the stress solver
    uint32_t getInternalBondIndex(uint32_t blastBondIndex)
    {
        return m_blastBondIndexMap[blastBondIndex];
    }

private:

    // Zero out all accumulated per-node velocities (applied forces).
    void resetVelocities()
    {
        for (auto& node : m_nodesData)
        {
            node.localVel = NvVec3(NvZero);
        }
    }

    // After a solve: derive normal/shear stress per solver bond, fan the values out to
    // every underlying blast bond, count overstressed bonds, and drop dead bonds.
    void updateBondStress(const ExtStressSolverSettings& settings, const float* bondHealth, const NvBlastBond* bonds)
    {
        m_overstressedBondCount = 0;

        Array<uint32_t>::type bondIndicesToRemove;
        bondIndicesToRemove.reserve(getBondCount());

        for (uint32_t i = 0; i < m_solverBondsData.size(); ++i)
        {
            // calculate the total area of all bonds involved so pressure can be calculated
            float totalArea = 0.0f;
            // calculate an average normal and centroid for all bonds as well, weighted by their area
            nvidia::NvVec3 bondNormal(NvZero);
            nvidia::NvVec3 bondCentroid(NvZero);
            nvidia::NvVec3 averageNodeDisp(NvZero);
            const auto& blastBondIndices = m_solverBondsData[i].blastBondIndices;
            for (auto blastBondIndex : blastBondIndices)
            {
                if (bondHealth[blastBondIndex] > 0.0f)
                {
                    const uint32_t bondIndex = m_blastBondIndexMap[blastBondIndex];
                    const BondData& bond = m_bondsData[bondIndex];
                    const nvidia::NvVec3 nodeDisp = m_nodesData[bond.node1].localPos - m_nodesData[bond.node0].localPos;
                    // the current health of a bond is the effective area remaining
                    const float remainingArea = bondHealth[blastBondIndex];
                    const NvBlastBond& blastBond = bonds[blastBondIndex];
                    // Align normal(s) with node displacement, so that compressive/tensile distinction is correct
                    const nvidia::NvVec3 assetBondNormal(blastBond.normal[0], blastBond.normal[1], blastBond.normal[2]);
                    const nvidia::NvVec3
blastBondNormal = std::copysignf(1.0f, assetBondNormal.dot(nodeDisp))*assetBondNormal;
                    const nvidia::NvVec3 blastBondCentroid(blastBond.centroid[0], blastBond.centroid[1], blastBond.centroid[2]);
                    if (!canTakeDamage(remainingArea))  // Check unbreakable limit
                    {
                        totalArea = kUnbreakableLimit;  // Don't add this in, in case of overflow
                        bondNormal = blastBondNormal;
                        bondCentroid = blastBondCentroid;
                        averageNodeDisp = nodeDisp;
                        break;
                    }
                    // area-weighted accumulation
                    bondNormal += blastBondNormal*remainingArea;
                    bondCentroid += blastBondCentroid*remainingArea;
                    averageNodeDisp += nodeDisp*remainingArea;
                    totalArea += remainingArea;
                }
                else
                {
                    // if the bond is broken, try to remove it after processing is complete
                    bondIndicesToRemove.pushBack(blastBondIndex);
                }
            }
            if (totalArea == 0.0f)
            {
                continue;
            }

            // normalized the aggregate normal now that all contributing bonds have been combined
            bondNormal.normalizeSafe();

            // divide by total area for the weighted position, if the area is valid
            if (canTakeDamage(totalArea))
            {
                bondCentroid /= totalArea;
                averageNodeDisp /= totalArea;
            }

            // bonds are looked at as a whole group,
            // so regardless of the current health of an individual one they are either all over stressed or none are
            float stressNormal, stressShear;
            calcSolverBondStresses(i, totalArea, averageNodeDisp.magnitude(), bondNormal, stressNormal, stressShear);
            NVBLAST_ASSERT(!std::isnan(stressNormal) && !std::isnan(stressShear));
            if (
                -stressNormal > settings.compressionElasticLimit ||
                stressNormal > settings.tensionElasticLimit ||
                stressShear > settings.shearElasticLimit
               )
            {
                m_overstressedBondCount += blastBondIndices.size();
            }

            // store the stress values for all the bonds involved
            for (auto blastBondIndex : blastBondIndices)
            {
                const uint32_t bondIndex = m_blastBondIndexMap[blastBondIndex];
                if (!isInvalidIndex(bondIndex) && bondHealth[blastBondIndex] > 0.0f)
                {
                    BondData& bond = m_bondsData[bondIndex];
                    NVBLAST_ASSERT(getNodeData(bond.node0).solverNode != getNodeData(bond.node1).solverNode);
NVBLAST_ASSERT(bond.blastBondIndex == blastBondIndex);
                    bond.stressNormal = stressNormal;
                    bond.stressShear = stressShear;
                    // store the normal used to calc stresses so it can be used later to determine forces
                    bond.normal = bondNormal;
                    // store the bond centroid
                    bond.centroid = bondCentroid;
                }
            }
        }

        // now that processing is done, remove any dead bonds
        for (uint32_t bondIndex : bondIndicesToRemove)
        {
            removeBondIfExists(bondIndex);
        }
    }

    // Rebuild whatever is out of date: nodes first (which also re-initializes the
    // solver), then bonds.
    void sync(const NvBlastBond* bonds)
    {
        if (m_nodesDirty)
        {
            syncNodes(bonds);
            m_solver.initialize();
        }
        if (m_bondsDirty)
        {
            syncBonds(bonds);
        }
        CHECK_GRAPH_INTEGRITY;
    }

    // Aggregate support nodes into solver nodes according to the reduction level,
    // compact the resulting node array and feed mass/inertia data to the solver.
    void syncNodes(const NvBlastBond* bonds)
    {
        // init with 1<->1 blast nodes to solver nodes mapping
        m_solverNodesData.resize(m_nodesData.size());
        for (uint32_t i = 0; i < m_nodesData.size(); ++i)
        {
            m_nodesData[i].solverNode = i;
            m_solverNodesData[i].supportNodesCount = 1;
            m_solverNodesData[i].indexShift = 0;
        }

        // for static nodes aggregate size per graph reduction level is lower, it
        // falls behind on few levels. (can be made as parameter)
        const uint32_t STATIC_NODES_COUNT_PENALTY = 2 << 2;

        // reducing graph by aggregating nodes level by level
        // NOTE (@anovoselov): Recently, I found a flow in the algorithm below. In very rare situations aggregate (solver node)
        // can contain more then one connected component. I didn't notice it to produce any visual artifacts and it's
        // unlikely to influence stress solvement a lot. Possible solution is to merge *whole* solver nodes, that
        // will raise complexity a bit (at least will add another loop on nodes for every reduction level.
for (uint32_t k = 0; k < m_graphReductionLevel; k++)
        {
            // aggregates may double in size on every level
            const uint32_t maxAggregateSize = 1 << (k + 1);

            for (const BondData& bond : m_bondsData)
            {
                NodeData& node0 = m_nodesData[bond.node0];
                NodeData& node1 = m_nodesData[bond.node1];

                // already in the same aggregate
                if (node0.solverNode == node1.solverNode)
                    continue;

                SolverNodeData& solverNode0 = m_solverNodesData[node0.solverNode];
                SolverNodeData& solverNode1 = m_solverNodesData[node1.solverNode];

                const int countPenalty = 1; // This was being set to STATIC_NODES_COUNT_PENALTY for static nodes, may want to revisit
                // never let an aggregate grow past half of the island's node count
                const uint32_t aggregateSize = std::min<uint32_t>(maxAggregateSize, node0.neighborsCount / 2);

                if (solverNode0.supportNodesCount * countPenalty >= aggregateSize)
                    continue;
                if (solverNode1.supportNodesCount * countPenalty >= aggregateSize)
                    continue;

                // move one node into the bigger aggregate
                // (the 'else if' condition is always true when reached; kept for symmetry)
                if (solverNode0.supportNodesCount >= solverNode1.supportNodesCount)
                {
                    solverNode1.supportNodesCount--;
                    solverNode0.supportNodesCount++;
                    node1.solverNode = node0.solverNode;
                }
                else if (solverNode1.supportNodesCount >= solverNode0.supportNodesCount)
                {
                    solverNode1.supportNodesCount++;
                    solverNode0.supportNodesCount--;
                    node0.solverNode = node1.solverNode;
                }
            }
        }

        // Solver Nodes now sparse, a lot of empty ones.
// Rearrange them by moving all non-empty to the front
        // 2 passes used for that
        {
            uint32_t currentNode = 0;
            for (; currentNode < m_solverNodesData.size(); ++currentNode)
            {
                if (m_solverNodesData[currentNode].supportNodesCount > 0)
                    continue;

                // 'currentNode' is free
                // search next occupied node
                uint32_t k = currentNode + 1;
                for (; k < m_solverNodesData.size(); ++k)
                {
                    if (m_solverNodesData[k].supportNodesCount > 0)
                    {
                        // replace currentNode and keep indexShift
                        m_solverNodesData[currentNode].supportNodesCount = m_solverNodesData[k].supportNodesCount;
                        m_solverNodesData[k].indexShift = k - currentNode;
                        m_solverNodesData[k].supportNodesCount = 0;
                        break;
                    }
                }
                if (k == m_solverNodesData.size())
                {
                    break;
                }
            }
            // remap every support node through the shift recorded on its (old) solver slot
            for (auto& node : m_nodesData)
            {
                node.solverNode -= m_solverNodesData[node.solverNode].indexShift;
            }

            // now, we know total solver nodes count and which nodes are aggregated into them
            m_solverNodesData.resize(currentNode);
        }

        // calculate all needed data
        for (SolverNodeData& solverNode : m_solverNodesData)
        {
            solverNode.supportNodesCount = 0;
            solverNode.localPos = NvVec3(NvZero);
            solverNode.mass = 0.0f;
            solverNode.volume = 0.0f;
        }
        for (NodeData& node : m_nodesData)
        {
            SolverNodeData& solverNode = m_solverNodesData[node.solverNode];
            solverNode.supportNodesCount++;
            solverNode.localPos += node.localPos;
            solverNode.mass += node.mass;
            solverNode.volume += node.volume;
        }
        // average the accumulated positions
        for (SolverNodeData& solverNode : m_solverNodesData)
        {
            solverNode.localPos /= (float)solverNode.supportNodesCount;
        }

        m_solver.reset(m_solverNodesData.size());
        for (uint32_t nodeIndex = 0; nodeIndex < m_solverNodesData.size(); ++nodeIndex)
        {
            const SolverNodeData& solverNode = m_solverNodesData[nodeIndex];

            const float R = NvPow(solverNode.volume * 3.0f * NvInvPi / 4.0f, 1.0f / 3.0f);  // sphere volume approximation
            const float inertia = solverNode.mass * (R * R * 0.4f);  // sphere inertia tensor approximation: I = 2/5 * M * R^2 ; invI = 1 / I;
            m_solver.setNodeMassInfo(nodeIndex, solverNode.localPos, solverNode.mass,
inertia); } m_nodesDirty = false; syncBonds(bonds); } void syncBonds(const NvBlastBond* bonds) { // traverse all blast bonds and aggregate m_solver.clearBonds(); m_solverBondsMap.clear(); m_solverBondsData.clear(); for (BondData& bond : m_bondsData) { const NodeData& node0 = m_nodesData[bond.node0]; const NodeData& node1 = m_nodesData[bond.node1]; // reset stress, bond structure changed and internal bonds stress won't be updated during updateBondStress() bond.stressNormal = 0.0f; bond.stressShear = 0.0f; // initialize normal and centroid using blast values bond.normal = *(NvVec3*)bonds[bond.blastBondIndex].normal; bond.centroid = *(NvVec3*)bonds[bond.blastBondIndex].centroid; // fix normal direction to point from node0 to node1 bond.normal *= std::copysignf(1.0f, bond.normal.dot(node1.localPos - node1.localPos)); if (node0.solverNode == node1.solverNode) continue; // skip (internal) BondKey key(node0.solverNode, node1.solverNode); auto entry = m_solverBondsMap.find(key); SolverBondData* data; if (!entry) { m_solverBondsData.pushBack(SolverBondData()); data = &m_solverBondsData.back(); m_solverBondsMap[key] = m_solverBondsData.size() - 1; m_solver.addBond(node0.solverNode, node1.solverNode, bond.centroid); } else { data = &m_solverBondsData[entry->second]; } data->blastBondIndices.pushBack(bond.blastBondIndex); } m_bondsDirty = false; } #if GRAPH_INTERGRIRY_CHECK void checkGraphIntegrity() { NVBLAST_ASSERT(m_solver.getBondCount() == m_solverBondsData.size()); NVBLAST_ASSERT(m_solver.getNodeCount() == m_solverNodesData.size()); std::set<uint64_t> solverBonds; for (uint32_t i = 0; i < m_solverBondsData.size(); ++i) { const auto& bondData = m_solver.getBondData(i); BondKey key(bondData.node0, bondData.node1); NVBLAST_ASSERT(solverBonds.find(key) == solverBonds.end()); solverBonds.emplace(key); auto entry = m_solverBondsMap.find(key); NVBLAST_ASSERT(entry != nullptr); const auto& solverBond = m_solverBondsData[entry->second]; for (auto& blastBondIndex : 
solverBond.blastBondIndices)
            {
                if (!isInvalidIndex(m_blastBondIndexMap[blastBondIndex]))
                {
                    auto& b = m_bondsData[m_blastBondIndexMap[blastBondIndex]];
                    // every blast bond in the group must connect the same solver-node pair
                    BondKey key2(m_nodesData[b.node0].solverNode, m_nodesData[b.node1].solverNode);
                    NVBLAST_ASSERT(key2 == key);
                }
            }
        }
        for (auto& solverBond : m_solverBondsData)
        {
            for (auto& blastBondIndex : solverBond.blastBondIndices)
            {
                if (!isInvalidIndex(m_blastBondIndexMap[blastBondIndex]))
                {
                    auto& b = m_bondsData[m_blastBondIndexMap[blastBondIndex]];
                    // internal bonds must never appear in the solver bond list
                    NVBLAST_ASSERT(m_nodesData[b.node0].solverNode != m_nodesData[b.node1].solverNode);
                }
            }
        }
        uint32_t mappedBondCount = 0;
        for (uint32_t i = 0; i < m_blastBondIndexMap.size(); i++)
        {
            const auto& bondIndex = m_blastBondIndexMap[i];
            if (!isInvalidIndex(bondIndex))
            {
                mappedBondCount++;
                // the map and the bond array must agree in both directions
                NVBLAST_ASSERT(m_bondsData[bondIndex].blastBondIndex == i);
            }
        }
        NVBLAST_ASSERT(m_bondsData.size() == mappedBondCount);
    }
#endif

    // Symmetric key for an (unordered) solver node pair.
    struct BondKey
    {
        uint32_t node0;
        uint32_t node1;

        BondKey(uint32_t n0, uint32_t n1) : node0(n0), node1(n1) {}

        operator uint64_t() const
        {
            // Szudzik's function
            return node0 >= node1 ?
(uint64_t)node0 * node0 + node0 + node1 :
                (uint64_t)node1 * node1 + node0;
        }
    };

    ConjugateGradientImpulseSolver m_solver;            // underlying impulse solver
    Array<SolverNodeData>::type m_solverNodesData;      // aggregated (solver) nodes
    Array<SolverBondData>::type m_solverBondsData;      // aggregated (solver) bonds
    uint32_t m_graphReductionLevel;                     // aggregation level (0 == no reduction)
    bool m_nodesDirty;                                  // solver nodes need rebuild (implies bond rebuild)
    bool m_bondsDirty;                                  // solver bonds need rebuild
    uint32_t m_overstressedBondCount;                   // blast bonds past an elastic limit after the last solve
    HashMap<BondKey, uint32_t>::type m_solverBondsMap;  // solver-node pair -> solver bond index
    Array<uint32_t>::type m_blastBondIndexMap;          // blast bond index -> m_bondsData index (or invalid)
    Array<BondData>::type m_bondsData;                  // per blast bond data
    Array<NodeData>::type m_nodesData;                  // per support node data
};


///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//                                                  ExtStressSolver
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////

/**
Stress solver implementation operating on a Blast family's support graph.
*/
class ExtStressSolverImpl final : public ExtStressSolver
{
    NV_NOCOPY(ExtStressSolverImpl)

public:
    ExtStressSolverImpl(const NvBlastFamily& family, const ExtStressSolverSettings& settings);
    virtual void release() override;


    //////// ExtStressSolverImpl interface ////////

    virtual void setAllNodesInfoFromLL(float density = 1.0f) override;

    virtual void setNodeInfo(uint32_t graphNode, float mass, float volume, NvcVec3 localPos) override;

    virtual void setSettings(const ExtStressSolverSettings& settings) override
    {
        m_settings = settings;
        // negative optional limits inherit from the compression limits
        inheritSettingsLimits();
    }

    virtual const ExtStressSolverSettings& getSettings() const override
    {
        return m_settings;
    }

    virtual bool addForce(const NvBlastActor& actor, NvcVec3 localPosition, NvcVec3 localForce, ExtForceMode::Enum mode) override;

    virtual void addForce(uint32_t graphNode, NvcVec3 localForce, ExtForceMode::Enum mode) override;

    virtual bool addGravity(const NvBlastActor& actor, NvcVec3 localGravity) override;

    virtual bool addCentrifugalAcceleration(const NvBlastActor& actor, NvcVec3 localCenterMass, NvcVec3 localAngularVelocity) override;

    virtual void update() override;

    virtual uint32_t getOverstressedBondCount() const override
    {
        return m_graphProcessor->getOverstressedBondCount();
    }
virtual void generateFractureCommands(const NvBlastActor& actor, NvBlastFractureBuffers& commands) override;

    virtual uint32_t generateFractureCommandsPerActor(const NvBlastActor** actorBuffer, NvBlastFractureBuffers* commandsBuffer, uint32_t bufferSize) override;

    // request a frame-counter reset on the next update
    void reset() override
    {
        m_reset = true;
    }

    virtual float getStressErrorLinear() const override
    {
        return m_errorLinear;
    }

    virtual float getStressErrorAngular() const override
    {
        return m_errorAngular;
    }

    virtual bool converged() const override
    {
        return m_converged;
    }

    virtual uint32_t getFrameCount() const override
    {
        return m_framesCount;
    }

    virtual uint32_t getBondCount() const override
    {
        return m_graphProcessor->getSolverBondCount();
    }

    virtual bool getExcessForces(uint32_t actorIndex, const NvcVec3& com, NvcVec3& force, NvcVec3& torque) override;

    virtual bool notifyActorCreated(const NvBlastActor& actor) override;

    virtual void notifyActorDestroyed(const NvBlastActor& actor) override;

    virtual const DebugBuffer fillDebugRender(const uint32_t* nodes, uint32_t nodeCount, DebugRenderMode mode, float scale) override;

private:
    ~ExtStressSolverImpl();


    //////// private methods ////////

    void solve();

    void fillFractureCommands(const NvBlastActor& actor, NvBlastFractureBuffers& commands);

    void initialize();

    void iterate();

    void removeBrokenBonds();

    template<class T>
    T* getScratchArray(uint32_t size);

    bool generateStressDamage(const NvBlastActor& actor, uint32_t bondIndex, uint32_t node0, uint32_t node1);

    // Negative (unset) optional limits inherit from the corresponding compression limits.
    void inheritSettingsLimits()
    {
        NVBLAST_ASSERT(m_settings.compressionElasticLimit >= 0.0f && m_settings.compressionFatalLimit >= 0.0f);

        // check if any optional limits need to inherit from the compression values
        if (m_settings.tensionElasticLimit < 0.0f)
        {
            m_settings.tensionElasticLimit = m_settings.compressionElasticLimit;
        }
        if (m_settings.tensionFatalLimit < 0.0f)
        {
            m_settings.tensionFatalLimit = m_settings.compressionFatalLimit;
        }
        if (m_settings.shearElasticLimit < 0.0f)
        {
            m_settings.shearElasticLimit =
m_settings.compressionElasticLimit;
        }
        if (m_settings.shearFatalLimit < 0.0f)
        {
            m_settings.shearFatalLimit = m_settings.compressionFatalLimit;
        }
    }


    //////// data ////////

    const NvBlastFamily& m_family;                      // family this solver operates on
    HashSet<const NvBlastActor*>::type m_activeActors;  // actors with more than one graph node
    ExtStressSolverSettings m_settings;
    NvBlastSupportGraph m_graph;
    bool m_isDirty;                                     // broken bonds pending removal
    bool m_reset;                                       // frame counter reset requested
    const float* m_bondHealths;                         // family bond healths (not owned)
    const float* m_cachedBondHealths;                   // cached bond healths (not owned)
    const NvBlastBond* m_bonds;                         // asset bond array (not owned)
    SupportGraphProcessor* m_graphProcessor;            // owned; released in the destructor
    float m_errorAngular;
    float m_errorLinear;
    bool m_converged;
    uint32_t m_framesCount;
    Array<NvBlastBondFractureData>::type m_bondFractureBuffer;
    Array<uint8_t>::type m_scratch;
    Array<DebugLine>::type m_debugLineBuffer;
};

// Return m_scratch reinterpreted as an array of at least 'size' T's (grows on demand).
template<class T>
NV_INLINE T* ExtStressSolverImpl::getScratchArray(uint32_t size)
{
    const uint32_t scratchSize = sizeof(T) * size;
    if (m_scratch.size() < scratchSize)
    {
        m_scratch.resize(scratchSize);
    }
    return reinterpret_cast<T*>(m_scratch.begin());
}


///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//                                                      Creation
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////

ExtStressSolverImpl::ExtStressSolverImpl(const NvBlastFamily& family, const ExtStressSolverSettings& settings)
    : m_family(family), m_settings(settings), m_isDirty(false), m_reset(false),
    m_errorAngular(std::numeric_limits<float>::max()), m_errorLinear(std::numeric_limits<float>::max()),
    m_converged(false), m_framesCount(0)
{
    // this needs to be called any time settings change, including when they are first set
    inheritSettingsLimits();

    const NvBlastAsset* asset = NvBlastFamilyGetAsset(&m_family, logLL);
    NVBLAST_ASSERT(asset);

    m_graph = NvBlastAssetGetSupportGraph(asset, logLL);
    const uint32_t bondCount = NvBlastAssetGetBondCount(asset, logLL);
    m_bondFractureBuffer.reserve(bondCount);

    {
        NvBlastActor* actor;
        NvBlastFamilyGetActors(&actor, 1, &family, logLL);
        m_bondHealths =
NvBlastActorGetBondHealths(actor, logLL);
        // note: the SDK function name carries the historical "Heaths" spelling
        m_cachedBondHealths = NvBlastActorGetCachedBondHeaths(actor, logLL);
        m_bonds = NvBlastAssetGetBonds(asset, logLL);
    }

    m_graphProcessor = NVBLAST_NEW(SupportGraphProcessor)(m_graph.nodeCount, bondCount);

    // traverse graph and fill bond info
    for (uint32_t node0 = 0; node0 < m_graph.nodeCount; ++node0)
    {
        for (uint32_t adjacencyIndex = m_graph.adjacencyPartition[node0]; adjacencyIndex < m_graph.adjacencyPartition[node0 + 1]; adjacencyIndex++)
        {
            uint32_t bondIndex = m_graph.adjacentBondIndices[adjacencyIndex];
            // skip bonds that are already broken
            if (m_bondHealths[bondIndex] <= 0.0f)
                continue;
            uint32_t node1 = m_graph.adjacentNodeIndices[adjacencyIndex];
            // each bond appears twice in the adjacency; take it only once
            if (node0 < node1)
            {
                m_graphProcessor->addBond(node0, node1, bondIndex);
            }
        }
    }
}

ExtStressSolverImpl::~ExtStressSolverImpl()
{
    NVBLAST_DELETE(m_graphProcessor, SupportGraphProcessor);
}

ExtStressSolver* ExtStressSolver::create(const NvBlastFamily& family, const ExtStressSolverSettings& settings)
{
    return NVBLAST_NEW(ExtStressSolverImpl) (family, settings);
}

void ExtStressSolverImpl::release()
{
    NVBLAST_DELETE(this, ExtStressSolverImpl);
}


///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//                                              Actors & Graph Data
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////

void ExtStressSolverImpl::setAllNodesInfoFromLL(float density)
{
    const NvBlastAsset* asset = NvBlastFamilyGetAsset(&m_family, logLL);
    NVBLAST_ASSERT(asset);

    const uint32_t chunkCount = NvBlastAssetGetChunkCount(asset, logLL);
    const NvBlastChunk* chunks = NvBlastAssetGetChunks(asset, logLL);

    // traverse graph and fill node info
    for (uint32_t node0 = 0; node0 < m_graph.nodeCount; ++node0)
    {
        const uint32_t chunkIndex0 = m_graph.chunkIndices[node0];
        if (chunkIndex0 >= chunkCount)
        {
            // chunkIndex is invalid means it is static node (represents world)
            m_graphProcessor->setNodeInfo(node0, 0.0f, 0.0f, NvVec3(NvZero));
        }
        else
        {
            //
// fill node info
            const NvBlastChunk& chunk = chunks[chunkIndex0];
            const float volume = chunk.volume;
            const float mass = volume * density;
            const NvVec3 localPos = *reinterpret_cast<const NvVec3*>(chunk.centroid);
            m_graphProcessor->setNodeInfo(node0, mass, volume, localPos);
        }
    }
}

void ExtStressSolverImpl::setNodeInfo(uint32_t graphNode, float mass, float volume, NvcVec3 localPos)
{
    m_graphProcessor->setNodeInfo(graphNode, mass, volume, toNvShared(localPos));
}

// Accumulate the force/torque that exceeded the fatal limits on bonds broken this
// frame, for the given actor, about the given center of mass.
bool ExtStressSolverImpl::getExcessForces(uint32_t actorIndex, const NvcVec3& com, NvcVec3& force, NvcVec3& torque)
{
    // otherwise allocate enough space and query the Blast SDK
    const NvBlastActor* actor = NvBlastFamilyGetActorByIndex(&m_family, actorIndex, logLL);
    if (actor == nullptr)
    {
        return false;
    }
    const uint32_t nodeCount = NvBlastActorGetGraphNodeCount(actor, logLL);
    uint32_t* nodeIndices = getScratchArray<uint32_t>(nodeCount);
    const uint32_t retCount = NvBlastActorGetGraphNodeIndices(nodeIndices, nodeCount, actor, logLL);
    NVBLAST_ASSERT(retCount == nodeCount);

    // get the mapping between support chunks and actor indices
    // this is the fastest way to tell if two node/chunks are part of the same actor
    const uint32_t* actorIndices = NvBlastFamilyGetChunkActorIndices(&m_family, logLL);
    if (actorIndices == nullptr)
    {
        return false;
    }

    // walk the visible nodes for the actor looking for bonds that broke this frame
    nvidia::NvVec3 totalForce(0.0f);
    nvidia::NvVec3 totalTorque(0.0f);
    for (uint32_t n = 0; n < nodeCount; n++)
    {
        // find bonds that broke this frame (health <= 0 but internal stress bond index is still valid)
        const uint32_t nodeIdx = nodeIndices[n];
        for (uint32_t i = m_graph.adjacencyPartition[nodeIdx]; i < m_graph.adjacencyPartition[nodeIdx + 1]; i++)
        {
            // check if the bond is broken first of all
            const uint32_t blastBondIndex = m_graph.adjacentBondIndices[i];
            if (m_bondHealths[blastBondIndex] > 0.0f)
            {
                continue;
            }

            // broken bonds that have invalid internal indices broke before this frame
            const uint32_t
internalBondIndex = m_graphProcessor->getInternalBondIndex(blastBondIndex); if (isInvalidIndex(internalBondIndex)) { continue; } // make sure the other node in the bond isn't part of the same actor // forces should only be applied due to bonds breaking between actors, not within const uint32_t chunkIdx = m_graph.chunkIndices[nodeIdx]; const uint32_t otherNodeIdx = m_graph.adjacentNodeIndices[i]; const uint32_t otherChunkIdx = m_graph.chunkIndices[otherNodeIdx]; if (!isInvalidIndex(chunkIdx) && !isInvalidIndex(otherChunkIdx) && actorIndices[chunkIdx] == actorIndices[otherChunkIdx]) { continue; } // this bond should contribute forces to the output const auto bondData = m_graphProcessor->getBondData(internalBondIndex); NVBLAST_ASSERT(blastBondIndex == bondData.blastBondIndex); uint32_t node0, node1; m_graphProcessor->getSolverInternalBondNodes(internalBondIndex, node0, node1); NVBLAST_ASSERT(bondData.node0 == internalBondData.node0 && bondData.node1 == internalBondData.node1); // accumulators for forces just from this bond nvidia::NvVec3 nvLinearPressure(0.0f); nvidia::NvVec3 nvAngularPressure(0.0f); // deal with linear forces const float excessCompression = bondData.stressNormal + m_settings.compressionFatalLimit; const float excessTension = bondData.stressNormal - m_settings.tensionFatalLimit; if (excessCompression < 0.0f) { nvLinearPressure += excessCompression * bondData.normal; } else if (excessTension > 0.0f) { // tension is in the negative direction of the linear impulse nvLinearPressure += excessTension * bondData.normal; } const float excessShear = bondData.stressShear - m_settings.shearFatalLimit; if (excessShear > 0.0f) { NvVec3 impulseLinear, impulseAngular; m_graphProcessor->getSolverInternalBondImpulses(internalBondIndex, impulseLinear, impulseAngular); const nvidia::NvVec3 shearDir = impulseLinear - impulseLinear.dot(bondData.normal)*bondData.normal; nvLinearPressure += excessShear * shearDir.getNormalized(); } if (nvLinearPressure.magnitudeSquared() > 
FLT_EPSILON)
        {
            const float* bondCenter = m_bonds[blastBondIndex].centroid;
            // BUG? NOTE(review): bondCenter[3] reads one float past the 3-component bond
            // centroid (indices 0..2) — the z component here is almost certainly meant to
            // be bondCenter[2]. As written this is an out-of-bounds read that feeds garbage
            // into the torque arm. Confirm against NvBlastBond::centroid and fix.
            const nvidia::NvVec3 forceOffset = nvidia::NvVec3(bondCenter[0], bondCenter[1], bondCenter[3]) - toNvShared(com);
            // torque = r x F, with r measured from the actor's center of mass
            const nvidia::NvVec3 torqueFromForce = forceOffset.cross(nvLinearPressure);
            nvAngularPressure += torqueFromForce;
        }

        // add the contributions from this bond to the total forces for the actor
        // multiply by the area to convert back to force from pressure
        const float bondRemainingArea = m_cachedBondHealths[blastBondIndex];
        NVBLAST_ASSERT(bondRemainingArea <= m_bonds[blastBondIndex].area);
        // sign flips so each bond is counted consistently from the lower-indexed node's side
        const float sign = otherNodeIdx > nodeIdx ? 1.0f : -1.0f;
        totalForce += nvLinearPressure * (sign*bondRemainingArea);
        totalTorque += nvAngularPressure * (sign*bondRemainingArea);
    }
}

// convert to the output format and return true if non-zero forces were accumulated
force = fromNvShared(totalForce);
torque = fromNvShared(totalTorque);
return (totalForce.magnitudeSquared() + totalTorque.magnitudeSquared()) > 0.0f;
}

// Called when a new actor appears (after a split). For actors with more than one graph
// node, refreshes each node's neighbor count, registers the actor as active, and marks
// the solver dirty so broken bonds are swept on the next initialize().
// Returns true if the actor was accepted (multi-node), false for single-node actors.
bool ExtStressSolverImpl::notifyActorCreated(const NvBlastActor& actor)
{
    const uint32_t graphNodeCount = NvBlastActorGetGraphNodeCount(&actor, logLL);
    if (graphNodeCount > 1)
    {
        // update neighbors
        {
            uint32_t* graphNodeIndices = getScratchArray<uint32_t>(graphNodeCount);
            const uint32_t nodeCount = NvBlastActorGetGraphNodeIndices(graphNodeIndices, graphNodeCount, &actor, logLL);
            for (uint32_t i = 0; i < nodeCount; ++i)
            {
                m_graphProcessor->setNodeNeighborsCount(graphNodeIndices[i], nodeCount);
            }
        }
        m_activeActors.insert(&actor);
        m_isDirty = true;
        return true;
    }
    return false;
}

// Called when an actor is destroyed; unregisters it and, if it was tracked, marks the
// solver dirty so its bonds are re-evaluated.
void ExtStressSolverImpl::notifyActorDestroyed(const NvBlastActor& actor)
{
    if (m_activeActors.erase(&actor))
    {
        m_isDirty = true;
    }
}

// Sweeps the support graph and removes solver bonds whose health has reached zero.
void ExtStressSolverImpl::removeBrokenBonds()
{
    // traverse graph and remove dead bonds
    for (uint32_t node0 = 0; node0 < m_graph.nodeCount; ++node0)
    {
        for (uint32_t adjacencyIndex = m_graph.adjacencyPartition[node0]; adjacencyIndex < m_graph.adjacencyPartition[node0 + 1];
adjacencyIndex++) { uint32_t node1 = m_graph.adjacentNodeIndices[adjacencyIndex]; if (node0 < node1) { uint32_t bondIndex = m_graph.adjacentBondIndices[adjacencyIndex]; if (m_bondHealths[bondIndex] <= 0.0f) { m_graphProcessor->removeBondIfExists(bondIndex); } } } } m_isDirty = false; } void ExtStressSolverImpl::initialize() { if (m_reset) { m_framesCount = 0; } if (m_isDirty) { removeBrokenBonds(); } if (m_settings.graphReductionLevel != m_graphProcessor->getGraphReductionLevel()) { m_graphProcessor->setGraphReductionLevel(m_settings.graphReductionLevel); } } bool ExtStressSolverImpl::addForce(const NvBlastActor& actor, NvcVec3 localPosition, NvcVec3 localForce, ExtForceMode::Enum mode) { float bestDist = FLT_MAX; uint32_t bestNode = invalidIndex<uint32_t>(); const uint32_t graphNodeCount = NvBlastActorGetGraphNodeCount(&actor, logLL); if (graphNodeCount > 1) { uint32_t* graphNodeIndices = getScratchArray<uint32_t>(graphNodeCount); const uint32_t nodeCount = NvBlastActorGetGraphNodeIndices(graphNodeIndices, graphNodeCount, &actor, logLL); for (uint32_t i = 0; i < nodeCount; ++i) { const uint32_t node = graphNodeIndices[i]; const float sqrDist = (toNvShared(localPosition) - m_graphProcessor->getNodeData(node).localPos).magnitudeSquared(); if (sqrDist < bestDist) { bestDist = sqrDist; bestNode = node; } } if (!isInvalidIndex(bestNode)) { m_graphProcessor->addNodeForce(bestNode, toNvShared(localForce), mode); return true; } } return false; } void ExtStressSolverImpl::addForce(uint32_t graphNode, NvcVec3 localForce, ExtForceMode::Enum mode) { m_graphProcessor->addNodeForce(graphNode, toNvShared(localForce), mode); } bool ExtStressSolverImpl::addGravity(const NvBlastActor& actor, NvcVec3 localGravity) { const uint32_t graphNodeCount = NvBlastActorGetGraphNodeCount(&actor, logLL); if (graphNodeCount > 1) { uint32_t* graphNodeIndices = getScratchArray<uint32_t>(graphNodeCount); const uint32_t nodeCount = NvBlastActorGetGraphNodeIndices(graphNodeIndices, graphNodeCount, 
&actor, logLL); for (uint32_t i = 0; i < nodeCount; ++i) { const uint32_t node = graphNodeIndices[i]; m_graphProcessor->addNodeForce(node, toNvShared(localGravity), ExtForceMode::ACCELERATION); } return true; } return false; } bool ExtStressSolverImpl::addCentrifugalAcceleration(const NvBlastActor& actor, NvcVec3 localCenterMass, NvcVec3 localAngularVelocity) { const uint32_t graphNodeCount = NvBlastActorGetGraphNodeCount(&actor, logLL); if (graphNodeCount > 1) { uint32_t* graphNodeIndices = getScratchArray<uint32_t>(graphNodeCount); const uint32_t nodeCount = NvBlastActorGetGraphNodeIndices(graphNodeIndices, graphNodeCount, &actor, logLL); // Apply centrifugal force for (uint32_t i = 0; i < nodeCount; ++i) { const uint32_t node = graphNodeIndices[i]; const auto& localPos = m_graphProcessor->getNodeData(node).localPos; // a = w x (w x r) const NvVec3 centrifugalAcceleration = toNvShared(localAngularVelocity) .cross(toNvShared(localAngularVelocity).cross(localPos - toNvShared(localCenterMass))); m_graphProcessor->addNodeForce(node, centrifugalAcceleration, ExtForceMode::ACCELERATION); } return true; } return false; } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // Update /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// void ExtStressSolverImpl::update() { initialize(); solve(); m_framesCount++; } void ExtStressSolverImpl::solve() { NV_SIMD_GUARD; m_graphProcessor->solve(m_settings, m_bondHealths, m_bonds, WARM_START && !m_reset); m_reset = false; m_converged = m_graphProcessor->calcError(m_errorLinear, m_errorAngular); } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // Damage /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // check if this bond is over stressed in any way and 
generate a fracture command if it is bool ExtStressSolverImpl::generateStressDamage(const NvBlastActor& actor, uint32_t bondIndex, uint32_t node0, uint32_t node1) { const float bondHealth = m_bondHealths[bondIndex]; float stressCompression, stressTension, stressShear; if (bondHealth > 0.0f && m_graphProcessor->getBondStress(bondIndex, stressCompression, stressTension, stressShear)) { // compression and tension are mutually exclusive, only one can be positive at a time since they act in opposite directions float stressMultiplier = 0.0f; if (stressCompression > m_settings.compressionElasticLimit) { const float excessStress = stressCompression - m_settings.compressionElasticLimit; const float compressionDenom = m_settings.compressionFatalLimit - m_settings.compressionElasticLimit; const float compressionMultiplier = excessStress / (compressionDenom > 0.0f ? compressionDenom : 1.0f); stressMultiplier += compressionMultiplier; } else if (stressTension > m_settings.tensionElasticLimit) { const float excessStress = stressTension - m_settings.tensionElasticLimit; const float tensionDenom = m_settings.tensionFatalLimit - m_settings.tensionElasticLimit; const float tensionMultiplier = excessStress / (tensionDenom > 0.0f ? tensionDenom : 1.0f); stressMultiplier += tensionMultiplier; } // shear can co-exist with either compression or tension so must be accounted for independently of them if (stressShear > m_settings.shearElasticLimit) { const float excessStress = stressShear - m_settings.shearElasticLimit; const float shearDenom = m_settings.shearFatalLimit - m_settings.shearElasticLimit; const float shearMultiplier = excessStress / (shearDenom > 0.0f ? 
shearDenom : 1.0f); stressMultiplier += shearMultiplier; } if (stressMultiplier > 0.0f) { // bond health/area is reduced by excess pressure to approximate micro bonds in the material breaking const float bondDamage = bondHealth * stressMultiplier; const NvBlastBondFractureData data = { 0, node0, node1, bondDamage }; m_bondFractureBuffer.pushBack(data); // cache off the current health value for this bond // so it can be used to calculate forces to apply if it breaks later NvBlastActorCacheBondHeath(&actor, bondIndex, logLL); return true; } } return false; } void ExtStressSolverImpl::fillFractureCommands(const NvBlastActor& actor, NvBlastFractureBuffers& commands) { const uint32_t graphNodeCount = NvBlastActorGetGraphNodeCount(&actor, logLL); uint32_t commandCount = 0; if (graphNodeCount > 1 && m_graphProcessor->getOverstressedBondCount() > 0) { uint32_t* graphNodeIndices = getScratchArray<uint32_t>(graphNodeCount); const uint32_t nodeCount = NvBlastActorGetGraphNodeIndices(graphNodeIndices, graphNodeCount, &actor, logLL); for (uint32_t i = 0; i < nodeCount; ++i) { const uint32_t node0 = graphNodeIndices[i]; for (uint32_t adjacencyIndex = m_graph.adjacencyPartition[node0]; adjacencyIndex < m_graph.adjacencyPartition[node0 + 1]; adjacencyIndex++) { const uint32_t node1 = m_graph.adjacentNodeIndices[adjacencyIndex]; if (node0 < node1) { const uint32_t bondIndex = m_graph.adjacentBondIndices[adjacencyIndex]; if (generateStressDamage(actor, bondIndex, node0, node1)) { commandCount++; } } } } } commands.chunkFractureCount = 0; commands.chunkFractures = nullptr; commands.bondFractureCount = commandCount; commands.bondFractures = commandCount > 0 ? 
m_bondFractureBuffer.end() - commandCount : nullptr; } void ExtStressSolverImpl::generateFractureCommands(const NvBlastActor& actor, NvBlastFractureBuffers& commands) { m_bondFractureBuffer.clear(); fillFractureCommands(actor, commands); } uint32_t ExtStressSolverImpl::generateFractureCommandsPerActor(const NvBlastActor** actorBuffer, NvBlastFractureBuffers* commandsBuffer, uint32_t bufferSize) { if (m_graphProcessor->getOverstressedBondCount() == 0) return 0; m_bondFractureBuffer.clear(); uint32_t index = 0; for (auto it = m_activeActors.getIterator(); !it.done() && index < bufferSize; ++it) { const NvBlastActor* actor = *it; NvBlastFractureBuffers& nextCommand = commandsBuffer[index]; fillFractureCommands(*actor, nextCommand); if (nextCommand.bondFractureCount > 0) { actorBuffer[index] = actor; index++; } } return index; } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // Debug Render /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// inline uint32_t NvVec4ToU32Color(const NvVec4& color) { return ((uint32_t)(color.w * 255) << 24) | // A ((uint32_t)(color.x * 255) << 16) | // R ((uint32_t)(color.y * 255) << 8) | // G ((uint32_t)(color.z * 255)); // B } static float Lerp(float v0, float v1, float val) { return v0 * (1 - val) + v1 * val; } inline float clamp01(float v) { return v < 0.0f ? 0.0f : (v > 1.0f ? 
1.0f : v); } inline NvVec4 colorConvertHSVAtoRGBA(float h, float s, float v, float a) { const float t = 6.0f * (h - std::floor(h)); const int n = (int)t; const float m = t - (float)n; const float c = 1.0f - s; const float b[6] = { 1.0f, 1.0f - s * m, c, c, 1.0f - s * (1.0f - m), 1.0f }; return NvVec4(v * b[n % 6], v * b[(n + 4) % 6], v * b[(n + 2) % 6], a); // n % 6 protects against roundoff errors } inline uint32_t bondHealthColor(float stressPct) { stressPct = clamp01(stressPct); constexpr float BOND_HEALTHY_HUE = 1.0f/3.0f; // Green constexpr float BOND_ELASTIC_HUE = 0.0f; // Red constexpr float BOND_STRESSED_HUE = 2.0f/3.0f; // Blue constexpr float BOND_FATAL_HUE = 5.0f/6.0f; // Magenta const float hue = stressPct < 0.5f ? Lerp(BOND_HEALTHY_HUE, BOND_ELASTIC_HUE, 2.0f * stressPct) : Lerp(BOND_STRESSED_HUE, BOND_FATAL_HUE, 2.0f * stressPct - 1.0f); return NvVec4ToU32Color(colorConvertHSVAtoRGBA(hue, 1.0f, 1.0f, 1.0f)); } const ExtStressSolver::DebugBuffer ExtStressSolverImpl::fillDebugRender(const uint32_t* nodes, uint32_t nodeCount, DebugRenderMode mode, float scale) { NV_UNUSED(scale); const uint32_t BOND_UNBREAKABLE_COLOR = NvVec4ToU32Color(NvVec4(0.0f, 0.682f, 1.0f, 1.0f)); ExtStressSolver::DebugBuffer debugBuffer = { nullptr, 0 }; if (m_isDirty) return debugBuffer; m_debugLineBuffer.clear(); Array<uint8_t>::type& nodesSet = m_scratch; nodesSet.resize(m_graphProcessor->getSolverNodeCount()); memset(nodesSet.begin(), 0, nodesSet.size() * sizeof(uint8_t)); for (uint32_t i = 0; i < nodeCount; ++i) { NVBLAST_ASSERT(m_graphProcessor->getNodeData(nodes[i]).solverNode < nodesSet.size()); nodesSet[m_graphProcessor->getNodeData(nodes[i]).solverNode] = 1; } const uint32_t bondCount = m_graphProcessor->getSolverBondCount(); for (uint32_t i = 0; i < bondCount; ++i) { const auto& bondData = m_graphProcessor->getBondData(i); uint32_t node0, node1; m_graphProcessor->getSolverInternalBondNodes(i, node0, node1); if (nodesSet[node0] != 0) { //NVBLAST_ASSERT(nodesSet[node1] != 
0); const auto& solverNode0 = m_graphProcessor->getSolverNodeData(node0); const auto& solverNode1 = m_graphProcessor->getSolverNodeData(node1); const NvcVec3 p0 = fromNvShared(solverNode0.mass > 0.0f ? solverNode0.localPos : bondData.centroid); const NvcVec3 p1 = fromNvShared(solverNode1.mass > 0.0f ? solverNode1.localPos : bondData.centroid); // don't render lines for broken bonds const float stressPct = m_graphProcessor->getSolverBondStressPct(i, m_bondHealths, m_settings, mode); if (stressPct >= 0.0f) { const uint32_t color = canTakeDamage(m_bondHealths[bondData.blastBondIndex]) ? bondHealthColor(stressPct) : BOND_UNBREAKABLE_COLOR; m_debugLineBuffer.pushBack(DebugLine(p0, p1, color)); } } } debugBuffer.lines = m_debugLineBuffer.begin(); debugBuffer.lineCount = m_debugLineBuffer.size(); return debugBuffer; } } // namespace Blast } // namespace Nv
68,856
C++
37.596973
181
0.586979
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/shaders/NvBlastExtDamageAcceleratorAABBTree.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. 
#include "NvBlastExtDamageAcceleratorAABBTree.h"
#include "NvBlastIndexFns.h"
#include "NvBlastAssert.h"
#include "NvVec4.h"

#include <algorithm>

using namespace nvidia;

namespace Nv
{
namespace Blast
{

///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//                                                      Creation
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////

// Factory: allocates a tree with the SDK allocator and builds it from the asset's bond data.
ExtDamageAcceleratorAABBTree* ExtDamageAcceleratorAABBTree::create(const NvBlastAsset* asset)
{
    ExtDamageAcceleratorAABBTree* tree = NVBLAST_NEW(Nv::Blast::ExtDamageAcceleratorAABBTree) ();
    tree->build(asset);
    return tree;
}

// Self-destruction through the SDK allocator (matches NVBLAST_NEW in create()).
void ExtDamageAcceleratorAABBTree::release()
{
    NVBLAST_DELETE(this, ExtDamageAcceleratorAABBTree);
}

// Builds the acceleration structure from the asset's support graph:
// one centroid point and one chunk-centroid-to-chunk-centroid segment per bond,
// then a recursive AABB tree over those primitives. Must only be called once
// (asserted via m_root == nullptr).
void ExtDamageAcceleratorAABBTree::build(const NvBlastAsset* asset)
{
    NVBLAST_ASSERT(m_root == nullptr);

    const NvBlastSupportGraph graph = NvBlastAssetGetSupportGraph(asset, logLL);
    const NvBlastBond* bonds = NvBlastAssetGetBonds(asset, logLL);
    const NvBlastChunk* chunks = NvBlastAssetGetChunks(asset, logLL);

    const uint32_t N = NvBlastAssetGetBondCount(asset, logLL);
    m_indices.resizeUninitialized(N);
    m_points.resizeUninitialized(N);
    m_segments.resizeUninitialized(N);
    m_bonds.resizeUninitialized(N);
    m_nodes.reserve(2 * N);

    for (uint32_t node0 = 0; node0 < graph.nodeCount; ++node0)
    {
        for (uint32_t j = graph.adjacencyPartition[node0]; j < graph.adjacencyPartition[node0 + 1]; ++j)
        {
            uint32_t bondIndex = graph.adjacentBondIndices[j];
            uint32_t node1 = graph.adjacentNodeIndices[j];
            // each bond appears twice in the adjacency; process it once (node0 < node1)
            if (node0 < node1)
            {
                const NvBlastBond& bond = bonds[bondIndex];
                const NvVec3& p = (reinterpret_cast<const NvVec3&>(bond.centroid));
                m_points[bondIndex] = p;
                m_indices[bondIndex] = bondIndex;
                m_bonds[bondIndex].node0 = node0;
                m_bonds[bondIndex].node1 = node1;

                // filling bond segments as a connection of 2 chunk centroids
                const uint32_t chunk0 = graph.chunkIndices[node0];
                const uint32_t chunk1 = graph.chunkIndices[node1];
                if (isInvalidIndex(chunk1))
                {
                    // for world node we don't have it's centroid, so approximate with projection on bond normal
                    m_segments[bondIndex].p0 = (reinterpret_cast<const NvVec3&>(chunks[chunk0].centroid));
                    const NvVec3 normal = (reinterpret_cast<const NvVec3&>(bond.normal));
                    m_segments[bondIndex].p1 = m_segments[bondIndex].p0 + normal * (p - m_segments[bondIndex].p0).dot(normal) * 2;
                }
                else
                {
                    m_segments[bondIndex].p0 = (reinterpret_cast<const NvVec3&>(chunks[chunk0].centroid));
                    m_segments[bondIndex].p1 = (reinterpret_cast<const NvVec3&>(chunks[chunk1].centroid));
                }
            }
        }
    }

    // m_root is taken only after all createNode() calls — m_nodes may reallocate while building
    int rootIndex = N > 0 ? createNode(0, N - 1, 0) : -1;
    m_root = rootIndex >= 0 ? &m_nodes[rootIndex] : nullptr;
}

// Recursively builds a tree node over m_indices[startIdx..endIdx] (inclusive).
// Computes both point and segment bounds, median-splits along the widest point-bounds
// axis with std::nth_element, and stops subdividing once a range fits a leaf bucket.
// Returns the new node's index in m_nodes, or -1 for an empty range.
// NOTE: 'depth' is only threaded through recursion, never read — kept for signature stability.
int ExtDamageAcceleratorAABBTree::createNode(uint32_t startIdx, uint32_t endIdx, uint32_t depth)
{
    if (startIdx > endIdx)
        return -1;

    Node node;
    node.first = startIdx;
    node.last = endIdx;

    // calc node bounds
    node.pointsBound = NvBounds3::empty();
    node.segmentsBound = NvBounds3::empty();
    for (uint32_t i = node.first; i <= node.last; i++)
    {
        const uint32_t idx = m_indices[i];
        node.pointsBound.include(m_points[idx]);
        node.segmentsBound.include(m_segments[idx].p0);
        node.segmentsBound.include(m_segments[idx].p1);
    }

    // select axis of biggest extent
    const NvVec3 ext = node.pointsBound.getExtents();
    uint32_t axis = 0;
    for (uint32_t k = 1; k < 3; k++)
    {
        if (ext[k] > ext[axis])
        {
            axis = k;
        }
    }

    // split on selected axis and partially sort around the middle
    const uint32_t mid = startIdx + (endIdx - startIdx) / 2;
    std::nth_element(m_indices.begin() + startIdx, m_indices.begin() + mid, m_indices.begin() + endIdx + 1,
        [&](uint32_t lhs, uint32_t rhs)
    {
        return m_points[lhs][axis] < m_points[rhs][axis];
    });

    // leaf bucket size: ranges of <= BUCKET bonds are not subdivided further
    const uint32_t BUCKET = 32;
    if (endIdx - startIdx > BUCKET && mid > startIdx && mid < endIdx)
    {
        node.child[0] = createNode(startIdx, mid, depth + 1);
        node.child[1] = createNode(mid + 1, endIdx, depth + 1);
    }
    else
    {
        node.child[0] = -1;
        node.child[1] = -1;
    }

    m_nodes.pushBack(node);
    return m_nodes.size() - 1;
}

///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//                                                      Queries
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////

// Entry point shared by the centroid and segment bounds queries; dispatches into the
// matching recursive traversal and flushes buffered results via callback.dispatch().
void ExtDamageAcceleratorAABBTree::findInBounds(const nvidia::NvBounds3& bounds, ResultCallback& callback, bool segments) const
{
    if (m_root)
    {
        if (segments)
            findSegmentsInBounds(*m_root, callback, bounds);
        else
            findPointsInBounds(*m_root, callback, bounds);
        callback.dispatch();
    }
}

// Recursive traversal reporting every bond whose centroid point lies inside 'bounds'.
void ExtDamageAcceleratorAABBTree::findPointsInBounds(const Node& node, ResultCallback& callback, const nvidia::NvBounds3& bounds) const
{
    if (!bounds.intersects(node.pointsBound))
    {
        return;
    }

    // if search bound contains node bound, simply add all point indexes.
    if (node.pointsBound.isInside(bounds))
    {
        for (uint32_t i = node.first; i <= node.last; i++)
            pushResult(callback, m_indices[i]);
        return; // early pruning.
    }

    if (node.child[0] < 0)
    {
        // leaf: test each centroid individually
        for (uint32_t i = node.first; i <= node.last; i++)
        {
            const uint32_t idx = m_indices[i];
            if (bounds.contains(m_points[idx]))
                pushResult(callback, idx);
        }
        return;
    }

    // check whether child nodes are in range.
    for (uint32_t c = 0; c < 2; ++c)
    {
        findPointsInBounds(m_nodes[node.child[c]], callback, bounds);
    }
}

// Recursive traversal reporting bonds whose segment has an endpoint inside 'bounds'.
// NOTE(review): the leaf test only checks the two endpoints, so a segment that merely
// passes through 'bounds' is not reported — presumably intentional (approximate query).
void ExtDamageAcceleratorAABBTree::findSegmentsInBounds(const Node& node, ResultCallback& callback, const nvidia::NvBounds3& bounds) const
{
    if (!bounds.intersects(node.segmentsBound))
    {
        return;
    }

    // if search bound contains node bound, simply add all point indexes.
    if (node.segmentsBound.isInside(bounds))
    {
        for (uint32_t i = node.first; i <= node.last; i++)
            pushResult(callback, m_indices[i]);
        return; // early pruning.
    }

    if (node.child[0] < 0)
    {
        for (uint32_t i = node.first; i <= node.last; i++)
        {
            const uint32_t idx = m_indices[i];
            if (bounds.contains(m_segments[idx].p0) || bounds.contains(m_segments[idx].p1))
                pushResult(callback, idx);
        }
        return;
    }

    // check whether child nodes are in range.
    for (uint32_t c = 0; c < 2; ++c)
    {
        findSegmentsInBounds(m_nodes[node.child[c]], callback, bounds);
    }
}

// True if segment (v1, v2) crosses plane p (endpoints strictly on opposite sides).
bool intersectSegmentPlane(const NvVec3& v1, const NvVec3& v2, const NvPlane& p)
{
    const bool s1 = p.distance(v1) > 0.f;
    const bool s2 = p.distance(v2) > 0.f;
    return (s1 && !s2) || (s2 && !s1);
}

// AABB vs plane overlap: projects the box extents onto the plane normal and compares
// against the center's signed distance (standard separating-axis test).
bool intersectBoundsPlane(const NvBounds3& b, const NvPlane& p)
{
    const NvVec3 extents = b.getExtents();
    const NvVec3 center = b.getCenter();
    float r = extents.x * NvAbs(p.n.x) + extents.y * NvAbs(p.n.y) + extents.z * NvAbs(p.n.z);
    float s = p.n.dot(center) + p.d;
    return NvAbs(s) <= r;
}

// Public query: reports every bond segment crossing 'plane', then flushes the callback.
void ExtDamageAcceleratorAABBTree::findBondSegmentsPlaneIntersected(const nvidia::NvPlane& plane, ResultCallback& resultCallback) const
{
    if (m_root)
    {
        findSegmentsPlaneIntersected(*m_root, resultCallback, plane);
        resultCallback.dispatch();
    }
}

// Recursive traversal for the plane query; prunes subtrees whose segment bounds miss the plane.
void ExtDamageAcceleratorAABBTree::findSegmentsPlaneIntersected(const Node& node, ResultCallback& callback, const nvidia::NvPlane& plane) const
{
    if (!intersectBoundsPlane(node.segmentsBound, plane))
    {
        return;
    }

    if (node.child[0] < 0)
    {
        for (uint32_t i = node.first; i <= node.last; i++)
        {
            const uint32_t idx = m_indices[i];
            if (intersectSegmentPlane(m_segments[idx].p0, m_segments[idx].p1, plane))
                pushResult(callback, idx);
        }
        return;
    }

    // check whether child nodes are in range.
    for (uint32_t c = 0; c < 2; ++c)
    {
        findSegmentsPlaneIntersected(m_nodes[node.child[c]], callback, plane);
    }
}

///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//                                                  Debug Render
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////

// Packs an RGBA NvVec4 (components in [0,1]) into a 32-bit color, byte order A|B|G|R.
inline uint32_t NvVec4ToU32Color(const NvVec4& color)
{
    uint32_t c = 0;
    c |= (int)(color.w * 255); c <<= 8;
    c |= (int)(color.z * 255); c <<= 8;
    c |= (int)(color.y * 255); c <<= 8;
    c |= (int)(color.x * 255);
    return c;
}

// Builds debug lines for the tree's boxes at the requested depth (or all depths when
// depth < 0), using either segment or point bounds. The returned buffer points into
// m_debugLineBuffer and stays valid until the next fillDebugRender() call.
Nv::Blast::DebugBuffer ExtDamageAcceleratorAABBTree::fillDebugRender(int depth, bool segments)
{
    Nv::Blast::DebugBuffer debugBuffer = { nullptr, 0 };

    m_debugLineBuffer.clear();

    if (m_root)
    {
        fillDebugBuffer(*m_root, 0, depth, segments);
    }

    debugBuffer.lines = m_debugLineBuffer.begin();
    debugBuffer.lineCount = m_debugLineBuffer.size();

    return debugBuffer;
}

// Recursively appends the 12 edges of a node's AABB (slightly shrunk per depth so
// nested boxes stay visible), then recurses into the children.
void ExtDamageAcceleratorAABBTree::fillDebugBuffer(const Node& node, int currentDepth, int depth, bool segments)
{
    if (depth < 0 || currentDepth == depth)
    {
        const NvVec4 LEAF_COLOR(1.0f, 1.0f, 1.0f, 1.0f);
        // (removed unused NON_LEAF_COLOR local — dead code)

        // draw box
        const NvBounds3 bounds = segments ? node.segmentsBound : node.pointsBound;
        const NvVec3 center = bounds.getCenter();
        const NvVec3 extents = bounds.getExtents();
        // the 4 corners {0,3,5,6} with their 3 axis-flipped neighbors enumerate all 12 edges
        const int vs[] = { 0,3,5,6 };
        for (int i = 0; i < 4; i++)
        {
            int v = vs[i];
            for (int d = 1; d < 8; d <<= 1)
            {
                // map corner bit k to -1/+1 along that axis
                auto flip = [](int x, int k) { return ((x >> k) & 1) * 2.f - 1.f; };
                const float s = std::pow(0.99f, currentDepth);
                NvVec3 p0 = center + s * extents.multiply(NvVec3(flip(v, 0), flip(v, 1), flip(v, 2)));
                NvVec3 p1 = center + s * extents.multiply(NvVec3(flip(v^d, 0), flip(v^d, 1), flip(v^d, 2)));
                m_debugLineBuffer.pushBack(Nv::Blast::DebugLine(
                    reinterpret_cast<NvcVec3&>(p0),
                    reinterpret_cast<NvcVec3&>(p1),
                    NvVec4ToU32Color(LEAF_COLOR * (1.f - (currentDepth + 1) * 0.1f)))
                );
            }
        }
    }

    for (uint32_t i = 0; i < 2; ++i)
    {
        if (node.child[i] >= 0)
        {
            fillDebugBuffer(m_nodes[node.child[i]], currentDepth + 1, depth, segments);
        }
    }
}

} // namespace Blast
} // namespace Nv
12,983
C++
33.168421
143
0.571594
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/shaders/NvBlastExtDamageAcceleratorAABBTree.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. 
#pragma once

#include "NvBlastExtDamageAcceleratorInternal.h"
#include "NvBlast.h"
#include "NvBlastArray.h"

namespace Nv
{
namespace Blast
{

/**
 * AABB-tree damage accelerator over an asset's bond data.
 *
 * Stores one centroid point and one chunk-to-chunk segment per bond, and a
 * median-split AABB tree over them, supporting bounds and plane queries used
 * by the damage shaders.  Built once from an asset via create(); destroyed
 * with release().
 */
class ExtDamageAcceleratorAABBTree final : public ExtDamageAcceleratorInternal
{
public:
    //////// ctor ////////

    ExtDamageAcceleratorAABBTree() : m_root(nullptr)
    {
    }

    virtual ~ExtDamageAcceleratorAABBTree()
    {
    }

    // Factory: allocates and builds the tree from the asset's support graph/bonds.
    static ExtDamageAcceleratorAABBTree* create(const NvBlastAsset* asset);


    //////// interface ////////

    // Self-destruction; pairs with create().
    virtual void release() override;

    // Reports all bonds whose centroid lies inside 'bounds'.
    // NOTE(review): the const_cast is redundant — findInBounds is itself const.
    virtual void findBondCentroidsInBounds(const nvidia::NvBounds3& bounds, ResultCallback& resultCallback) const override
    {
        const_cast<ExtDamageAcceleratorAABBTree*>(this)->findInBounds(bounds, resultCallback, false);
    }

    // Reports all bonds whose segment has an endpoint inside 'bounds'.
    virtual void findBondSegmentsInBounds(const nvidia::NvBounds3& bounds, ResultCallback& resultCallback) const override
    {
        const_cast<ExtDamageAcceleratorAABBTree*>(this)->findInBounds(bounds, resultCallback, true);
    }

    // Reports all bonds whose segment crosses 'plane'.
    virtual void findBondSegmentsPlaneIntersected(const nvidia::NvPlane& plane, ResultCallback& resultCallback) const override;

    // Builds debug lines for tree boxes at 'depth' (all depths when depth < 0);
    // returned buffer is valid until the next call.
    virtual Nv::Blast::DebugBuffer fillDebugRender(int depth, bool segments) override;

    // Returns a scratch buffer of at least 'size' bytes owned by this object;
    // contents are invalidated by the next call.
    virtual void* getImmediateScratch(size_t size) override
    {
        m_scratch.resizeUninitialized(size);
        return m_scratch.begin();
    }


private:
    // no copy/assignment
    ExtDamageAcceleratorAABBTree(ExtDamageAcceleratorAABBTree&);
    ExtDamageAcceleratorAABBTree& operator=(const ExtDamageAcceleratorAABBTree& tree);

    // Tree node: child indices into m_nodes (-1 = leaf), the inclusive index range
    // [first, last] into m_indices it covers, and bounds over both primitive kinds.
    struct Node
    {
        int child[2];
        uint32_t first;
        uint32_t last;
        nvidia::NvBounds3 pointsBound;
        nvidia::NvBounds3 segmentsBound;
    };

    void build(const NvBlastAsset* asset);

    int createNode(uint32_t startIdx, uint32_t endIdx, uint32_t depth);

    // Forwards a hit to the callback together with the bond's two graph nodes.
    void pushResult(ResultCallback& callback, uint32_t pointIndex) const
    {
        callback.push(pointIndex, m_bonds[pointIndex].node0, m_bonds[pointIndex].node1);
    }

    // Shared entry point for both bounds queries (segments selects segment vs centroid).
    void findInBounds(const nvidia::NvBounds3& bounds, ResultCallback& callback, bool segments) const;

    void findPointsInBounds(const Node& node, ResultCallback& callback, const nvidia::NvBounds3& bounds) const;

    void findSegmentsInBounds(const Node& node, ResultCallback& callback, const nvidia::NvBounds3& bounds) const;

    void findSegmentsPlaneIntersected(const Node& node, ResultCallback& callback, const nvidia::NvPlane& plane) const;

    void fillDebugBuffer(const Node& node, int currentDepth, int depth, bool segments);


    //////// data ////////

    Node* m_root;                               // root node (points into m_nodes), nullptr if empty

    Array<Node>::type m_nodes;                  // all tree nodes; root is the last one created
    Array<uint32_t>::type m_indices;            // bond index permutation partitioned by the tree
    Array<nvidia::NvVec3>::type m_points;       // bond centroid per bond index

    // Bond segment (chunk centroid to chunk centroid).
    struct Segment
    {
        nvidia::NvVec3 p0;
        nvidia::NvVec3 p1;
    };
    Array<Segment>::type m_segments;

    // The two support-graph node indices joined by each bond.
    struct BondData
    {
        uint32_t node0;
        uint32_t node1;
    };
    Array<BondData>::type m_bonds;

    Array<Nv::Blast::DebugLine>::type m_debugLineBuffer;    // backing store for fillDebugRender()

    Array<char>::type m_scratch;                            // backing store for getImmediateScratch()
};

} // namespace Blast
} // namespace Nv
4,860
C
31.844594
127
0.695885
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/shaders/NvBlastExtDamageShaders.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. 
#include "NvBlastExtDamageShaders.h"
#include "NvBlastExtDamageAcceleratorInternal.h"
#include "NvBlastIndexFns.h"
#include "NvBlastMath.h"
#include "NvBlastGeometry.h"
#include "NvBlastAssert.h"
#include "NvBlastFixedQueue.h"
#include "NvBlastFixedBitmap.h"
#include "NvBlast.h"
#include <cmath> // for abs() on linux
#include <new>

using namespace Nv::Blast;
using namespace Nv::Blast::VecMath;
using namespace nvidia;


///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//                                                      Profiles
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////

// A profile maps (minRadius, maxRadius, distance, baseDamage) -> applied damage.
typedef float(*ProfileFunction)(float, float, float, float);

// Linear falloff: full damage f inside min, linearly fading to 0 at max.
float falloffProfile(float min, float max, float x, float f = 1.0f)
{
    if (x > max) return 0.0f;
    if (x < min) return f;

    float y = 1.0f - (x - min) / (max - min);
    return y * f;
}

// Step profile: full damage f within [min, max], zero outside (hollow "cutter" shell).
float cutterProfile(float min, float max, float x, float f = 1.0f)
{
    if (x > max || x < min) return 0.0f;

    return f;
}


///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//                                                 Damage Functions
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////

// A damage function evaluates damage at a world position given an opaque damage desc.
typedef float(*DamageFunction)(const float pos[3], const void* damageDescBuffer);

// Radial (point-source) damage: applies profileFn to the distance from desc.position.
// DescT is assumed to expose position, minRadius, maxRadius and damage fields.
template <ProfileFunction profileFn, typename DescT = NvBlastExtRadialDamageDesc>
float pointDistanceDamage(const float pos[3], const void* damageDescBuffer)
{
    const DescT& desc = *static_cast<const DescT*>(damageDescBuffer);

    float relativePosition[3];
    sub(desc.position, pos, relativePosition);
    const float distance = sqrtf(dot(relativePosition, relativePosition));
    const float damage = profileFn(desc.minRadius, desc.maxRadius, distance, desc.damage);
    return damage;
}


// Distance from point 'p' to line segment '(a, b)'
float distanceToSegment(const float p[3], const float a[3], const float b[3])
{
    float v[3];
    sub(b, a, v);
    float w[3];
    sub(p, a, w);

    const float c1 = dot(v, w);
    if (c1 <= 0)
        return length(w);           // projection falls before 'a'

    const float c2 = dot(v, v);
    if (c2 < c1)
        return dist(p, b);          // projection falls past 'b'

    const float t = c1 / c2;
    mul(v, t);                      // v becomes the projection point relative to 'a'
    return dist(v, w);
}

// Capsule damage: applies profileFn to the distance from the segment (position0, position1).
template <ProfileFunction profileFn>
float capsuleDistanceDamage(const float pos[3], const void* damageDesc)
{
    const NvBlastExtCapsuleRadialDamageDesc& desc = *static_cast<const NvBlastExtCapsuleRadialDamageDesc*>(damageDesc);
    const float distance = distanceToSegment(pos, desc.position0, desc.position1);
    const float damage = profileFn(desc.minRadius, desc.maxRadius, distance, desc.damage);

    return damage;
}


///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//                                                  AABB Functions
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////

// A bound function computes the world-space AABB affected by a damage desc.
typedef NvBounds3(*BoundFunction)(const void* damageDesc);

// AABB of a radial (sphere) damage desc: centered at position, extent maxRadius.
NvBounds3 sphereBounds(const void* damageDesc)
{
    const NvBlastExtRadialDamageDesc& desc = *static_cast<const NvBlastExtRadialDamageDesc*>(damageDesc);
    const nvidia::NvVec3& p = (reinterpret_cast<const nvidia::NvVec3&>(desc.position));
    return nvidia::NvBounds3::centerExtents(p, nvidia::NvVec3(desc.maxRadius, desc.maxRadius, desc.maxRadius));
}

// AABB of a capsule damage desc: segment bounds fattened by maxRadius.
NvBounds3 capsuleBounds(const void* damageDesc)
{
    const NvBlastExtCapsuleRadialDamageDesc& desc = *static_cast<const NvBlastExtCapsuleRadialDamageDesc*>(damageDesc);
    const nvidia::NvVec3& p0 = (reinterpret_cast<const nvidia::NvVec3&>(desc.position0));
    const nvidia::NvVec3& p1 = (reinterpret_cast<const nvidia::NvVec3&>(desc.position1));
    NvBounds3 b = NvBounds3::empty();
    b.include(p0);
    b.include(p1);
    b.fattenFast(desc.maxRadius);
    return b;
}


///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//                                          Radial Graph Shader Template
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////

template
<DamageFunction damageFn, BoundFunction boundsFn> void RadialProfileGraphShader(NvBlastFractureBuffers* commandBuffers, const NvBlastGraphShaderActor* actor, const void* params) { const uint32_t* graphNodeIndexLinks = actor->graphNodeIndexLinks; const uint32_t firstGraphNodeIndex = actor->firstGraphNodeIndex; const uint32_t* adjacencyPartition = actor->adjacencyPartition; const uint32_t* adjacentNodeIndices = actor->adjacentNodeIndices; const uint32_t* adjacentBondIndices = actor->adjacentBondIndices; const NvBlastBond* assetBonds = actor->assetBonds; const float* familyBondHealths = actor->familyBondHealths; const NvBlastExtProgramParams* programParams = static_cast<const NvBlastExtProgramParams*>(params); uint32_t outCount = 0; auto processBondFn = [&](uint32_t bondIndex, uint32_t node0, uint32_t node1) { // skip bonds that are already broken or were visited already // TODO: investigate why testing against health > -1.0f seems slower // could reuse the island edge bitmap instead if (canTakeDamage(familyBondHealths[bondIndex])) { const NvBlastBond& bond = assetBonds[bondIndex]; const float totalBondDamage = damageFn(bond.centroid, programParams->damageDesc); if (totalBondDamage > 0.0f) { NvBlastBondFractureData& outCommand = commandBuffers->bondFractures[outCount++]; outCommand.nodeIndex0 = node0; outCommand.nodeIndex1 = node1; outCommand.health = totalBondDamage; } } }; const ExtDamageAcceleratorInternal* damageAccelerator = programParams->accelerator ? 
static_cast<const ExtDamageAcceleratorInternal*>(programParams->accelerator) : nullptr; const uint32_t ACTOR_MINIMUM_NODE_COUNT_TO_ACCELERATE = actor->assetNodeCount / 3; if (damageAccelerator && actor->graphNodeCount > ACTOR_MINIMUM_NODE_COUNT_TO_ACCELERATE) { nvidia::NvBounds3 bounds = boundsFn(programParams->damageDesc); const uint32_t CALLBACK_BUFFER_SIZE = 1000; class AcceleratorCallback : public ExtDamageAcceleratorInternal::ResultCallback { public: AcceleratorCallback(NvBlastFractureBuffers* commandBuffers, uint32_t& outCount, const NvBlastGraphShaderActor* actor, const NvBlastExtProgramParams* programParams) : ExtDamageAcceleratorInternal::ResultCallback(m_buffer, CALLBACK_BUFFER_SIZE), m_actor(actor), m_commandBuffers(commandBuffers), m_outCount(outCount), m_programParams(programParams) { } virtual void processResults(const ExtDamageAcceleratorInternal::QueryBondData* bondBuffer, uint32_t count) override { for (uint32_t i = 0; i < count; i++) { const ExtDamageAcceleratorInternal::QueryBondData& bondData = bondBuffer[i]; if (m_actor->nodeActorIndices[bondData.node0] == m_actor->actorIndex) { if (canTakeDamage(m_actor->familyBondHealths[bondData.bond])) { const NvBlastBond& bond = m_actor->assetBonds[bondData.bond]; const float totalBondDamage = damageFn(bond.centroid, m_programParams->damageDesc); if (totalBondDamage > 0.0f) { NvBlastBondFractureData& outCommand = m_commandBuffers->bondFractures[m_outCount++]; outCommand.nodeIndex0 = bondData.node0; outCommand.nodeIndex1 = bondData.node1; outCommand.health = totalBondDamage; } } } } } private: const NvBlastGraphShaderActor* m_actor; NvBlastFractureBuffers* m_commandBuffers; uint32_t& m_outCount; const NvBlastExtProgramParams* m_programParams; ExtDamageAcceleratorInternal::QueryBondData m_buffer[CALLBACK_BUFFER_SIZE]; }; AcceleratorCallback cb(commandBuffers, outCount, actor, programParams); damageAccelerator->findBondCentroidsInBounds(bounds, cb); } else { uint32_t currentNodeIndex = firstGraphNodeIndex; 
while (!Nv::Blast::isInvalidIndex(currentNodeIndex)) { for (uint32_t adj = adjacencyPartition[currentNodeIndex]; adj < adjacencyPartition[currentNodeIndex + 1]; adj++) { uint32_t adjacentNodeIndex = adjacentNodeIndices[adj]; if (currentNodeIndex < adjacentNodeIndex) { uint32_t bondIndex = adjacentBondIndices[adj]; processBondFn(bondIndex, currentNodeIndex, adjacentNodeIndex); } } currentNodeIndex = graphNodeIndexLinks[currentNodeIndex]; } } commandBuffers->bondFractureCount = outCount; commandBuffers->chunkFractureCount = 0; } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // Radial Single Shader Template /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// template <DamageFunction damageFn> void RadialProfileSubgraphShader(NvBlastFractureBuffers* commandBuffers, const NvBlastSubgraphShaderActor* actor, const void* params) { uint32_t chunkFractureCount = 0; uint32_t chunkFractureCountMax = commandBuffers->chunkFractureCount; const uint32_t chunkIndex = actor->chunkIndex; const NvBlastChunk* assetChunks = actor->assetChunks; const NvBlastChunk& chunk = assetChunks[chunkIndex]; const NvBlastExtProgramParams* programParams = static_cast<const NvBlastExtProgramParams*>(params); const float totalDamage = damageFn(chunk.centroid, programParams->damageDesc); if (totalDamage > 0.0f && chunkFractureCount < chunkFractureCountMax) { NvBlastChunkFractureData& frac = commandBuffers->chunkFractures[chunkFractureCount++]; frac.chunkIndex = chunkIndex; frac.health = totalDamage; } commandBuffers->bondFractureCount = 0; commandBuffers->chunkFractureCount = chunkFractureCount; } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // Radial Shaders Instantiation /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// 
// Radial falloff damage on bonds (point source, linear profile).
void NvBlastExtFalloffGraphShader(NvBlastFractureBuffers* commandBuffers, const NvBlastGraphShaderActor* actor, const void* params)
{
    RadialProfileGraphShader<pointDistanceDamage<falloffProfile>, sphereBounds>(commandBuffers, actor, params);
}

// Radial falloff damage on a single lower-support chunk.
void NvBlastExtFalloffSubgraphShader(NvBlastFractureBuffers* commandBuffers, const NvBlastSubgraphShaderActor* actor, const void* params)
{
    RadialProfileSubgraphShader<pointDistanceDamage<falloffProfile>>(commandBuffers, actor, params);
}

// Constant ("cutter") damage on bonds within the radius shell.
void NvBlastExtCutterGraphShader(NvBlastFractureBuffers* commandBuffers, const NvBlastGraphShaderActor* actor, const void* params)
{
    RadialProfileGraphShader<pointDistanceDamage<cutterProfile>, sphereBounds>(commandBuffers, actor, params);
}

// Constant ("cutter") damage on a single lower-support chunk.
void NvBlastExtCutterSubgraphShader(NvBlastFractureBuffers* commandBuffers, const NvBlastSubgraphShaderActor* actor, const void* params)
{
    RadialProfileSubgraphShader<pointDistanceDamage<cutterProfile>>(commandBuffers, actor, params);
}

// Capsule-shaped falloff damage on bonds.
void NvBlastExtCapsuleFalloffGraphShader(NvBlastFractureBuffers* commandBuffers, const NvBlastGraphShaderActor* actor, const void* params)
{
    RadialProfileGraphShader<capsuleDistanceDamage<falloffProfile>, capsuleBounds>(commandBuffers, actor, params);
}

// Capsule-shaped falloff damage on a single lower-support chunk.
void NvBlastExtCapsuleFalloffSubgraphShader(NvBlastFractureBuffers* commandBuffers, const NvBlastSubgraphShaderActor* actor, const void* params)
{
    RadialProfileSubgraphShader<capsuleDistanceDamage<falloffProfile>>(commandBuffers, actor, params);
}


///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//  Shear Shader
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////

// Shear damage: starting from the support node closest to desc.position, greedily walks the graph
// in the direction of desc.normal, damaging each visited node's bonds. Bond damage is the radial
// falloff value scaled by a shear factor that is largest when the bond normal is perpendicular to
// the damage direction.
void NvBlastExtShearGraphShader(NvBlastFractureBuffers* commandBuffers, const NvBlastGraphShaderActor* actor, const void* params)
{
    uint32_t chunkFractureCount = 0;
    uint32_t chunkFractureCountMax = commandBuffers->chunkFractureCount;   // input: capacity
    uint32_t bondFractureCount = 0;
    uint32_t bondFractureCountMax = commandBuffers->bondFractureCount;     // input: capacity

    const NvBlastExtProgramParams* programParams = static_cast<const NvBlastExtProgramParams*>(params);
    const NvBlastExtShearDamageDesc& desc = *static_cast<const NvBlastExtShearDamageDesc*>(programParams->damageDesc);

    const uint32_t* graphNodeIndexLinks = actor->graphNodeIndexLinks;
    const uint32_t firstGraphNodeIndex = actor->firstGraphNodeIndex;
    const uint32_t* chunkIndices = actor->chunkIndices;
    const uint32_t* adjacencyPartition = actor->adjacencyPartition;
    const uint32_t* adjacentNodeIndices = actor->adjacentNodeIndices;
    const uint32_t* adjacentBondIndices = actor->adjacentBondIndices;
    const NvBlastBond* assetBonds = actor->assetBonds;
    const NvBlastChunk* assetChunks = actor->assetChunks;
    const float* familyBondHealths = actor->familyBondHealths;
    const float* supportChunkHealths = actor->supportChunkHealths;

    // Seed the walk at the support node nearest the damage position.
    uint32_t closestNode = findClosestNode(desc.position
        , firstGraphNodeIndex, graphNodeIndexLinks
        , adjacencyPartition, adjacentNodeIndices, adjacentBondIndices
        , assetBonds, familyBondHealths
        , assetChunks, supportChunkHealths, chunkIndices);

    if (!isInvalidIndex(chunkIndices[closestNode]))
    {
        uint32_t nodeIndex = closestNode;
        float maxDist = 0.0f;
        uint32_t nextNode = invalidIndex<uint32_t>();

        // Fracture the seed chunk itself with the radial falloff amount.
        if (chunkFractureCount < chunkFractureCountMax)
        {
            const uint32_t chunkIndex = chunkIndices[nodeIndex];
            const NvBlastChunk& chunk = assetChunks[chunkIndex];
            NvBlastChunkFractureData& frac = commandBuffers->chunkFractures[chunkFractureCount++];
            frac.chunkIndex = chunkIndex;
            frac.health = pointDistanceDamage<falloffProfile, NvBlastExtShearDamageDesc>(chunk.centroid, programParams->damageDesc);
        }

        do
        {
            const uint32_t startIndex = adjacencyPartition[nodeIndex];
            const uint32_t stopIndex = adjacencyPartition[nodeIndex + 1];

            for (uint32_t adjacentNodeIndex = startIndex; adjacentNodeIndex < stopIndex; adjacentNodeIndex++)
            {
                const uint32_t neighbourIndex = adjacentNodeIndices[adjacentNodeIndex];
                const uint32_t bondIndex = adjacentBondIndices[adjacentNodeIndex];
                const NvBlastBond& bond = assetBonds[bondIndex];

                if (!canTakeDamage(familyBondHealths[bondIndex]))
                    continue;

                // Shear factor in [0, 1]: 0 when bond normal is (anti)parallel to the damage
                // direction, 1 when perpendicular.
                float shear = 1 * std::abs(1 - std::abs(VecMath::dot(desc.normal, bond.normal)));

                // Track the neighbour whose bond centroid lies farthest ahead along desc.normal;
                // it becomes the next node of the greedy walk.
                float d[3]; VecMath::sub(bond.centroid, desc.position, d);
                float ahead = VecMath::dot(d, desc.normal);
                if (ahead > maxDist)
                {
                    maxDist = ahead;
                    nextNode = neighbourIndex;
                }

                const float damage = pointDistanceDamage<falloffProfile, NvBlastExtShearDamageDesc>(bond.centroid, programParams->damageDesc);
                if (damage > 0.0f && bondFractureCount < bondFractureCountMax)
                {
                    NvBlastBondFractureData& frac = commandBuffers->bondFractures[bondFractureCount++];
                    frac.userdata = bond.userData;
                    frac.nodeIndex0 = nodeIndex;
                    frac.nodeIndex1 = neighbourIndex;
                    frac.health = shear * damage;
                }
            }

            // Stop when no neighbour advances the walk (self-selection) or none was found.
            if (nodeIndex == nextNode)
                break;

            nodeIndex = nextNode;
        } while (!isInvalidIndex(nextNode));
    }

    commandBuffers->bondFractureCount = bondFractureCount;
    commandBuffers->chunkFractureCount = chunkFractureCount;
}

// Shear damage on a single lower-support chunk: plain radial falloff at the chunk centroid.
void NvBlastExtShearSubgraphShader(NvBlastFractureBuffers* commandBuffers, const NvBlastSubgraphShaderActor* actor, const void* params)
{
    RadialProfileSubgraphShader<pointDistanceDamage<falloffProfile, NvBlastExtShearDamageDesc>>(commandBuffers, actor, params);
}


///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//  Triangle Intersection Damage
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////

#define SMALL_NUMBER    (1.e-4f)

// Returns true if segment (p, q) crosses triangle (a, b, c). trianglePlane must be the plane of
// the triangle; the barycentric test uses signed areas against the plane normal.
bool intersectSegmentTriangle(const NvVec3& p, const NvVec3& q, const NvVec3& a, const NvVec3& b, const NvVec3& c, const NvPlane& trianglePlane)
{
    const NvVec3 N = trianglePlane.n;
    const float D = trianglePlane.d;

    NvVec3 intersectPoint;
    float t = (-D - (p.dot(N))) / ((q - p).dot(N));

    // If the parameter value is not between 0 and 1, there is no intersection
    if (t > -SMALL_NUMBER && t < 1.f + SMALL_NUMBER)
    {
        intersectPoint = p + t * (q - p);
    }
    else
    {
        return false;
    }

    // Compute the normal of the triangle
    const NvVec3 TriNorm = (b - a).cross(c - a);

    // Compute twice area of triangle ABC
    const float AreaABCInv = 1.0f / (N.dot(TriNorm));

    // Compute v contribution
    const float AreaPBC = N.dot((b - intersectPoint).cross(c - intersectPoint));
    const float v = AreaPBC * AreaABCInv;
    if (v <= 0.f)
        return false;

    // Compute w contribution
    const float AreaPCA = N.dot((c - intersectPoint).cross(a - intersectPoint));
    const float w = AreaPCA * AreaABCInv;
    if (w <= 0.f)
        return false;

    const float u = 1.0f - v - w;
    return u > 0.f;
}

// Graph shader: breaks every bond whose chunk-to-chunk segment crosses the damage triangle
// (desc.position0/1/2), applying desc.damage. For world-chunk neighbours (invalid chunk index),
// the segment endpoint is the projection of the bond centroid onto the bond normal through c0.
// Uses the accelerator's plane query when available and worthwhile, otherwise walks the graph.
void NvBlastExtTriangleIntersectionGraphShader(NvBlastFractureBuffers* commandBuffers, const NvBlastGraphShaderActor* actor, const void* params)
{
    const uint32_t* graphNodeIndexLinks = actor->graphNodeIndexLinks;
    const uint32_t firstGraphNodeIndex = actor->firstGraphNodeIndex;
    const uint32_t* adjacencyPartition = actor->adjacencyPartition;
    const uint32_t* adjacentNodeIndices = actor->adjacentNodeIndices;
    const uint32_t* adjacentBondIndices = actor->adjacentBondIndices;
    const NvBlastBond* assetBonds = actor->assetBonds;
    const NvBlastChunk* assetChunks = actor->assetChunks;
    const uint32_t* chunkIndices = actor->chunkIndices;
    const float* familyBondHealths = actor->familyBondHealths;
    const NvBlastExtProgramParams* programParams = static_cast<const NvBlastExtProgramParams*>(params);
    const NvBlastExtTriangleIntersectionDamageDesc& desc = *static_cast<const NvBlastExtTriangleIntersectionDamageDesc*>(programParams->damageDesc);

    const nvidia::NvVec3& t0 = (reinterpret_cast<const nvidia::NvVec3&>(desc.position0));
    const nvidia::NvVec3& t1 = (reinterpret_cast<const nvidia::NvVec3&>(desc.position1));
    const nvidia::NvVec3& t2 = (reinterpret_cast<const nvidia::NvVec3&>(desc.position2));
    const NvPlane trianglePlane(t0, t1, t2);

    uint32_t outCount = 0;

    const ExtDamageAcceleratorInternal* damageAccelerator = programParams->accelerator ?
        static_cast<const ExtDamageAcceleratorInternal*>(programParams->accelerator) : nullptr;

    // Accelerated spatial query only pays off when the actor owns a sizable fraction of the graph.
    const uint32_t ACTOR_MINIMUM_NODE_COUNT_TO_ACCELERATE = actor->assetNodeCount / 3;

    if (damageAccelerator && actor->graphNodeCount > ACTOR_MINIMUM_NODE_COUNT_TO_ACCELERATE)
    {
        const uint32_t CALLBACK_BUFFER_SIZE = 1000;

        // Receives candidate bonds whose segments intersect the triangle's plane; performs the
        // exact segment/triangle test and emits fracture commands for hits on this actor's bonds.
        class AcceleratorCallback : public ExtDamageAcceleratorInternal::ResultCallback
        {
        public:
            AcceleratorCallback(NvBlastFractureBuffers* commandBuffers, uint32_t& outCount, const NvBlastGraphShaderActor* actor, const NvBlastExtTriangleIntersectionDamageDesc& desc) :
                ExtDamageAcceleratorInternal::ResultCallback(m_buffer, CALLBACK_BUFFER_SIZE),
                m_actor(actor),
                m_commandBuffers(commandBuffers),
                m_outCount(outCount),
                m_desc(desc)
            {
            }

            virtual void processResults(const ExtDamageAcceleratorInternal::QueryBondData* bondBuffer, uint32_t count) override
            {
                const nvidia::NvVec3& t0 = (reinterpret_cast<const nvidia::NvVec3&>(m_desc.position0));
                const nvidia::NvVec3& t1 = (reinterpret_cast<const nvidia::NvVec3&>(m_desc.position1));
                const nvidia::NvVec3& t2 = (reinterpret_cast<const nvidia::NvVec3&>(m_desc.position2));
                const NvPlane trianglePlane(t0, t1, t2);

                for (uint32_t i = 0; i < count; i++)
                {
                    const ExtDamageAcceleratorInternal::QueryBondData& bondData = bondBuffer[i];

                    // The accelerator indexes the whole asset; keep only bonds owned by this actor.
                    if (m_actor->nodeActorIndices[bondData.node0] == m_actor->actorIndex)
                    {
                        if (canTakeDamage(m_actor->familyBondHealths[bondData.bond]))
                        {
                            const NvBlastBond& bond = m_actor->assetBonds[bondData.bond];

                            const uint32_t chunkIndex0 = m_actor->chunkIndices[bondData.node0];
                            const uint32_t chunkIndex1 = m_actor->chunkIndices[bondData.node1];
                            const nvidia::NvVec3& c0 = (reinterpret_cast<const nvidia::NvVec3&>(m_actor->assetChunks[chunkIndex0].centroid));
                            const NvVec3& normal = (reinterpret_cast<const NvVec3&>(bond.normal));
                            const NvVec3& bondCentroid = (reinterpret_cast<const NvVec3&>(bond.centroid));

                            // World-chunk neighbour has no centroid: synthesize the far endpoint by
                            // projecting the bond centroid onto the bond normal through c0.
                            const nvidia::NvVec3& c1 = isInvalidIndex(chunkIndex1) ?
                                (c0 + normal * (bondCentroid - c0).dot(normal)) :
                                (reinterpret_cast<const nvidia::NvVec3&>(m_actor->assetChunks[chunkIndex1].centroid));

                            if(intersectSegmentTriangle(c0, c1, t0, t1, t2, trianglePlane))
                            {
                                NvBlastBondFractureData& outCommand = m_commandBuffers->bondFractures[m_outCount++];
                                outCommand.nodeIndex0 = bondData.node0;
                                outCommand.nodeIndex1 = bondData.node1;
                                outCommand.health = m_desc.damage;
                            }
                        }
                    }
                }
            }

        private:
            const NvBlastGraphShaderActor* m_actor;
            NvBlastFractureBuffers* m_commandBuffers;
            uint32_t& m_outCount;       // accumulates into the enclosing function's outCount
            const NvBlastExtTriangleIntersectionDamageDesc& m_desc;

            ExtDamageAcceleratorInternal::QueryBondData m_buffer[CALLBACK_BUFFER_SIZE];
        };

        AcceleratorCallback cb(commandBuffers, outCount, actor, desc);

        damageAccelerator->findBondSegmentsPlaneIntersected(trianglePlane, cb);
    }
    else
    {
        // Fallback: linear walk over the actor's graph nodes and their adjacency lists.
        uint32_t currentNodeIndex = firstGraphNodeIndex;
        while (!Nv::Blast::isInvalidIndex(currentNodeIndex))
        {
            for (uint32_t adj = adjacencyPartition[currentNodeIndex]; adj < adjacencyPartition[currentNodeIndex + 1]; adj++)
            {
                uint32_t adjacentNodeIndex = adjacentNodeIndices[adj];

                // Process each bond once: only from the lower-indexed node of the pair.
                if (currentNodeIndex < adjacentNodeIndex)
                {
                    uint32_t bondIndex = adjacentBondIndices[adj];

                    // skip bonds that are already broken or were visited already
                    // TODO: investigate why testing against health > -1.0f seems slower
                    // could reuse the island edge bitmap instead
                    if (canTakeDamage(familyBondHealths[bondIndex]))
                    {
                        const NvBlastBond& bond = assetBonds[bondIndex];

                        const uint32_t chunkIndex0 = chunkIndices[currentNodeIndex];
                        const uint32_t chunkIndex1 = chunkIndices[adjacentNodeIndex];
                        const nvidia::NvVec3& c0 = (reinterpret_cast<const nvidia::NvVec3&>(assetChunks[chunkIndex0].centroid));
                        const NvVec3& normal = (reinterpret_cast<const NvVec3&>(bond.normal));
                        const NvVec3& bondCentroid = (reinterpret_cast<const NvVec3&>(bond.centroid));

                        // Same world-chunk endpoint synthesis as in the accelerated path.
                        const nvidia::NvVec3& c1 = isInvalidIndex(chunkIndex1) ?
                            (c0 + normal * (bondCentroid - c0).dot(normal)) :
                            (reinterpret_cast<const nvidia::NvVec3&>(assetChunks[chunkIndex1].centroid));

                        if (intersectSegmentTriangle(c0, c1, t0, t1, t2, trianglePlane))
                        {
                            NvBlastBondFractureData& outCommand = commandBuffers->bondFractures[outCount++];
                            outCommand.nodeIndex0 = currentNodeIndex;
                            outCommand.nodeIndex1 = adjacentNodeIndex;
                            outCommand.health = desc.damage;
                        }
                    }
                }
            }
            currentNodeIndex = graphNodeIndexLinks[currentNodeIndex];
        }
    }

    commandBuffers->bondFractureCount = outCount;
    commandBuffers->chunkFractureCount = 0;
}

// Subgraph shader: fractures the chunk if any segment between consecutive child chunk centroids
// crosses the damage triangle.
// NOTE(review): reads assetChunks[subChunkIndex + 1] where subChunkIndex can reach
// childIndexStop - 1; whether that index is always valid depends on the asset chunk layout —
// confirm against the NvBlastChunk contract.
void NvBlastExtTriangleIntersectionSubgraphShader(NvBlastFractureBuffers* commandBuffers, const NvBlastSubgraphShaderActor* actor, const void* params)
{
    uint32_t chunkFractureCount = 0;
    uint32_t chunkFractureCountMax = commandBuffers->chunkFractureCount;   // input: capacity
    const uint32_t chunkIndex = actor->chunkIndex;
    const NvBlastChunk* assetChunks = actor->assetChunks;
    const NvBlastChunk& chunk = assetChunks[chunkIndex];
    const NvBlastExtProgramParams* programParams = static_cast<const NvBlastExtProgramParams*>(params);
    const NvBlastExtTriangleIntersectionDamageDesc& desc = *static_cast<const NvBlastExtTriangleIntersectionDamageDesc*>(programParams->damageDesc);

    const nvidia::NvVec3& t0 = (reinterpret_cast<const nvidia::NvVec3&>(desc.position0));
    const nvidia::NvVec3& t1 = (reinterpret_cast<const nvidia::NvVec3&>(desc.position1));
    const nvidia::NvVec3& t2 = (reinterpret_cast<const nvidia::NvVec3&>(desc.position2));
    const NvPlane trianglePlane(t0, t1, t2);

    for (uint32_t subChunkIndex = chunk.firstChildIndex; subChunkIndex < chunk.childIndexStop; subChunkIndex++)
    {
        const nvidia::NvVec3& c0 = (reinterpret_cast<const nvidia::NvVec3&>(assetChunks[subChunkIndex].centroid));
        const nvidia::NvVec3& c1 = (reinterpret_cast<const nvidia::NvVec3&>(assetChunks[subChunkIndex + 1].centroid));
        if (chunkFractureCount < chunkFractureCountMax && intersectSegmentTriangle(c0, c1, t0, t1, t2, trianglePlane))
        {
            NvBlastChunkFractureData& frac = commandBuffers->chunkFractures[chunkFractureCount++];
            frac.chunkIndex = chunkIndex;
            frac.health = desc.damage;
            break;  // one fracture command per chunk is enough
        }
    }

    commandBuffers->bondFractureCount = 0;
    commandBuffers->chunkFractureCount = chunkFractureCount;
}


///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//  Impact Spread Shader
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////

// Impact spread: damage radiates from the support node nearest desc.position through the bond
// graph, with falloff applied to the distance accumulated along graph edges (not straight-line
// distance). Requires a damage accelerator, whose immediate scratch memory backs the BFS queue
// and visited-bond bitmap.
void NvBlastExtImpactSpreadGraphShader(NvBlastFractureBuffers* commandBuffers, const NvBlastGraphShaderActor* actor, const void* params)
{
    uint32_t bondFractureCount = 0;
    uint32_t bondFractureCountMax = commandBuffers->bondFractureCount;     // input: capacity

    const NvBlastExtProgramParams* programParams = static_cast<const NvBlastExtProgramParams*>(params);
    const NvBlastExtImpactSpreadDamageDesc& desc = *static_cast<const NvBlastExtImpactSpreadDamageDesc*>(programParams->damageDesc);

    const uint32_t* graphNodeIndexLinks = actor->graphNodeIndexLinks;
    const uint32_t firstGraphNodeIndex = actor->firstGraphNodeIndex;
    const uint32_t* chunkIndices = actor->chunkIndices;
    const uint32_t* adjacencyPartition = actor->adjacencyPartition;
    const uint32_t* adjacentNodeIndices = actor->adjacentNodeIndices;
    const uint32_t* adjacentBondIndices = actor->adjacentBondIndices;
    const NvBlastBond* assetBonds = actor->assetBonds;
    const NvBlastChunk* assetChunks = actor->assetChunks;
    const float* familyBondHealths = actor->familyBondHealths;
    const float* supportChunkHealths = actor->supportChunkHealths;

    // Find nearest chunk.
    uint32_t closestNode = findClosestNode(desc.position
        , firstGraphNodeIndex, graphNodeIndexLinks
        , adjacencyPartition, adjacentNodeIndices, adjacentBondIndices
        , assetBonds, familyBondHealths
        , assetChunks, supportChunkHealths, chunkIndices);

    // Breadth-first support graph traversal. For radial falloff metric distance is measured along
    // the edges of the graph
    ExtDamageAcceleratorInternal* damageAccelerator = programParams->accelerator ? static_cast<ExtDamageAcceleratorInternal*>(programParams->accelerator) : nullptr;
    NVBLAST_ASSERT_WITH_MESSAGE(damageAccelerator, "This shader requires damage accelerator passed");
    if (!isInvalidIndex(chunkIndices[closestNode]) && damageAccelerator)
    {
        // BFS work item: a graph node and the edge-path distance at which it was reached.
        struct NodeData
        {
            uint32_t index;
            float distance;
        };

        // Calculating scratch size and requesting it from the accelerator
        const uint32_t bondCount = actor->adjacencyPartition[actor->assetNodeCount];
        const size_t nodeQueueSize = align16(FixedQueue<NodeData>::requiredMemorySize(actor->graphNodeCount));
        const size_t visitedBitmapSize = align16(FixedBitmap::requiredMemorySize(bondCount));
        const size_t scratchSize = 16 + nodeQueueSize + visitedBitmapSize;
        void* scratch = damageAccelerator->getImmediateScratch(scratchSize);

        // prepare intermediate data on scratch
        scratch = (void*)align16((size_t)scratch); // Bump to 16-byte alignment
        FixedQueue<NodeData>* nodeQueue = new (scratch)FixedQueue<NodeData>(actor->graphNodeCount);
        scratch = pointerOffset(scratch, align16(nodeQueueSize));
        FixedBitmap* visitedBitmap = new (scratch)FixedBitmap(bondCount);
        scratch = pointerOffset(scratch, align16(FixedBitmap::requiredMemorySize(bondCount)));

        // initialize traversal
        nodeQueue->pushBack({ closestNode, 0.f });
        visitedBitmap->clear();

        while (!nodeQueue->empty())
        {
            NodeData currentNode = nodeQueue->popFront();
            const uint32_t startIndex = adjacencyPartition[currentNode.index];
            const uint32_t stopIndex = adjacencyPartition[currentNode.index + 1];

            for (uint32_t adjacentNodeIndex = startIndex; adjacentNodeIndex < stopIndex; adjacentNodeIndex++)
            {
                const uint32_t neighbourIndex = adjacentNodeIndices[adjacentNodeIndex];
                const uint32_t bondIndex = adjacentBondIndices[adjacentNodeIndex];
                const NvBlastBond& bond = assetBonds[bondIndex];
                const NvVec3& bondCentroid = (reinterpret_cast<const NvVec3&>(bond.centroid));

                if (!canTakeDamage(familyBondHealths[bondIndex]))
                    continue;
                if (visitedBitmap->test(bondIndex))
                    continue;
                visitedBitmap->set(bondIndex);

                const uint32_t chunkIndex0 = chunkIndices[currentNode.index];
                const uint32_t chunkIndex1 = chunkIndices[neighbourIndex];
                const nvidia::NvVec3& c0 = reinterpret_cast<const nvidia::NvVec3&>(assetChunks[chunkIndex0].centroid);

                // World-chunk neighbour: use the bond centroid as the far endpoint and double the
                // edge length (the centroid sits roughly halfway to the missing chunk).
                bool isNeighbourWorldChunk = isInvalidIndex(chunkIndex1);
                const nvidia::NvVec3& c1 = isNeighbourWorldChunk ? bondCentroid : (reinterpret_cast<const nvidia::NvVec3&>(assetChunks[chunkIndex1].centroid));

                const float distance = (c1 - c0).magnitude() * (isNeighbourWorldChunk ? 2.f : 1.f);
                float totalDistance = currentNode.distance + distance;
                float totalDamage = desc.damage * falloffProfile(desc.minRadius, desc.maxRadius, totalDistance);
                if (totalDamage > 0.0f && bondFractureCount < bondFractureCountMax)
                {
                    NvBlastBondFractureData& frac = commandBuffers->bondFractures[bondFractureCount++];
                    frac.userdata = bond.userData;
                    frac.nodeIndex0 = currentNode.index;
                    frac.nodeIndex1 = neighbourIndex;
                    frac.health = totalDamage;

                    // World chunks are terminal: the spread cannot continue through them.
                    if (!isNeighbourWorldChunk)
                    {
                        nodeQueue->pushBack({ neighbourIndex, totalDistance });
                    }
                }
            }
        }
    }

    commandBuffers->bondFractureCount = bondFractureCount;
    commandBuffers->chunkFractureCount = 0;
}

// Impact spread on a single lower-support chunk: applies the full desc.damage unconditionally.
void NvBlastExtImpactSpreadSubgraphShader(NvBlastFractureBuffers* commandBuffers, const NvBlastSubgraphShaderActor* actor, const void* params)
{
    uint32_t chunkFractureCount = 0;
    uint32_t chunkFractureCountMax = commandBuffers->chunkFractureCount;   // input: capacity
    const uint32_t chunkIndex = actor->chunkIndex;
    const NvBlastExtProgramParams* programParams = static_cast<const NvBlastExtProgramParams*>(params);
    const NvBlastExtImpactSpreadDamageDesc& desc = *static_cast<const NvBlastExtImpactSpreadDamageDesc*>(programParams->damageDesc);

    if (chunkFractureCount < chunkFractureCountMax)
    {
        NvBlastChunkFractureData& frac = commandBuffers->chunkFractures[chunkFractureCount++];
        frac.chunkIndex = chunkIndex;
        frac.health = desc.damage;
    }

    commandBuffers->bondFractureCount = 0;
    commandBuffers->chunkFractureCount = chunkFractureCount;
}
36,490
C++
47.460823
185
0.626528
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/serialization/NvBlastExtSerializationInternal.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved. 
#pragma once #include "NvBlastExtSerialization.h" #include <cstring> #define ExtSerializerBoilerplate(_name, _description, _objectTypeID, _encodingID) \ virtual const char* getName() const override { return _name; } \ virtual const char* getDescription() const override { return _description; } \ virtual uint32_t getObjectTypeID() const override { return _objectTypeID; } \ virtual uint32_t getEncodingID() const override { return _encodingID; } #define ExtSerializerReadOnly(_name) \ virtual bool isReadOnly() const override { return true; } \ virtual uint64_t serializeIntoBuffer \ ( \ void*& buffer, \ ExtSerialization::BufferProvider& bufferProvider, \ const void* object, \ uint64_t offset = 0 \ ) override \ { \ NVBLAST_LOG_WARNING(#_name "::serializeIntoBuffer: serializer is read-only."); \ NV_UNUSED(buffer); \ NV_UNUSED(bufferProvider); \ NV_UNUSED(object); \ NV_UNUSED(offset); \ return 0; \ } #define ExtSerializerDefaultFactoryAndRelease(_classname) \ static ExtSerializer* create() \ { \ return NVBLAST_NEW(_classname) (); \ } \ virtual void release() override \ { \ NVBLAST_DELETE(this, _classname); \ } namespace Nv { namespace Blast { /** Serializer internal interface */ class ExtSerializer { public: virtual ~ExtSerializer() {} /** return the name of this serializer. */ virtual const char* getName() const = 0; /** return a description of this serializer. */ virtual const char* getDescription() const = 0; /** return an identifier for the type of object handled. */ virtual uint32_t getObjectTypeID() const = 0; /** return an identifier for serialization format. */ virtual uint32_t getEncodingID() const = 0; /** Whether or not this serializer supports writing. Legacy formats, for example, may not. \return true iff this serialization does not support writing. */ virtual bool isReadOnly() const { return false; } /** Deserialize from a buffer into a newly allocated object. \param[in] buffer Pointer to the buffer to read. \param[in] size Size of the buffer to read. 
\return object pointer; returns null if failed to deserialize. */ virtual void* deserializeFromBuffer(const void* buffer, uint64_t size) = 0; /** Serialize into a buffer. Allocates the buffer internally using the ExtSerialization::BufferProvider callack interface. \param[out] buffer Pointer to the buffer created. \param[in] bufferProvider The buffer provider callback interface to use. \param[in] object Object pointer. \return the number of bytes serialized into the buffer (zero if unsuccessful). */ virtual uint64_t serializeIntoBuffer(void*& buffer, ExtSerialization::BufferProvider& bufferProvider, const void* object, uint64_t offset = 0) = 0; /** Release the serializer and free associated memory. */ virtual void release() = 0; }; /** Internal serialization manager interface */ class ExtSerializationInternal : public ExtSerialization { public: /** Internal interfaces to register and unregister a serializer, used by modules to automatically register all of their serializers with a serialization manager. */ virtual bool registerSerializer(ExtSerializer& serializer) = 0; virtual bool unregisterSerializer(ExtSerializer& serializer) = 0; /** Find a registered serializer for the given object type and encoding. \param[in] objectTypeID ID for the requested object type. \param[in] encodingID ID for the requested encoding (see EncodingID). \return a registered serializer if found, NULL otherwise. 
*/ virtual ExtSerializer* findSerializer(uint32_t objectTypeID, uint32_t encodingID) = 0; //// Enums //// enum { HeaderSize = 128 }; }; template<typename Factory, size_t N> size_t ExtSerializationLoadSet(Nv::Blast::ExtSerializationInternal& serialization, Factory(&factories)[N]) { size_t count = 0; for (auto f : factories) { Nv::Blast::ExtSerializer* serializer = f(); if (serializer != nullptr) { if (serialization.registerSerializer(*serializer)) { ++count; } else { NVBLAST_LOG_ERROR("Nv::Blast::ExtSerializationLoadSet: failed to register serailizer:"); NVBLAST_LOG_ERROR(serializer->getName()); serializer->release(); } } else { NVBLAST_LOG_ERROR("Nv::Blast::ExtSerializationLoadSet: failed to create serailizer."); } } return count; } class ExtIStream { public: enum Flags { LittleEndian = (1 << 0), Fail = (1 << 1) }; ExtIStream(const void* buffer, size_t size) : m_buf(reinterpret_cast<const char*>(buffer)), m_flags(0) { m_cur = m_buf; m_end = m_buf + size; const uint16_t x = LittleEndian; m_flags = *reinterpret_cast<const char*>(&x); } bool advance(ptrdiff_t diff) { m_cur += diff; if (m_cur < m_buf) { m_cur = m_buf; m_flags |= Fail; return false; } else if (m_cur > m_end) { m_cur = m_end; m_flags |= Fail; return false; } return true; } const void* view() { return m_cur; } bool read(void* buffer, size_t size) { if (!canRead(size)) return false; std::memcpy(buffer, m_cur, size); m_cur += size; return true; } size_t tellg() const { return m_cur - m_buf; } size_t left() const { return m_end - m_cur; } bool eof() const { return m_cur >= m_end; } bool fail() const { return (m_flags & Fail) != 0; } private: const char* m_buf; const char* m_cur; const char* m_end; uint32_t m_flags; bool isLittleEndian() const { return (m_flags & LittleEndian) != 0; } bool canRead(size_t size) const { return m_cur + size <= m_end; } template<typename T> friend ExtIStream& operator >> (ExtIStream& s, T& x); }; template<typename T> NV_INLINE ExtIStream& operator >> (ExtIStream& s, T& x) { if 
(s.canRead(sizeof(T))) { if (s.isLittleEndian()) { x = *reinterpret_cast<const T*>(s.m_cur); s.m_cur += sizeof(T); } else { char* b = reinterpret_cast<char*>(&x) + sizeof(T); for (size_t n = sizeof(T); n--;) *--b = *s.m_cur++; } } else { s.m_flags |= ExtIStream::Fail; } return s; } } // namespace Blast } // namespace Nv
9,883
C
32.965636
154
0.540828
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/serialization/NvBlastExtTkSerializerRAW.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved. 
#include "NvBlastExtSerializationInternal.h"
#include "NvBlastTkFramework.h"
#include "NvBlastTkAsset.h"
#include "NvBlast.h"

#include <cstring>
#include <vector>


namespace Nv
{
namespace Blast
{

// Legacy IDs
struct ExtTkSerializationLegacyID
{
    enum Enum
    {
        Framework = NVBLAST_FOURCC('T', 'K', 'F', 'W'), //!< TkFramework identifier token, used in serialization
        Asset =     NVBLAST_FOURCC('A', 'S', 'S', 'T'), //!< TkAsset identifier token, used in serialization
        Family =    NVBLAST_FOURCC('A', 'C', 'T', 'F'), //!< TkFamily identifier token, used in serialization
    };
};


// Legacy object format versions
struct ExtTkSerializationLegacyAssetVersion
{
    enum Enum
    {
        /** Initial version */
        Initial,

        //  New formats must come before Count.  They should be given descriptive names with more information in comments.

        /** The number of serialized formats. */
        Count,

        /** The current version.  This should always be Count-1 */
        Current = Count - 1
    };
};

struct ExtTkSerializationLegacyFamilyVersion
{
    enum Enum
    {
        /** Initial version */
        Initial,

        //  New formats must come before Count.  They should be given descriptive names with more information in comments.

        /** The number of serialized formats. */
        Count,

        /** The current version.  This should always be Count-1 */
        Current = Count - 1
    };
};


/**
Read the common legacy BlastTk object header from the stream.

\param[out] legacyTypeID    Object class ID read from the stream.
\param[out] legacyVersion   Object class format version read from the stream.
\param[out] objID           Serialized object GUID.
\param[out] userIntData     Serialized 64-bit user data (stored as two 32-bit halves, low half first).
\param[in,out] stream       Input stream to read from.

\return true iff the header was read successfully.
*/
static bool deserializeTkObjectHeader(uint32_t& legacyTypeID, uint32_t& legacyVersion, NvBlastID& objID, uint64_t& userIntData, ExtIStream& stream)
{
    // Read framework ID
    uint32_t fwkID = 0; // Initialize to silence some compilers
    stream >> fwkID;
    if (fwkID != ExtTkSerializationLegacyID::Framework)
    {
        NVBLAST_LOG_ERROR("deserializeTkObjectHeader: stream does not contain a BlastTk legacy object.");
        return false;
    }

    // Read object class ID
    stream >> legacyTypeID;

    // Read object class version and ensure it's current
    stream >> legacyVersion;

    // Object ID
    stream.read(objID.data, sizeof(NvBlastID));

    // Serializable user data
    uint32_t lsd, msd;
    stream >> lsd >> msd;
    userIntData = static_cast<uint64_t>(msd) << 32 | static_cast<uint64_t>(lsd);

    return !stream.fail();
}


/**
Deserialize a legacy-format TkAsset from the given stream.

\param[in,out]  stream      Input stream positioned at a legacy BlastTk asset.
\param[in]      framework   TkFramework used to create the asset.

\return the deserialized asset, or nullptr on any error (logged).
*/
TkAsset* deserializeTkAsset(ExtIStream& stream, TkFramework& framework)
{
    // Deserialize header
    uint32_t legacyTypeID;
    uint32_t legacyVersion;
    NvBlastID objID;
    uint64_t userIntData;
    if (!deserializeTkObjectHeader(legacyTypeID, legacyVersion, objID, userIntData, stream))
    {
        return nullptr;
    }

    if (legacyTypeID != ExtTkSerializationLegacyID::Asset)
    {
        NVBLAST_LOG_ERROR("deserializeTkAsset: stream does not contain a BlastTk legacy asset.");
        return nullptr;
    }

    if (legacyVersion > ExtTkSerializationLegacyAssetVersion::Current)
    {
        NVBLAST_LOG_ERROR("deserializeTkAsset: stream contains a BlastTk legacy asset which is in an unknown version.");
        return nullptr;
    }

    // LL asset
    uint32_t assetSize;
    stream >> assetSize;
    NvBlastAsset* llAsset = static_cast<NvBlastAsset*>(NVBLAST_ALLOC_NAMED(assetSize, "deserializeTkAsset"));
    // Fix: ExtIStream::read does not set the stream's Fail flag on underflow, so the
    // stream.fail() check further below would not catch a truncated asset blob.  Check
    // the return value directly to avoid passing uninitialized memory to createAsset.
    if (!stream.read(reinterpret_cast<char*>(llAsset), assetSize))
    {
        NVBLAST_LOG_ERROR("deserializeTkAsset: stream too short to contain the serialized low-level asset.");
        NVBLAST_FREE(llAsset);
        return nullptr;
    }

    // Joint descs
    uint32_t jointDescCount;
    stream >> jointDescCount;
    // Sanity check before allocating: each joint desc occupies 2 uint32_t + 6 float = 32 bytes
    // in the stream, so a corrupt count larger than the remaining data is rejected up front.
    if (jointDescCount > stream.left() / (2 * sizeof(uint32_t) + 6 * sizeof(float)))
    {
        NVBLAST_LOG_ERROR("deserializeTkAsset: joint descriptor count exceeds remaining stream size.");
        NVBLAST_FREE(llAsset);
        return nullptr;
    }
    std::vector<TkAssetJointDesc> jointDescs(jointDescCount);
    for (uint32_t i = 0; i < jointDescs.size(); ++i)
    {
        TkAssetJointDesc& jointDesc = jointDescs[i];
        stream >> jointDesc.nodeIndices[0];
        stream >> jointDesc.nodeIndices[1];
        stream >> jointDesc.attachPositions[0].x;
        stream >> jointDesc.attachPositions[0].y;
        stream >> jointDesc.attachPositions[0].z;
        stream >> jointDesc.attachPositions[1].x;
        stream >> jointDesc.attachPositions[1].y;
        stream >> jointDesc.attachPositions[1].z;
    }

    if (stream.fail())
    {
        NVBLAST_FREE(llAsset);
        return nullptr;
    }

    TkAsset* asset = framework.createAsset(llAsset, jointDescs.data(), (uint32_t)jointDescs.size(), true);

    // Restore the serialized object ID when one was actually stored.  (Fix: the original
    // condition was inverted — it applied the stored ID only when it was all zeros, i.e.
    // when no ID had been saved, and discarded every valid saved ID.)
    NvBlastID zeroID;
    memset(zeroID.data, 0, sizeof(zeroID));
    if (memcmp(zeroID.data, objID.data, sizeof(NvBlastID)) != 0)
    {
        asset->setID(objID);
    }
    asset->userIntData = userIntData;

    return asset;
}

}   // namespace Blast
}   // namespace Nv
5,965
C++
31.423913
147
0.682481
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/serialization/NvBlastExtSerializationCAPN.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved. 
#pragma once #include "capnp/serialize.h" #include "NvBlastExtInputStream.h" #include "NvBlastExtOutputStream.h" #include "NvBlastArray.h" #include "NvBlastExtSerialization.h" namespace Nv { namespace Blast { template<typename TObject, typename TSerializationReader, typename TSerializationBuilder> class ExtSerializationCAPN { public: static TObject* deserializeFromBuffer(const unsigned char* input, uint64_t size); static TObject* deserializeFromStream(std::istream& inputStream); static uint64_t serializationBufferSize(const TObject* object); static bool serializeIntoBuffer(const TObject* object, unsigned char* buffer, uint64_t maxSize, uint64_t& usedSize); static bool serializeIntoBuffer(const TObject *object, unsigned char*& buffer, uint64_t& size, ExtSerialization::BufferProvider* bufferProvider = nullptr, uint64_t offset = 0); static bool serializeIntoStream(const TObject* object, std::ostream& outputStream); private: // Specialized static bool serializeIntoBuilder(TSerializationBuilder& objectBuilder, const TObject* object); static bool serializeIntoMessage(capnp::MallocMessageBuilder& message, const TObject* object); static TObject* deserializeFromStreamReader(capnp::InputStreamMessageReader& message); }; template<typename TObject, typename TSerializationReader, typename TSerializationBuilder> TObject* ExtSerializationCAPN<TObject, TSerializationReader, TSerializationBuilder>::deserializeFromBuffer(const unsigned char* input, uint64_t size) { kj::ArrayPtr<const unsigned char> source(input, size); kj::ArrayInputStream inputStream(source); Nv::Blast::Array<uint64_t>::type scratch(static_cast<uint32_t>(size)); kj::ArrayPtr<capnp::word> scratchArray((capnp::word*) scratch.begin(), size); capnp::InputStreamMessageReader message(inputStream, capnp::ReaderOptions(), scratchArray); return deserializeFromStreamReader(message); } template<typename TObject, typename TSerializationReader, typename TSerializationBuilder> TObject* ExtSerializationCAPN<TObject, 
TSerializationReader, TSerializationBuilder>::deserializeFromStream(std::istream& inputStream) { ExtInputStream readStream(inputStream); capnp::InputStreamMessageReader message(readStream); return deserializeFromStreamReader(message); } template<typename TObject, typename TSerializationReader, typename TSerializationBuilder> uint64_t ExtSerializationCAPN<TObject, TSerializationReader, TSerializationBuilder>::serializationBufferSize(const TObject* object) { capnp::MallocMessageBuilder message; bool result = serializeIntoMessage(message, object); if (result == false) { return 0; } return computeSerializedSizeInWords(message) * sizeof(uint64_t); } template<typename TObject, typename TSerializationReader, typename TSerializationBuilder> bool ExtSerializationCAPN<TObject, TSerializationReader, TSerializationBuilder>::serializeIntoBuffer(const TObject* object, unsigned char* buffer, uint64_t maxSize, uint64_t& usedSize) { capnp::MallocMessageBuilder message; bool result = serializeIntoMessage(message, object); if (result == false) { usedSize = 0; return false; } uint64_t messageSize = computeSerializedSizeInWords(message) * sizeof(uint64_t); if (maxSize < messageSize) { NVBLAST_LOG_ERROR("When attempting to serialize into an existing buffer, the provided buffer was too small."); usedSize = 0; return false; } kj::ArrayPtr<unsigned char> outputBuffer(buffer, maxSize); kj::ArrayOutputStream outputStream(outputBuffer); capnp::writeMessage(outputStream, message); usedSize = messageSize; return true; } template<typename TObject, typename TSerializationReader, typename TSerializationBuilder> bool ExtSerializationCAPN<TObject, TSerializationReader, TSerializationBuilder>::serializeIntoBuffer(const TObject *object, unsigned char*& buffer, uint64_t& size, ExtSerialization::BufferProvider* bufferProvider, uint64_t offset) { capnp::MallocMessageBuilder message; bool result = serializeIntoMessage(message, object); if (result == false) { buffer = nullptr; size = 0; return false; } 
const uint64_t blockSize = computeSerializedSizeInWords(message) * sizeof(uint64_t); size = blockSize + offset; buffer = static_cast<unsigned char *>(bufferProvider != nullptr ? bufferProvider->requestBuffer(size) : NVBLAST_ALLOC(size)); kj::ArrayPtr<unsigned char> outputBuffer(buffer + offset, blockSize); kj::ArrayOutputStream outputStream(outputBuffer); capnp::writeMessage(outputStream, message); return true; } template<typename TObject, typename TSerializationReader, typename TSerializationBuilder> bool ExtSerializationCAPN<TObject, TSerializationReader, TSerializationBuilder>::serializeIntoStream(const TObject* object, std::ostream& outputStream) { capnp::MallocMessageBuilder message; bool result = serializeIntoMessage(message, object); if (result == false) { return false; } ExtOutputStream blastOutputStream(outputStream); writeMessage(blastOutputStream, message); return true; } } // namespace Blast } // namespace Nv
6,870
C
35.547872
230
0.760844
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/serialization/NvBlastExtSerialization.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved. 
#include "NvBlastExtSerialization.h"
#include "NvBlastExtLlSerialization.h"
#include "NvBlastArray.h"
#include "NvBlastHashMap.h"
#include "NvBlastExtSerializationInternal.h"


namespace Nv
{
namespace Blast
{

/**
Serialization manager implementation.

Maintains a registry of ExtSerializer objects keyed by (object type, encoding), and
wraps each serialized object with a fixed-size (HeaderSize) ASCII header containing
the library identifier, version, object type ID, encoding ID, and data size.
*/
class ExtSerializationImpl : public ExtSerializationInternal
{
public:
    // Default buffer provider (allocates with NVBLAST_ALLOC)
    class AllocBufferProvider : public ExtSerialization::BufferProvider
    {
    public:
        virtual void* requestBuffer(size_t size) override;
    };

    ExtSerializationImpl();
    ~ExtSerializationImpl();

    // ExtSerialization interface begin
    virtual bool setSerializationEncoding(uint32_t encodingID) override;
    virtual uint32_t getSerializationEncoding() const override;
    virtual void setBufferProvider(BufferProvider* bufferProvider) override;
    virtual bool peekHeader(uint32_t* objectTypeID, uint32_t* encodingID, uint64_t* dataSize, const void* buffer, uint64_t bufferSize) override;
    virtual const void* skipObject(uint64_t& bufferSize, const void* buffer) override;
    virtual void* deserializeFromBuffer(const void* buffer, uint64_t size, uint32_t* objectTypeIDPtr = nullptr) override;
    virtual uint64_t serializeIntoBuffer(void*& buffer, const void* object, uint32_t objectTypeID) override;
    virtual void release() override;
    // ExtSerialization interface end

    // ExtSerializationInternal interface begin
    virtual bool registerSerializer(ExtSerializer& serializer) override;
    virtual bool unregisterSerializer(ExtSerializer& serializer) override;
    virtual ExtSerializer* findSerializer(uint32_t objectTypeID, uint32_t encodingID) override;
    // ExtSerializationInternal interface end

private:
    // Write the fixed-size object header; returns the position just past the header, or nullptr on failure.
    char* writeHeaderIntoBuffer(char* buffer, uint64_t bufferSize, uint32_t objectTypeID, uint32_t encodingID, uint64_t dataSize) const;

    // Parse and validate the fixed-size object header; returns the position just past the header, or nullptr on failure.
    const char* readHeaderFromBuffer(uint32_t* objectTypeID, uint32_t* encodingID, uint64_t* dataSize, const char* buffer, uint64_t bufferSize) const;

    //// Static data ////

    static const char*          s_identifier;
    static const char*          s_version;
    static AllocBufferProvider  s_defaultBufferProvider;

    //// Member data ////

    HashMap<uint64_t, ExtSerializer*>::type m_serializers;
    uint32_t                                m_serializationEncoding;
    BufferProvider*                         m_bufferProvider;
};


//////// ExtSerializationImpl static member variables ////////

/**
Module identifying header.  This should never change.  (Exactly 32 characters, matching
the 32-byte identifier field in the serialization header.)
*/
const char* ExtSerializationImpl::s_identifier = "NVidia(r) GameWorks Blast(tm) v.";

const char* ExtSerializationImpl::s_version = "1";

ExtSerializationImpl::AllocBufferProvider ExtSerializationImpl::s_defaultBufferProvider;


//////// Local utility functions ////////

// Registry key combining encoding (high 32 bits) and object type (low 32 bits).
static NV_INLINE uint64_t generateKey(uint32_t objectTypeID, uint32_t encodingID)
{
    return static_cast<uint64_t>(encodingID) << 32 | static_cast<uint64_t>(objectTypeID);
}

static NV_INLINE uint64_t generateKey(const ExtSerializer& serializer)
{
    return generateKey(serializer.getObjectTypeID(), serializer.getEncodingID());
}

// Write a four-character code, least significant byte first.
static NV_INLINE void writeIDToBuffer(char* buffer, uint32_t id)
{
    for (int i = 0; i < 4; ++i, id >>= 8)
    {
        *buffer++ = static_cast<char>(id & 0xFF);
    }
}

static NV_INLINE uint32_t readIDFromBuffer(const char* buffer)
{
    return NVBLAST_FOURCC(buffer[0], buffer[1], buffer[2], buffer[3]);
}

// Write val as exactly 16 upper-case hex characters (no prefix, no terminator).
static NV_INLINE void writeU64InHexToBuffer(char* buffer, uint64_t val)
{
    for (char* curr = buffer + 16; curr-- > buffer; val >>= 4)
    {
        *curr = "0123456789ABCDEF"[val & 0xF];
    }
}

// Parse exactly 16 hex characters; returns 0 if any character is not a hex digit.
// The table lookup maps '0'-'9' and 'A'-'F' to their values without branching.
static NV_INLINE uint64_t readU64InHexFromBuffer(const char* buffer)
{
    uint64_t val = 0;
    for (const char* curr = buffer; curr < buffer + 16; ++curr)
    {
        const char c = *curr;
        const char msn = c >> 4;
        const char mask = ((88 >> msn) & 1) - 1;
        const unsigned char digit = "\x0\x1\x2\x3\x4\x5\x6\x7\x8\x9\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xA\xB\xC\xD\xE\xF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF"[((msn - 3) & 1) << 4 | (c & 0xF)] | mask;
        if (digit == 0xFF)
        {
            return 0;   // Not a hexadecimal digit
        }
        val = val << 4 | digit;
    }
    return val;
}


//////// ExtSerialization member functions ////////

ExtSerializationImpl::ExtSerializationImpl() :
    m_serializationEncoding(EncodingID::CapnProtoBinary),
    m_bufferProvider(&s_defaultBufferProvider)
{
}


ExtSerializationImpl::~ExtSerializationImpl()
{
    // Release and remove all registered serializers.  Collect first, since releasing
    // while iterating the map would invalidate the iterator.
    Array<ExtSerializer*>::type registeredSerializers;
    registeredSerializers.reserve(m_serializers.size());
    for (auto it = m_serializers.getIterator(); !it.done(); ++it)
    {
        registeredSerializers.pushBack(it->second);
    }
    m_serializers.clear();
    for (uint32_t i = 0; i < registeredSerializers.size(); ++i)
    {
        registeredSerializers[i]->release();
    }
}


char* ExtSerializationImpl::writeHeaderIntoBuffer(char* buffer, uint64_t bufferSize, uint32_t objectTypeID, uint32_t encodingID, uint64_t dataSize) const
{
    if (bufferSize < HeaderSize)
    {
        return nullptr;
    }

    char* stop = buffer + HeaderSize;

    size_t versionLen = strlen(s_version);
    if (versionLen > 63)
    {
        versionLen = 63;    // Truncate so the version always fits its space-padded 64-byte field
    }

    // Header layout: 32-byte identifier | 64-byte version | 4+1-byte type ID | 4+1-byte encoding ID
    // | 16-byte hex size | padding | '\n' terminator at byte HeaderSize-1.
    memset(buffer, ' ', HeaderSize);
    memcpy(buffer, s_identifier, 32);
    buffer += 32;
    memcpy(buffer, s_version, versionLen);
    buffer += 64;
    writeIDToBuffer(buffer, objectTypeID);
    buffer += 5;
    writeIDToBuffer(buffer, encodingID);
    buffer += 5;
    writeU64InHexToBuffer(buffer, dataSize);
    buffer += 16;
    *(stop - 1) = '\n';

    return stop;
}


const char* ExtSerializationImpl::readHeaderFromBuffer(uint32_t* objectTypeID, uint32_t* encodingID, uint64_t* dataSize, const char* buffer, uint64_t bufferSize) const
{
    if (bufferSize < HeaderSize)
    {
        NVBLAST_LOG_ERROR("ExtSerializationImpl::readHeaderFromBuffer: header terminator not found.");
        return nullptr;
    }

    const char* stop = buffer + HeaderSize;

    if (memcmp(buffer, s_identifier, 32))
    {
        NVBLAST_LOG_ERROR("ExtSerializationImpl::readHeaderFromBuffer: file identifier does not match expected value.");
        return nullptr;
    }
    buffer += 32;

    const char* s = strchr(buffer, ' ');    // Version field is space-padded; find the end of the version string
    if (s == nullptr)
    {
        NVBLAST_LOG_ERROR("ExtSerializationImpl::readHeaderFromBuffer: file format error reading serializer library version.");
        return nullptr; // Fix: the missing return here allowed the null 's' to be used in the memcmp below
    }
    if (memcmp(buffer, s_version, s - buffer))
    {
        NVBLAST_LOG_ERROR("ExtSerializationImpl::readHeaderFromBuffer: file version does not match serializer library version.");
        return nullptr;
    }
    buffer += 64;

    if (objectTypeID != nullptr)
    {
        *objectTypeID = readIDFromBuffer(buffer);
    }
    buffer += 5;

    if (encodingID != nullptr)
    {
        *encodingID = readIDFromBuffer(buffer);
    }
    buffer += 5;

    if (dataSize != nullptr)
    {
        *dataSize = readU64InHexFromBuffer(buffer);
    }
    buffer += 16;

    return stop;
}


bool ExtSerializationImpl::registerSerializer(ExtSerializer& serializer)
{
    return m_serializers.insert(generateKey(serializer), &serializer);
}


bool ExtSerializationImpl::unregisterSerializer(ExtSerializer& serializer)
{
    const uint64_t key = generateKey(serializer);
    const auto entry = m_serializers.find(key);
    if (entry == nullptr)
    {
        return false;
    }
    entry->second->release();
    return m_serializers.erase(key);
}


ExtSerializer* ExtSerializationImpl::findSerializer(uint32_t objectTypeID, uint32_t encodingID)
{
    auto entry = m_serializers.find(generateKey(objectTypeID, encodingID));
    return entry != nullptr ? entry->second : nullptr;
}


bool ExtSerializationImpl::setSerializationEncoding(uint32_t encodingID)
{
    m_serializationEncoding = encodingID;
    return true;
}


uint32_t ExtSerializationImpl::getSerializationEncoding() const
{
    return m_serializationEncoding;
}


void ExtSerializationImpl::setBufferProvider(BufferProvider* bufferProvider)
{
    // A null argument restores the default (NVBLAST_ALLOC-backed) provider
    m_bufferProvider = bufferProvider != nullptr ? bufferProvider : &s_defaultBufferProvider;
}


bool ExtSerializationImpl::peekHeader(uint32_t* objectTypeID, uint32_t* encodingID, uint64_t* dataSize, const void* buffer, uint64_t bufferSize)
{
    return nullptr != readHeaderFromBuffer(objectTypeID, encodingID, dataSize, reinterpret_cast<const char*>(buffer), bufferSize);
}


const void* ExtSerializationImpl::skipObject(uint64_t& bufferSize, const void* buffer)
{
    uint64_t dataSize;
    const char* next = readHeaderFromBuffer(nullptr, nullptr, &dataSize, static_cast<const char*>(buffer), bufferSize);
    if (next == nullptr)
    {
        return nullptr;
    }
    next += dataSize;
    const uint64_t skipSize = next - static_cast<const char*>(buffer);
    NVBLAST_CHECK_ERROR(skipSize <= bufferSize, "Object size in buffer is too large for given buffer size.", return nullptr);
    bufferSize -= skipSize;
    return next;
}


void* ExtSerializationImpl::deserializeFromBuffer(const void* buffer, uint64_t bufferSize, uint32_t* objectTypeIDPtr)
{
    uint32_t objectTypeID;
    uint32_t encodingID;
    uint64_t dataSize;
    void* result = nullptr;
    buffer = readHeaderFromBuffer(&objectTypeID, &encodingID, &dataSize, reinterpret_cast<const char*>(buffer), bufferSize);
    if (buffer != nullptr)
    {
        auto entry = m_serializers.find(generateKey(objectTypeID, encodingID));
        if (entry != nullptr && entry->second != nullptr)
        {
            result = entry->second->deserializeFromBuffer(buffer, dataSize);
        }
    }
    if (objectTypeIDPtr != nullptr)
    {
        *objectTypeIDPtr = result != nullptr ? objectTypeID : 0;
    }
    return result;
}


uint64_t ExtSerializationImpl::serializeIntoBuffer(void*& buffer, const void* object, uint32_t objectTypeID)
{
    if (!m_serializationEncoding)
    {
        NVBLAST_LOG_ERROR("ExtSerializationImpl::serializeIntoBuffer: no serialization encoding has been set.");
        return 0;   // No encoding available (fix: was 'return false' in a uint64_t-returning function)
    }

    auto entry = m_serializers.find(generateKey(objectTypeID, m_serializationEncoding));
    if (entry == nullptr || entry->second == nullptr)
    {
        return 0;   // No serializer registered for this (type, encoding) pair (fix: was 'return false')
    }

    // The serializer reserves HeaderSize bytes at the front of the buffer for the header.
    const uint64_t size = entry->second->serializeIntoBuffer(buffer, *m_bufferProvider, object, HeaderSize);
    if (size < HeaderSize)
    {
        NVBLAST_LOG_ERROR("ExtSerializationImpl::serializeIntoBuffer: failed to write data to buffer.");
        return 0;
    }

    writeHeaderIntoBuffer(reinterpret_cast<char*>(buffer), HeaderSize, objectTypeID, m_serializationEncoding, size - HeaderSize);

    return size;
}


void ExtSerializationImpl::release()
{
    NVBLAST_DELETE(this, ExtSerializationImpl);
}


//////// ExtSerializationImpl::AllocBufferProvider member functions ////////

void* ExtSerializationImpl::AllocBufferProvider::requestBuffer(size_t size)
{
    return NVBLAST_ALLOC(size);
}

}   // namespace Blast
}   // namespace Nv


Nv::Blast::ExtSerialization* NvBlastExtSerializationCreate()
{
    Nv::Blast::ExtSerializationImpl* serialization = NVBLAST_NEW(Nv::Blast::ExtSerializationImpl) ();

    // Automatically load LL serializers
    NvBlastExtLlSerializerLoadSet(*serialization);

    return serialization;
}
13,068
C++
31.031863
192
0.6897
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/serialization/NvBlastExtTkSerialization.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved. 
#include "NvBlastExtSerializationInternal.h"
#include "NvBlastExtTkSerialization.h"
#include "NvBlastExtTkSerializerCAPN.h"
#include "NvBlastExtTkSerializerRAW.h"


namespace Nv
{
namespace Blast
{

// Framework used by the raw (RAW) TkAsset deserializer to construct assets.
// Set by NvBlastExtTkSerializerLoadSet() before any deserialization occurs.
TkFramework* sExtTkSerializerFramework = nullptr;


/**
 * Serializer for Nv::Blast::TkAsset using the Cap'n Proto binary encoding.
 * Boilerplate macros supply the name/description/type-ID/encoding-ID accessors
 * and the default factory/release functions.
 */
class ExtTkSerializerAsset_CPNB : public ExtSerializer
{
public:
    ExtSerializerBoilerplate("TkAsset_CPNB", "Blast high-level asset (Nv::Blast::TkAsset) serialization using Cap'n Proto binary format.", TkObjectTypeID::Asset, ExtSerialization::EncodingID::CapnProtoBinary);
    ExtSerializerDefaultFactoryAndRelease(ExtTkSerializerAsset_CPNB);

    // Decode a TkAsset from a Cap'n Proto binary buffer; returns nullptr on failure.
    virtual void* deserializeFromBuffer(const void* buffer, uint64_t size) override
    {
        return ExtSerializationCAPN<TkAsset, Serialization::TkAsset::Reader, Serialization::TkAsset::Builder>::deserializeFromBuffer(reinterpret_cast<const unsigned char*>(buffer), size);
    }

    // Encode a TkAsset into a buffer from bufferProvider, writing at the given offset.
    // Returns the number of bytes used, or 0 on failure.
    virtual uint64_t serializeIntoBuffer(void*& buffer, ExtSerialization::BufferProvider& bufferProvider, const void* object, uint64_t offset = 0) override
    {
        uint64_t usedSize;
        if (!ExtSerializationCAPN<TkAsset, Serialization::TkAsset::Reader, Serialization::TkAsset::Builder>::serializeIntoBuffer(reinterpret_cast<const TkAsset*>(object), reinterpret_cast<unsigned char*&>(buffer), usedSize, &bufferProvider, offset))
        {
            return 0;
        }
        return usedSize;
    }
};


/**
 * Read-only serializer for Nv::Blast::TkAsset using the legacy raw memory format.
 * Can only deserialize (ExtSerializerReadOnly) -- new data is written with the CPNB encoding.
 */
class ExTkSerializerAsset_RAW : public ExtSerializer
{
public:
    ExtSerializerBoilerplate("TkAsset_RAW", "Blast high-level asset (Nv::Blast::TkAsset) serialization using raw memory format.", TkObjectTypeID::Asset, ExtSerialization::EncodingID::RawBinary);
    ExtSerializerDefaultFactoryAndRelease(ExTkSerializerAsset_RAW);
    ExtSerializerReadOnly(ExTkSerializerAsset_RAW);

    // Decode a TkAsset from a raw binary stream using the module-level framework.
    virtual void* deserializeFromBuffer(const void* buffer, uint64_t size) override
    {
        ExtIStream stream(buffer, size);
        return deserializeTkAsset(stream, *sExtTkSerializerFramework);
    }
};

}   // namespace Blast
}   // namespace Nv


///////////////////////////////////////


/**
 * Register the TkAsset serializers with the given serialization manager and
 * record the framework used by the raw deserializer.
 *
 * \return the number of serializers loaded.
 */
size_t NvBlastExtTkSerializerLoadSet(Nv::Blast::TkFramework& framework, Nv::Blast::ExtSerialization& serialization)
{
    Nv::Blast::sExtTkSerializerFramework = &framework;

    Nv::Blast::ExtSerializer* (*factories[])() =
    {
        Nv::Blast::ExtTkSerializerAsset_CPNB::create,
        Nv::Blast::ExTkSerializerAsset_RAW::create
    };

    return Nv::Blast::ExtSerializationLoadSet(static_cast<Nv::Blast::ExtSerializationInternal&>(serialization), factories);
}


/**
 * Convenience wrapper: serialize a TkAsset using the manager's current encoding.
 * \return bytes written, or 0 on failure.
 */
uint64_t NvBlastExtSerializationSerializeTkAssetIntoBuffer(void*& buffer, Nv::Blast::ExtSerialization& serialization, const Nv::Blast::TkAsset* asset)
{
    return serialization.serializeIntoBuffer(buffer, asset, Nv::Blast::TkObjectTypeID::Asset);
}
4,373
C++
40.657142
209
0.750057
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/serialization/NvBlastExtLlSerialization.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved. 
#include "NvBlastExtSerializationInternal.h"
#include "NvBlastExtLlSerialization.h"
#include "NvBlastExtLlSerializerCAPN.h"


namespace Nv
{
namespace Blast
{

/**
 * Serializer for the low-level NvBlastAsset using the Cap'n Proto binary encoding.
 */
class ExtLlSerializerAsset_CPNB : public ExtSerializer
{
public:
    ExtSerializerBoilerplate("LLAsset_CPNB", "Blast low-level asset (NvBlastAsset) serialization using Cap'n Proto binary format.", LlObjectTypeID::Asset, ExtSerialization::EncodingID::CapnProtoBinary);
    ExtSerializerDefaultFactoryAndRelease(ExtLlSerializerAsset_CPNB);

    // Decode an Asset from a Cap'n Proto binary buffer; returns nullptr on failure.
    virtual void* deserializeFromBuffer(const void* buffer, uint64_t size) override
    {
        return ExtSerializationCAPN<Asset, Serialization::Asset::Reader, Serialization::Asset::Builder>::deserializeFromBuffer(reinterpret_cast<const unsigned char*>(buffer), size);
    }

    // Encode an Asset at the given offset in a buffer from bufferProvider.
    // Returns bytes used, or 0 on failure.
    virtual uint64_t serializeIntoBuffer(void*& buffer, ExtSerialization::BufferProvider& bufferProvider, const void* object, uint64_t offset = 0) override
    {
        uint64_t usedSize;
        if (!ExtSerializationCAPN<Asset, Serialization::Asset::Reader, Serialization::Asset::Builder>::serializeIntoBuffer(reinterpret_cast<const Asset*>(object), reinterpret_cast<unsigned char*&>(buffer), usedSize, &bufferProvider, offset))
        {
            return 0;
        }
        return usedSize;
    }
};


/**
 * Serializer for the low-level NvBlastFamily (FamilyHeader) using the Cap'n Proto binary encoding.
 */
class ExtLlSerializerFamily_CPNB : public ExtSerializer
{
public:
    ExtSerializerBoilerplate("LLFamily_CPNB", "Blast low-level family (NvBlastFamily) serialization using Cap'n Proto binary format.", LlObjectTypeID::Family, ExtSerialization::EncodingID::CapnProtoBinary);
    ExtSerializerDefaultFactoryAndRelease(ExtLlSerializerFamily_CPNB);

    // Decode a FamilyHeader from a Cap'n Proto binary buffer; returns nullptr on failure.
    virtual void* deserializeFromBuffer(const void* buffer, uint64_t size) override
    {
        return ExtSerializationCAPN<FamilyHeader, Serialization::Family::Reader, Serialization::Family::Builder>::deserializeFromBuffer(reinterpret_cast<const unsigned char*>(buffer), size);
    }

    // Encode a FamilyHeader at the given offset in a buffer from bufferProvider.
    // Returns bytes used, or 0 on failure.
    virtual uint64_t serializeIntoBuffer(void*& buffer, ExtSerialization::BufferProvider& bufferProvider, const void* object, uint64_t offset = 0) override
    {
        uint64_t usedSize;
        if (!ExtSerializationCAPN<FamilyHeader, Serialization::Family::Reader, Serialization::Family::Builder>::serializeIntoBuffer(reinterpret_cast<const FamilyHeader*>(object), reinterpret_cast<unsigned char*&>(buffer), usedSize, &bufferProvider, offset))
        {
            return 0;
        }
        return usedSize;
    }
};


/**
 * Common raw-memory (memcpy) serializer for low-level objects that begin with an
 * NvBlastDataBlock header.  Derived classes only add the boilerplate identification.
 */
class ExtLlSerializerObject_RAW : public ExtSerializer
{
public:
    // Copy the data block out of the buffer into a fresh allocation.
    // Rejects buffers smaller than the size recorded in the block header.
    virtual void* deserializeFromBuffer(const void* buffer, uint64_t size) override
    {
        const NvBlastDataBlock* block = reinterpret_cast<const NvBlastDataBlock*>(buffer);
        if (static_cast<uint64_t>(block->size) > size)
        {
            return nullptr; // recorded size exceeds the buffer -- corrupt or truncated data
        }
        void* llobject = NVBLAST_ALLOC(block->size);
        return memcpy(llobject, block, block->size);
    }

    // Copy the object verbatim into a provider-supplied buffer at the given offset.
    // Returns total bytes (object size + offset), or 0 if the buffer request failed.
    virtual uint64_t serializeIntoBuffer(void*& buffer, ExtSerialization::BufferProvider& bufferProvider, const void* object, uint64_t offset = 0) override
    {
        const NvBlastDataBlock* block = reinterpret_cast<const NvBlastDataBlock*>(object);
        const uint64_t size = block->size + offset;
        buffer = bufferProvider.requestBuffer(size);
        if (buffer == nullptr)
        {
            return 0;
        }
        memcpy(static_cast<char*>(buffer) + offset, object, block->size);
        return size;
    }
};


/** Raw-memory serializer identification for NvBlastAsset. */
class ExtLlSerializerAsset_RAW : public ExtLlSerializerObject_RAW
{
public:
    ExtSerializerBoilerplate("LLAsset_RAW", "Blast low-level asset (NvBlastAsset) serialization using raw memory format.", LlObjectTypeID::Asset, ExtSerialization::EncodingID::RawBinary);
    ExtSerializerDefaultFactoryAndRelease(ExtLlSerializerAsset_RAW);
};


/** Raw-memory serializer identification for NvBlastFamily. */
class ExtLlSerializerFamily_RAW : public ExtLlSerializerObject_RAW
{
public:
    ExtSerializerBoilerplate("LLFamily_RAW", "Blast low-level family (NvBlastFamily) serialization using raw memory format.", LlObjectTypeID::Family, ExtSerialization::EncodingID::RawBinary);
    ExtSerializerDefaultFactoryAndRelease(ExtLlSerializerFamily_RAW);
};

}   // namespace Blast
}   // namespace Nv


///////////////////////////////////////


/**
 * Register all low-level (NvBlastAsset/NvBlastFamily) serializers with the given manager.
 * \return the number of serializers loaded.
 */
size_t NvBlastExtLlSerializerLoadSet(Nv::Blast::ExtSerialization& serialization)
{
    Nv::Blast::ExtSerializer* (*factories[])() =
    {
        Nv::Blast::ExtLlSerializerAsset_CPNB::create,
        Nv::Blast::ExtLlSerializerAsset_RAW::create,
        Nv::Blast::ExtLlSerializerFamily_CPNB::create,
        Nv::Blast::ExtLlSerializerFamily_RAW::create
    };

    return Nv::Blast::ExtSerializationLoadSet(static_cast<Nv::Blast::ExtSerializationInternal&>(serialization), factories);
}


/**
 * Convenience wrapper: serialize an NvBlastAsset using the manager's current encoding.
 * \return bytes written, or 0 on failure.
 */
uint64_t NvBlastExtSerializationSerializeAssetIntoBuffer(void*& buffer, Nv::Blast::ExtSerialization& serialization, const NvBlastAsset* asset)
{
    return serialization.serializeIntoBuffer(buffer, asset, Nv::Blast::LlObjectTypeID::Asset);
}


/**
 * Convenience wrapper: serialize an NvBlastFamily using the manager's current encoding.
 * \return bytes written, or 0 on failure.
 */
uint64_t NvBlastExtSerializationSerializeFamilyIntoBuffer(void*& buffer, Nv::Blast::ExtSerialization& serialization, const NvBlastFamily* family)
{
    return serialization.serializeIntoBuffer(buffer, family, Nv::Blast::LlObjectTypeID::Family);
}
6,780
C++
41.118012
206
0.737316
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/serialization/DTO/ActorDTO.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved. 
#include "ActorDTO.h"
#include "NvBlastGlobals.h"
#include "NvBlastIDDTO.h"
#include "NvBlastChunkDTO.h"
#include "NvBlastBondDTO.h"

namespace Nv
{
namespace Blast
{

/**
 * Copy every serialized Actor field from the runtime object into the Cap'n Proto builder.
 * \return true (this conversion cannot fail).
 */
bool ActorDTO::serialize(Nv::Blast::Serialization::Actor::Builder builder, const Nv::Blast::Actor* poco)
{
    // Read all fields first, then write them into the builder.
    const auto familyOffset       = poco->getFamilyOffset();
    const auto firstVisibleChunk  = poco->getFirstVisibleChunkIndex();
    const auto visibleChunkCount  = poco->getVisibleChunkCount();
    const auto firstGraphNode     = poco->getFirstGraphNodeIndex();
    const auto graphNodeCount     = poco->getGraphNodeCount();
    const auto leafChunkCount     = poco->getLeafChunkCount();

    builder.setFamilyOffset(familyOffset);
    builder.setFirstVisibleChunkIndex(firstVisibleChunk);
    builder.setVisibleChunkCount(visibleChunkCount);
    builder.setFirstGraphNodeIndex(firstGraphNode);
    builder.setGraphNodeCount(graphNodeCount);
    builder.setLeafChunkCount(leafChunkCount);

    return true;
}

/**
 * Standalone deserialization is not supported for actors; actors live inside a
 * family, so callers must use deserializeInto() on an existing Actor instead.
 * \return nullptr always.
 */
Nv::Blast::Actor* ActorDTO::deserialize(Nv::Blast::Serialization::Actor::Reader reader)
{
    NV_UNUSED(reader);
    return nullptr;
}

/**
 * Populate an existing Actor from its Cap'n Proto representation.
 * \return true (this conversion cannot fail).
 */
bool ActorDTO::deserializeInto(Nv::Blast::Serialization::Actor::Reader reader, Nv::Blast::Actor* poco)
{
    // Mirror of serialize(): read each field from the reader and push it into the actor.
    poco->setFamilyOffset(reader.getFamilyOffset());
    poco->setFirstVisibleChunkIndex(reader.getFirstVisibleChunkIndex());
    poco->setVisibleChunkCount(reader.getVisibleChunkCount());
    poco->setFirstGraphNodeIndex(reader.getFirstGraphNodeIndex());
    poco->setGraphNodeCount(reader.getGraphNodeCount());
    poco->setLeafChunkCount(reader.getLeafChunkCount());

    return true;
}

}   // namespace Blast
}   // namespace Nv
2,914
C++
38.391891
104
0.762183
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/serialization/DTO/PxConvexMeshGeometryDTO.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved. 
#include "PxConvexMeshGeometryDTO.h"
#include "PxMeshScaleDTO.h"
#include "NvBlastAssert.h"
#include "NvBlastExtKJPxInputStream.h"
#include "NvBlastExtKJPxOutputStream.h"
#include "PxConvexMeshDesc.h"
#include "NvBlastExtSerialization.h"
#include "PxVec3.h"
#include <algorithm>
#include <vector>
#include "PxPhysics.h"
#include "NvBlastPxCallbacks.h"
#include "PxDefaultStreams.h"


namespace Nv
{
namespace Blast
{

// Module-level PhysX singletons, set elsewhere by the Px serializer load function.
extern physx::PxPhysics* sExtPxSerializerPhysics;
extern physx::PxCooking* sExtPxSerializerCooking;

/**
 * Serialize a PxConvexMeshGeometry: the mesh scale is stored field-by-field, and the
 * convex mesh itself is re-cooked into a PhysX cooked-data blob stored as raw bytes.
 *
 * \return true on success, false if cooking fails.
 */
bool PxConvexMeshGeometryDTO::serialize(Nv::Blast::Serialization::PxConvexMeshGeometry::Builder builder, const physx::PxConvexMeshGeometry * poco)
{
    NVBLAST_ASSERT(sExtPxSerializerCooking != nullptr);

    PxMeshScaleDTO::serialize(builder.getScale(), &poco->scale);

    //TODO: Use cooking.cookConvexMesh to cook the mesh to a stream - then get that backing buffer and put it into the Data field
    // Rebuild a PxConvexMeshDesc from the runtime mesh so it can be cooked again.
    physx::PxConvexMeshDesc desc;
    desc.points.data = poco->convexMesh->getVertices();
    desc.points.count = poco->convexMesh->getNbVertices();
    desc.points.stride = sizeof(physx::PxVec3);

    std::vector<uint32_t> indicesScratch;
    std::vector<physx::PxHullPolygon> hullPolygonsScratch;
    hullPolygonsScratch.resize(poco->convexMesh->getNbPolygons());

    // First pass: find the total index count as the maximum extent (base + count)
    // over all polygons, since polygons index into one shared index buffer.
    uint32_t indexCount = 0;
    for (uint32_t i = 0; i < hullPolygonsScratch.size(); i++)
    {
        physx::PxHullPolygon polygon;
        poco->convexMesh->getPolygonData(i, polygon);
        if (polygon.mNbVerts)
        {
            indexCount = std::max<uint32_t>(indexCount, polygon.mIndexBase + polygon.mNbVerts);
        }
    }
    indicesScratch.resize(indexCount);

    // Second pass: copy each polygon's slice of the index buffer and stash the polygon data.
    for (uint32_t i = 0; i < hullPolygonsScratch.size(); i++)
    {
        physx::PxHullPolygon polygon;
        poco->convexMesh->getPolygonData(i, polygon);
        for (uint32_t j = 0; j < polygon.mNbVerts; j++)
        {
            indicesScratch[polygon.mIndexBase + j] = poco->convexMesh->getIndexBuffer()[polygon.mIndexBase + j];
        }
        hullPolygonsScratch[i] = polygon;
    }

    desc.indices.count = indexCount;
    desc.indices.data = indicesScratch.data();
    desc.indices.stride = sizeof(uint32_t);

    desc.polygons.count = poco->convexMesh->getNbPolygons();
    desc.polygons.data = hullPolygonsScratch.data();
    desc.polygons.stride = sizeof(physx::PxHullPolygon);

    // Cook into an in-memory stream and store the resulting bytes in the message.
    physx::PxDefaultMemoryOutputStream outStream(NvBlastGetPxAllocatorCallback());
    if (!sExtPxSerializerCooking->cookConvexMesh(desc, outStream))
    {
        return false;
    }
    kj::ArrayPtr<unsigned char> cookedBuffer(outStream.getData(), outStream.getSize());
    builder.setConvexMesh(cookedBuffer);

    return true;
}

/**
 * Standalone deserialization is not implemented; use deserializeInto() on an
 * existing PxConvexMeshGeometry.
 * \return nullptr always.
 */
physx::PxConvexMeshGeometry* PxConvexMeshGeometryDTO::deserialize(Nv::Blast::Serialization::PxConvexMeshGeometry::Reader reader)
{
    NVBLAST_ASSERT(sExtPxSerializerCooking != nullptr);

    NV_UNUSED(reader);

    return nullptr;
}

/**
 * Populate an existing PxConvexMeshGeometry: restore the scale, then create a
 * PxConvexMesh from the stored cooked-data bytes.
 *
 * \return true if the convex mesh was created successfully.
 */
bool PxConvexMeshGeometryDTO::deserializeInto(Nv::Blast::Serialization::PxConvexMeshGeometry::Reader reader, physx::PxConvexMeshGeometry * poco)
{
    NVBLAST_ASSERT(sExtPxSerializerPhysics != nullptr);

    PxMeshScaleDTO::deserializeInto(reader.getScale(), &poco->scale);

    Nv::Blast::ExtKJPxInputStream inputStream(reader.getConvexMesh());

    //NOTE: Naive approach, no shared convex hulls
    poco->convexMesh = sExtPxSerializerPhysics->createConvexMesh(inputStream);

    return poco->convexMesh != nullptr;
}

}   // namespace Blast
}   // namespace Nv
5,081
C++
34.788732
146
0.731943
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/serialization/DTO/TkAssetJointDescDTO.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved. 
#include "TkAssetJointDescDTO.h"
#include "NvVec3DTO.h"

namespace Nv
{
namespace Blast
{

/**
 * Serialize a joint descriptor: the two node indices and the two attachment positions.
 * \return true (this conversion cannot fail).
 */
bool TkAssetJointDescDTO::serialize(Nv::Blast::Serialization::TkAssetJointDesc::Builder builder, const Nv::Blast::TkAssetJointDesc * poco)
{
    // A joint always connects exactly two nodes.
    builder.setNodeIndices(kj::ArrayPtr<const uint32_t>(poco->nodeIndices, 2));

    builder.initAttachPositions(2);
    for (int side = 0; side < 2; ++side)
    {
        NvVec3DTO::serialize(builder.getAttachPositions()[side], &poco->attachPositions[side]);
    }

    return true;
}

/**
 * Standalone deserialization is not implemented yet; use deserializeInto().
 * \return nullptr always.
 */
Nv::Blast::TkAssetJointDesc* TkAssetJointDescDTO::deserialize(Nv::Blast::Serialization::TkAssetJointDesc::Reader reader)
{
    //TODO: Allocate with ExtContent and return
    NV_UNUSED(reader);
    return nullptr;
}

/**
 * Populate an existing joint descriptor from its Cap'n Proto representation.
 * \return true (this conversion cannot fail).
 */
bool TkAssetJointDescDTO::deserializeInto(Nv::Blast::Serialization::TkAssetJointDesc::Reader reader, Nv::Blast::TkAssetJointDesc * poco)
{
    auto positions = reader.getAttachPositions();
    auto indices   = reader.getNodeIndices();

    // Copy both sides of the joint.
    for (int side = 0; side < 2; ++side)
    {
        NvVec3DTO::deserializeInto(positions[side], &poco->attachPositions[side]);
        poco->nodeIndices[side] = indices[side];
    }

    return true;
}

}   // namespace Blast
}   // namespace Nv
2,911
C++
36.818181
138
0.74854
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/serialization/DTO/AssetDTO.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved. 
#include "AssetDTO.h"
#include "NvBlastGlobals.h"
#include "NvBlastIDDTO.h"
#include "NvBlastChunkDTO.h"
#include "NvBlastBondDTO.h"
#include "NvBlastAsset.h"


namespace Nv
{
namespace Blast
{

/**
 * Serialize a low-level Asset: ID, counts, chunk and bond arrays, per-chunk
 * bookkeeping arrays, and the support graph.
 *
 * \return true (this conversion cannot fail).
 */
bool AssetDTO::serialize(Nv::Blast::Serialization::Asset::Builder builder, const Nv::Blast::Asset * poco)
{
    NvBlastIDDTO::serialize(builder.initID(), &poco->m_ID);

    builder.setLeafChunkCount(poco->m_leafChunkCount);

    builder.setFirstSubsupportChunkIndex(poco->m_firstSubsupportChunkIndex);

    // Chunks: allocate the list, record the count, then serialize each chunk.
    capnp::List<Nv::Blast::Serialization::NvBlastChunk>::Builder chunks = builder.initChunks(poco->m_chunkCount);

    builder.setChunkCount(poco->m_chunkCount);

    NVBLAST_ASSERT_WITH_MESSAGE(builder.getChunkCount() == poco->m_chunkCount, "WTF");

    for (uint32_t i = 0; i < poco->m_chunkCount; i++)
    {
        NvBlastChunk& chunk = poco->getChunks()[i];

        NvBlastChunkDTO::serialize(chunks[i], &chunk);
    }

    NVBLAST_ASSERT_WITH_MESSAGE(builder.getChunkCount() == poco->m_chunkCount, "WTF");

    // Bonds: same pattern as chunks.
    capnp::List<Nv::Blast::Serialization::NvBlastBond>::Builder bonds = builder.initBonds(poco->m_bondCount);

    builder.setBondCount(poco->m_bondCount);

    for (uint32_t i = 0; i < poco->m_bondCount; i++)
    {
        NvBlastBond& bond = poco->getBonds()[i];

        NvBlastBondDTO::serialize(bonds[i], &bond);
    }

    // Per-chunk subtree leaf-chunk counts (one entry per chunk).
    kj::ArrayPtr<uint32_t> stlcArray(poco->getSubtreeLeafChunkCounts(), poco->m_chunkCount);
    builder.initSubtreeLeafChunkCounts(poco->m_chunkCount);
    builder.setSubtreeLeafChunkCounts(stlcArray);

    // Chunk-index -> graph-node-index map (one entry per chunk).
    kj::ArrayPtr<uint32_t> ctgnArray(poco->getChunkToGraphNodeMap(), poco->m_chunkCount);
    builder.setChunkToGraphNodeMap(ctgnArray);

    // Support graph: node count, chunk indices, and CSR-style adjacency arrays.
    // NOTE(review): "graphBulder" is a long-standing misspelling of "graphBuilder";
    // kept as-is since this is a purely local name.
    Nv::Blast::Serialization::NvBlastSupportGraph::Builder graphBulder = builder.initGraph();

    graphBulder.setNodeCount(poco->m_graph.m_nodeCount);

    uint32_t* ciPtr = poco->m_graph.getChunkIndices();

    kj::ArrayPtr<const uint32_t> ciArray(ciPtr, poco->m_graph.m_nodeCount);
    graphBulder.setChunkIndices(ciArray);

    // Adjacency partition has nodeCount + 1 entries (start offsets plus end sentinel).
    kj::ArrayPtr<const uint32_t> adjPart(poco->m_graph.getAdjacencyPartition(), poco->m_graph.m_nodeCount + 1);
    graphBulder.setAdjacencyPartition(adjPart);
    NVBLAST_ASSERT(graphBulder.getAdjacencyPartition().size() == poco->m_graph.m_nodeCount + 1);

    // Each bond appears twice in the adjacency lists (once per endpoint), hence bondCount * 2.
    kj::ArrayPtr<const uint32_t> nodeIndices(poco->m_graph.getAdjacentNodeIndices(), poco->m_bondCount * 2);
    graphBulder.setAdjacentNodeIndices(nodeIndices);
    NVBLAST_ASSERT(graphBulder.getAdjacentNodeIndices().size() == poco->m_bondCount * 2);

    kj::ArrayPtr<const uint32_t> bondIndices(poco->m_graph.getAdjacentBondIndices(), poco->m_bondCount * 2);
    graphBulder.setAdjacentBondIndices(bondIndices);

    return true;
}

/**
 * Allocate and build a new low-level Asset from its serialized form.
 *
 * Computes the required memory from the serialized counts, allocates one block,
 * initializes the asset layout in place, then fills it via deserializeInto().
 *
 * \return the new Asset (caller owns the allocation), or nullptr on failure.
 */
Nv::Blast::Asset* AssetDTO::deserialize(Nv::Blast::Serialization::Asset::Reader reader)
{
    NvBlastAssetMemSizeData sizeData;
    sizeData.chunkCount = reader.getChunkCount();
    sizeData.nodeCount = reader.getGraph().getNodeCount();
    sizeData.bondCount = reader.getBondCount();
    const uint32_t leafChunkCount = reader.getLeafChunkCount();
    const uint32_t firstSubsupportChunkIndex = reader.getFirstSubsupportChunkIndex();
    const size_t assetSize = NvBlastGetAssetMemorySizeFromSizeData(sizeData, nullptr);
    void* mem = NVBLAST_ALLOC(assetSize);
    auto asset = Nv::Blast::initializeAsset(mem, sizeData.chunkCount, sizeData.nodeCount, leafChunkCount,
        firstSubsupportChunkIndex, sizeData.bondCount, logLL);

    if (deserializeInto(reader, asset))
        return asset;

    // free the memory so it doesn't leak
    // NOTE(review): frees `asset` rather than `mem`; this presumes initializeAsset()
    // returns a pointer to the start of the block -- confirm against its definition.
    NVBLAST_FREE(asset);
    return nullptr;
}

/**
 * Populate an already-initialized Asset in place from its serialized form.
 * The asset's arrays must already be sized to match the reader's counts.
 *
 * \return true (this conversion cannot fail).
 */
bool AssetDTO::deserializeInto(Nv::Blast::Serialization::Asset::Reader reader, Nv::Blast::Asset * poco)
{
    NvBlastIDDTO::deserializeInto(reader.getID(), &poco->m_ID);

    // Bonds.
    NvBlastBond* bonds = poco->getBonds();

    uint32_t bondCount = reader.getBondCount();

    auto readerBonds = reader.getBonds();
    for (uint32_t i = 0; i < bondCount; i++)
    {
        auto bondReader = readerBonds[i];

        NvBlastBondDTO::deserializeInto(bondReader, &bonds[i]);
    }

    // Chunks.
    NvBlastChunk* chunks = poco->getChunks();

    uint32_t chunkCount = reader.getChunkCount();

    auto readerChunks = reader.getChunks();
    for (uint32_t i = 0; i < chunkCount; i++)
    {
        auto chunkReader = readerChunks[i];

        NvBlastChunkDTO::deserializeInto(chunkReader, &chunks[i]);
    }

    poco->m_graph.m_nodeCount = reader.getGraph().getNodeCount();

    // Per-chunk bookkeeping arrays.
    NVBLAST_ASSERT(reader.getSubtreeLeafChunkCounts().size() == poco->m_chunkCount);
    auto readerSubtreeLeafChunkCounts = reader.getSubtreeLeafChunkCounts();
    for (uint32_t i = 0; i < poco->m_chunkCount; i++)
    {
        poco->getSubtreeLeafChunkCounts()[i] = readerSubtreeLeafChunkCounts[i];
    }

    auto readerChunkToGraphNodeMap = reader.getChunkToGraphNodeMap();
    for (uint32_t i = 0; i < chunkCount; i++)
    {
        poco->getChunkToGraphNodeMap()[i] = readerChunkToGraphNodeMap[i];
    }

    // Support graph arrays (see serialize() for the size relationships).
    uint32_t* ciPtr = poco->m_graph.getChunkIndices();

    NVBLAST_ASSERT(reader.getGraph().getChunkIndices().size() == poco->m_graph.m_nodeCount);
    auto readerGraphChunkIndices = reader.getGraph().getChunkIndices();
    for (uint32_t i = 0; i < poco->m_graph.m_nodeCount; i++)
    {
        ciPtr[i] = readerGraphChunkIndices[i];
    }

    uint32_t* adjPartition = poco->m_graph.getAdjacencyPartition();
    const uint32_t graphAdjacencyPartitionSize = reader.getGraph().getAdjacencyPartition().size();
    auto readerGraphAdjacencyPartition = reader.getGraph().getAdjacencyPartition();
    for (uint32_t i = 0; i < graphAdjacencyPartitionSize; ++i)
    {
        adjPartition[i] = readerGraphAdjacencyPartition[i];
    }

    uint32_t* adjNodes = poco->m_graph.getAdjacentNodeIndices();
    const uint32_t graphAdjacentNodeIndicesSize = reader.getGraph().getAdjacentNodeIndices().size();
    auto readerGraphAdjacentNodeIndices = reader.getGraph().getAdjacentNodeIndices();
    for (uint32_t i = 0; i < graphAdjacentNodeIndicesSize; ++i)
    {
        adjNodes[i] = readerGraphAdjacentNodeIndices[i];
    }

    uint32_t* adjBonds = poco->m_graph.getAdjacentBondIndices();
    const uint32_t graphAdjacentBondIndicesSize = reader.getGraph().getAdjacentBondIndices().size();
    auto readerGraphAdjacentBondIndices = reader.getGraph().getAdjacentBondIndices();
    for (uint32_t i = 0; i < graphAdjacentBondIndicesSize; ++i)
    {
        adjBonds[i] = readerGraphAdjacentBondIndices[i];
    }

    return true;
}

}   // namespace Blast
}   // namespace Nv
8,072
C++
37.8125
160
0.718285
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/serialization/DTO/NvBlastBondDTO.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved. 
#include "NvBlastBondDTO.h"
#include "NvBlastAssert.h"

namespace Nv
{
namespace Blast
{

/**
 * Write a low-level bond into its Cap'n Proto representation.
 * \return true (this conversion cannot fail).
 */
bool NvBlastBondDTO::serialize(Nv::Blast::Serialization::NvBlastBond::Builder builder, const NvBlastBond * poco)
{
    NVBLAST_ASSERT(poco != nullptr);

    // normal and centroid are fixed 3-component float arrays.
    builder.setNormal(kj::ArrayPtr<const float>(poco->normal, 3));
    builder.setArea(poco->area);
    builder.setCentroid(kj::ArrayPtr<const float>(poco->centroid, 3));
    builder.setUserData(poco->userData);

    return true;
}

/**
 * Standalone deserialization is not implemented yet; use deserializeInto().
 * \return nullptr always.
 */
NvBlastBond* NvBlastBondDTO::deserialize(Nv::Blast::Serialization::NvBlastBond::Reader reader)
{
    //FIXME
    //TODO: Allocate with ExtContext and return
    NV_UNUSED(reader);
    return nullptr;
}

/**
 * Populate an existing bond from its Cap'n Proto representation.
 * \return true (this conversion cannot fail).
 */
bool NvBlastBondDTO::deserializeInto(Nv::Blast::Serialization::NvBlastBond::Reader reader, NvBlastBond * poco)
{
    poco->area = reader.getArea();

    // Copy both 3-vectors component by component.
    auto centroid = reader.getCentroid();
    auto normal = reader.getNormal();
    for (int axis = 0; axis < 3; ++axis)
    {
        poco->centroid[axis] = centroid[axis];
        poco->normal[axis] = normal[axis];
    }

    poco->userData = reader.getUserData();

    return true;
}

}   // namespace Blast
}   // namespace Nv
2,896
C++
32.686046
112
0.734116
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/serialization/DTO/TkAssetDTO.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved. 
#include "TkAssetDTO.h"
#include "AssetDTO.h"
#include "TkAssetJointDescDTO.h"
#include <vector>
#include "NvBlastTkFramework.h"
#include "NvBlastGlobals.h"

namespace Nv
{
namespace Blast
{

extern TkFramework* sExtTkSerializerFramework;

/**
 * Serialize a TkAsset: its low-level asset plus the array of joint descriptors.
 *
 * \param builder  Destination Cap'n Proto TkAsset builder.
 * \param poco     Source TkAsset; assumed non-null.
 * \return true on success.
 */
bool TkAssetDTO::serialize(Nv::Blast::Serialization::TkAsset::Builder builder, const Nv::Blast::TkAsset * poco)
{
    const Asset* assetLL = reinterpret_cast<const Nv::Blast::Asset*>(poco->getAssetLL());
    Nv::Blast::AssetDTO::serialize(builder.getAssetLL(), assetLL);

    uint32_t jointDescCount = poco->getJointDescCount();

    capnp::List<Nv::Blast::Serialization::TkAssetJointDesc>::Builder jointDescs = builder.initJointDescs(jointDescCount);

    for (uint32_t i = 0; i < jointDescCount; i++)
    {
        TkAssetJointDescDTO::serialize(jointDescs[i], &poco->getJointDescs()[i]);
    }

    return true;
}

/**
 * Build a new TkAsset from its serialized representation.
 *
 * The low-level asset is deserialized first, then handed to the framework
 * with ownership transferred (ownsAsset = true).
 *
 * \param reader  Source Cap'n Proto TkAsset reader.
 * \return The new TkAsset, or nullptr if the low-level asset could not be
 *         deserialized.
 */
Nv::Blast::TkAsset* TkAssetDTO::deserialize(Nv::Blast::Serialization::TkAsset::Reader reader)
{
    const NvBlastAsset* assetLL = reinterpret_cast<const NvBlastAsset*>(AssetDTO::deserialize(reader.getAssetLL()));
    if (assetLL == nullptr)
    {
        // Low-level asset failed to deserialize; there is nothing to wrap.
        return nullptr;
    }

    std::vector<Nv::Blast::TkAssetJointDesc> jointDescs;

    const uint32_t jointDescCount = reader.getJointDescs().size();
    jointDescs.resize(jointDescCount);
    auto readerJointDescs = reader.getJointDescs();
    for (uint32_t i = 0; i < jointDescCount; i++)
    {
        TkAssetJointDescDTO::deserializeInto(readerJointDescs[i], &jointDescs[i]);
    }

    // Make sure to set ownsAsset to true - this is serialization and no one else owns it.
    Nv::Blast::TkAsset* asset = NvBlastTkFrameworkGet()->createAsset(assetLL, jointDescs.data(), jointDescCount, true);

    return asset;
}

/**
 * In-place deserialization is not supported for TkAsset.
 *
 * \return Always false.
 */
bool TkAssetDTO::deserializeInto(Nv::Blast::Serialization::TkAsset::Reader reader, Nv::Blast::TkAsset * poco)
{
    NV_UNUSED(reader);
    // NOTE: Because of the way TkAsset is currently structured, this won't work.
    // (The previous `poco = nullptr;` only rebound the local by-value pointer
    // parameter and had no effect on the caller; it is intentionally removed.)
    NV_UNUSED(poco);
    return false;
}

}   // namespace Blast
}   // namespace Nv
3,537
C++
36.638297
121
0.737913
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/serialization/DTO/NvBlastChunkDTO.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved. 
#include "NvBlastChunkDTO.h"
#include "NvBlastAssert.h"

namespace Nv
{
namespace Blast
{

// Copy every field of an NvBlastChunk into the corresponding builder slots.
// Returns true; poco must be non-null.
bool NvBlastChunkDTO::serialize(Nv::Blast::Serialization::NvBlastChunk::Builder builder, const NvBlastChunk* poco)
{
    NVBLAST_ASSERT(poco != nullptr);

    // centroid is a fixed-length float[3]; wrap it for the capnp copy.
    const kj::ArrayPtr<const float> centroidView(poco->centroid, 3);
    builder.setCentroid(centroidView);

    builder.setUserData(poco->userData);
    builder.setVolume(poco->volume);
    builder.setChildIndexStop(poco->childIndexStop);
    builder.setFirstChildIndex(poco->firstChildIndex);
    builder.setParentChunkIndex(poco->parentChunkIndex);

    return true;
}

// Allocating deserialization is unimplemented; chunks are restored in place
// via deserializeInto(). Always returns nullptr.
NvBlastChunk* NvBlastChunkDTO::deserialize(Nv::Blast::Serialization::NvBlastChunk::Reader reader)
{
    //FIXME
    NV_UNUSED(reader);

    return nullptr;
}

// Fill an existing NvBlastChunk from its serialized representation.
// target must be non-null; returns true.
bool NvBlastChunkDTO::deserializeInto(Nv::Blast::Serialization::NvBlastChunk::Reader reader, NvBlastChunk* target)
{
    NVBLAST_ASSERT(target != nullptr);

    const auto serializedCentroid = reader.getCentroid();
    for (int axis = 0; axis < 3; ++axis)
    {
        target->centroid[axis] = serializedCentroid[axis];
    }

    target->volume = reader.getVolume();
    target->userData = reader.getUserData();
    target->parentChunkIndex = reader.getParentChunkIndex();
    target->firstChildIndex = reader.getFirstChildIndex();
    target->childIndexStop = reader.getChildIndexStop();

    return true;
}

}   // namespace Blast
}   // namespace Nv
3,001
C++
35.168674
114
0.748417
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/serialization/DTO/FamilyDTO.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved. 
#include "FamilyDTO.h"
#include "ActorDTO.h"
#include "AssetDTO.h"
#include "FamilyGraphDTO.h"
#include "NvBlastFamilyGraph.h"
#include "NvBlastGlobals.h"
#include "NvBlastIDDTO.h"
#include "NvBlastChunkDTO.h"
#include "NvBlastBondDTO.h"
#include <vector>

namespace Nv
{
namespace Blast
{

/**
 * Serialize a FamilyHeader and all of its in-place arrays (actors, chunk/node
 * link arrays, health arrays, and the island graph) into a Cap'n Proto builder.
 *
 * The asset's size data is written alongside the payload so deserialize() can
 * recompute the family's memory footprint without the asset being present.
 */
bool FamilyDTO::serialize(Nv::Blast::Serialization::Family::Builder builder, const Nv::Blast::FamilyHeader* poco)
{
    NvBlastIDDTO::serialize(builder.initAssetID(), &poco->m_assetID);

    // cache off the count data from the asset needed to re-create the family post serialization
    const NvBlastAssetMemSizeData sizeData = NvBlastAssetMemSizeDataFromAsset(poco->m_asset);
    builder.setBondCount(sizeData.bondCount);
    builder.setChunkCount(sizeData.chunkCount);
    builder.setNodeCount(sizeData.nodeCount);
    builder.setLowerSupportChunkCount(sizeData.lowerSupportChunkCount);
    builder.setUpperSupportChunkCount(sizeData.upperSupportChunkCount);

    // actorCount - these are active
    builder.setActorCount(poco->m_actorCount);

    // all possible actors
    const uint32_t actorCount = poco->getActorsArraySize();
    capnp::List<Nv::Blast::Serialization::Actor>::Builder actors = builder.initActors(actorCount);
    for (uint32_t i = 0; i < actorCount; i++)
    {
        Actor& actor = poco->getActors()[i];
        ActorDTO::serialize(actors[i], &actor);
    }

    // visibleChunkIndexLinks
    // Each link holds an m_adj[2] pair (see deserializeInto below); the array is
    // reinterpreted as a flat uint32_t buffer of length chunkCount * 2.
    uint32_t* visibleChunkIndexLinks = reinterpret_cast<uint32_t *>(poco->getVisibleChunkIndexLinks());
    kj::ArrayPtr<uint32_t> visibleChunkIndexLinksArray(visibleChunkIndexLinks, sizeData.chunkCount * 2);
    builder.setVisibleChunkIndexLinks(visibleChunkIndexLinksArray);

    // chunkActorIndices
    kj::ArrayPtr<uint32_t> chunkActorIndicesArray(poco->getChunkActorIndices(), sizeData.chunkCount);
    builder.setChunkActorIndices(chunkActorIndicesArray);

    // graphNodeIndexLinks
    // NOTE(review): sized by chunkCount here — confirm this array is not
    // nodeCount-sized in the family's memory layout.
    kj::ArrayPtr<uint32_t> graphNodeIndexLinksArray(poco->getGraphNodeIndexLinks(), sizeData.chunkCount);
    builder.setGraphNodeIndexLinks(graphNodeIndexLinksArray);

    // lowerSupportChunkHealths
    // NOTE(review): sized by chunkCount — presumably lowerSupportChunkCount
    // would be the tight bound; verify against the family layout.
    kj::ArrayPtr<float> lowerSupportChunkHealthsArray(poco->getLowerSupportChunkHealths(), sizeData.chunkCount);
    builder.setLowerSupportChunkHealths(lowerSupportChunkHealthsArray);

    // graphBondHealths
    kj::ArrayPtr<float> graphBondHealthsArray(poco->getBondHealths(), sizeData.bondCount);
    builder.setGraphBondHealths(graphBondHealthsArray);

    // familyGraph
    // nodeCount is stored on the graph builder so FamilyGraphDTO::serialize can
    // size its arrays (it has no other access to the count).
    FamilyGraph *graph = poco->getFamilyGraph();
    auto builderGraph = builder.initFamilyGraph();
    builderGraph.setNodeCount(sizeData.nodeCount);
    FamilyGraphDTO::serialize(builderGraph, graph);

    return true;
}

/**
 * Allocate and rebuild a family from its serialized representation.
 *
 * The size data cached by serialize() drives the allocation; on failure the
 * allocation is released and nullptr is returned.
 */
Nv::Blast::FamilyHeader* FamilyDTO::deserialize(Nv::Blast::Serialization::Family::Reader reader)
{
    // fill in the count info from the reader
    NvBlastAssetMemSizeData sizeData;
    sizeData.bondCount = reader.getBondCount();
    sizeData.chunkCount = reader.getChunkCount();
    sizeData.nodeCount = reader.getNodeCount();
    sizeData.lowerSupportChunkCount = reader.getLowerSupportChunkCount();
    sizeData.upperSupportChunkCount = reader.getUpperSupportChunkCount();

    // allocate enough space to hold the family
    const size_t familySize = NvBlastAssetGetFamilyMemorySizeFromSizeData(sizeData, nullptr);
    void* mem = NVBLAST_ALLOC(familySize);

    // use the count info to initialize the family
    auto family = reinterpret_cast<Nv::Blast::FamilyHeader *>(NvBlastAssetCreateFamilyFromSizeData(mem, sizeData, Nv::Blast::logLL));

    // then fill in the data from the reader
    if (deserializeInto(reader, family))
        return family;

    // failed to deserialize, free the allocated memory so it doesn't leak
    NVBLAST_FREE(mem);
    return nullptr;
}

/**
 * Populate an already-allocated family (see deserialize) from the reader.
 * Array lengths come from the reader; the family is assumed to have been
 * sized with the same counts.
 */
bool FamilyDTO::deserializeInto(Nv::Blast::Serialization::Family::Reader reader, Nv::Blast::FamilyHeader* poco)
{
    NvBlastIDDTO::deserializeInto(reader.getAssetID(), &poco->m_assetID);

    // active actor count
    poco->m_actorCount = reader.getActorCount();

    // all possible actors
    Actor* actors = poco->getActors();
    auto readerActors = reader.getActors();
    NVBLAST_ASSERT(poco->m_actorCount <= readerActors.size());
    for (uint32_t i = 0; i < readerActors.size(); i++)
    {
        auto actorReader = readerActors[i];
        ActorDTO::deserializeInto(actorReader, &actors[i]);
    }

    // visibleChunkIndexLinks
    // they are stored in the buffer as a flat list of uint32_t values,
    // but stored as pairs in the Family
    auto readerVisibleChunkIndexLinks = reader.getVisibleChunkIndexLinks();
    const uint32_t numVisibleChunkIndexLinks = readerVisibleChunkIndexLinks.size();
    for (uint32_t i = 0; i < numVisibleChunkIndexLinks; i += 2)
    {
        const uint32_t vcil = i / 2;
        poco->getVisibleChunkIndexLinks()[vcil].m_adj[0] = readerVisibleChunkIndexLinks[i];
        poco->getVisibleChunkIndexLinks()[vcil].m_adj[1] = readerVisibleChunkIndexLinks[i+1];
    }

    // chunkActorIndices
    auto readerChunkActorIndices = reader.getChunkActorIndices();
    const uint32_t numChunkActorIndices = readerChunkActorIndices.size();
    for (uint32_t i = 0; i < numChunkActorIndices; i++)
    {
        poco->getChunkActorIndices()[i] = readerChunkActorIndices[i];
    }

    // graphNodeIndexLinks
    auto readerGraphNodeIndexLinks = reader.getGraphNodeIndexLinks();
    const uint32_t numGraphNodeIndexLinks = readerGraphNodeIndexLinks.size();
    for (uint32_t i = 0; i < numGraphNodeIndexLinks; i++)
    {
        poco->getGraphNodeIndexLinks()[i] = readerGraphNodeIndexLinks[i];
    }

    // lowerSupportChunkHealths
    auto readerLowerSupportChunkHealths = reader.getLowerSupportChunkHealths();
    const uint32_t numLowerSupportChunkHealths = readerLowerSupportChunkHealths.size();
    for (uint32_t i = 0; i < numLowerSupportChunkHealths; i++)
    {
        poco->getLowerSupportChunkHealths()[i] = readerLowerSupportChunkHealths[i];
    }

    // graphBondHealths
    auto readerGraphBondHealths = reader.getGraphBondHealths();
    const uint32_t numGraphBondHealths = readerGraphBondHealths.size();
    for (uint32_t i = 0; i < numGraphBondHealths; i++)
    {
        poco->getBondHealths()[i] = readerGraphBondHealths[i];
    }

    // familyGraph
    FamilyGraphDTO::deserializeInto(reader.getFamilyGraph(), poco->getFamilyGraph());

    return true;
}

}   // namespace Blast
}   // namespace Nv
8,006
C++
39.64467
133
0.738696
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/serialization/DTO/FamilyGraphDTO.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved. 
#include "FamilyGraphDTO.h"
#include "NvBlastGlobals.h"

namespace Nv
{
namespace Blast
{

/**
 * Serialize a FamilyGraph's per-node arrays and its two bitmaps.
 *
 * The node count is NOT available on FamilyGraph itself; the caller
 * (FamilyDTO::serialize) must set it on the builder before invoking this.
 */
bool FamilyGraphDTO::serialize(Nv::Blast::Serialization::FamilyGraph::Builder builder, const Nv::Blast::FamilyGraph * poco)
{
    // this needs to be set externally so we have access to it here
    const uint32_t nodeCount = builder.getNodeCount();

    kj::ArrayPtr<IslandId> islandIdsArray(poco->getIslandIds(), nodeCount);
    builder.setIslandIds(islandIdsArray);

    kj::ArrayPtr<NodeIndex> dirtyNodeLinksArray(poco->getDirtyNodeLinks(), nodeCount);
    builder.setDirtyNodeLinks(dirtyNodeLinksArray);

    kj::ArrayPtr<uint32_t> firstDirtyNodeIndicesArray(poco->getFirstDirtyNodeIndices(), nodeCount);
    builder.setFirstDirtyNodeIndices(firstDirtyNodeIndicesArray);

    kj::ArrayPtr<NodeIndex> fastRouteArray(poco->getFastRoute(), nodeCount);
    builder.setFastRoute(fastRouteArray);

    kj::ArrayPtr<uint32_t> hopCountsArray(poco->getHopCounts(), nodeCount);
    builder.setHopCounts(hopCountsArray);

    // The bitmaps expose their raw storage as const char*; capnp wants
    // non-const uint8_t*, hence the const_cast + reinterpret_cast. The
    // Data::Reader only reads from the buffer.
    auto isEdgeRemoved = poco->getIsEdgeRemoved();
    uint8_t* isEdgeRemovedData = reinterpret_cast<uint8_t*>(const_cast<char*>(isEdgeRemoved->getData()));
    capnp::Data::Reader isEdgeRemovedReader(isEdgeRemovedData, isEdgeRemoved->getSize());
    builder.setIsEdgeRemoved(isEdgeRemovedReader);

    auto isNodeInDirtyList = poco->getIsNodeInDirtyList();
    uint8_t* isNodeInDirtyListData = reinterpret_cast<uint8_t*>(const_cast<char*>(isNodeInDirtyList->getData()));
    capnp::Data::Reader isNodeInDirtyListReader(isNodeInDirtyListData, isNodeInDirtyList->getSize());
    builder.setIsNodeInDirtyList(isNodeInDirtyListReader);

    return true;
}

/**
 * Allocating deserialization is not implemented; the graph lives inside the
 * family's memory block, so use deserializeInto() instead.
 *
 * \return Always nullptr.
 */
Nv::Blast::FamilyGraph* FamilyGraphDTO::deserialize(Nv::Blast::Serialization::FamilyGraph::Reader reader)
{
    NV_UNUSED(reader);
    return nullptr;
}

/**
 * Fill an existing FamilyGraph (already placed inside a family's memory) from
 * the serialized arrays and bitmaps. Array lengths come from the reader; the
 * graph is assumed to have been sized with a matching node count.
 */
bool FamilyGraphDTO::deserializeInto(Nv::Blast::Serialization::FamilyGraph::Reader reader, Nv::Blast::FamilyGraph * poco)
{
    auto readerIslandIds = reader.getIslandIds();
    const uint32_t numIslandIds = readerIslandIds.size();
    for (uint32_t i = 0; i < numIslandIds; i++)
    {
        poco->getIslandIds()[i] = readerIslandIds[i];
    }

    auto readerDirtyNodeLinks = reader.getDirtyNodeLinks();
    const uint32_t numDirtyNodeLinks = readerDirtyNodeLinks.size();
    for (uint32_t i = 0; i < numDirtyNodeLinks; i++)
    {
        poco->getDirtyNodeLinks()[i] = readerDirtyNodeLinks[i];
    }

    auto readerFirstDirtyNodeIndices = reader.getFirstDirtyNodeIndices();
    const uint32_t numFirstDirtyNodeIndices = readerFirstDirtyNodeIndices.size();
    for (uint32_t i = 0; i < numFirstDirtyNodeIndices; i++)
    {
        poco->getFirstDirtyNodeIndices()[i] = readerFirstDirtyNodeIndices[i];
    }

    auto readerFastRoute = reader.getFastRoute();
    const uint32_t numFastRoute = readerFastRoute.size();
    for (uint32_t i = 0; i < numFastRoute; i++)
    {
        poco->getFastRoute()[i] = readerFastRoute[i];
    }

    auto readerHopCounts = reader.getHopCounts();
    const uint32_t numHopCounts = readerHopCounts.size();
    for (uint32_t i = 0; i < numHopCounts; i++)
    {
        poco->getHopCounts()[i] = readerHopCounts[i];
    }

    // Bitmaps: copy the serialized bytes back into the graph's bitmap storage.
    auto readerIsEdgeRemoved = reader.getIsEdgeRemoved();
    const uint32_t numIsEdgeRemoved = readerIsEdgeRemoved.size();
    const char* isEdgeRemovedData = reinterpret_cast<const char*>(readerIsEdgeRemoved.begin());
    auto isEdgeRemoved = poco->getIsEdgeRemoved();
    isEdgeRemoved->setData(isEdgeRemovedData, numIsEdgeRemoved);

    auto readerIsNodeInDirtyList = reader.getIsNodeInDirtyList();
    const uint32_t numIsNodeInDirtyList = readerIsNodeInDirtyList.size();
    const char* readerIsNodeInDirtyListData = reinterpret_cast<const char*>(readerIsNodeInDirtyList.begin());
    auto isNodeInDirtyList = poco->getIsNodeInDirtyList();
    isNodeInDirtyList->setData(readerIsNodeInDirtyListData, numIsNodeInDirtyList);

    return true;
}

}   // namespace Blast
}   // namespace Nv
5,532
C++
40.916666
123
0.743312
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/serialization/DTO/NvBlastIDDTO.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved. 
#include "NvBlastIDDTO.h"
#include "NvBlastTypes.h"
#include "NvBlastAssert.h"
#include "NvBlastExtLlSerialization-capn.h"

namespace Nv
{
namespace Blast
{

/**
 * Serialize a 16-byte NvBlastID (UUID) into the Cap'n Proto builder.
 *
 * \param builder  Destination UUID builder.
 * \param poco     Source ID; assumed non-null.
 * \return true on success.
 */
bool NvBlastIDDTO::serialize(Nv::Blast::Serialization::UUID::Builder builder, const NvBlastID * poco)
{
    // capnp::Data::Reader only reads, so a const-preserving reinterpret_cast
    // is sufficient (the old C-style cast needlessly stripped const).
    capnp::Data::Reader idArrayReader(reinterpret_cast<const unsigned char*>(poco->data), 16);
    builder.setValue(idArrayReader);

    return true;
}

/**
 * Allocating deserialization is not implemented; use deserializeInto() with
 * caller-provided storage instead.
 *
 * \return Always nullptr.
 */
NvBlastID* NvBlastIDDTO::deserialize(Nv::Blast::Serialization::UUID::Reader reader)
{
    //FIXME
    NV_UNUSED(reader);

    //TODO: Allocate with ExtContext and return
    return nullptr;
}

/**
 * Copy a serialized 16-byte UUID into an existing NvBlastID.
 *
 * \param reader  Source UUID reader; its payload must be exactly 16 bytes.
 * \param poco    Destination ID; assumed non-null.
 * \return true on success.
 */
bool NvBlastIDDTO::deserializeInto(Nv::Blast::Serialization::UUID::Reader reader, NvBlastID * poco)
{
    NVBLAST_ASSERT_WITH_MESSAGE(reader.getValue().size() == 16, "BlastID must be 16 bytes");

    // Copy into the data member explicitly rather than the struct address, so
    // correctness does not depend on NvBlastID's layout.
    memcpy(poco->data, reader.getValue().begin(), 16);

    return true;
}

}   // namespace Blast
}   // namespace Nv
2,466
C++
34.242857
101
0.745742
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/serialization/generated/NvBlastExtLlSerialization-capn.h
// Generated by Cap'n Proto compiler, DO NOT EDIT // source: NvBlastExtLlSerialization-capn #ifndef CAPNP_INCLUDED_9a4a58fac38375e0_ #define CAPNP_INCLUDED_9a4a58fac38375e0_ #include <capnp/generated-header-support.h> #if CAPNP_VERSION != 6001 #error "Version mismatch between generated code and library headers. You must use the same version of the Cap'n Proto compiler and library." #endif namespace capnp { namespace schemas { CAPNP_DECLARE_SCHEMA(ce4f8468c36f427d); CAPNP_DECLARE_SCHEMA(fe6948a9a6a3eff5); CAPNP_DECLARE_SCHEMA(d20ccbe36dd9711d); CAPNP_DECLARE_SCHEMA(8a38616881ef8310); CAPNP_DECLARE_SCHEMA(d5e1a9fb31b1350d); CAPNP_DECLARE_SCHEMA(b292bd608606f041); enum class Type_b292bd608606f041: uint16_t { ASSET_DATA_BLOCK, INSTANCE_DATA_BLOCK, }; CAPNP_DECLARE_ENUM(Type, b292bd608606f041); CAPNP_DECLARE_SCHEMA(92818c664a7b1aba); CAPNP_DECLARE_SCHEMA(c43da43c95eada67); CAPNP_DECLARE_SCHEMA(f018cbfcaacb3a55); CAPNP_DECLARE_SCHEMA(bfd00835cc19bf3a); } // namespace schemas } // namespace capnp namespace Nv { namespace Blast { namespace Serialization { struct Asset { Asset() = delete; class Reader; class Builder; class Pipeline; struct _capnpPrivate { CAPNP_DECLARE_STRUCT_HEADER(ce4f8468c36f427d, 2, 7) #if !CAPNP_LITE static constexpr ::capnp::_::RawBrandedSchema const* brand() { return &schema->defaultBrand; } #endif // !CAPNP_LITE }; }; struct Family { Family() = delete; class Reader; class Builder; class Pipeline; struct _capnpPrivate { CAPNP_DECLARE_STRUCT_HEADER(fe6948a9a6a3eff5, 3, 8) #if !CAPNP_LITE static constexpr ::capnp::_::RawBrandedSchema const* brand() { return &schema->defaultBrand; } #endif // !CAPNP_LITE }; }; struct Actor { Actor() = delete; class Reader; class Builder; class Pipeline; struct _capnpPrivate { CAPNP_DECLARE_STRUCT_HEADER(d20ccbe36dd9711d, 3, 0) #if !CAPNP_LITE static constexpr ::capnp::_::RawBrandedSchema const* brand() { return &schema->defaultBrand; } #endif // !CAPNP_LITE }; }; struct FamilyGraph { FamilyGraph() = delete; class 
Reader; class Builder; class Pipeline; struct _capnpPrivate { CAPNP_DECLARE_STRUCT_HEADER(8a38616881ef8310, 1, 7) #if !CAPNP_LITE static constexpr ::capnp::_::RawBrandedSchema const* brand() { return &schema->defaultBrand; } #endif // !CAPNP_LITE }; }; struct NvBlastDataBlock { NvBlastDataBlock() = delete; class Reader; class Builder; class Pipeline; typedef ::capnp::schemas::Type_b292bd608606f041 Type; struct _capnpPrivate { CAPNP_DECLARE_STRUCT_HEADER(d5e1a9fb31b1350d, 2, 0) #if !CAPNP_LITE static constexpr ::capnp::_::RawBrandedSchema const* brand() { return &schema->defaultBrand; } #endif // !CAPNP_LITE }; }; struct NvBlastChunk { NvBlastChunk() = delete; class Reader; class Builder; class Pipeline; struct _capnpPrivate { CAPNP_DECLARE_STRUCT_HEADER(92818c664a7b1aba, 3, 1) #if !CAPNP_LITE static constexpr ::capnp::_::RawBrandedSchema const* brand() { return &schema->defaultBrand; } #endif // !CAPNP_LITE }; }; struct NvBlastBond { NvBlastBond() = delete; class Reader; class Builder; class Pipeline; struct _capnpPrivate { CAPNP_DECLARE_STRUCT_HEADER(c43da43c95eada67, 1, 2) #if !CAPNP_LITE static constexpr ::capnp::_::RawBrandedSchema const* brand() { return &schema->defaultBrand; } #endif // !CAPNP_LITE }; }; struct NvBlastSupportGraph { NvBlastSupportGraph() = delete; class Reader; class Builder; class Pipeline; struct _capnpPrivate { CAPNP_DECLARE_STRUCT_HEADER(f018cbfcaacb3a55, 1, 4) #if !CAPNP_LITE static constexpr ::capnp::_::RawBrandedSchema const* brand() { return &schema->defaultBrand; } #endif // !CAPNP_LITE }; }; struct UUID { UUID() = delete; class Reader; class Builder; class Pipeline; struct _capnpPrivate { CAPNP_DECLARE_STRUCT_HEADER(bfd00835cc19bf3a, 0, 1) #if !CAPNP_LITE static constexpr ::capnp::_::RawBrandedSchema const* brand() { return &schema->defaultBrand; } #endif // !CAPNP_LITE }; }; // ======================================================================================= class Asset::Reader { public: typedef Asset Reads; Reader() = 
// NOTE(review): the classes in this region appear to be output of the
// Cap'n Proto schema compiler (capnpc-c++) for the NvBlast serialization
// schema: each schema struct gets a Reader (read-only view over a
// ::capnp::_::StructReader), a Builder (mutable view over a
// ::capnp::_::StructBuilder, with set/init/adopt/disown accessors), and a
// Pipeline (RPC promise pipelining, compiled out under CAPNP_LITE).
// Prefer regenerating from the .capnp schema over hand-editing — TODO
// confirm the generating schema file.
// This span: Asset::Reader accessor declarations, Asset::Builder,
// Asset::Pipeline, and the beginning of Family::Reader.
default; inline explicit Reader(::capnp::_::StructReader base): _reader(base) {} inline ::capnp::MessageSize totalSize() const { return _reader.totalSize().asPublic(); } #if !CAPNP_LITE inline ::kj::StringTree toString() const { return ::capnp::_::structString(_reader, *_capnpPrivate::brand()); } #endif // !CAPNP_LITE inline bool hasHeader() const; inline ::Nv::Blast::Serialization::NvBlastDataBlock::Reader getHeader() const; inline bool hasID() const; inline ::Nv::Blast::Serialization::UUID::Reader getID() const; inline ::uint32_t getChunkCount() const; inline bool hasGraph() const; inline ::Nv::Blast::Serialization::NvBlastSupportGraph::Reader getGraph() const; inline ::uint32_t getLeafChunkCount() const; inline ::uint32_t getFirstSubsupportChunkIndex() const; inline ::uint32_t getBondCount() const; inline bool hasChunks() const; inline ::capnp::List< ::Nv::Blast::Serialization::NvBlastChunk>::Reader getChunks() const; inline bool hasBonds() const; inline ::capnp::List< ::Nv::Blast::Serialization::NvBlastBond>::Reader getBonds() const; inline bool hasSubtreeLeafChunkCounts() const; inline ::capnp::List< ::uint32_t>::Reader getSubtreeLeafChunkCounts() const; inline bool hasChunkToGraphNodeMap() const; inline ::capnp::List< ::uint32_t>::Reader getChunkToGraphNodeMap() const; private: ::capnp::_::StructReader _reader; template <typename, ::capnp::Kind> friend struct ::capnp::ToDynamic_; template <typename, ::capnp::Kind> friend struct ::capnp::_::PointerHelpers; template <typename, ::capnp::Kind> friend struct ::capnp::List; friend class ::capnp::MessageBuilder; friend class ::capnp::Orphanage; }; class Asset::Builder { public: typedef Asset Builds; Builder() = delete; // Deleted to discourage incorrect usage. // You can explicitly initialize to nullptr instead. 
inline Builder(decltype(nullptr)) {} inline explicit Builder(::capnp::_::StructBuilder base): _builder(base) {} inline operator Reader() const { return Reader(_builder.asReader()); } inline Reader asReader() const { return *this; } inline ::capnp::MessageSize totalSize() const { return asReader().totalSize(); } #if !CAPNP_LITE inline ::kj::StringTree toString() const { return asReader().toString(); } #endif // !CAPNP_LITE inline bool hasHeader(); inline ::Nv::Blast::Serialization::NvBlastDataBlock::Builder getHeader(); inline void setHeader( ::Nv::Blast::Serialization::NvBlastDataBlock::Reader value); inline ::Nv::Blast::Serialization::NvBlastDataBlock::Builder initHeader(); inline void adoptHeader(::capnp::Orphan< ::Nv::Blast::Serialization::NvBlastDataBlock>&& value); inline ::capnp::Orphan< ::Nv::Blast::Serialization::NvBlastDataBlock> disownHeader(); inline bool hasID(); inline ::Nv::Blast::Serialization::UUID::Builder getID(); inline void setID( ::Nv::Blast::Serialization::UUID::Reader value); inline ::Nv::Blast::Serialization::UUID::Builder initID(); inline void adoptID(::capnp::Orphan< ::Nv::Blast::Serialization::UUID>&& value); inline ::capnp::Orphan< ::Nv::Blast::Serialization::UUID> disownID(); inline ::uint32_t getChunkCount(); inline void setChunkCount( ::uint32_t value); inline bool hasGraph(); inline ::Nv::Blast::Serialization::NvBlastSupportGraph::Builder getGraph(); inline void setGraph( ::Nv::Blast::Serialization::NvBlastSupportGraph::Reader value); inline ::Nv::Blast::Serialization::NvBlastSupportGraph::Builder initGraph(); inline void adoptGraph(::capnp::Orphan< ::Nv::Blast::Serialization::NvBlastSupportGraph>&& value); inline ::capnp::Orphan< ::Nv::Blast::Serialization::NvBlastSupportGraph> disownGraph(); inline ::uint32_t getLeafChunkCount(); inline void setLeafChunkCount( ::uint32_t value); inline ::uint32_t getFirstSubsupportChunkIndex(); inline void setFirstSubsupportChunkIndex( ::uint32_t value); inline ::uint32_t getBondCount(); inline 
void setBondCount( ::uint32_t value); inline bool hasChunks(); inline ::capnp::List< ::Nv::Blast::Serialization::NvBlastChunk>::Builder getChunks(); inline void setChunks( ::capnp::List< ::Nv::Blast::Serialization::NvBlastChunk>::Reader value); inline ::capnp::List< ::Nv::Blast::Serialization::NvBlastChunk>::Builder initChunks(unsigned int size); inline void adoptChunks(::capnp::Orphan< ::capnp::List< ::Nv::Blast::Serialization::NvBlastChunk>>&& value); inline ::capnp::Orphan< ::capnp::List< ::Nv::Blast::Serialization::NvBlastChunk>> disownChunks(); inline bool hasBonds(); inline ::capnp::List< ::Nv::Blast::Serialization::NvBlastBond>::Builder getBonds(); inline void setBonds( ::capnp::List< ::Nv::Blast::Serialization::NvBlastBond>::Reader value); inline ::capnp::List< ::Nv::Blast::Serialization::NvBlastBond>::Builder initBonds(unsigned int size); inline void adoptBonds(::capnp::Orphan< ::capnp::List< ::Nv::Blast::Serialization::NvBlastBond>>&& value); inline ::capnp::Orphan< ::capnp::List< ::Nv::Blast::Serialization::NvBlastBond>> disownBonds(); inline bool hasSubtreeLeafChunkCounts(); inline ::capnp::List< ::uint32_t>::Builder getSubtreeLeafChunkCounts(); inline void setSubtreeLeafChunkCounts( ::capnp::List< ::uint32_t>::Reader value); inline void setSubtreeLeafChunkCounts(::kj::ArrayPtr<const ::uint32_t> value); inline ::capnp::List< ::uint32_t>::Builder initSubtreeLeafChunkCounts(unsigned int size); inline void adoptSubtreeLeafChunkCounts(::capnp::Orphan< ::capnp::List< ::uint32_t>>&& value); inline ::capnp::Orphan< ::capnp::List< ::uint32_t>> disownSubtreeLeafChunkCounts(); inline bool hasChunkToGraphNodeMap(); inline ::capnp::List< ::uint32_t>::Builder getChunkToGraphNodeMap(); inline void setChunkToGraphNodeMap( ::capnp::List< ::uint32_t>::Reader value); inline void setChunkToGraphNodeMap(::kj::ArrayPtr<const ::uint32_t> value); inline ::capnp::List< ::uint32_t>::Builder initChunkToGraphNodeMap(unsigned int size); inline void 
adoptChunkToGraphNodeMap(::capnp::Orphan< ::capnp::List< ::uint32_t>>&& value); inline ::capnp::Orphan< ::capnp::List< ::uint32_t>> disownChunkToGraphNodeMap(); private: ::capnp::_::StructBuilder _builder; template <typename, ::capnp::Kind> friend struct ::capnp::ToDynamic_; friend class ::capnp::Orphanage; template <typename, ::capnp::Kind> friend struct ::capnp::_::PointerHelpers; }; #if !CAPNP_LITE class Asset::Pipeline { public: typedef Asset Pipelines; inline Pipeline(decltype(nullptr)): _typeless(nullptr) {} inline explicit Pipeline(::capnp::AnyPointer::Pipeline&& typeless) : _typeless(kj::mv(typeless)) {} inline ::Nv::Blast::Serialization::NvBlastDataBlock::Pipeline getHeader(); inline ::Nv::Blast::Serialization::UUID::Pipeline getID(); inline ::Nv::Blast::Serialization::NvBlastSupportGraph::Pipeline getGraph(); private: ::capnp::AnyPointer::Pipeline _typeless; friend class ::capnp::PipelineHook; template <typename, ::capnp::Kind> friend struct ::capnp::ToDynamic_; }; #endif // !CAPNP_LITE class Family::Reader { public: typedef Family Reads; Reader() = default; inline explicit Reader(::capnp::_::StructReader base): _reader(base) {} inline ::capnp::MessageSize totalSize() const { return _reader.totalSize().asPublic(); } #if !CAPNP_LITE inline ::kj::StringTree toString() const { return ::capnp::_::structString(_reader, *_capnpPrivate::brand()); } #endif // !CAPNP_LITE inline bool hasAssetID() const; inline ::Nv::Blast::Serialization::UUID::Reader getAssetID() const; inline bool hasActors() const; inline ::capnp::List< ::Nv::Blast::Serialization::Actor>::Reader getActors() const; inline bool hasVisibleChunkIndexLinks() const; inline ::capnp::List< ::uint32_t>::Reader getVisibleChunkIndexLinks() const; inline bool hasChunkActorIndices() const; inline ::capnp::List< ::uint32_t>::Reader getChunkActorIndices() const; inline bool hasGraphNodeIndexLinks() const; inline ::capnp::List< ::uint32_t>::Reader getGraphNodeIndexLinks() const; inline bool 
// Generated Cap'n Proto accessor declarations (Reader = read-only view,
// Builder = mutable view with set/init/adopt/disown, Pipeline = RPC
// pipelining under !CAPNP_LITE) for the Family and Actor serialization
// structs, plus FamilyGraph::Reader and the start of FamilyGraph::Builder.
// NOTE(review): machine-generated style — avoid hand-editing; regenerate
// from the schema instead (TODO: confirm the source .capnp file).
hasLowerSupportChunkHealths() const; inline ::capnp::List<float>::Reader getLowerSupportChunkHealths() const; inline bool hasGraphBondHealths() const; inline ::capnp::List<float>::Reader getGraphBondHealths() const; inline bool hasFamilyGraph() const; inline ::Nv::Blast::Serialization::FamilyGraph::Reader getFamilyGraph() const; inline ::uint32_t getActorCount() const; inline ::uint32_t getBondCount() const; inline ::uint32_t getChunkCount() const; inline ::uint32_t getNodeCount() const; inline ::uint32_t getLowerSupportChunkCount() const; inline ::uint32_t getUpperSupportChunkCount() const; private: ::capnp::_::StructReader _reader; template <typename, ::capnp::Kind> friend struct ::capnp::ToDynamic_; template <typename, ::capnp::Kind> friend struct ::capnp::_::PointerHelpers; template <typename, ::capnp::Kind> friend struct ::capnp::List; friend class ::capnp::MessageBuilder; friend class ::capnp::Orphanage; }; class Family::Builder { public: typedef Family Builds; Builder() = delete; // Deleted to discourage incorrect usage. // You can explicitly initialize to nullptr instead. 
inline Builder(decltype(nullptr)) {} inline explicit Builder(::capnp::_::StructBuilder base): _builder(base) {} inline operator Reader() const { return Reader(_builder.asReader()); } inline Reader asReader() const { return *this; } inline ::capnp::MessageSize totalSize() const { return asReader().totalSize(); } #if !CAPNP_LITE inline ::kj::StringTree toString() const { return asReader().toString(); } #endif // !CAPNP_LITE inline bool hasAssetID(); inline ::Nv::Blast::Serialization::UUID::Builder getAssetID(); inline void setAssetID( ::Nv::Blast::Serialization::UUID::Reader value); inline ::Nv::Blast::Serialization::UUID::Builder initAssetID(); inline void adoptAssetID(::capnp::Orphan< ::Nv::Blast::Serialization::UUID>&& value); inline ::capnp::Orphan< ::Nv::Blast::Serialization::UUID> disownAssetID(); inline bool hasActors(); inline ::capnp::List< ::Nv::Blast::Serialization::Actor>::Builder getActors(); inline void setActors( ::capnp::List< ::Nv::Blast::Serialization::Actor>::Reader value); inline ::capnp::List< ::Nv::Blast::Serialization::Actor>::Builder initActors(unsigned int size); inline void adoptActors(::capnp::Orphan< ::capnp::List< ::Nv::Blast::Serialization::Actor>>&& value); inline ::capnp::Orphan< ::capnp::List< ::Nv::Blast::Serialization::Actor>> disownActors(); inline bool hasVisibleChunkIndexLinks(); inline ::capnp::List< ::uint32_t>::Builder getVisibleChunkIndexLinks(); inline void setVisibleChunkIndexLinks( ::capnp::List< ::uint32_t>::Reader value); inline void setVisibleChunkIndexLinks(::kj::ArrayPtr<const ::uint32_t> value); inline ::capnp::List< ::uint32_t>::Builder initVisibleChunkIndexLinks(unsigned int size); inline void adoptVisibleChunkIndexLinks(::capnp::Orphan< ::capnp::List< ::uint32_t>>&& value); inline ::capnp::Orphan< ::capnp::List< ::uint32_t>> disownVisibleChunkIndexLinks(); inline bool hasChunkActorIndices(); inline ::capnp::List< ::uint32_t>::Builder getChunkActorIndices(); inline void setChunkActorIndices( ::capnp::List< 
::uint32_t>::Reader value); inline void setChunkActorIndices(::kj::ArrayPtr<const ::uint32_t> value); inline ::capnp::List< ::uint32_t>::Builder initChunkActorIndices(unsigned int size); inline void adoptChunkActorIndices(::capnp::Orphan< ::capnp::List< ::uint32_t>>&& value); inline ::capnp::Orphan< ::capnp::List< ::uint32_t>> disownChunkActorIndices(); inline bool hasGraphNodeIndexLinks(); inline ::capnp::List< ::uint32_t>::Builder getGraphNodeIndexLinks(); inline void setGraphNodeIndexLinks( ::capnp::List< ::uint32_t>::Reader value); inline void setGraphNodeIndexLinks(::kj::ArrayPtr<const ::uint32_t> value); inline ::capnp::List< ::uint32_t>::Builder initGraphNodeIndexLinks(unsigned int size); inline void adoptGraphNodeIndexLinks(::capnp::Orphan< ::capnp::List< ::uint32_t>>&& value); inline ::capnp::Orphan< ::capnp::List< ::uint32_t>> disownGraphNodeIndexLinks(); inline bool hasLowerSupportChunkHealths(); inline ::capnp::List<float>::Builder getLowerSupportChunkHealths(); inline void setLowerSupportChunkHealths( ::capnp::List<float>::Reader value); inline void setLowerSupportChunkHealths(::kj::ArrayPtr<const float> value); inline ::capnp::List<float>::Builder initLowerSupportChunkHealths(unsigned int size); inline void adoptLowerSupportChunkHealths(::capnp::Orphan< ::capnp::List<float>>&& value); inline ::capnp::Orphan< ::capnp::List<float>> disownLowerSupportChunkHealths(); inline bool hasGraphBondHealths(); inline ::capnp::List<float>::Builder getGraphBondHealths(); inline void setGraphBondHealths( ::capnp::List<float>::Reader value); inline void setGraphBondHealths(::kj::ArrayPtr<const float> value); inline ::capnp::List<float>::Builder initGraphBondHealths(unsigned int size); inline void adoptGraphBondHealths(::capnp::Orphan< ::capnp::List<float>>&& value); inline ::capnp::Orphan< ::capnp::List<float>> disownGraphBondHealths(); inline bool hasFamilyGraph(); inline ::Nv::Blast::Serialization::FamilyGraph::Builder getFamilyGraph(); inline void setFamilyGraph( 
::Nv::Blast::Serialization::FamilyGraph::Reader value); inline ::Nv::Blast::Serialization::FamilyGraph::Builder initFamilyGraph(); inline void adoptFamilyGraph(::capnp::Orphan< ::Nv::Blast::Serialization::FamilyGraph>&& value); inline ::capnp::Orphan< ::Nv::Blast::Serialization::FamilyGraph> disownFamilyGraph(); inline ::uint32_t getActorCount(); inline void setActorCount( ::uint32_t value); inline ::uint32_t getBondCount(); inline void setBondCount( ::uint32_t value); inline ::uint32_t getChunkCount(); inline void setChunkCount( ::uint32_t value); inline ::uint32_t getNodeCount(); inline void setNodeCount( ::uint32_t value); inline ::uint32_t getLowerSupportChunkCount(); inline void setLowerSupportChunkCount( ::uint32_t value); inline ::uint32_t getUpperSupportChunkCount(); inline void setUpperSupportChunkCount( ::uint32_t value); private: ::capnp::_::StructBuilder _builder; template <typename, ::capnp::Kind> friend struct ::capnp::ToDynamic_; friend class ::capnp::Orphanage; template <typename, ::capnp::Kind> friend struct ::capnp::_::PointerHelpers; }; #if !CAPNP_LITE class Family::Pipeline { public: typedef Family Pipelines; inline Pipeline(decltype(nullptr)): _typeless(nullptr) {} inline explicit Pipeline(::capnp::AnyPointer::Pipeline&& typeless) : _typeless(kj::mv(typeless)) {} inline ::Nv::Blast::Serialization::UUID::Pipeline getAssetID(); inline ::Nv::Blast::Serialization::FamilyGraph::Pipeline getFamilyGraph(); private: ::capnp::AnyPointer::Pipeline _typeless; friend class ::capnp::PipelineHook; template <typename, ::capnp::Kind> friend struct ::capnp::ToDynamic_; }; #endif // !CAPNP_LITE class Actor::Reader { public: typedef Actor Reads; Reader() = default; inline explicit Reader(::capnp::_::StructReader base): _reader(base) {} inline ::capnp::MessageSize totalSize() const { return _reader.totalSize().asPublic(); } #if !CAPNP_LITE inline ::kj::StringTree toString() const { return ::capnp::_::structString(_reader, *_capnpPrivate::brand()); } #endif // 
!CAPNP_LITE inline ::uint32_t getFamilyOffset() const; inline ::uint32_t getFirstVisibleChunkIndex() const; inline ::uint32_t getVisibleChunkCount() const; inline ::uint32_t getFirstGraphNodeIndex() const; inline ::uint32_t getGraphNodeCount() const; inline ::uint32_t getLeafChunkCount() const; private: ::capnp::_::StructReader _reader; template <typename, ::capnp::Kind> friend struct ::capnp::ToDynamic_; template <typename, ::capnp::Kind> friend struct ::capnp::_::PointerHelpers; template <typename, ::capnp::Kind> friend struct ::capnp::List; friend class ::capnp::MessageBuilder; friend class ::capnp::Orphanage; }; class Actor::Builder { public: typedef Actor Builds; Builder() = delete; // Deleted to discourage incorrect usage. // You can explicitly initialize to nullptr instead. inline Builder(decltype(nullptr)) {} inline explicit Builder(::capnp::_::StructBuilder base): _builder(base) {} inline operator Reader() const { return Reader(_builder.asReader()); } inline Reader asReader() const { return *this; } inline ::capnp::MessageSize totalSize() const { return asReader().totalSize(); } #if !CAPNP_LITE inline ::kj::StringTree toString() const { return asReader().toString(); } #endif // !CAPNP_LITE inline ::uint32_t getFamilyOffset(); inline void setFamilyOffset( ::uint32_t value); inline ::uint32_t getFirstVisibleChunkIndex(); inline void setFirstVisibleChunkIndex( ::uint32_t value); inline ::uint32_t getVisibleChunkCount(); inline void setVisibleChunkCount( ::uint32_t value); inline ::uint32_t getFirstGraphNodeIndex(); inline void setFirstGraphNodeIndex( ::uint32_t value); inline ::uint32_t getGraphNodeCount(); inline void setGraphNodeCount( ::uint32_t value); inline ::uint32_t getLeafChunkCount(); inline void setLeafChunkCount( ::uint32_t value); private: ::capnp::_::StructBuilder _builder; template <typename, ::capnp::Kind> friend struct ::capnp::ToDynamic_; friend class ::capnp::Orphanage; template <typename, ::capnp::Kind> friend struct 
::capnp::_::PointerHelpers; }; #if !CAPNP_LITE class Actor::Pipeline { public: typedef Actor Pipelines; inline Pipeline(decltype(nullptr)): _typeless(nullptr) {} inline explicit Pipeline(::capnp::AnyPointer::Pipeline&& typeless) : _typeless(kj::mv(typeless)) {} private: ::capnp::AnyPointer::Pipeline _typeless; friend class ::capnp::PipelineHook; template <typename, ::capnp::Kind> friend struct ::capnp::ToDynamic_; }; #endif // !CAPNP_LITE class FamilyGraph::Reader { public: typedef FamilyGraph Reads; Reader() = default; inline explicit Reader(::capnp::_::StructReader base): _reader(base) {} inline ::capnp::MessageSize totalSize() const { return _reader.totalSize().asPublic(); } #if !CAPNP_LITE inline ::kj::StringTree toString() const { return ::capnp::_::structString(_reader, *_capnpPrivate::brand()); } #endif // !CAPNP_LITE inline bool hasIslandIds() const; inline ::capnp::List< ::uint32_t>::Reader getIslandIds() const; inline bool hasDirtyNodeLinks() const; inline ::capnp::List< ::uint32_t>::Reader getDirtyNodeLinks() const; inline bool hasFirstDirtyNodeIndices() const; inline ::capnp::List< ::uint32_t>::Reader getFirstDirtyNodeIndices() const; inline bool hasFastRoute() const; inline ::capnp::List< ::uint32_t>::Reader getFastRoute() const; inline bool hasHopCounts() const; inline ::capnp::List< ::uint32_t>::Reader getHopCounts() const; inline bool hasIsEdgeRemoved() const; inline ::capnp::Data::Reader getIsEdgeRemoved() const; inline bool hasIsNodeInDirtyList() const; inline ::capnp::Data::Reader getIsNodeInDirtyList() const; inline ::uint32_t getNodeCount() const; private: ::capnp::_::StructReader _reader; template <typename, ::capnp::Kind> friend struct ::capnp::ToDynamic_; template <typename, ::capnp::Kind> friend struct ::capnp::_::PointerHelpers; template <typename, ::capnp::Kind> friend struct ::capnp::List; friend class ::capnp::MessageBuilder; friend class ::capnp::Orphanage; }; class FamilyGraph::Builder { public: typedef FamilyGraph Builds; Builder() = 
// Generated Cap'n Proto accessor declarations: the remainder of
// FamilyGraph::Builder (list/Data field get/set/init plus adopt/disown
// orphan-transfer pairs), FamilyGraph::Pipeline, NvBlastDataBlock::Reader,
// and the start of NvBlastDataBlock::Builder.
// NOTE(review): machine-generated style — avoid hand-editing; regenerate
// from the schema instead (TODO: confirm the source .capnp file).
delete; // Deleted to discourage incorrect usage. // You can explicitly initialize to nullptr instead. inline Builder(decltype(nullptr)) {} inline explicit Builder(::capnp::_::StructBuilder base): _builder(base) {} inline operator Reader() const { return Reader(_builder.asReader()); } inline Reader asReader() const { return *this; } inline ::capnp::MessageSize totalSize() const { return asReader().totalSize(); } #if !CAPNP_LITE inline ::kj::StringTree toString() const { return asReader().toString(); } #endif // !CAPNP_LITE inline bool hasIslandIds(); inline ::capnp::List< ::uint32_t>::Builder getIslandIds(); inline void setIslandIds( ::capnp::List< ::uint32_t>::Reader value); inline void setIslandIds(::kj::ArrayPtr<const ::uint32_t> value); inline ::capnp::List< ::uint32_t>::Builder initIslandIds(unsigned int size); inline void adoptIslandIds(::capnp::Orphan< ::capnp::List< ::uint32_t>>&& value); inline ::capnp::Orphan< ::capnp::List< ::uint32_t>> disownIslandIds(); inline bool hasDirtyNodeLinks(); inline ::capnp::List< ::uint32_t>::Builder getDirtyNodeLinks(); inline void setDirtyNodeLinks( ::capnp::List< ::uint32_t>::Reader value); inline void setDirtyNodeLinks(::kj::ArrayPtr<const ::uint32_t> value); inline ::capnp::List< ::uint32_t>::Builder initDirtyNodeLinks(unsigned int size); inline void adoptDirtyNodeLinks(::capnp::Orphan< ::capnp::List< ::uint32_t>>&& value); inline ::capnp::Orphan< ::capnp::List< ::uint32_t>> disownDirtyNodeLinks(); inline bool hasFirstDirtyNodeIndices(); inline ::capnp::List< ::uint32_t>::Builder getFirstDirtyNodeIndices(); inline void setFirstDirtyNodeIndices( ::capnp::List< ::uint32_t>::Reader value); inline void setFirstDirtyNodeIndices(::kj::ArrayPtr<const ::uint32_t> value); inline ::capnp::List< ::uint32_t>::Builder initFirstDirtyNodeIndices(unsigned int size); inline void adoptFirstDirtyNodeIndices(::capnp::Orphan< ::capnp::List< ::uint32_t>>&& value); inline ::capnp::Orphan< ::capnp::List< ::uint32_t>> 
disownFirstDirtyNodeIndices(); inline bool hasFastRoute(); inline ::capnp::List< ::uint32_t>::Builder getFastRoute(); inline void setFastRoute( ::capnp::List< ::uint32_t>::Reader value); inline void setFastRoute(::kj::ArrayPtr<const ::uint32_t> value); inline ::capnp::List< ::uint32_t>::Builder initFastRoute(unsigned int size); inline void adoptFastRoute(::capnp::Orphan< ::capnp::List< ::uint32_t>>&& value); inline ::capnp::Orphan< ::capnp::List< ::uint32_t>> disownFastRoute(); inline bool hasHopCounts(); inline ::capnp::List< ::uint32_t>::Builder getHopCounts(); inline void setHopCounts( ::capnp::List< ::uint32_t>::Reader value); inline void setHopCounts(::kj::ArrayPtr<const ::uint32_t> value); inline ::capnp::List< ::uint32_t>::Builder initHopCounts(unsigned int size); inline void adoptHopCounts(::capnp::Orphan< ::capnp::List< ::uint32_t>>&& value); inline ::capnp::Orphan< ::capnp::List< ::uint32_t>> disownHopCounts(); inline bool hasIsEdgeRemoved(); inline ::capnp::Data::Builder getIsEdgeRemoved(); inline void setIsEdgeRemoved( ::capnp::Data::Reader value); inline ::capnp::Data::Builder initIsEdgeRemoved(unsigned int size); inline void adoptIsEdgeRemoved(::capnp::Orphan< ::capnp::Data>&& value); inline ::capnp::Orphan< ::capnp::Data> disownIsEdgeRemoved(); inline bool hasIsNodeInDirtyList(); inline ::capnp::Data::Builder getIsNodeInDirtyList(); inline void setIsNodeInDirtyList( ::capnp::Data::Reader value); inline ::capnp::Data::Builder initIsNodeInDirtyList(unsigned int size); inline void adoptIsNodeInDirtyList(::capnp::Orphan< ::capnp::Data>&& value); inline ::capnp::Orphan< ::capnp::Data> disownIsNodeInDirtyList(); inline ::uint32_t getNodeCount(); inline void setNodeCount( ::uint32_t value); private: ::capnp::_::StructBuilder _builder; template <typename, ::capnp::Kind> friend struct ::capnp::ToDynamic_; friend class ::capnp::Orphanage; template <typename, ::capnp::Kind> friend struct ::capnp::_::PointerHelpers; }; #if !CAPNP_LITE class FamilyGraph::Pipeline 
{ public: typedef FamilyGraph Pipelines; inline Pipeline(decltype(nullptr)): _typeless(nullptr) {} inline explicit Pipeline(::capnp::AnyPointer::Pipeline&& typeless) : _typeless(kj::mv(typeless)) {} private: ::capnp::AnyPointer::Pipeline _typeless; friend class ::capnp::PipelineHook; template <typename, ::capnp::Kind> friend struct ::capnp::ToDynamic_; }; #endif // !CAPNP_LITE class NvBlastDataBlock::Reader { public: typedef NvBlastDataBlock Reads; Reader() = default; inline explicit Reader(::capnp::_::StructReader base): _reader(base) {} inline ::capnp::MessageSize totalSize() const { return _reader.totalSize().asPublic(); } #if !CAPNP_LITE inline ::kj::StringTree toString() const { return ::capnp::_::structString(_reader, *_capnpPrivate::brand()); } #endif // !CAPNP_LITE inline ::Nv::Blast::Serialization::NvBlastDataBlock::Type getDataType() const; inline ::uint32_t getFormatVersion() const; inline ::uint32_t getSize() const; private: ::capnp::_::StructReader _reader; template <typename, ::capnp::Kind> friend struct ::capnp::ToDynamic_; template <typename, ::capnp::Kind> friend struct ::capnp::_::PointerHelpers; template <typename, ::capnp::Kind> friend struct ::capnp::List; friend class ::capnp::MessageBuilder; friend class ::capnp::Orphanage; }; class NvBlastDataBlock::Builder { public: typedef NvBlastDataBlock Builds; Builder() = delete; // Deleted to discourage incorrect usage. // You can explicitly initialize to nullptr instead. 
// Generated Cap'n Proto accessor declarations: NvBlastDataBlock::Builder
// and Pipeline, NvBlastChunk Reader/Builder/Pipeline, NvBlastBond
// Reader/Builder/Pipeline, NvBlastSupportGraph::Reader, and the beginning
// of NvBlastSupportGraph::Builder (its declaration continues past this
// region).
// NOTE(review): machine-generated style — avoid hand-editing; regenerate
// from the schema instead (TODO: confirm the source .capnp file).
inline Builder(decltype(nullptr)) {} inline explicit Builder(::capnp::_::StructBuilder base): _builder(base) {} inline operator Reader() const { return Reader(_builder.asReader()); } inline Reader asReader() const { return *this; } inline ::capnp::MessageSize totalSize() const { return asReader().totalSize(); } #if !CAPNP_LITE inline ::kj::StringTree toString() const { return asReader().toString(); } #endif // !CAPNP_LITE inline ::Nv::Blast::Serialization::NvBlastDataBlock::Type getDataType(); inline void setDataType( ::Nv::Blast::Serialization::NvBlastDataBlock::Type value); inline ::uint32_t getFormatVersion(); inline void setFormatVersion( ::uint32_t value); inline ::uint32_t getSize(); inline void setSize( ::uint32_t value); private: ::capnp::_::StructBuilder _builder; template <typename, ::capnp::Kind> friend struct ::capnp::ToDynamic_; friend class ::capnp::Orphanage; template <typename, ::capnp::Kind> friend struct ::capnp::_::PointerHelpers; }; #if !CAPNP_LITE class NvBlastDataBlock::Pipeline { public: typedef NvBlastDataBlock Pipelines; inline Pipeline(decltype(nullptr)): _typeless(nullptr) {} inline explicit Pipeline(::capnp::AnyPointer::Pipeline&& typeless) : _typeless(kj::mv(typeless)) {} private: ::capnp::AnyPointer::Pipeline _typeless; friend class ::capnp::PipelineHook; template <typename, ::capnp::Kind> friend struct ::capnp::ToDynamic_; }; #endif // !CAPNP_LITE class NvBlastChunk::Reader { public: typedef NvBlastChunk Reads; Reader() = default; inline explicit Reader(::capnp::_::StructReader base): _reader(base) {} inline ::capnp::MessageSize totalSize() const { return _reader.totalSize().asPublic(); } #if !CAPNP_LITE inline ::kj::StringTree toString() const { return ::capnp::_::structString(_reader, *_capnpPrivate::brand()); } #endif // !CAPNP_LITE inline bool hasCentroid() const; inline ::capnp::List<float>::Reader getCentroid() const; inline float getVolume() const; inline ::uint32_t getParentChunkIndex() const; inline ::uint32_t 
getFirstChildIndex() const; inline ::uint32_t getChildIndexStop() const; inline ::uint32_t getUserData() const; private: ::capnp::_::StructReader _reader; template <typename, ::capnp::Kind> friend struct ::capnp::ToDynamic_; template <typename, ::capnp::Kind> friend struct ::capnp::_::PointerHelpers; template <typename, ::capnp::Kind> friend struct ::capnp::List; friend class ::capnp::MessageBuilder; friend class ::capnp::Orphanage; }; class NvBlastChunk::Builder { public: typedef NvBlastChunk Builds; Builder() = delete; // Deleted to discourage incorrect usage. // You can explicitly initialize to nullptr instead. inline Builder(decltype(nullptr)) {} inline explicit Builder(::capnp::_::StructBuilder base): _builder(base) {} inline operator Reader() const { return Reader(_builder.asReader()); } inline Reader asReader() const { return *this; } inline ::capnp::MessageSize totalSize() const { return asReader().totalSize(); } #if !CAPNP_LITE inline ::kj::StringTree toString() const { return asReader().toString(); } #endif // !CAPNP_LITE inline bool hasCentroid(); inline ::capnp::List<float>::Builder getCentroid(); inline void setCentroid( ::capnp::List<float>::Reader value); inline void setCentroid(::kj::ArrayPtr<const float> value); inline ::capnp::List<float>::Builder initCentroid(unsigned int size); inline void adoptCentroid(::capnp::Orphan< ::capnp::List<float>>&& value); inline ::capnp::Orphan< ::capnp::List<float>> disownCentroid(); inline float getVolume(); inline void setVolume(float value); inline ::uint32_t getParentChunkIndex(); inline void setParentChunkIndex( ::uint32_t value); inline ::uint32_t getFirstChildIndex(); inline void setFirstChildIndex( ::uint32_t value); inline ::uint32_t getChildIndexStop(); inline void setChildIndexStop( ::uint32_t value); inline ::uint32_t getUserData(); inline void setUserData( ::uint32_t value); private: ::capnp::_::StructBuilder _builder; template <typename, ::capnp::Kind> friend struct ::capnp::ToDynamic_; friend class 
::capnp::Orphanage; template <typename, ::capnp::Kind> friend struct ::capnp::_::PointerHelpers; }; #if !CAPNP_LITE class NvBlastChunk::Pipeline { public: typedef NvBlastChunk Pipelines; inline Pipeline(decltype(nullptr)): _typeless(nullptr) {} inline explicit Pipeline(::capnp::AnyPointer::Pipeline&& typeless) : _typeless(kj::mv(typeless)) {} private: ::capnp::AnyPointer::Pipeline _typeless; friend class ::capnp::PipelineHook; template <typename, ::capnp::Kind> friend struct ::capnp::ToDynamic_; }; #endif // !CAPNP_LITE class NvBlastBond::Reader { public: typedef NvBlastBond Reads; Reader() = default; inline explicit Reader(::capnp::_::StructReader base): _reader(base) {} inline ::capnp::MessageSize totalSize() const { return _reader.totalSize().asPublic(); } #if !CAPNP_LITE inline ::kj::StringTree toString() const { return ::capnp::_::structString(_reader, *_capnpPrivate::brand()); } #endif // !CAPNP_LITE inline bool hasNormal() const; inline ::capnp::List<float>::Reader getNormal() const; inline float getArea() const; inline bool hasCentroid() const; inline ::capnp::List<float>::Reader getCentroid() const; inline ::uint32_t getUserData() const; private: ::capnp::_::StructReader _reader; template <typename, ::capnp::Kind> friend struct ::capnp::ToDynamic_; template <typename, ::capnp::Kind> friend struct ::capnp::_::PointerHelpers; template <typename, ::capnp::Kind> friend struct ::capnp::List; friend class ::capnp::MessageBuilder; friend class ::capnp::Orphanage; }; class NvBlastBond::Builder { public: typedef NvBlastBond Builds; Builder() = delete; // Deleted to discourage incorrect usage. // You can explicitly initialize to nullptr instead. 
inline Builder(decltype(nullptr)) {} inline explicit Builder(::capnp::_::StructBuilder base): _builder(base) {} inline operator Reader() const { return Reader(_builder.asReader()); } inline Reader asReader() const { return *this; } inline ::capnp::MessageSize totalSize() const { return asReader().totalSize(); } #if !CAPNP_LITE inline ::kj::StringTree toString() const { return asReader().toString(); } #endif // !CAPNP_LITE inline bool hasNormal(); inline ::capnp::List<float>::Builder getNormal(); inline void setNormal( ::capnp::List<float>::Reader value); inline void setNormal(::kj::ArrayPtr<const float> value); inline ::capnp::List<float>::Builder initNormal(unsigned int size); inline void adoptNormal(::capnp::Orphan< ::capnp::List<float>>&& value); inline ::capnp::Orphan< ::capnp::List<float>> disownNormal(); inline float getArea(); inline void setArea(float value); inline bool hasCentroid(); inline ::capnp::List<float>::Builder getCentroid(); inline void setCentroid( ::capnp::List<float>::Reader value); inline void setCentroid(::kj::ArrayPtr<const float> value); inline ::capnp::List<float>::Builder initCentroid(unsigned int size); inline void adoptCentroid(::capnp::Orphan< ::capnp::List<float>>&& value); inline ::capnp::Orphan< ::capnp::List<float>> disownCentroid(); inline ::uint32_t getUserData(); inline void setUserData( ::uint32_t value); private: ::capnp::_::StructBuilder _builder; template <typename, ::capnp::Kind> friend struct ::capnp::ToDynamic_; friend class ::capnp::Orphanage; template <typename, ::capnp::Kind> friend struct ::capnp::_::PointerHelpers; }; #if !CAPNP_LITE class NvBlastBond::Pipeline { public: typedef NvBlastBond Pipelines; inline Pipeline(decltype(nullptr)): _typeless(nullptr) {} inline explicit Pipeline(::capnp::AnyPointer::Pipeline&& typeless) : _typeless(kj::mv(typeless)) {} private: ::capnp::AnyPointer::Pipeline _typeless; friend class ::capnp::PipelineHook; template <typename, ::capnp::Kind> friend struct ::capnp::ToDynamic_; }; 
#endif // !CAPNP_LITE class NvBlastSupportGraph::Reader { public: typedef NvBlastSupportGraph Reads; Reader() = default; inline explicit Reader(::capnp::_::StructReader base): _reader(base) {} inline ::capnp::MessageSize totalSize() const { return _reader.totalSize().asPublic(); } #if !CAPNP_LITE inline ::kj::StringTree toString() const { return ::capnp::_::structString(_reader, *_capnpPrivate::brand()); } #endif // !CAPNP_LITE inline ::uint32_t getNodeCount() const; inline bool hasChunkIndices() const; inline ::capnp::List< ::uint32_t>::Reader getChunkIndices() const; inline bool hasAdjacencyPartition() const; inline ::capnp::List< ::uint32_t>::Reader getAdjacencyPartition() const; inline bool hasAdjacentNodeIndices() const; inline ::capnp::List< ::uint32_t>::Reader getAdjacentNodeIndices() const; inline bool hasAdjacentBondIndices() const; inline ::capnp::List< ::uint32_t>::Reader getAdjacentBondIndices() const; private: ::capnp::_::StructReader _reader; template <typename, ::capnp::Kind> friend struct ::capnp::ToDynamic_; template <typename, ::capnp::Kind> friend struct ::capnp::_::PointerHelpers; template <typename, ::capnp::Kind> friend struct ::capnp::List; friend class ::capnp::MessageBuilder; friend class ::capnp::Orphanage; }; class NvBlastSupportGraph::Builder { public: typedef NvBlastSupportGraph Builds; Builder() = delete; // Deleted to discourage incorrect usage. // You can explicitly initialize to nullptr instead. 
inline Builder(decltype(nullptr)) {} inline explicit Builder(::capnp::_::StructBuilder base): _builder(base) {} inline operator Reader() const { return Reader(_builder.asReader()); } inline Reader asReader() const { return *this; } inline ::capnp::MessageSize totalSize() const { return asReader().totalSize(); } #if !CAPNP_LITE inline ::kj::StringTree toString() const { return asReader().toString(); } #endif // !CAPNP_LITE inline ::uint32_t getNodeCount(); inline void setNodeCount( ::uint32_t value); inline bool hasChunkIndices(); inline ::capnp::List< ::uint32_t>::Builder getChunkIndices(); inline void setChunkIndices( ::capnp::List< ::uint32_t>::Reader value); inline void setChunkIndices(::kj::ArrayPtr<const ::uint32_t> value); inline ::capnp::List< ::uint32_t>::Builder initChunkIndices(unsigned int size); inline void adoptChunkIndices(::capnp::Orphan< ::capnp::List< ::uint32_t>>&& value); inline ::capnp::Orphan< ::capnp::List< ::uint32_t>> disownChunkIndices(); inline bool hasAdjacencyPartition(); inline ::capnp::List< ::uint32_t>::Builder getAdjacencyPartition(); inline void setAdjacencyPartition( ::capnp::List< ::uint32_t>::Reader value); inline void setAdjacencyPartition(::kj::ArrayPtr<const ::uint32_t> value); inline ::capnp::List< ::uint32_t>::Builder initAdjacencyPartition(unsigned int size); inline void adoptAdjacencyPartition(::capnp::Orphan< ::capnp::List< ::uint32_t>>&& value); inline ::capnp::Orphan< ::capnp::List< ::uint32_t>> disownAdjacencyPartition(); inline bool hasAdjacentNodeIndices(); inline ::capnp::List< ::uint32_t>::Builder getAdjacentNodeIndices(); inline void setAdjacentNodeIndices( ::capnp::List< ::uint32_t>::Reader value); inline void setAdjacentNodeIndices(::kj::ArrayPtr<const ::uint32_t> value); inline ::capnp::List< ::uint32_t>::Builder initAdjacentNodeIndices(unsigned int size); inline void adoptAdjacentNodeIndices(::capnp::Orphan< ::capnp::List< ::uint32_t>>&& value); inline ::capnp::Orphan< ::capnp::List< ::uint32_t>> 
disownAdjacentNodeIndices(); inline bool hasAdjacentBondIndices(); inline ::capnp::List< ::uint32_t>::Builder getAdjacentBondIndices(); inline void setAdjacentBondIndices( ::capnp::List< ::uint32_t>::Reader value); inline void setAdjacentBondIndices(::kj::ArrayPtr<const ::uint32_t> value); inline ::capnp::List< ::uint32_t>::Builder initAdjacentBondIndices(unsigned int size); inline void adoptAdjacentBondIndices(::capnp::Orphan< ::capnp::List< ::uint32_t>>&& value); inline ::capnp::Orphan< ::capnp::List< ::uint32_t>> disownAdjacentBondIndices(); private: ::capnp::_::StructBuilder _builder; template <typename, ::capnp::Kind> friend struct ::capnp::ToDynamic_; friend class ::capnp::Orphanage; template <typename, ::capnp::Kind> friend struct ::capnp::_::PointerHelpers; }; #if !CAPNP_LITE class NvBlastSupportGraph::Pipeline { public: typedef NvBlastSupportGraph Pipelines; inline Pipeline(decltype(nullptr)): _typeless(nullptr) {} inline explicit Pipeline(::capnp::AnyPointer::Pipeline&& typeless) : _typeless(kj::mv(typeless)) {} private: ::capnp::AnyPointer::Pipeline _typeless; friend class ::capnp::PipelineHook; template <typename, ::capnp::Kind> friend struct ::capnp::ToDynamic_; }; #endif // !CAPNP_LITE class UUID::Reader { public: typedef UUID Reads; Reader() = default; inline explicit Reader(::capnp::_::StructReader base): _reader(base) {} inline ::capnp::MessageSize totalSize() const { return _reader.totalSize().asPublic(); } #if !CAPNP_LITE inline ::kj::StringTree toString() const { return ::capnp::_::structString(_reader, *_capnpPrivate::brand()); } #endif // !CAPNP_LITE inline bool hasValue() const; inline ::capnp::Data::Reader getValue() const; private: ::capnp::_::StructReader _reader; template <typename, ::capnp::Kind> friend struct ::capnp::ToDynamic_; template <typename, ::capnp::Kind> friend struct ::capnp::_::PointerHelpers; template <typename, ::capnp::Kind> friend struct ::capnp::List; friend class ::capnp::MessageBuilder; friend class 
::capnp::Orphanage; }; class UUID::Builder { public: typedef UUID Builds; Builder() = delete; // Deleted to discourage incorrect usage. // You can explicitly initialize to nullptr instead. inline Builder(decltype(nullptr)) {} inline explicit Builder(::capnp::_::StructBuilder base): _builder(base) {} inline operator Reader() const { return Reader(_builder.asReader()); } inline Reader asReader() const { return *this; } inline ::capnp::MessageSize totalSize() const { return asReader().totalSize(); } #if !CAPNP_LITE inline ::kj::StringTree toString() const { return asReader().toString(); } #endif // !CAPNP_LITE inline bool hasValue(); inline ::capnp::Data::Builder getValue(); inline void setValue( ::capnp::Data::Reader value); inline ::capnp::Data::Builder initValue(unsigned int size); inline void adoptValue(::capnp::Orphan< ::capnp::Data>&& value); inline ::capnp::Orphan< ::capnp::Data> disownValue(); private: ::capnp::_::StructBuilder _builder; template <typename, ::capnp::Kind> friend struct ::capnp::ToDynamic_; friend class ::capnp::Orphanage; template <typename, ::capnp::Kind> friend struct ::capnp::_::PointerHelpers; }; #if !CAPNP_LITE class UUID::Pipeline { public: typedef UUID Pipelines; inline Pipeline(decltype(nullptr)): _typeless(nullptr) {} inline explicit Pipeline(::capnp::AnyPointer::Pipeline&& typeless) : _typeless(kj::mv(typeless)) {} private: ::capnp::AnyPointer::Pipeline _typeless; friend class ::capnp::PipelineHook; template <typename, ::capnp::Kind> friend struct ::capnp::ToDynamic_; }; #endif // !CAPNP_LITE // ======================================================================================= inline bool Asset::Reader::hasHeader() const { return !_reader.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS).isNull(); } inline bool Asset::Builder::hasHeader() { return !_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS).isNull(); } inline ::Nv::Blast::Serialization::NvBlastDataBlock::Reader Asset::Reader::getHeader() const 
{ return ::capnp::_::PointerHelpers< ::Nv::Blast::Serialization::NvBlastDataBlock>::get(_reader.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS)); } inline ::Nv::Blast::Serialization::NvBlastDataBlock::Builder Asset::Builder::getHeader() { return ::capnp::_::PointerHelpers< ::Nv::Blast::Serialization::NvBlastDataBlock>::get(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS)); } #if !CAPNP_LITE inline ::Nv::Blast::Serialization::NvBlastDataBlock::Pipeline Asset::Pipeline::getHeader() { return ::Nv::Blast::Serialization::NvBlastDataBlock::Pipeline(_typeless.getPointerField(0)); } #endif // !CAPNP_LITE inline void Asset::Builder::setHeader( ::Nv::Blast::Serialization::NvBlastDataBlock::Reader value) { ::capnp::_::PointerHelpers< ::Nv::Blast::Serialization::NvBlastDataBlock>::set(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS), value); } inline ::Nv::Blast::Serialization::NvBlastDataBlock::Builder Asset::Builder::initHeader() { return ::capnp::_::PointerHelpers< ::Nv::Blast::Serialization::NvBlastDataBlock>::init(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS)); } inline void Asset::Builder::adoptHeader( ::capnp::Orphan< ::Nv::Blast::Serialization::NvBlastDataBlock>&& value) { ::capnp::_::PointerHelpers< ::Nv::Blast::Serialization::NvBlastDataBlock>::adopt(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS), kj::mv(value)); } inline ::capnp::Orphan< ::Nv::Blast::Serialization::NvBlastDataBlock> Asset::Builder::disownHeader() { return ::capnp::_::PointerHelpers< ::Nv::Blast::Serialization::NvBlastDataBlock>::disown(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS)); } inline bool Asset::Reader::hasID() const { return !_reader.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS).isNull(); } inline bool Asset::Builder::hasID() { return !_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS).isNull(); } inline 
::Nv::Blast::Serialization::UUID::Reader Asset::Reader::getID() const { return ::capnp::_::PointerHelpers< ::Nv::Blast::Serialization::UUID>::get(_reader.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS)); } inline ::Nv::Blast::Serialization::UUID::Builder Asset::Builder::getID() { return ::capnp::_::PointerHelpers< ::Nv::Blast::Serialization::UUID>::get(_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS)); } #if !CAPNP_LITE inline ::Nv::Blast::Serialization::UUID::Pipeline Asset::Pipeline::getID() { return ::Nv::Blast::Serialization::UUID::Pipeline(_typeless.getPointerField(1)); } #endif // !CAPNP_LITE inline void Asset::Builder::setID( ::Nv::Blast::Serialization::UUID::Reader value) { ::capnp::_::PointerHelpers< ::Nv::Blast::Serialization::UUID>::set(_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS), value); } inline ::Nv::Blast::Serialization::UUID::Builder Asset::Builder::initID() { return ::capnp::_::PointerHelpers< ::Nv::Blast::Serialization::UUID>::init(_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS)); } inline void Asset::Builder::adoptID( ::capnp::Orphan< ::Nv::Blast::Serialization::UUID>&& value) { ::capnp::_::PointerHelpers< ::Nv::Blast::Serialization::UUID>::adopt(_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS), kj::mv(value)); } inline ::capnp::Orphan< ::Nv::Blast::Serialization::UUID> Asset::Builder::disownID() { return ::capnp::_::PointerHelpers< ::Nv::Blast::Serialization::UUID>::disown(_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS)); } inline ::uint32_t Asset::Reader::getChunkCount() const { return _reader.getDataField< ::uint32_t>( ::capnp::bounded<0>() * ::capnp::ELEMENTS); } inline ::uint32_t Asset::Builder::getChunkCount() { return _builder.getDataField< ::uint32_t>( ::capnp::bounded<0>() * ::capnp::ELEMENTS); } inline void Asset::Builder::setChunkCount( ::uint32_t value) { _builder.setDataField< ::uint32_t>( ::capnp::bounded<0>() * 
::capnp::ELEMENTS, value); } inline bool Asset::Reader::hasGraph() const { return !_reader.getPointerField( ::capnp::bounded<2>() * ::capnp::POINTERS).isNull(); } inline bool Asset::Builder::hasGraph() { return !_builder.getPointerField( ::capnp::bounded<2>() * ::capnp::POINTERS).isNull(); } inline ::Nv::Blast::Serialization::NvBlastSupportGraph::Reader Asset::Reader::getGraph() const { return ::capnp::_::PointerHelpers< ::Nv::Blast::Serialization::NvBlastSupportGraph>::get(_reader.getPointerField( ::capnp::bounded<2>() * ::capnp::POINTERS)); } inline ::Nv::Blast::Serialization::NvBlastSupportGraph::Builder Asset::Builder::getGraph() { return ::capnp::_::PointerHelpers< ::Nv::Blast::Serialization::NvBlastSupportGraph>::get(_builder.getPointerField( ::capnp::bounded<2>() * ::capnp::POINTERS)); } #if !CAPNP_LITE inline ::Nv::Blast::Serialization::NvBlastSupportGraph::Pipeline Asset::Pipeline::getGraph() { return ::Nv::Blast::Serialization::NvBlastSupportGraph::Pipeline(_typeless.getPointerField(2)); } #endif // !CAPNP_LITE inline void Asset::Builder::setGraph( ::Nv::Blast::Serialization::NvBlastSupportGraph::Reader value) { ::capnp::_::PointerHelpers< ::Nv::Blast::Serialization::NvBlastSupportGraph>::set(_builder.getPointerField( ::capnp::bounded<2>() * ::capnp::POINTERS), value); } inline ::Nv::Blast::Serialization::NvBlastSupportGraph::Builder Asset::Builder::initGraph() { return ::capnp::_::PointerHelpers< ::Nv::Blast::Serialization::NvBlastSupportGraph>::init(_builder.getPointerField( ::capnp::bounded<2>() * ::capnp::POINTERS)); } inline void Asset::Builder::adoptGraph( ::capnp::Orphan< ::Nv::Blast::Serialization::NvBlastSupportGraph>&& value) { ::capnp::_::PointerHelpers< ::Nv::Blast::Serialization::NvBlastSupportGraph>::adopt(_builder.getPointerField( ::capnp::bounded<2>() * ::capnp::POINTERS), kj::mv(value)); } inline ::capnp::Orphan< ::Nv::Blast::Serialization::NvBlastSupportGraph> Asset::Builder::disownGraph() { return ::capnp::_::PointerHelpers< 
::Nv::Blast::Serialization::NvBlastSupportGraph>::disown(_builder.getPointerField( ::capnp::bounded<2>() * ::capnp::POINTERS)); } inline ::uint32_t Asset::Reader::getLeafChunkCount() const { return _reader.getDataField< ::uint32_t>( ::capnp::bounded<1>() * ::capnp::ELEMENTS); } inline ::uint32_t Asset::Builder::getLeafChunkCount() { return _builder.getDataField< ::uint32_t>( ::capnp::bounded<1>() * ::capnp::ELEMENTS); } inline void Asset::Builder::setLeafChunkCount( ::uint32_t value) { _builder.setDataField< ::uint32_t>( ::capnp::bounded<1>() * ::capnp::ELEMENTS, value); } inline ::uint32_t Asset::Reader::getFirstSubsupportChunkIndex() const { return _reader.getDataField< ::uint32_t>( ::capnp::bounded<2>() * ::capnp::ELEMENTS); } inline ::uint32_t Asset::Builder::getFirstSubsupportChunkIndex() { return _builder.getDataField< ::uint32_t>( ::capnp::bounded<2>() * ::capnp::ELEMENTS); } inline void Asset::Builder::setFirstSubsupportChunkIndex( ::uint32_t value) { _builder.setDataField< ::uint32_t>( ::capnp::bounded<2>() * ::capnp::ELEMENTS, value); } inline ::uint32_t Asset::Reader::getBondCount() const { return _reader.getDataField< ::uint32_t>( ::capnp::bounded<3>() * ::capnp::ELEMENTS); } inline ::uint32_t Asset::Builder::getBondCount() { return _builder.getDataField< ::uint32_t>( ::capnp::bounded<3>() * ::capnp::ELEMENTS); } inline void Asset::Builder::setBondCount( ::uint32_t value) { _builder.setDataField< ::uint32_t>( ::capnp::bounded<3>() * ::capnp::ELEMENTS, value); } inline bool Asset::Reader::hasChunks() const { return !_reader.getPointerField( ::capnp::bounded<3>() * ::capnp::POINTERS).isNull(); } inline bool Asset::Builder::hasChunks() { return !_builder.getPointerField( ::capnp::bounded<3>() * ::capnp::POINTERS).isNull(); } inline ::capnp::List< ::Nv::Blast::Serialization::NvBlastChunk>::Reader Asset::Reader::getChunks() const { return ::capnp::_::PointerHelpers< ::capnp::List< ::Nv::Blast::Serialization::NvBlastChunk>>::get(_reader.getPointerField( 
::capnp::bounded<3>() * ::capnp::POINTERS)); } inline ::capnp::List< ::Nv::Blast::Serialization::NvBlastChunk>::Builder Asset::Builder::getChunks() { return ::capnp::_::PointerHelpers< ::capnp::List< ::Nv::Blast::Serialization::NvBlastChunk>>::get(_builder.getPointerField( ::capnp::bounded<3>() * ::capnp::POINTERS)); } inline void Asset::Builder::setChunks( ::capnp::List< ::Nv::Blast::Serialization::NvBlastChunk>::Reader value) { ::capnp::_::PointerHelpers< ::capnp::List< ::Nv::Blast::Serialization::NvBlastChunk>>::set(_builder.getPointerField( ::capnp::bounded<3>() * ::capnp::POINTERS), value); } inline ::capnp::List< ::Nv::Blast::Serialization::NvBlastChunk>::Builder Asset::Builder::initChunks(unsigned int size) { return ::capnp::_::PointerHelpers< ::capnp::List< ::Nv::Blast::Serialization::NvBlastChunk>>::init(_builder.getPointerField( ::capnp::bounded<3>() * ::capnp::POINTERS), size); } inline void Asset::Builder::adoptChunks( ::capnp::Orphan< ::capnp::List< ::Nv::Blast::Serialization::NvBlastChunk>>&& value) { ::capnp::_::PointerHelpers< ::capnp::List< ::Nv::Blast::Serialization::NvBlastChunk>>::adopt(_builder.getPointerField( ::capnp::bounded<3>() * ::capnp::POINTERS), kj::mv(value)); } inline ::capnp::Orphan< ::capnp::List< ::Nv::Blast::Serialization::NvBlastChunk>> Asset::Builder::disownChunks() { return ::capnp::_::PointerHelpers< ::capnp::List< ::Nv::Blast::Serialization::NvBlastChunk>>::disown(_builder.getPointerField( ::capnp::bounded<3>() * ::capnp::POINTERS)); } inline bool Asset::Reader::hasBonds() const { return !_reader.getPointerField( ::capnp::bounded<4>() * ::capnp::POINTERS).isNull(); } inline bool Asset::Builder::hasBonds() { return !_builder.getPointerField( ::capnp::bounded<4>() * ::capnp::POINTERS).isNull(); } inline ::capnp::List< ::Nv::Blast::Serialization::NvBlastBond>::Reader Asset::Reader::getBonds() const { return ::capnp::_::PointerHelpers< ::capnp::List< ::Nv::Blast::Serialization::NvBlastBond>>::get(_reader.getPointerField( 
::capnp::bounded<4>() * ::capnp::POINTERS)); } inline ::capnp::List< ::Nv::Blast::Serialization::NvBlastBond>::Builder Asset::Builder::getBonds() { return ::capnp::_::PointerHelpers< ::capnp::List< ::Nv::Blast::Serialization::NvBlastBond>>::get(_builder.getPointerField( ::capnp::bounded<4>() * ::capnp::POINTERS)); } inline void Asset::Builder::setBonds( ::capnp::List< ::Nv::Blast::Serialization::NvBlastBond>::Reader value) { ::capnp::_::PointerHelpers< ::capnp::List< ::Nv::Blast::Serialization::NvBlastBond>>::set(_builder.getPointerField( ::capnp::bounded<4>() * ::capnp::POINTERS), value); } inline ::capnp::List< ::Nv::Blast::Serialization::NvBlastBond>::Builder Asset::Builder::initBonds(unsigned int size) { return ::capnp::_::PointerHelpers< ::capnp::List< ::Nv::Blast::Serialization::NvBlastBond>>::init(_builder.getPointerField( ::capnp::bounded<4>() * ::capnp::POINTERS), size); } inline void Asset::Builder::adoptBonds( ::capnp::Orphan< ::capnp::List< ::Nv::Blast::Serialization::NvBlastBond>>&& value) { ::capnp::_::PointerHelpers< ::capnp::List< ::Nv::Blast::Serialization::NvBlastBond>>::adopt(_builder.getPointerField( ::capnp::bounded<4>() * ::capnp::POINTERS), kj::mv(value)); } inline ::capnp::Orphan< ::capnp::List< ::Nv::Blast::Serialization::NvBlastBond>> Asset::Builder::disownBonds() { return ::capnp::_::PointerHelpers< ::capnp::List< ::Nv::Blast::Serialization::NvBlastBond>>::disown(_builder.getPointerField( ::capnp::bounded<4>() * ::capnp::POINTERS)); } inline bool Asset::Reader::hasSubtreeLeafChunkCounts() const { return !_reader.getPointerField( ::capnp::bounded<5>() * ::capnp::POINTERS).isNull(); } inline bool Asset::Builder::hasSubtreeLeafChunkCounts() { return !_builder.getPointerField( ::capnp::bounded<5>() * ::capnp::POINTERS).isNull(); } inline ::capnp::List< ::uint32_t>::Reader Asset::Reader::getSubtreeLeafChunkCounts() const { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::get(_reader.getPointerField( ::capnp::bounded<5>() * 
::capnp::POINTERS)); } inline ::capnp::List< ::uint32_t>::Builder Asset::Builder::getSubtreeLeafChunkCounts() { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::get(_builder.getPointerField( ::capnp::bounded<5>() * ::capnp::POINTERS)); } inline void Asset::Builder::setSubtreeLeafChunkCounts( ::capnp::List< ::uint32_t>::Reader value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::set(_builder.getPointerField( ::capnp::bounded<5>() * ::capnp::POINTERS), value); } inline void Asset::Builder::setSubtreeLeafChunkCounts(::kj::ArrayPtr<const ::uint32_t> value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::set(_builder.getPointerField( ::capnp::bounded<5>() * ::capnp::POINTERS), value); } inline ::capnp::List< ::uint32_t>::Builder Asset::Builder::initSubtreeLeafChunkCounts(unsigned int size) { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::init(_builder.getPointerField( ::capnp::bounded<5>() * ::capnp::POINTERS), size); } inline void Asset::Builder::adoptSubtreeLeafChunkCounts( ::capnp::Orphan< ::capnp::List< ::uint32_t>>&& value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::adopt(_builder.getPointerField( ::capnp::bounded<5>() * ::capnp::POINTERS), kj::mv(value)); } inline ::capnp::Orphan< ::capnp::List< ::uint32_t>> Asset::Builder::disownSubtreeLeafChunkCounts() { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::disown(_builder.getPointerField( ::capnp::bounded<5>() * ::capnp::POINTERS)); } inline bool Asset::Reader::hasChunkToGraphNodeMap() const { return !_reader.getPointerField( ::capnp::bounded<6>() * ::capnp::POINTERS).isNull(); } inline bool Asset::Builder::hasChunkToGraphNodeMap() { return !_builder.getPointerField( ::capnp::bounded<6>() * ::capnp::POINTERS).isNull(); } inline ::capnp::List< ::uint32_t>::Reader Asset::Reader::getChunkToGraphNodeMap() const { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::get(_reader.getPointerField( ::capnp::bounded<6>() * 
::capnp::POINTERS)); } inline ::capnp::List< ::uint32_t>::Builder Asset::Builder::getChunkToGraphNodeMap() { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::get(_builder.getPointerField( ::capnp::bounded<6>() * ::capnp::POINTERS)); } inline void Asset::Builder::setChunkToGraphNodeMap( ::capnp::List< ::uint32_t>::Reader value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::set(_builder.getPointerField( ::capnp::bounded<6>() * ::capnp::POINTERS), value); } inline void Asset::Builder::setChunkToGraphNodeMap(::kj::ArrayPtr<const ::uint32_t> value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::set(_builder.getPointerField( ::capnp::bounded<6>() * ::capnp::POINTERS), value); } inline ::capnp::List< ::uint32_t>::Builder Asset::Builder::initChunkToGraphNodeMap(unsigned int size) { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::init(_builder.getPointerField( ::capnp::bounded<6>() * ::capnp::POINTERS), size); } inline void Asset::Builder::adoptChunkToGraphNodeMap( ::capnp::Orphan< ::capnp::List< ::uint32_t>>&& value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::adopt(_builder.getPointerField( ::capnp::bounded<6>() * ::capnp::POINTERS), kj::mv(value)); } inline ::capnp::Orphan< ::capnp::List< ::uint32_t>> Asset::Builder::disownChunkToGraphNodeMap() { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::disown(_builder.getPointerField( ::capnp::bounded<6>() * ::capnp::POINTERS)); } inline bool Family::Reader::hasAssetID() const { return !_reader.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS).isNull(); } inline bool Family::Builder::hasAssetID() { return !_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS).isNull(); } inline ::Nv::Blast::Serialization::UUID::Reader Family::Reader::getAssetID() const { return ::capnp::_::PointerHelpers< ::Nv::Blast::Serialization::UUID>::get(_reader.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS)); } inline 
::Nv::Blast::Serialization::UUID::Builder Family::Builder::getAssetID() { return ::capnp::_::PointerHelpers< ::Nv::Blast::Serialization::UUID>::get(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS)); } #if !CAPNP_LITE inline ::Nv::Blast::Serialization::UUID::Pipeline Family::Pipeline::getAssetID() { return ::Nv::Blast::Serialization::UUID::Pipeline(_typeless.getPointerField(0)); } #endif // !CAPNP_LITE inline void Family::Builder::setAssetID( ::Nv::Blast::Serialization::UUID::Reader value) { ::capnp::_::PointerHelpers< ::Nv::Blast::Serialization::UUID>::set(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS), value); } inline ::Nv::Blast::Serialization::UUID::Builder Family::Builder::initAssetID() { return ::capnp::_::PointerHelpers< ::Nv::Blast::Serialization::UUID>::init(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS)); } inline void Family::Builder::adoptAssetID( ::capnp::Orphan< ::Nv::Blast::Serialization::UUID>&& value) { ::capnp::_::PointerHelpers< ::Nv::Blast::Serialization::UUID>::adopt(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS), kj::mv(value)); } inline ::capnp::Orphan< ::Nv::Blast::Serialization::UUID> Family::Builder::disownAssetID() { return ::capnp::_::PointerHelpers< ::Nv::Blast::Serialization::UUID>::disown(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS)); } inline bool Family::Reader::hasActors() const { return !_reader.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS).isNull(); } inline bool Family::Builder::hasActors() { return !_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS).isNull(); } inline ::capnp::List< ::Nv::Blast::Serialization::Actor>::Reader Family::Reader::getActors() const { return ::capnp::_::PointerHelpers< ::capnp::List< ::Nv::Blast::Serialization::Actor>>::get(_reader.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS)); } inline ::capnp::List< ::Nv::Blast::Serialization::Actor>::Builder 
Family::Builder::getActors() { return ::capnp::_::PointerHelpers< ::capnp::List< ::Nv::Blast::Serialization::Actor>>::get(_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS)); } inline void Family::Builder::setActors( ::capnp::List< ::Nv::Blast::Serialization::Actor>::Reader value) { ::capnp::_::PointerHelpers< ::capnp::List< ::Nv::Blast::Serialization::Actor>>::set(_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS), value); } inline ::capnp::List< ::Nv::Blast::Serialization::Actor>::Builder Family::Builder::initActors(unsigned int size) { return ::capnp::_::PointerHelpers< ::capnp::List< ::Nv::Blast::Serialization::Actor>>::init(_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS), size); } inline void Family::Builder::adoptActors( ::capnp::Orphan< ::capnp::List< ::Nv::Blast::Serialization::Actor>>&& value) { ::capnp::_::PointerHelpers< ::capnp::List< ::Nv::Blast::Serialization::Actor>>::adopt(_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS), kj::mv(value)); } inline ::capnp::Orphan< ::capnp::List< ::Nv::Blast::Serialization::Actor>> Family::Builder::disownActors() { return ::capnp::_::PointerHelpers< ::capnp::List< ::Nv::Blast::Serialization::Actor>>::disown(_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS)); } inline bool Family::Reader::hasVisibleChunkIndexLinks() const { return !_reader.getPointerField( ::capnp::bounded<2>() * ::capnp::POINTERS).isNull(); } inline bool Family::Builder::hasVisibleChunkIndexLinks() { return !_builder.getPointerField( ::capnp::bounded<2>() * ::capnp::POINTERS).isNull(); } inline ::capnp::List< ::uint32_t>::Reader Family::Reader::getVisibleChunkIndexLinks() const { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::get(_reader.getPointerField( ::capnp::bounded<2>() * ::capnp::POINTERS)); } inline ::capnp::List< ::uint32_t>::Builder Family::Builder::getVisibleChunkIndexLinks() { return ::capnp::_::PointerHelpers< ::capnp::List< 
::uint32_t>>::get(_builder.getPointerField( ::capnp::bounded<2>() * ::capnp::POINTERS)); } inline void Family::Builder::setVisibleChunkIndexLinks( ::capnp::List< ::uint32_t>::Reader value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::set(_builder.getPointerField( ::capnp::bounded<2>() * ::capnp::POINTERS), value); } inline void Family::Builder::setVisibleChunkIndexLinks(::kj::ArrayPtr<const ::uint32_t> value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::set(_builder.getPointerField( ::capnp::bounded<2>() * ::capnp::POINTERS), value); } inline ::capnp::List< ::uint32_t>::Builder Family::Builder::initVisibleChunkIndexLinks(unsigned int size) { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::init(_builder.getPointerField( ::capnp::bounded<2>() * ::capnp::POINTERS), size); } inline void Family::Builder::adoptVisibleChunkIndexLinks( ::capnp::Orphan< ::capnp::List< ::uint32_t>>&& value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::adopt(_builder.getPointerField( ::capnp::bounded<2>() * ::capnp::POINTERS), kj::mv(value)); } inline ::capnp::Orphan< ::capnp::List< ::uint32_t>> Family::Builder::disownVisibleChunkIndexLinks() { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::disown(_builder.getPointerField( ::capnp::bounded<2>() * ::capnp::POINTERS)); } inline bool Family::Reader::hasChunkActorIndices() const { return !_reader.getPointerField( ::capnp::bounded<3>() * ::capnp::POINTERS).isNull(); } inline bool Family::Builder::hasChunkActorIndices() { return !_builder.getPointerField( ::capnp::bounded<3>() * ::capnp::POINTERS).isNull(); } inline ::capnp::List< ::uint32_t>::Reader Family::Reader::getChunkActorIndices() const { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::get(_reader.getPointerField( ::capnp::bounded<3>() * ::capnp::POINTERS)); } inline ::capnp::List< ::uint32_t>::Builder Family::Builder::getChunkActorIndices() { return ::capnp::_::PointerHelpers< ::capnp::List< 
// ---------------------------------------------------------------------------
// NOTE(review): Cap'n Proto accessor boilerplate (appears to be capnpc-generated
// output for the Blast serialization schema). If so, edit the .capnp schema and
// regenerate rather than hand-editing; the bounded<N>() slot indices are fixed
// by the schema layout.
// ---------------------------------------------------------------------------
// Tail of Family::Builder::getChunkActorIndices() -- the declaration opens on the previous line.
::uint32_t>>::get(_builder.getPointerField( ::capnp::bounded<3>() * ::capnp::POINTERS)); }
// chunkActorIndices -- List(UInt32) in pointer slot 3: set/init/adopt/disown.
inline void Family::Builder::setChunkActorIndices( ::capnp::List< ::uint32_t>::Reader value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::set(_builder.getPointerField( ::capnp::bounded<3>() * ::capnp::POINTERS), value); }
inline void Family::Builder::setChunkActorIndices(::kj::ArrayPtr<const ::uint32_t> value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::set(_builder.getPointerField( ::capnp::bounded<3>() * ::capnp::POINTERS), value); }
inline ::capnp::List< ::uint32_t>::Builder Family::Builder::initChunkActorIndices(unsigned int size) { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::init(_builder.getPointerField( ::capnp::bounded<3>() * ::capnp::POINTERS), size); }
inline void Family::Builder::adoptChunkActorIndices( ::capnp::Orphan< ::capnp::List< ::uint32_t>>&& value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::adopt(_builder.getPointerField( ::capnp::bounded<3>() * ::capnp::POINTERS), kj::mv(value)); }
inline ::capnp::Orphan< ::capnp::List< ::uint32_t>> Family::Builder::disownChunkActorIndices() { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::disown(_builder.getPointerField( ::capnp::bounded<3>() * ::capnp::POINTERS)); }
// graphNodeIndexLinks -- List(UInt32) in pointer slot 4.
inline bool Family::Reader::hasGraphNodeIndexLinks() const { return !_reader.getPointerField( ::capnp::bounded<4>() * ::capnp::POINTERS).isNull(); }
inline bool Family::Builder::hasGraphNodeIndexLinks() { return !_builder.getPointerField( ::capnp::bounded<4>() * ::capnp::POINTERS).isNull(); }
inline ::capnp::List< ::uint32_t>::Reader Family::Reader::getGraphNodeIndexLinks() const { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::get(_reader.getPointerField( ::capnp::bounded<4>() * ::capnp::POINTERS)); }
inline ::capnp::List< ::uint32_t>::Builder Family::Builder::getGraphNodeIndexLinks() { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::get(_builder.getPointerField( ::capnp::bounded<4>() * ::capnp::POINTERS)); }
inline void Family::Builder::setGraphNodeIndexLinks( ::capnp::List< ::uint32_t>::Reader value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::set(_builder.getPointerField( ::capnp::bounded<4>() * ::capnp::POINTERS), value); }
inline void Family::Builder::setGraphNodeIndexLinks(::kj::ArrayPtr<const ::uint32_t> value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::set(_builder.getPointerField( ::capnp::bounded<4>() * ::capnp::POINTERS), value); }
inline ::capnp::List< ::uint32_t>::Builder Family::Builder::initGraphNodeIndexLinks(unsigned int size) { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::init(_builder.getPointerField( ::capnp::bounded<4>() * ::capnp::POINTERS), size); }
inline void Family::Builder::adoptGraphNodeIndexLinks( ::capnp::Orphan< ::capnp::List< ::uint32_t>>&& value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::adopt(_builder.getPointerField( ::capnp::bounded<4>() * ::capnp::POINTERS), kj::mv(value)); }
inline ::capnp::Orphan< ::capnp::List< ::uint32_t>> Family::Builder::disownGraphNodeIndexLinks() { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::disown(_builder.getPointerField( ::capnp::bounded<4>() * ::capnp::POINTERS)); }
// lowerSupportChunkHealths -- List(Float32) in pointer slot 5.
inline bool Family::Reader::hasLowerSupportChunkHealths() const { return !_reader.getPointerField( ::capnp::bounded<5>() * ::capnp::POINTERS).isNull(); }
inline bool Family::Builder::hasLowerSupportChunkHealths() { return !_builder.getPointerField( ::capnp::bounded<5>() * ::capnp::POINTERS).isNull(); }
inline ::capnp::List<float>::Reader Family::Reader::getLowerSupportChunkHealths() const { return ::capnp::_::PointerHelpers< ::capnp::List<float>>::get(_reader.getPointerField( ::capnp::bounded<5>() * ::capnp::POINTERS)); }
inline ::capnp::List<float>::Builder Family::Builder::getLowerSupportChunkHealths() { return ::capnp::_::PointerHelpers< ::capnp::List<float>>::get(_builder.getPointerField( ::capnp::bounded<5>() * ::capnp::POINTERS)); }
inline void Family::Builder::setLowerSupportChunkHealths( ::capnp::List<float>::Reader value) { ::capnp::_::PointerHelpers< ::capnp::List<float>>::set(_builder.getPointerField( ::capnp::bounded<5>() * ::capnp::POINTERS), value); }
inline void Family::Builder::setLowerSupportChunkHealths(::kj::ArrayPtr<const float> value) { ::capnp::_::PointerHelpers< ::capnp::List<float>>::set(_builder.getPointerField( ::capnp::bounded<5>() * ::capnp::POINTERS), value); }
inline ::capnp::List<float>::Builder Family::Builder::initLowerSupportChunkHealths(unsigned int size) { return ::capnp::_::PointerHelpers< ::capnp::List<float>>::init(_builder.getPointerField( ::capnp::bounded<5>() * ::capnp::POINTERS), size); }
inline void Family::Builder::adoptLowerSupportChunkHealths( ::capnp::Orphan< ::capnp::List<float>>&& value) { ::capnp::_::PointerHelpers< ::capnp::List<float>>::adopt(_builder.getPointerField( ::capnp::bounded<5>() * ::capnp::POINTERS), kj::mv(value)); }
inline ::capnp::Orphan< ::capnp::List<float>> Family::Builder::disownLowerSupportChunkHealths() { return ::capnp::_::PointerHelpers< ::capnp::List<float>>::disown(_builder.getPointerField( ::capnp::bounded<5>() * ::capnp::POINTERS)); }
// graphBondHealths -- List(Float32) in pointer slot 6.
inline bool Family::Reader::hasGraphBondHealths() const { return !_reader.getPointerField( ::capnp::bounded<6>() * ::capnp::POINTERS).isNull(); }
inline bool Family::Builder::hasGraphBondHealths() { return !_builder.getPointerField( ::capnp::bounded<6>() * ::capnp::POINTERS).isNull(); }
inline ::capnp::List<float>::Reader Family::Reader::getGraphBondHealths() const { return ::capnp::_::PointerHelpers< ::capnp::List<float>>::get(_reader.getPointerField( ::capnp::bounded<6>() * ::capnp::POINTERS)); }
// Opening of Family::Builder::getGraphBondHealths(); the statement closes on the next line.
inline ::capnp::List<float>::Builder Family::Builder::getGraphBondHealths() { return ::capnp::_::PointerHelpers< ::capnp::List<float>>::get(_builder.getPointerField( ::capnp::bounded<6>() *
// Closes Family::Builder::getGraphBondHealths(), opened on the previous line.
::capnp::POINTERS)); }
// graphBondHealths -- List(Float32) in pointer slot 6: set/init/adopt/disown.
inline void Family::Builder::setGraphBondHealths( ::capnp::List<float>::Reader value) { ::capnp::_::PointerHelpers< ::capnp::List<float>>::set(_builder.getPointerField( ::capnp::bounded<6>() * ::capnp::POINTERS), value); }
inline void Family::Builder::setGraphBondHealths(::kj::ArrayPtr<const float> value) { ::capnp::_::PointerHelpers< ::capnp::List<float>>::set(_builder.getPointerField( ::capnp::bounded<6>() * ::capnp::POINTERS), value); }
inline ::capnp::List<float>::Builder Family::Builder::initGraphBondHealths(unsigned int size) { return ::capnp::_::PointerHelpers< ::capnp::List<float>>::init(_builder.getPointerField( ::capnp::bounded<6>() * ::capnp::POINTERS), size); }
inline void Family::Builder::adoptGraphBondHealths( ::capnp::Orphan< ::capnp::List<float>>&& value) { ::capnp::_::PointerHelpers< ::capnp::List<float>>::adopt(_builder.getPointerField( ::capnp::bounded<6>() * ::capnp::POINTERS), kj::mv(value)); }
inline ::capnp::Orphan< ::capnp::List<float>> Family::Builder::disownGraphBondHealths() { return ::capnp::_::PointerHelpers< ::capnp::List<float>>::disown(_builder.getPointerField( ::capnp::bounded<6>() * ::capnp::POINTERS)); }
// familyGraph -- nested FamilyGraph struct in pointer slot 7.
inline bool Family::Reader::hasFamilyGraph() const { return !_reader.getPointerField( ::capnp::bounded<7>() * ::capnp::POINTERS).isNull(); }
inline bool Family::Builder::hasFamilyGraph() { return !_builder.getPointerField( ::capnp::bounded<7>() * ::capnp::POINTERS).isNull(); }
inline ::Nv::Blast::Serialization::FamilyGraph::Reader Family::Reader::getFamilyGraph() const { return ::capnp::_::PointerHelpers< ::Nv::Blast::Serialization::FamilyGraph>::get(_reader.getPointerField( ::capnp::bounded<7>() * ::capnp::POINTERS)); }
inline ::Nv::Blast::Serialization::FamilyGraph::Builder Family::Builder::getFamilyGraph() { return ::capnp::_::PointerHelpers< ::Nv::Blast::Serialization::FamilyGraph>::get(_builder.getPointerField( ::capnp::bounded<7>() * ::capnp::POINTERS)); }
// Pipeline accessor is RPC-only; excluded from capnp "lite" builds.
#if !CAPNP_LITE
inline ::Nv::Blast::Serialization::FamilyGraph::Pipeline Family::Pipeline::getFamilyGraph() { return ::Nv::Blast::Serialization::FamilyGraph::Pipeline(_typeless.getPointerField(7)); }
#endif  // !CAPNP_LITE
inline void Family::Builder::setFamilyGraph( ::Nv::Blast::Serialization::FamilyGraph::Reader value) { ::capnp::_::PointerHelpers< ::Nv::Blast::Serialization::FamilyGraph>::set(_builder.getPointerField( ::capnp::bounded<7>() * ::capnp::POINTERS), value); }
inline ::Nv::Blast::Serialization::FamilyGraph::Builder Family::Builder::initFamilyGraph() { return ::capnp::_::PointerHelpers< ::Nv::Blast::Serialization::FamilyGraph>::init(_builder.getPointerField( ::capnp::bounded<7>() * ::capnp::POINTERS)); }
inline void Family::Builder::adoptFamilyGraph( ::capnp::Orphan< ::Nv::Blast::Serialization::FamilyGraph>&& value) { ::capnp::_::PointerHelpers< ::Nv::Blast::Serialization::FamilyGraph>::adopt(_builder.getPointerField( ::capnp::bounded<7>() * ::capnp::POINTERS), kj::mv(value)); }
inline ::capnp::Orphan< ::Nv::Blast::Serialization::FamilyGraph> Family::Builder::disownFamilyGraph() { return ::capnp::_::PointerHelpers< ::Nv::Blast::Serialization::FamilyGraph>::disown(_builder.getPointerField( ::capnp::bounded<7>() * ::capnp::POINTERS)); }
// Family scalar counters -- UInt32 data fields in element slots 0..5.
inline ::uint32_t Family::Reader::getActorCount() const { return _reader.getDataField< ::uint32_t>( ::capnp::bounded<0>() * ::capnp::ELEMENTS); }
inline ::uint32_t Family::Builder::getActorCount() { return _builder.getDataField< ::uint32_t>( ::capnp::bounded<0>() * ::capnp::ELEMENTS); }
inline void Family::Builder::setActorCount( ::uint32_t value) { _builder.setDataField< ::uint32_t>( ::capnp::bounded<0>() * ::capnp::ELEMENTS, value); }
inline ::uint32_t Family::Reader::getBondCount() const { return _reader.getDataField< ::uint32_t>( ::capnp::bounded<1>() * ::capnp::ELEMENTS); }
inline ::uint32_t Family::Builder::getBondCount() { return _builder.getDataField< ::uint32_t>( ::capnp::bounded<1>() * ::capnp::ELEMENTS); }
inline void Family::Builder::setBondCount( ::uint32_t value) { _builder.setDataField< ::uint32_t>( ::capnp::bounded<1>() * ::capnp::ELEMENTS, value); }
inline ::uint32_t Family::Reader::getChunkCount() const { return _reader.getDataField< ::uint32_t>( ::capnp::bounded<2>() * ::capnp::ELEMENTS); }
inline ::uint32_t Family::Builder::getChunkCount() { return _builder.getDataField< ::uint32_t>( ::capnp::bounded<2>() * ::capnp::ELEMENTS); }
inline void Family::Builder::setChunkCount( ::uint32_t value) { _builder.setDataField< ::uint32_t>( ::capnp::bounded<2>() * ::capnp::ELEMENTS, value); }
inline ::uint32_t Family::Reader::getNodeCount() const { return _reader.getDataField< ::uint32_t>( ::capnp::bounded<3>() * ::capnp::ELEMENTS); }
inline ::uint32_t Family::Builder::getNodeCount() { return _builder.getDataField< ::uint32_t>( ::capnp::bounded<3>() * ::capnp::ELEMENTS); }
inline void Family::Builder::setNodeCount( ::uint32_t value) { _builder.setDataField< ::uint32_t>( ::capnp::bounded<3>() * ::capnp::ELEMENTS, value); }
inline ::uint32_t Family::Reader::getLowerSupportChunkCount() const { return _reader.getDataField< ::uint32_t>( ::capnp::bounded<4>() * ::capnp::ELEMENTS); }
inline ::uint32_t Family::Builder::getLowerSupportChunkCount() { return _builder.getDataField< ::uint32_t>( ::capnp::bounded<4>() * ::capnp::ELEMENTS); }
inline void Family::Builder::setLowerSupportChunkCount( ::uint32_t value) { _builder.setDataField< ::uint32_t>( ::capnp::bounded<4>() * ::capnp::ELEMENTS, value); }
inline ::uint32_t Family::Reader::getUpperSupportChunkCount() const { return _reader.getDataField< ::uint32_t>( ::capnp::bounded<5>() * ::capnp::ELEMENTS); }
inline ::uint32_t Family::Builder::getUpperSupportChunkCount() { return _builder.getDataField< ::uint32_t>( ::capnp::bounded<5>() * ::capnp::ELEMENTS); }
inline void Family::Builder::setUpperSupportChunkCount( ::uint32_t value) { _builder.setDataField< ::uint32_t>( ::capnp::bounded<5>() * ::capnp::ELEMENTS, value); }
// Opens Actor::Reader::getFamilyOffset(); the declaration continues on the next line.
inline ::uint32_t
// Completes the declaration opened on the previous line ("inline ::uint32_t").
// Actor scalar fields -- UInt32 data fields in element slots 0..5.
Actor::Reader::getFamilyOffset() const { return _reader.getDataField< ::uint32_t>( ::capnp::bounded<0>() * ::capnp::ELEMENTS); }
inline ::uint32_t Actor::Builder::getFamilyOffset() { return _builder.getDataField< ::uint32_t>( ::capnp::bounded<0>() * ::capnp::ELEMENTS); }
inline void Actor::Builder::setFamilyOffset( ::uint32_t value) { _builder.setDataField< ::uint32_t>( ::capnp::bounded<0>() * ::capnp::ELEMENTS, value); }
inline ::uint32_t Actor::Reader::getFirstVisibleChunkIndex() const { return _reader.getDataField< ::uint32_t>( ::capnp::bounded<1>() * ::capnp::ELEMENTS); }
inline ::uint32_t Actor::Builder::getFirstVisibleChunkIndex() { return _builder.getDataField< ::uint32_t>( ::capnp::bounded<1>() * ::capnp::ELEMENTS); }
inline void Actor::Builder::setFirstVisibleChunkIndex( ::uint32_t value) { _builder.setDataField< ::uint32_t>( ::capnp::bounded<1>() * ::capnp::ELEMENTS, value); }
inline ::uint32_t Actor::Reader::getVisibleChunkCount() const { return _reader.getDataField< ::uint32_t>( ::capnp::bounded<2>() * ::capnp::ELEMENTS); }
inline ::uint32_t Actor::Builder::getVisibleChunkCount() { return _builder.getDataField< ::uint32_t>( ::capnp::bounded<2>() * ::capnp::ELEMENTS); }
inline void Actor::Builder::setVisibleChunkCount( ::uint32_t value) { _builder.setDataField< ::uint32_t>( ::capnp::bounded<2>() * ::capnp::ELEMENTS, value); }
inline ::uint32_t Actor::Reader::getFirstGraphNodeIndex() const { return _reader.getDataField< ::uint32_t>( ::capnp::bounded<3>() * ::capnp::ELEMENTS); }
inline ::uint32_t Actor::Builder::getFirstGraphNodeIndex() { return _builder.getDataField< ::uint32_t>( ::capnp::bounded<3>() * ::capnp::ELEMENTS); }
inline void Actor::Builder::setFirstGraphNodeIndex( ::uint32_t value) { _builder.setDataField< ::uint32_t>( ::capnp::bounded<3>() * ::capnp::ELEMENTS, value); }
inline ::uint32_t Actor::Reader::getGraphNodeCount() const { return _reader.getDataField< ::uint32_t>( ::capnp::bounded<4>() * ::capnp::ELEMENTS); }
inline ::uint32_t Actor::Builder::getGraphNodeCount() { return _builder.getDataField< ::uint32_t>( ::capnp::bounded<4>() * ::capnp::ELEMENTS); }
inline void Actor::Builder::setGraphNodeCount( ::uint32_t value) { _builder.setDataField< ::uint32_t>( ::capnp::bounded<4>() * ::capnp::ELEMENTS, value); }
inline ::uint32_t Actor::Reader::getLeafChunkCount() const { return _reader.getDataField< ::uint32_t>( ::capnp::bounded<5>() * ::capnp::ELEMENTS); }
inline ::uint32_t Actor::Builder::getLeafChunkCount() { return _builder.getDataField< ::uint32_t>( ::capnp::bounded<5>() * ::capnp::ELEMENTS); }
inline void Actor::Builder::setLeafChunkCount( ::uint32_t value) { _builder.setDataField< ::uint32_t>( ::capnp::bounded<5>() * ::capnp::ELEMENTS, value); }
// FamilyGraph islandIds -- List(UInt32) in pointer slot 0.
inline bool FamilyGraph::Reader::hasIslandIds() const { return !_reader.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS).isNull(); }
inline bool FamilyGraph::Builder::hasIslandIds() { return !_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS).isNull(); }
inline ::capnp::List< ::uint32_t>::Reader FamilyGraph::Reader::getIslandIds() const { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::get(_reader.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS)); }
inline ::capnp::List< ::uint32_t>::Builder FamilyGraph::Builder::getIslandIds() { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::get(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS)); }
inline void FamilyGraph::Builder::setIslandIds( ::capnp::List< ::uint32_t>::Reader value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::set(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS), value); }
inline void FamilyGraph::Builder::setIslandIds(::kj::ArrayPtr<const ::uint32_t> value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::set(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS), value); }
// Opens FamilyGraph::Builder::initIslandIds(); the declaration continues on the next line.
inline ::capnp::List< ::uint32_t>::Builder
// Completes the declaration opened on the previous line
// ("inline ::capnp::List< ::uint32_t>::Builder").
FamilyGraph::Builder::initIslandIds(unsigned int size) { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::init(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS), size); }
inline void FamilyGraph::Builder::adoptIslandIds( ::capnp::Orphan< ::capnp::List< ::uint32_t>>&& value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::adopt(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS), kj::mv(value)); }
inline ::capnp::Orphan< ::capnp::List< ::uint32_t>> FamilyGraph::Builder::disownIslandIds() { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::disown(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS)); }
// dirtyNodeLinks -- List(UInt32) in pointer slot 1.
inline bool FamilyGraph::Reader::hasDirtyNodeLinks() const { return !_reader.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS).isNull(); }
inline bool FamilyGraph::Builder::hasDirtyNodeLinks() { return !_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS).isNull(); }
inline ::capnp::List< ::uint32_t>::Reader FamilyGraph::Reader::getDirtyNodeLinks() const { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::get(_reader.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS)); }
inline ::capnp::List< ::uint32_t>::Builder FamilyGraph::Builder::getDirtyNodeLinks() { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::get(_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS)); }
inline void FamilyGraph::Builder::setDirtyNodeLinks( ::capnp::List< ::uint32_t>::Reader value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::set(_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS), value); }
inline void FamilyGraph::Builder::setDirtyNodeLinks(::kj::ArrayPtr<const ::uint32_t> value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::set(_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS), value); }
inline ::capnp::List< ::uint32_t>::Builder FamilyGraph::Builder::initDirtyNodeLinks(unsigned int size) { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::init(_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS), size); }
inline void FamilyGraph::Builder::adoptDirtyNodeLinks( ::capnp::Orphan< ::capnp::List< ::uint32_t>>&& value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::adopt(_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS), kj::mv(value)); }
inline ::capnp::Orphan< ::capnp::List< ::uint32_t>> FamilyGraph::Builder::disownDirtyNodeLinks() { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::disown(_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS)); }
// firstDirtyNodeIndices -- List(UInt32) in pointer slot 2.
inline bool FamilyGraph::Reader::hasFirstDirtyNodeIndices() const { return !_reader.getPointerField( ::capnp::bounded<2>() * ::capnp::POINTERS).isNull(); }
inline bool FamilyGraph::Builder::hasFirstDirtyNodeIndices() { return !_builder.getPointerField( ::capnp::bounded<2>() * ::capnp::POINTERS).isNull(); }
inline ::capnp::List< ::uint32_t>::Reader FamilyGraph::Reader::getFirstDirtyNodeIndices() const { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::get(_reader.getPointerField( ::capnp::bounded<2>() * ::capnp::POINTERS)); }
inline ::capnp::List< ::uint32_t>::Builder FamilyGraph::Builder::getFirstDirtyNodeIndices() { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::get(_builder.getPointerField( ::capnp::bounded<2>() * ::capnp::POINTERS)); }
inline void FamilyGraph::Builder::setFirstDirtyNodeIndices( ::capnp::List< ::uint32_t>::Reader value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::set(_builder.getPointerField( ::capnp::bounded<2>() * ::capnp::POINTERS), value); }
inline void FamilyGraph::Builder::setFirstDirtyNodeIndices(::kj::ArrayPtr<const ::uint32_t> value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::set(_builder.getPointerField( ::capnp::bounded<2>() * ::capnp::POINTERS), value); }
inline ::capnp::List< ::uint32_t>::Builder FamilyGraph::Builder::initFirstDirtyNodeIndices(unsigned int size) { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::init(_builder.getPointerField( ::capnp::bounded<2>() * ::capnp::POINTERS), size); }
inline void FamilyGraph::Builder::adoptFirstDirtyNodeIndices( ::capnp::Orphan< ::capnp::List< ::uint32_t>>&& value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::adopt(_builder.getPointerField( ::capnp::bounded<2>() * ::capnp::POINTERS), kj::mv(value)); }
inline ::capnp::Orphan< ::capnp::List< ::uint32_t>> FamilyGraph::Builder::disownFirstDirtyNodeIndices() { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::disown(_builder.getPointerField( ::capnp::bounded<2>() * ::capnp::POINTERS)); }
// fastRoute -- List(UInt32) in pointer slot 3.
inline bool FamilyGraph::Reader::hasFastRoute() const { return !_reader.getPointerField( ::capnp::bounded<3>() * ::capnp::POINTERS).isNull(); }
inline bool FamilyGraph::Builder::hasFastRoute() { return !_builder.getPointerField( ::capnp::bounded<3>() * ::capnp::POINTERS).isNull(); }
inline ::capnp::List< ::uint32_t>::Reader FamilyGraph::Reader::getFastRoute() const { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::get(_reader.getPointerField( ::capnp::bounded<3>() * ::capnp::POINTERS)); }
inline ::capnp::List< ::uint32_t>::Builder FamilyGraph::Builder::getFastRoute() { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::get(_builder.getPointerField( ::capnp::bounded<3>() * ::capnp::POINTERS)); }
inline void FamilyGraph::Builder::setFastRoute( ::capnp::List< ::uint32_t>::Reader value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::set(_builder.getPointerField( ::capnp::bounded<3>() * ::capnp::POINTERS), value); }
inline void FamilyGraph::Builder::setFastRoute(::kj::ArrayPtr<const ::uint32_t> value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::set(_builder.getPointerField( ::capnp::bounded<3>() * ::capnp::POINTERS), value); }
inline ::capnp::List< ::uint32_t>::Builder FamilyGraph::Builder::initFastRoute(unsigned int size) { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::init(_builder.getPointerField( ::capnp::bounded<3>() * ::capnp::POINTERS), size); }
inline void FamilyGraph::Builder::adoptFastRoute( ::capnp::Orphan< ::capnp::List< ::uint32_t>>&& value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::adopt(_builder.getPointerField( ::capnp::bounded<3>() * ::capnp::POINTERS), kj::mv(value)); }
inline ::capnp::Orphan< ::capnp::List< ::uint32_t>> FamilyGraph::Builder::disownFastRoute() { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::disown(_builder.getPointerField( ::capnp::bounded<3>() * ::capnp::POINTERS)); }
// hopCounts -- List(UInt32) in pointer slot 4.
inline bool FamilyGraph::Reader::hasHopCounts() const { return !_reader.getPointerField( ::capnp::bounded<4>() * ::capnp::POINTERS).isNull(); }
inline bool FamilyGraph::Builder::hasHopCounts() { return !_builder.getPointerField( ::capnp::bounded<4>() * ::capnp::POINTERS).isNull(); }
inline ::capnp::List< ::uint32_t>::Reader FamilyGraph::Reader::getHopCounts() const { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::get(_reader.getPointerField( ::capnp::bounded<4>() * ::capnp::POINTERS)); }
inline ::capnp::List< ::uint32_t>::Builder FamilyGraph::Builder::getHopCounts() { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::get(_builder.getPointerField( ::capnp::bounded<4>() * ::capnp::POINTERS)); }
inline void FamilyGraph::Builder::setHopCounts( ::capnp::List< ::uint32_t>::Reader value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::set(_builder.getPointerField( ::capnp::bounded<4>() * ::capnp::POINTERS), value); }
inline void FamilyGraph::Builder::setHopCounts(::kj::ArrayPtr<const ::uint32_t> value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::set(_builder.getPointerField( ::capnp::bounded<4>() * ::capnp::POINTERS), value); }
// Opens FamilyGraph::Builder::initHopCounts(); the declaration continues on the next line.
inline ::capnp::List< ::uint32_t>::Builder
// Completes the declaration opened on the previous line
// ("inline ::capnp::List< ::uint32_t>::Builder").
FamilyGraph::Builder::initHopCounts(unsigned int size) { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::init(_builder.getPointerField( ::capnp::bounded<4>() * ::capnp::POINTERS), size); }
inline void FamilyGraph::Builder::adoptHopCounts( ::capnp::Orphan< ::capnp::List< ::uint32_t>>&& value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::adopt(_builder.getPointerField( ::capnp::bounded<4>() * ::capnp::POINTERS), kj::mv(value)); }
inline ::capnp::Orphan< ::capnp::List< ::uint32_t>> FamilyGraph::Builder::disownHopCounts() { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::disown(_builder.getPointerField( ::capnp::bounded<4>() * ::capnp::POINTERS)); }
// isEdgeRemoved -- Data blob in pointer slot 5 (presumably a bitfield/byte array; confirm against the schema).
inline bool FamilyGraph::Reader::hasIsEdgeRemoved() const { return !_reader.getPointerField( ::capnp::bounded<5>() * ::capnp::POINTERS).isNull(); }
inline bool FamilyGraph::Builder::hasIsEdgeRemoved() { return !_builder.getPointerField( ::capnp::bounded<5>() * ::capnp::POINTERS).isNull(); }
inline ::capnp::Data::Reader FamilyGraph::Reader::getIsEdgeRemoved() const { return ::capnp::_::PointerHelpers< ::capnp::Data>::get(_reader.getPointerField( ::capnp::bounded<5>() * ::capnp::POINTERS)); }
inline ::capnp::Data::Builder FamilyGraph::Builder::getIsEdgeRemoved() { return ::capnp::_::PointerHelpers< ::capnp::Data>::get(_builder.getPointerField( ::capnp::bounded<5>() * ::capnp::POINTERS)); }
inline void FamilyGraph::Builder::setIsEdgeRemoved( ::capnp::Data::Reader value) { ::capnp::_::PointerHelpers< ::capnp::Data>::set(_builder.getPointerField( ::capnp::bounded<5>() * ::capnp::POINTERS), value); }
inline ::capnp::Data::Builder FamilyGraph::Builder::initIsEdgeRemoved(unsigned int size) { return ::capnp::_::PointerHelpers< ::capnp::Data>::init(_builder.getPointerField( ::capnp::bounded<5>() * ::capnp::POINTERS), size); }
inline void FamilyGraph::Builder::adoptIsEdgeRemoved( ::capnp::Orphan< ::capnp::Data>&& value) { ::capnp::_::PointerHelpers< ::capnp::Data>::adopt(_builder.getPointerField( ::capnp::bounded<5>() * ::capnp::POINTERS), kj::mv(value)); }
inline ::capnp::Orphan< ::capnp::Data> FamilyGraph::Builder::disownIsEdgeRemoved() { return ::capnp::_::PointerHelpers< ::capnp::Data>::disown(_builder.getPointerField( ::capnp::bounded<5>() * ::capnp::POINTERS)); }
// isNodeInDirtyList -- Data blob in pointer slot 6.
inline bool FamilyGraph::Reader::hasIsNodeInDirtyList() const { return !_reader.getPointerField( ::capnp::bounded<6>() * ::capnp::POINTERS).isNull(); }
inline bool FamilyGraph::Builder::hasIsNodeInDirtyList() { return !_builder.getPointerField( ::capnp::bounded<6>() * ::capnp::POINTERS).isNull(); }
inline ::capnp::Data::Reader FamilyGraph::Reader::getIsNodeInDirtyList() const { return ::capnp::_::PointerHelpers< ::capnp::Data>::get(_reader.getPointerField( ::capnp::bounded<6>() * ::capnp::POINTERS)); }
inline ::capnp::Data::Builder FamilyGraph::Builder::getIsNodeInDirtyList() { return ::capnp::_::PointerHelpers< ::capnp::Data>::get(_builder.getPointerField( ::capnp::bounded<6>() * ::capnp::POINTERS)); }
inline void FamilyGraph::Builder::setIsNodeInDirtyList( ::capnp::Data::Reader value) { ::capnp::_::PointerHelpers< ::capnp::Data>::set(_builder.getPointerField( ::capnp::bounded<6>() * ::capnp::POINTERS), value); }
inline ::capnp::Data::Builder FamilyGraph::Builder::initIsNodeInDirtyList(unsigned int size) { return ::capnp::_::PointerHelpers< ::capnp::Data>::init(_builder.getPointerField( ::capnp::bounded<6>() * ::capnp::POINTERS), size); }
inline void FamilyGraph::Builder::adoptIsNodeInDirtyList( ::capnp::Orphan< ::capnp::Data>&& value) { ::capnp::_::PointerHelpers< ::capnp::Data>::adopt(_builder.getPointerField( ::capnp::bounded<6>() * ::capnp::POINTERS), kj::mv(value)); }
inline ::capnp::Orphan< ::capnp::Data> FamilyGraph::Builder::disownIsNodeInDirtyList() { return ::capnp::_::PointerHelpers< ::capnp::Data>::disown(_builder.getPointerField( ::capnp::bounded<6>() * ::capnp::POINTERS)); }
// Opens FamilyGraph::Reader::getNodeCount(); the body continues on the next line.
inline ::uint32_t FamilyGraph::Reader::getNodeCount()
const { return _reader.getDataField< ::uint32_t>( ::capnp::bounded<0>() * ::capnp::ELEMENTS); } inline ::uint32_t FamilyGraph::Builder::getNodeCount() { return _builder.getDataField< ::uint32_t>( ::capnp::bounded<0>() * ::capnp::ELEMENTS); } inline void FamilyGraph::Builder::setNodeCount( ::uint32_t value) { _builder.setDataField< ::uint32_t>( ::capnp::bounded<0>() * ::capnp::ELEMENTS, value); } inline ::Nv::Blast::Serialization::NvBlastDataBlock::Type NvBlastDataBlock::Reader::getDataType() const { return _reader.getDataField< ::Nv::Blast::Serialization::NvBlastDataBlock::Type>( ::capnp::bounded<0>() * ::capnp::ELEMENTS); } inline ::Nv::Blast::Serialization::NvBlastDataBlock::Type NvBlastDataBlock::Builder::getDataType() { return _builder.getDataField< ::Nv::Blast::Serialization::NvBlastDataBlock::Type>( ::capnp::bounded<0>() * ::capnp::ELEMENTS); } inline void NvBlastDataBlock::Builder::setDataType( ::Nv::Blast::Serialization::NvBlastDataBlock::Type value) { _builder.setDataField< ::Nv::Blast::Serialization::NvBlastDataBlock::Type>( ::capnp::bounded<0>() * ::capnp::ELEMENTS, value); } inline ::uint32_t NvBlastDataBlock::Reader::getFormatVersion() const { return _reader.getDataField< ::uint32_t>( ::capnp::bounded<1>() * ::capnp::ELEMENTS); } inline ::uint32_t NvBlastDataBlock::Builder::getFormatVersion() { return _builder.getDataField< ::uint32_t>( ::capnp::bounded<1>() * ::capnp::ELEMENTS); } inline void NvBlastDataBlock::Builder::setFormatVersion( ::uint32_t value) { _builder.setDataField< ::uint32_t>( ::capnp::bounded<1>() * ::capnp::ELEMENTS, value); } inline ::uint32_t NvBlastDataBlock::Reader::getSize() const { return _reader.getDataField< ::uint32_t>( ::capnp::bounded<2>() * ::capnp::ELEMENTS); } inline ::uint32_t NvBlastDataBlock::Builder::getSize() { return _builder.getDataField< ::uint32_t>( ::capnp::bounded<2>() * ::capnp::ELEMENTS); } inline void NvBlastDataBlock::Builder::setSize( ::uint32_t value) { _builder.setDataField< ::uint32_t>( 
::capnp::bounded<2>() * ::capnp::ELEMENTS, value); } inline bool NvBlastChunk::Reader::hasCentroid() const { return !_reader.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS).isNull(); } inline bool NvBlastChunk::Builder::hasCentroid() { return !_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS).isNull(); } inline ::capnp::List<float>::Reader NvBlastChunk::Reader::getCentroid() const { return ::capnp::_::PointerHelpers< ::capnp::List<float>>::get(_reader.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS)); } inline ::capnp::List<float>::Builder NvBlastChunk::Builder::getCentroid() { return ::capnp::_::PointerHelpers< ::capnp::List<float>>::get(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS)); } inline void NvBlastChunk::Builder::setCentroid( ::capnp::List<float>::Reader value) { ::capnp::_::PointerHelpers< ::capnp::List<float>>::set(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS), value); } inline void NvBlastChunk::Builder::setCentroid(::kj::ArrayPtr<const float> value) { ::capnp::_::PointerHelpers< ::capnp::List<float>>::set(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS), value); } inline ::capnp::List<float>::Builder NvBlastChunk::Builder::initCentroid(unsigned int size) { return ::capnp::_::PointerHelpers< ::capnp::List<float>>::init(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS), size); } inline void NvBlastChunk::Builder::adoptCentroid( ::capnp::Orphan< ::capnp::List<float>>&& value) { ::capnp::_::PointerHelpers< ::capnp::List<float>>::adopt(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS), kj::mv(value)); } inline ::capnp::Orphan< ::capnp::List<float>> NvBlastChunk::Builder::disownCentroid() { return ::capnp::_::PointerHelpers< ::capnp::List<float>>::disown(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS)); } inline float NvBlastChunk::Reader::getVolume() const { return _reader.getDataField<float>( 
::capnp::bounded<0>() * ::capnp::ELEMENTS); } inline float NvBlastChunk::Builder::getVolume() { return _builder.getDataField<float>( ::capnp::bounded<0>() * ::capnp::ELEMENTS); } inline void NvBlastChunk::Builder::setVolume(float value) { _builder.setDataField<float>( ::capnp::bounded<0>() * ::capnp::ELEMENTS, value); } inline ::uint32_t NvBlastChunk::Reader::getParentChunkIndex() const { return _reader.getDataField< ::uint32_t>( ::capnp::bounded<1>() * ::capnp::ELEMENTS); } inline ::uint32_t NvBlastChunk::Builder::getParentChunkIndex() { return _builder.getDataField< ::uint32_t>( ::capnp::bounded<1>() * ::capnp::ELEMENTS); } inline void NvBlastChunk::Builder::setParentChunkIndex( ::uint32_t value) { _builder.setDataField< ::uint32_t>( ::capnp::bounded<1>() * ::capnp::ELEMENTS, value); } inline ::uint32_t NvBlastChunk::Reader::getFirstChildIndex() const { return _reader.getDataField< ::uint32_t>( ::capnp::bounded<2>() * ::capnp::ELEMENTS); } inline ::uint32_t NvBlastChunk::Builder::getFirstChildIndex() { return _builder.getDataField< ::uint32_t>( ::capnp::bounded<2>() * ::capnp::ELEMENTS); } inline void NvBlastChunk::Builder::setFirstChildIndex( ::uint32_t value) { _builder.setDataField< ::uint32_t>( ::capnp::bounded<2>() * ::capnp::ELEMENTS, value); } inline ::uint32_t NvBlastChunk::Reader::getChildIndexStop() const { return _reader.getDataField< ::uint32_t>( ::capnp::bounded<3>() * ::capnp::ELEMENTS); } inline ::uint32_t NvBlastChunk::Builder::getChildIndexStop() { return _builder.getDataField< ::uint32_t>( ::capnp::bounded<3>() * ::capnp::ELEMENTS); } inline void NvBlastChunk::Builder::setChildIndexStop( ::uint32_t value) { _builder.setDataField< ::uint32_t>( ::capnp::bounded<3>() * ::capnp::ELEMENTS, value); } inline ::uint32_t NvBlastChunk::Reader::getUserData() const { return _reader.getDataField< ::uint32_t>( ::capnp::bounded<4>() * ::capnp::ELEMENTS); } inline ::uint32_t NvBlastChunk::Builder::getUserData() { return _builder.getDataField< ::uint32_t>( 
::capnp::bounded<4>() * ::capnp::ELEMENTS); } inline void NvBlastChunk::Builder::setUserData( ::uint32_t value) { _builder.setDataField< ::uint32_t>( ::capnp::bounded<4>() * ::capnp::ELEMENTS, value); } inline bool NvBlastBond::Reader::hasNormal() const { return !_reader.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS).isNull(); } inline bool NvBlastBond::Builder::hasNormal() { return !_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS).isNull(); } inline ::capnp::List<float>::Reader NvBlastBond::Reader::getNormal() const { return ::capnp::_::PointerHelpers< ::capnp::List<float>>::get(_reader.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS)); } inline ::capnp::List<float>::Builder NvBlastBond::Builder::getNormal() { return ::capnp::_::PointerHelpers< ::capnp::List<float>>::get(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS)); } inline void NvBlastBond::Builder::setNormal( ::capnp::List<float>::Reader value) { ::capnp::_::PointerHelpers< ::capnp::List<float>>::set(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS), value); } inline void NvBlastBond::Builder::setNormal(::kj::ArrayPtr<const float> value) { ::capnp::_::PointerHelpers< ::capnp::List<float>>::set(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS), value); } inline ::capnp::List<float>::Builder NvBlastBond::Builder::initNormal(unsigned int size) { return ::capnp::_::PointerHelpers< ::capnp::List<float>>::init(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS), size); } inline void NvBlastBond::Builder::adoptNormal( ::capnp::Orphan< ::capnp::List<float>>&& value) { ::capnp::_::PointerHelpers< ::capnp::List<float>>::adopt(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS), kj::mv(value)); } inline ::capnp::Orphan< ::capnp::List<float>> NvBlastBond::Builder::disownNormal() { return ::capnp::_::PointerHelpers< ::capnp::List<float>>::disown(_builder.getPointerField( 
::capnp::bounded<0>() * ::capnp::POINTERS)); } inline float NvBlastBond::Reader::getArea() const { return _reader.getDataField<float>( ::capnp::bounded<0>() * ::capnp::ELEMENTS); } inline float NvBlastBond::Builder::getArea() { return _builder.getDataField<float>( ::capnp::bounded<0>() * ::capnp::ELEMENTS); } inline void NvBlastBond::Builder::setArea(float value) { _builder.setDataField<float>( ::capnp::bounded<0>() * ::capnp::ELEMENTS, value); } inline bool NvBlastBond::Reader::hasCentroid() const { return !_reader.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS).isNull(); } inline bool NvBlastBond::Builder::hasCentroid() { return !_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS).isNull(); } inline ::capnp::List<float>::Reader NvBlastBond::Reader::getCentroid() const { return ::capnp::_::PointerHelpers< ::capnp::List<float>>::get(_reader.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS)); } inline ::capnp::List<float>::Builder NvBlastBond::Builder::getCentroid() { return ::capnp::_::PointerHelpers< ::capnp::List<float>>::get(_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS)); } inline void NvBlastBond::Builder::setCentroid( ::capnp::List<float>::Reader value) { ::capnp::_::PointerHelpers< ::capnp::List<float>>::set(_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS), value); } inline void NvBlastBond::Builder::setCentroid(::kj::ArrayPtr<const float> value) { ::capnp::_::PointerHelpers< ::capnp::List<float>>::set(_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS), value); } inline ::capnp::List<float>::Builder NvBlastBond::Builder::initCentroid(unsigned int size) { return ::capnp::_::PointerHelpers< ::capnp::List<float>>::init(_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS), size); } inline void NvBlastBond::Builder::adoptCentroid( ::capnp::Orphan< ::capnp::List<float>>&& value) { ::capnp::_::PointerHelpers< 
::capnp::List<float>>::adopt(_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS), kj::mv(value)); } inline ::capnp::Orphan< ::capnp::List<float>> NvBlastBond::Builder::disownCentroid() { return ::capnp::_::PointerHelpers< ::capnp::List<float>>::disown(_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS)); } inline ::uint32_t NvBlastBond::Reader::getUserData() const { return _reader.getDataField< ::uint32_t>( ::capnp::bounded<1>() * ::capnp::ELEMENTS); } inline ::uint32_t NvBlastBond::Builder::getUserData() { return _builder.getDataField< ::uint32_t>( ::capnp::bounded<1>() * ::capnp::ELEMENTS); } inline void NvBlastBond::Builder::setUserData( ::uint32_t value) { _builder.setDataField< ::uint32_t>( ::capnp::bounded<1>() * ::capnp::ELEMENTS, value); } inline ::uint32_t NvBlastSupportGraph::Reader::getNodeCount() const { return _reader.getDataField< ::uint32_t>( ::capnp::bounded<0>() * ::capnp::ELEMENTS); } inline ::uint32_t NvBlastSupportGraph::Builder::getNodeCount() { return _builder.getDataField< ::uint32_t>( ::capnp::bounded<0>() * ::capnp::ELEMENTS); } inline void NvBlastSupportGraph::Builder::setNodeCount( ::uint32_t value) { _builder.setDataField< ::uint32_t>( ::capnp::bounded<0>() * ::capnp::ELEMENTS, value); } inline bool NvBlastSupportGraph::Reader::hasChunkIndices() const { return !_reader.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS).isNull(); } inline bool NvBlastSupportGraph::Builder::hasChunkIndices() { return !_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS).isNull(); } inline ::capnp::List< ::uint32_t>::Reader NvBlastSupportGraph::Reader::getChunkIndices() const { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::get(_reader.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS)); } inline ::capnp::List< ::uint32_t>::Builder NvBlastSupportGraph::Builder::getChunkIndices() { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::get(_builder.getPointerField( 
::capnp::bounded<0>() * ::capnp::POINTERS)); } inline void NvBlastSupportGraph::Builder::setChunkIndices( ::capnp::List< ::uint32_t>::Reader value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::set(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS), value); } inline void NvBlastSupportGraph::Builder::setChunkIndices(::kj::ArrayPtr<const ::uint32_t> value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::set(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS), value); } inline ::capnp::List< ::uint32_t>::Builder NvBlastSupportGraph::Builder::initChunkIndices(unsigned int size) { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::init(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS), size); } inline void NvBlastSupportGraph::Builder::adoptChunkIndices( ::capnp::Orphan< ::capnp::List< ::uint32_t>>&& value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::adopt(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS), kj::mv(value)); } inline ::capnp::Orphan< ::capnp::List< ::uint32_t>> NvBlastSupportGraph::Builder::disownChunkIndices() { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::disown(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS)); } inline bool NvBlastSupportGraph::Reader::hasAdjacencyPartition() const { return !_reader.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS).isNull(); } inline bool NvBlastSupportGraph::Builder::hasAdjacencyPartition() { return !_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS).isNull(); } inline ::capnp::List< ::uint32_t>::Reader NvBlastSupportGraph::Reader::getAdjacencyPartition() const { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::get(_reader.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS)); } inline ::capnp::List< ::uint32_t>::Builder NvBlastSupportGraph::Builder::getAdjacencyPartition() { return 
::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::get(_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS)); } inline void NvBlastSupportGraph::Builder::setAdjacencyPartition( ::capnp::List< ::uint32_t>::Reader value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::set(_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS), value); } inline void NvBlastSupportGraph::Builder::setAdjacencyPartition(::kj::ArrayPtr<const ::uint32_t> value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::set(_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS), value); } inline ::capnp::List< ::uint32_t>::Builder NvBlastSupportGraph::Builder::initAdjacencyPartition(unsigned int size) { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::init(_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS), size); } inline void NvBlastSupportGraph::Builder::adoptAdjacencyPartition( ::capnp::Orphan< ::capnp::List< ::uint32_t>>&& value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::adopt(_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS), kj::mv(value)); } inline ::capnp::Orphan< ::capnp::List< ::uint32_t>> NvBlastSupportGraph::Builder::disownAdjacencyPartition() { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::disown(_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS)); } inline bool NvBlastSupportGraph::Reader::hasAdjacentNodeIndices() const { return !_reader.getPointerField( ::capnp::bounded<2>() * ::capnp::POINTERS).isNull(); } inline bool NvBlastSupportGraph::Builder::hasAdjacentNodeIndices() { return !_builder.getPointerField( ::capnp::bounded<2>() * ::capnp::POINTERS).isNull(); } inline ::capnp::List< ::uint32_t>::Reader NvBlastSupportGraph::Reader::getAdjacentNodeIndices() const { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::get(_reader.getPointerField( ::capnp::bounded<2>() * ::capnp::POINTERS)); } inline 
::capnp::List< ::uint32_t>::Builder NvBlastSupportGraph::Builder::getAdjacentNodeIndices() { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::get(_builder.getPointerField( ::capnp::bounded<2>() * ::capnp::POINTERS)); } inline void NvBlastSupportGraph::Builder::setAdjacentNodeIndices( ::capnp::List< ::uint32_t>::Reader value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::set(_builder.getPointerField( ::capnp::bounded<2>() * ::capnp::POINTERS), value); } inline void NvBlastSupportGraph::Builder::setAdjacentNodeIndices(::kj::ArrayPtr<const ::uint32_t> value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::set(_builder.getPointerField( ::capnp::bounded<2>() * ::capnp::POINTERS), value); } inline ::capnp::List< ::uint32_t>::Builder NvBlastSupportGraph::Builder::initAdjacentNodeIndices(unsigned int size) { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::init(_builder.getPointerField( ::capnp::bounded<2>() * ::capnp::POINTERS), size); } inline void NvBlastSupportGraph::Builder::adoptAdjacentNodeIndices( ::capnp::Orphan< ::capnp::List< ::uint32_t>>&& value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::adopt(_builder.getPointerField( ::capnp::bounded<2>() * ::capnp::POINTERS), kj::mv(value)); } inline ::capnp::Orphan< ::capnp::List< ::uint32_t>> NvBlastSupportGraph::Builder::disownAdjacentNodeIndices() { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::disown(_builder.getPointerField( ::capnp::bounded<2>() * ::capnp::POINTERS)); } inline bool NvBlastSupportGraph::Reader::hasAdjacentBondIndices() const { return !_reader.getPointerField( ::capnp::bounded<3>() * ::capnp::POINTERS).isNull(); } inline bool NvBlastSupportGraph::Builder::hasAdjacentBondIndices() { return !_builder.getPointerField( ::capnp::bounded<3>() * ::capnp::POINTERS).isNull(); } inline ::capnp::List< ::uint32_t>::Reader NvBlastSupportGraph::Reader::getAdjacentBondIndices() const { return ::capnp::_::PointerHelpers< 
::capnp::List< ::uint32_t>>::get(_reader.getPointerField( ::capnp::bounded<3>() * ::capnp::POINTERS)); } inline ::capnp::List< ::uint32_t>::Builder NvBlastSupportGraph::Builder::getAdjacentBondIndices() { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::get(_builder.getPointerField( ::capnp::bounded<3>() * ::capnp::POINTERS)); } inline void NvBlastSupportGraph::Builder::setAdjacentBondIndices( ::capnp::List< ::uint32_t>::Reader value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::set(_builder.getPointerField( ::capnp::bounded<3>() * ::capnp::POINTERS), value); } inline void NvBlastSupportGraph::Builder::setAdjacentBondIndices(::kj::ArrayPtr<const ::uint32_t> value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::set(_builder.getPointerField( ::capnp::bounded<3>() * ::capnp::POINTERS), value); } inline ::capnp::List< ::uint32_t>::Builder NvBlastSupportGraph::Builder::initAdjacentBondIndices(unsigned int size) { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::init(_builder.getPointerField( ::capnp::bounded<3>() * ::capnp::POINTERS), size); } inline void NvBlastSupportGraph::Builder::adoptAdjacentBondIndices( ::capnp::Orphan< ::capnp::List< ::uint32_t>>&& value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::adopt(_builder.getPointerField( ::capnp::bounded<3>() * ::capnp::POINTERS), kj::mv(value)); } inline ::capnp::Orphan< ::capnp::List< ::uint32_t>> NvBlastSupportGraph::Builder::disownAdjacentBondIndices() { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::disown(_builder.getPointerField( ::capnp::bounded<3>() * ::capnp::POINTERS)); } inline bool UUID::Reader::hasValue() const { return !_reader.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS).isNull(); } inline bool UUID::Builder::hasValue() { return !_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS).isNull(); } inline ::capnp::Data::Reader UUID::Reader::getValue() const { return 
::capnp::_::PointerHelpers< ::capnp::Data>::get(_reader.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS)); } inline ::capnp::Data::Builder UUID::Builder::getValue() { return ::capnp::_::PointerHelpers< ::capnp::Data>::get(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS)); } inline void UUID::Builder::setValue( ::capnp::Data::Reader value) { ::capnp::_::PointerHelpers< ::capnp::Data>::set(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS), value); } inline ::capnp::Data::Builder UUID::Builder::initValue(unsigned int size) { return ::capnp::_::PointerHelpers< ::capnp::Data>::init(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS), size); } inline void UUID::Builder::adoptValue( ::capnp::Orphan< ::capnp::Data>&& value) { ::capnp::_::PointerHelpers< ::capnp::Data>::adopt(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS), kj::mv(value)); } inline ::capnp::Orphan< ::capnp::Data> UUID::Builder::disownValue() { return ::capnp::_::PointerHelpers< ::capnp::Data>::disown(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS)); } } // namespace } // namespace } // namespace #endif // CAPNP_INCLUDED_9a4a58fac38375e0_
119,518
C
41.654889
141
0.682366
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/serialization/generated/NvBlastExtTkSerialization-capn.h
// Generated by Cap'n Proto compiler, DO NOT EDIT // source: NvBlastExtTkSerialization-capn #ifndef CAPNP_INCLUDED_affe4498f275ee58_ #define CAPNP_INCLUDED_affe4498f275ee58_ #include <capnp/generated-header-support.h> #if CAPNP_VERSION != 6001 #error "Version mismatch between generated code and library headers. You must use the same version of the Cap'n Proto compiler and library." #endif #include "NvBlastExtLlSerialization-capn.h" namespace capnp { namespace schemas { CAPNP_DECLARE_SCHEMA(ffd67c4b7067dde6); CAPNP_DECLARE_SCHEMA(b7dbad810488a897); CAPNP_DECLARE_SCHEMA(bf661e95794f2749); } // namespace schemas } // namespace capnp namespace Nv { namespace Blast { namespace Serialization { struct TkAsset { TkAsset() = delete; class Reader; class Builder; class Pipeline; struct _capnpPrivate { CAPNP_DECLARE_STRUCT_HEADER(ffd67c4b7067dde6, 0, 2) #if !CAPNP_LITE static constexpr ::capnp::_::RawBrandedSchema const* brand() { return &schema->defaultBrand; } #endif // !CAPNP_LITE }; }; struct TkAssetJointDesc { TkAssetJointDesc() = delete; class Reader; class Builder; class Pipeline; struct _capnpPrivate { CAPNP_DECLARE_STRUCT_HEADER(b7dbad810488a897, 0, 2) #if !CAPNP_LITE static constexpr ::capnp::_::RawBrandedSchema const* brand() { return &schema->defaultBrand; } #endif // !CAPNP_LITE }; }; struct NvVec3 { NvVec3() = delete; class Reader; class Builder; class Pipeline; struct _capnpPrivate { CAPNP_DECLARE_STRUCT_HEADER(bf661e95794f2749, 2, 0) #if !CAPNP_LITE static constexpr ::capnp::_::RawBrandedSchema const* brand() { return &schema->defaultBrand; } #endif // !CAPNP_LITE }; }; // ======================================================================================= class TkAsset::Reader { public: typedef TkAsset Reads; Reader() = default; inline explicit Reader(::capnp::_::StructReader base): _reader(base) {} inline ::capnp::MessageSize totalSize() const { return _reader.totalSize().asPublic(); } #if !CAPNP_LITE inline ::kj::StringTree toString() const { return 
::capnp::_::structString(_reader, *_capnpPrivate::brand()); } #endif // !CAPNP_LITE inline bool hasAssetLL() const; inline ::Nv::Blast::Serialization::Asset::Reader getAssetLL() const; inline bool hasJointDescs() const; inline ::capnp::List< ::Nv::Blast::Serialization::TkAssetJointDesc>::Reader getJointDescs() const; private: ::capnp::_::StructReader _reader; template <typename, ::capnp::Kind> friend struct ::capnp::ToDynamic_; template <typename, ::capnp::Kind> friend struct ::capnp::_::PointerHelpers; template <typename, ::capnp::Kind> friend struct ::capnp::List; friend class ::capnp::MessageBuilder; friend class ::capnp::Orphanage; }; class TkAsset::Builder { public: typedef TkAsset Builds; Builder() = delete; // Deleted to discourage incorrect usage. // You can explicitly initialize to nullptr instead. inline Builder(decltype(nullptr)) {} inline explicit Builder(::capnp::_::StructBuilder base): _builder(base) {} inline operator Reader() const { return Reader(_builder.asReader()); } inline Reader asReader() const { return *this; } inline ::capnp::MessageSize totalSize() const { return asReader().totalSize(); } #if !CAPNP_LITE inline ::kj::StringTree toString() const { return asReader().toString(); } #endif // !CAPNP_LITE inline bool hasAssetLL(); inline ::Nv::Blast::Serialization::Asset::Builder getAssetLL(); inline void setAssetLL( ::Nv::Blast::Serialization::Asset::Reader value); inline ::Nv::Blast::Serialization::Asset::Builder initAssetLL(); inline void adoptAssetLL(::capnp::Orphan< ::Nv::Blast::Serialization::Asset>&& value); inline ::capnp::Orphan< ::Nv::Blast::Serialization::Asset> disownAssetLL(); inline bool hasJointDescs(); inline ::capnp::List< ::Nv::Blast::Serialization::TkAssetJointDesc>::Builder getJointDescs(); inline void setJointDescs( ::capnp::List< ::Nv::Blast::Serialization::TkAssetJointDesc>::Reader value); inline ::capnp::List< ::Nv::Blast::Serialization::TkAssetJointDesc>::Builder initJointDescs(unsigned int size); inline void 
adoptJointDescs(::capnp::Orphan< ::capnp::List< ::Nv::Blast::Serialization::TkAssetJointDesc>>&& value); inline ::capnp::Orphan< ::capnp::List< ::Nv::Blast::Serialization::TkAssetJointDesc>> disownJointDescs(); private: ::capnp::_::StructBuilder _builder; template <typename, ::capnp::Kind> friend struct ::capnp::ToDynamic_; friend class ::capnp::Orphanage; template <typename, ::capnp::Kind> friend struct ::capnp::_::PointerHelpers; }; #if !CAPNP_LITE class TkAsset::Pipeline { public: typedef TkAsset Pipelines; inline Pipeline(decltype(nullptr)): _typeless(nullptr) {} inline explicit Pipeline(::capnp::AnyPointer::Pipeline&& typeless) : _typeless(kj::mv(typeless)) {} inline ::Nv::Blast::Serialization::Asset::Pipeline getAssetLL(); private: ::capnp::AnyPointer::Pipeline _typeless; friend class ::capnp::PipelineHook; template <typename, ::capnp::Kind> friend struct ::capnp::ToDynamic_; }; #endif // !CAPNP_LITE class TkAssetJointDesc::Reader { public: typedef TkAssetJointDesc Reads; Reader() = default; inline explicit Reader(::capnp::_::StructReader base): _reader(base) {} inline ::capnp::MessageSize totalSize() const { return _reader.totalSize().asPublic(); } #if !CAPNP_LITE inline ::kj::StringTree toString() const { return ::capnp::_::structString(_reader, *_capnpPrivate::brand()); } #endif // !CAPNP_LITE inline bool hasNodeIndices() const; inline ::capnp::List< ::uint32_t>::Reader getNodeIndices() const; inline bool hasAttachPositions() const; inline ::capnp::List< ::Nv::Blast::Serialization::NvVec3>::Reader getAttachPositions() const; private: ::capnp::_::StructReader _reader; template <typename, ::capnp::Kind> friend struct ::capnp::ToDynamic_; template <typename, ::capnp::Kind> friend struct ::capnp::_::PointerHelpers; template <typename, ::capnp::Kind> friend struct ::capnp::List; friend class ::capnp::MessageBuilder; friend class ::capnp::Orphanage; }; class TkAssetJointDesc::Builder { public: typedef TkAssetJointDesc Builds; Builder() = delete; // Deleted to 
discourage incorrect usage. // You can explicitly initialize to nullptr instead. inline Builder(decltype(nullptr)) {} inline explicit Builder(::capnp::_::StructBuilder base): _builder(base) {} inline operator Reader() const { return Reader(_builder.asReader()); } inline Reader asReader() const { return *this; } inline ::capnp::MessageSize totalSize() const { return asReader().totalSize(); } #if !CAPNP_LITE inline ::kj::StringTree toString() const { return asReader().toString(); } #endif // !CAPNP_LITE inline bool hasNodeIndices(); inline ::capnp::List< ::uint32_t>::Builder getNodeIndices(); inline void setNodeIndices( ::capnp::List< ::uint32_t>::Reader value); inline void setNodeIndices(::kj::ArrayPtr<const ::uint32_t> value); inline ::capnp::List< ::uint32_t>::Builder initNodeIndices(unsigned int size); inline void adoptNodeIndices(::capnp::Orphan< ::capnp::List< ::uint32_t>>&& value); inline ::capnp::Orphan< ::capnp::List< ::uint32_t>> disownNodeIndices(); inline bool hasAttachPositions(); inline ::capnp::List< ::Nv::Blast::Serialization::NvVec3>::Builder getAttachPositions(); inline void setAttachPositions( ::capnp::List< ::Nv::Blast::Serialization::NvVec3>::Reader value); inline ::capnp::List< ::Nv::Blast::Serialization::NvVec3>::Builder initAttachPositions(unsigned int size); inline void adoptAttachPositions(::capnp::Orphan< ::capnp::List< ::Nv::Blast::Serialization::NvVec3>>&& value); inline ::capnp::Orphan< ::capnp::List< ::Nv::Blast::Serialization::NvVec3>> disownAttachPositions(); private: ::capnp::_::StructBuilder _builder; template <typename, ::capnp::Kind> friend struct ::capnp::ToDynamic_; friend class ::capnp::Orphanage; template <typename, ::capnp::Kind> friend struct ::capnp::_::PointerHelpers; }; #if !CAPNP_LITE class TkAssetJointDesc::Pipeline { public: typedef TkAssetJointDesc Pipelines; inline Pipeline(decltype(nullptr)): _typeless(nullptr) {} inline explicit Pipeline(::capnp::AnyPointer::Pipeline&& typeless) : _typeless(kj::mv(typeless)) {} 
private: ::capnp::AnyPointer::Pipeline _typeless; friend class ::capnp::PipelineHook; template <typename, ::capnp::Kind> friend struct ::capnp::ToDynamic_; }; #endif // !CAPNP_LITE class NvVec3::Reader { public: typedef NvVec3 Reads; Reader() = default; inline explicit Reader(::capnp::_::StructReader base): _reader(base) {} inline ::capnp::MessageSize totalSize() const { return _reader.totalSize().asPublic(); } #if !CAPNP_LITE inline ::kj::StringTree toString() const { return ::capnp::_::structString(_reader, *_capnpPrivate::brand()); } #endif // !CAPNP_LITE inline float getX() const; inline float getY() const; inline float getZ() const; private: ::capnp::_::StructReader _reader; template <typename, ::capnp::Kind> friend struct ::capnp::ToDynamic_; template <typename, ::capnp::Kind> friend struct ::capnp::_::PointerHelpers; template <typename, ::capnp::Kind> friend struct ::capnp::List; friend class ::capnp::MessageBuilder; friend class ::capnp::Orphanage; }; class NvVec3::Builder { public: typedef NvVec3 Builds; Builder() = delete; // Deleted to discourage incorrect usage. // You can explicitly initialize to nullptr instead. 
inline Builder(decltype(nullptr)) {} inline explicit Builder(::capnp::_::StructBuilder base): _builder(base) {} inline operator Reader() const { return Reader(_builder.asReader()); } inline Reader asReader() const { return *this; } inline ::capnp::MessageSize totalSize() const { return asReader().totalSize(); } #if !CAPNP_LITE inline ::kj::StringTree toString() const { return asReader().toString(); } #endif // !CAPNP_LITE inline float getX(); inline void setX(float value); inline float getY(); inline void setY(float value); inline float getZ(); inline void setZ(float value); private: ::capnp::_::StructBuilder _builder; template <typename, ::capnp::Kind> friend struct ::capnp::ToDynamic_; friend class ::capnp::Orphanage; template <typename, ::capnp::Kind> friend struct ::capnp::_::PointerHelpers; }; #if !CAPNP_LITE class NvVec3::Pipeline { public: typedef NvVec3 Pipelines; inline Pipeline(decltype(nullptr)): _typeless(nullptr) {} inline explicit Pipeline(::capnp::AnyPointer::Pipeline&& typeless) : _typeless(kj::mv(typeless)) {} private: ::capnp::AnyPointer::Pipeline _typeless; friend class ::capnp::PipelineHook; template <typename, ::capnp::Kind> friend struct ::capnp::ToDynamic_; }; #endif // !CAPNP_LITE // ======================================================================================= inline bool TkAsset::Reader::hasAssetLL() const { return !_reader.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS).isNull(); } inline bool TkAsset::Builder::hasAssetLL() { return !_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS).isNull(); } inline ::Nv::Blast::Serialization::Asset::Reader TkAsset::Reader::getAssetLL() const { return ::capnp::_::PointerHelpers< ::Nv::Blast::Serialization::Asset>::get(_reader.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS)); } inline ::Nv::Blast::Serialization::Asset::Builder TkAsset::Builder::getAssetLL() { return ::capnp::_::PointerHelpers< 
::Nv::Blast::Serialization::Asset>::get(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS)); } #if !CAPNP_LITE inline ::Nv::Blast::Serialization::Asset::Pipeline TkAsset::Pipeline::getAssetLL() { return ::Nv::Blast::Serialization::Asset::Pipeline(_typeless.getPointerField(0)); } #endif // !CAPNP_LITE inline void TkAsset::Builder::setAssetLL( ::Nv::Blast::Serialization::Asset::Reader value) { ::capnp::_::PointerHelpers< ::Nv::Blast::Serialization::Asset>::set(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS), value); } inline ::Nv::Blast::Serialization::Asset::Builder TkAsset::Builder::initAssetLL() { return ::capnp::_::PointerHelpers< ::Nv::Blast::Serialization::Asset>::init(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS)); } inline void TkAsset::Builder::adoptAssetLL( ::capnp::Orphan< ::Nv::Blast::Serialization::Asset>&& value) { ::capnp::_::PointerHelpers< ::Nv::Blast::Serialization::Asset>::adopt(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS), kj::mv(value)); } inline ::capnp::Orphan< ::Nv::Blast::Serialization::Asset> TkAsset::Builder::disownAssetLL() { return ::capnp::_::PointerHelpers< ::Nv::Blast::Serialization::Asset>::disown(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS)); } inline bool TkAsset::Reader::hasJointDescs() const { return !_reader.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS).isNull(); } inline bool TkAsset::Builder::hasJointDescs() { return !_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS).isNull(); } inline ::capnp::List< ::Nv::Blast::Serialization::TkAssetJointDesc>::Reader TkAsset::Reader::getJointDescs() const { return ::capnp::_::PointerHelpers< ::capnp::List< ::Nv::Blast::Serialization::TkAssetJointDesc>>::get(_reader.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS)); } inline ::capnp::List< ::Nv::Blast::Serialization::TkAssetJointDesc>::Builder TkAsset::Builder::getJointDescs() { return 
::capnp::_::PointerHelpers< ::capnp::List< ::Nv::Blast::Serialization::TkAssetJointDesc>>::get(_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS)); } inline void TkAsset::Builder::setJointDescs( ::capnp::List< ::Nv::Blast::Serialization::TkAssetJointDesc>::Reader value) { ::capnp::_::PointerHelpers< ::capnp::List< ::Nv::Blast::Serialization::TkAssetJointDesc>>::set(_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS), value); } inline ::capnp::List< ::Nv::Blast::Serialization::TkAssetJointDesc>::Builder TkAsset::Builder::initJointDescs(unsigned int size) { return ::capnp::_::PointerHelpers< ::capnp::List< ::Nv::Blast::Serialization::TkAssetJointDesc>>::init(_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS), size); } inline void TkAsset::Builder::adoptJointDescs( ::capnp::Orphan< ::capnp::List< ::Nv::Blast::Serialization::TkAssetJointDesc>>&& value) { ::capnp::_::PointerHelpers< ::capnp::List< ::Nv::Blast::Serialization::TkAssetJointDesc>>::adopt(_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS), kj::mv(value)); } inline ::capnp::Orphan< ::capnp::List< ::Nv::Blast::Serialization::TkAssetJointDesc>> TkAsset::Builder::disownJointDescs() { return ::capnp::_::PointerHelpers< ::capnp::List< ::Nv::Blast::Serialization::TkAssetJointDesc>>::disown(_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS)); } inline bool TkAssetJointDesc::Reader::hasNodeIndices() const { return !_reader.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS).isNull(); } inline bool TkAssetJointDesc::Builder::hasNodeIndices() { return !_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS).isNull(); } inline ::capnp::List< ::uint32_t>::Reader TkAssetJointDesc::Reader::getNodeIndices() const { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::get(_reader.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS)); } inline ::capnp::List< ::uint32_t>::Builder 
TkAssetJointDesc::Builder::getNodeIndices() { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::get(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS)); } inline void TkAssetJointDesc::Builder::setNodeIndices( ::capnp::List< ::uint32_t>::Reader value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::set(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS), value); } inline void TkAssetJointDesc::Builder::setNodeIndices(::kj::ArrayPtr<const ::uint32_t> value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::set(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS), value); } inline ::capnp::List< ::uint32_t>::Builder TkAssetJointDesc::Builder::initNodeIndices(unsigned int size) { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::init(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS), size); } inline void TkAssetJointDesc::Builder::adoptNodeIndices( ::capnp::Orphan< ::capnp::List< ::uint32_t>>&& value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::adopt(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS), kj::mv(value)); } inline ::capnp::Orphan< ::capnp::List< ::uint32_t>> TkAssetJointDesc::Builder::disownNodeIndices() { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::disown(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS)); } inline bool TkAssetJointDesc::Reader::hasAttachPositions() const { return !_reader.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS).isNull(); } inline bool TkAssetJointDesc::Builder::hasAttachPositions() { return !_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS).isNull(); } inline ::capnp::List< ::Nv::Blast::Serialization::NvVec3>::Reader TkAssetJointDesc::Reader::getAttachPositions() const { return ::capnp::_::PointerHelpers< ::capnp::List< ::Nv::Blast::Serialization::NvVec3>>::get(_reader.getPointerField( ::capnp::bounded<1>() * 
::capnp::POINTERS)); } inline ::capnp::List< ::Nv::Blast::Serialization::NvVec3>::Builder TkAssetJointDesc::Builder::getAttachPositions() { return ::capnp::_::PointerHelpers< ::capnp::List< ::Nv::Blast::Serialization::NvVec3>>::get(_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS)); } inline void TkAssetJointDesc::Builder::setAttachPositions( ::capnp::List< ::Nv::Blast::Serialization::NvVec3>::Reader value) { ::capnp::_::PointerHelpers< ::capnp::List< ::Nv::Blast::Serialization::NvVec3>>::set(_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS), value); } inline ::capnp::List< ::Nv::Blast::Serialization::NvVec3>::Builder TkAssetJointDesc::Builder::initAttachPositions(unsigned int size) { return ::capnp::_::PointerHelpers< ::capnp::List< ::Nv::Blast::Serialization::NvVec3>>::init(_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS), size); } inline void TkAssetJointDesc::Builder::adoptAttachPositions( ::capnp::Orphan< ::capnp::List< ::Nv::Blast::Serialization::NvVec3>>&& value) { ::capnp::_::PointerHelpers< ::capnp::List< ::Nv::Blast::Serialization::NvVec3>>::adopt(_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS), kj::mv(value)); } inline ::capnp::Orphan< ::capnp::List< ::Nv::Blast::Serialization::NvVec3>> TkAssetJointDesc::Builder::disownAttachPositions() { return ::capnp::_::PointerHelpers< ::capnp::List< ::Nv::Blast::Serialization::NvVec3>>::disown(_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS)); } inline float NvVec3::Reader::getX() const { return _reader.getDataField<float>( ::capnp::bounded<0>() * ::capnp::ELEMENTS); } inline float NvVec3::Builder::getX() { return _builder.getDataField<float>( ::capnp::bounded<0>() * ::capnp::ELEMENTS); } inline void NvVec3::Builder::setX(float value) { _builder.setDataField<float>( ::capnp::bounded<0>() * ::capnp::ELEMENTS, value); } inline float NvVec3::Reader::getY() const { return _reader.getDataField<float>( ::capnp::bounded<1>() 
* ::capnp::ELEMENTS); } inline float NvVec3::Builder::getY() { return _builder.getDataField<float>( ::capnp::bounded<1>() * ::capnp::ELEMENTS); } inline void NvVec3::Builder::setY(float value) { _builder.setDataField<float>( ::capnp::bounded<1>() * ::capnp::ELEMENTS, value); } inline float NvVec3::Reader::getZ() const { return _reader.getDataField<float>( ::capnp::bounded<2>() * ::capnp::ELEMENTS); } inline float NvVec3::Builder::getZ() { return _builder.getDataField<float>( ::capnp::bounded<2>() * ::capnp::ELEMENTS); } inline void NvVec3::Builder::setZ(float value) { _builder.setDataField<float>( ::capnp::bounded<2>() * ::capnp::ELEMENTS, value); } } // namespace } // namespace } // namespace #endif // CAPNP_INCLUDED_affe4498f275ee58_
20,761
C
37.448148
141
0.682193
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoringCommon/NvBlastExtAuthoringInternalCommon.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved. 
#ifndef NVBLASTINTERNALCOMMON_H #define NVBLASTINTERNALCOMMON_H #include "NvBlastExtAuthoringTypes.h" #include "NvBlastNvSharedHelpers.h" #include "NvBlastVolumeIntegrals.h" #include "NvVec2.h" #include "NvVec3.h" #include "NvPlane.h" #include "NvBounds3.h" #include "NvMath.h" #include <algorithm> namespace Nv { namespace Blast { /** Edge representation with index of parent facet */ struct EdgeWithParent { uint32_t s, e; // Starting and ending vertices uint32_t parent; // Parent facet index EdgeWithParent() : s(0), e(0), parent(0) {} EdgeWithParent(uint32_t s, uint32_t e, uint32_t p) : s(s), e(e), parent(p) {} }; /** Comparator for sorting edges according to parent facet number. */ struct EdgeComparator { bool operator()(const EdgeWithParent& a, const EdgeWithParent& b) const { if (a.parent == b.parent) { if (a.s == b.s) { return a.e < b.e; } else { return a.s < b.s; } } else { return a.parent < b.parent; } } }; inline bool operator<(const Edge& a, const Edge& b) { if (a.s == b.s) return a.e < b.e; else return a.s < b.s; } /** Vertex projection direction flag. */ enum ProjectionDirections { YZ_PLANE = 1 << 1, XY_PLANE = 1 << 2, ZX_PLANE = 1 << 3, // This is set when the dominant axis of the normal is negative // because when flattening to 2D the facet is viewed from the positive direction. // As a result, the winding order appears to flip if the normal is in the negative direction. OPPOSITE_WINDING = 1 << 4 }; /** Computes best direction to project points. 
*/ NV_FORCE_INLINE ProjectionDirections getProjectionDirection(const nvidia::NvVec3& normal) { float maxv = std::max(std::abs(normal.x), std::max(std::abs(normal.y), std::abs(normal.z))); ProjectionDirections retVal; if (maxv == std::abs(normal.x)) { retVal = YZ_PLANE; if (normal.x < 0) retVal = (ProjectionDirections)((int)retVal | (int)OPPOSITE_WINDING); return retVal; } if (maxv == std::abs(normal.y)) { retVal = ZX_PLANE; if (normal.y > 0) retVal = (ProjectionDirections)((int)retVal | (int)OPPOSITE_WINDING); return retVal; } retVal = XY_PLANE; if (normal.z < 0) retVal = (ProjectionDirections)((int)retVal | (int)OPPOSITE_WINDING); return retVal; } /** Computes point projected on given axis aligned plane. */ NV_FORCE_INLINE nvidia::NvVec2 getProjectedPoint(const nvidia::NvVec3& point, ProjectionDirections dir) { if (dir & YZ_PLANE) { return nvidia::NvVec2(point.y, point.z); } if (dir & ZX_PLANE) { return nvidia::NvVec2(point.x, point.z); } return nvidia::NvVec2(point.x, point.y); } NV_FORCE_INLINE nvidia::NvVec2 getProjectedPoint(const NvcVec3& point, ProjectionDirections dir) { return getProjectedPoint((const nvidia::NvVec3&)point, dir); } /** Computes point projected on given axis aligned plane, this method is polygon-winding aware. */ NV_FORCE_INLINE nvidia::NvVec2 getProjectedPointWithWinding(const nvidia::NvVec3& point, ProjectionDirections dir) { if (dir & YZ_PLANE) { if (dir & OPPOSITE_WINDING) { return nvidia::NvVec2(point.z, point.y); } else return nvidia::NvVec2(point.y, point.z); } if (dir & ZX_PLANE) { if (dir & OPPOSITE_WINDING) { return nvidia::NvVec2(point.z, point.x); } return nvidia::NvVec2(point.x, point.z); } if (dir & OPPOSITE_WINDING) { return nvidia::NvVec2(point.y, point.x); } return nvidia::NvVec2(point.x, point.y); } #define MAXIMUM_EXTENT 1000 * 1000 * 1000 #define BBOX_TEST_EPS 1e-5f /** Test fattened bounding box intersetion. 
*/ NV_INLINE bool weakBoundingBoxIntersection(const nvidia::NvBounds3& aBox, const nvidia::NvBounds3& bBox) { if (std::max(aBox.minimum.x, bBox.minimum.x) > std::min(aBox.maximum.x, bBox.maximum.x) + BBOX_TEST_EPS) return false; if (std::max(aBox.minimum.y, bBox.minimum.y) > std::min(aBox.maximum.y, bBox.maximum.y) + BBOX_TEST_EPS) return false; if (std::max(aBox.minimum.z, bBox.minimum.z) > std::min(aBox.maximum.z, bBox.maximum.z) + BBOX_TEST_EPS) return false; return true; } /** Test segment vs plane intersection. If segment intersects the plane true is returned. Point of intersection is written into 'result'. */ NV_INLINE bool getPlaneSegmentIntersection(const nvidia::NvPlane& pl, const nvidia::NvVec3& a, const nvidia::NvVec3& b, nvidia::NvVec3& result) { float div = (b - a).dot(pl.n); if (nvidia::NvAbs(div) < 0.0001f) { if (pl.contains(a)) { result = a; return true; } else { return false; } } float t = (-a.dot(pl.n) - pl.d) / div; if (t < 0.0f || t > 1.0f) { return false; } result = (b - a) * t + a; return true; } #define POS_COMPARISON_OFFSET 1e-5f #define NORM_COMPARISON_OFFSET 1e-3f /** Vertex comparator for vertex welding. */ template<bool splitUVs> struct VrtCompare { // This implements a "less than" function for vertices. 
// Vertices a and b are considered equivalent if !(a < b) && !(b < a) bool operator()(const Vertex& a, const Vertex& b) const { if (a.p.x + POS_COMPARISON_OFFSET < b.p.x) return true; if (a.p.x - POS_COMPARISON_OFFSET > b.p.x) return false; if (a.p.y + POS_COMPARISON_OFFSET < b.p.y) return true; if (a.p.y - POS_COMPARISON_OFFSET > b.p.y) return false; if (a.p.z + POS_COMPARISON_OFFSET < b.p.z) return true; if (a.p.z - POS_COMPARISON_OFFSET > b.p.z) return false; if (a.n.x + NORM_COMPARISON_OFFSET < b.n.x) return true; if (a.n.x - NORM_COMPARISON_OFFSET > b.n.x) return false; if (a.n.y + NORM_COMPARISON_OFFSET < b.n.y) return true; if (a.n.y - NORM_COMPARISON_OFFSET > b.n.y) return false; if (a.n.z + NORM_COMPARISON_OFFSET < b.n.z) return true; if (a.n.z - NORM_COMPARISON_OFFSET > b.n.z) return false; // This is not actually needed if (!splitUVs) if (!splitUVs) return false; if (a.uv[0].x + NORM_COMPARISON_OFFSET < b.uv[0].x) return true; if (a.uv[0].x - NORM_COMPARISON_OFFSET > b.uv[0].x) return false; if (a.uv[0].y + NORM_COMPARISON_OFFSET < b.uv[0].y) return true; if (a.uv[0].y - NORM_COMPARISON_OFFSET > b.uv[0].y) return false; // This is not actually needed return false; }; }; typedef VrtCompare<true> VrtComp; typedef VrtCompare<false> VrtCompNoUV; /** Vertex comparator for vertex welding (not accounts normal and uv parameters of vertice). 
*/ struct VrtPositionComparator { bool operator()(const NvcVec3& a, const NvcVec3& b) const { if (a.x + POS_COMPARISON_OFFSET < b.x) return true; if (a.x - POS_COMPARISON_OFFSET > b.x) return false; if (a.y + POS_COMPARISON_OFFSET < b.y) return true; if (a.y - POS_COMPARISON_OFFSET > b.y) return false; if (a.z + POS_COMPARISON_OFFSET < b.z) return true; if (a.z - POS_COMPARISON_OFFSET > b.z) return false; return false; }; bool operator()(const Vertex& a, const Vertex& b) const { return operator()(a.p, b.p); }; }; NV_INLINE float calculateCollisionHullVolumeAndCentroid(NvcVec3& centroid, const CollisionHull& hull) { class CollisionHullQuery { public: CollisionHullQuery(const CollisionHull& hull) : m_hull(hull) {} size_t faceCount() const { return (size_t)m_hull.polygonDataCount; } size_t vertexCount(size_t faceIndex) const { return (size_t)m_hull.polygonData[faceIndex].vertexCount; } NvcVec3 vertex(size_t faceIndex, size_t vertexIndex) const { return m_hull.points[m_hull.indices[m_hull.polygonData[faceIndex].indexBase + vertexIndex]]; } private: const CollisionHull& m_hull; }; return calculateMeshVolumeAndCentroid<CollisionHullQuery>(centroid, hull); } } // namespace Blast } // namespace Nv #endif
9,905
C
30.150943
133
0.639475
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoringCommon/NvBlastExtAuthoringAcceleratorImpl.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. 
#include "NvBlastExtAuthoringAcceleratorImpl.h" #include "NvBlastExtAuthoringMesh.h" #include "NvBlastExtAuthoringInternalCommon.h" #include "NvBlastGlobals.h" #include "NvBlastNvSharedHelpers.h" #include "NvCMath.h" namespace Nv { namespace Blast { DummyAccelerator::DummyAccelerator(int32_t count) : m_count(count) { m_current = 0; } void DummyAccelerator::release() { NVBLAST_DELETE(this, DummyAccelerator); } void DummyAccelerator::setState(const Vertex* pos, const Edge* ed, const Facet& fc) { m_current = 0; NV_UNUSED(pos); NV_UNUSED(ed); NV_UNUSED(fc); } void DummyAccelerator::setState(const NvcBounds3* bound) { m_current = 0; NV_UNUSED(bound); } void DummyAccelerator::setState(const NvcVec3& point) { m_current = 0; NV_UNUSED(point); } int32_t DummyAccelerator::getNextFacet() { if (m_current < m_count) { ++m_current; return m_current - 1; } else return -1; } Grid::Grid(int32_t resolution) : m_resolution(resolution) { /** Set up 3d grid */ m_r3 = resolution * resolution * resolution; m_spatialMap.resize(resolution * resolution * resolution); } void Grid::release() { NVBLAST_DELETE(this, Grid); } void Grid::setMesh(const Mesh* m) { nvidia::NvBounds3 bd = toNvShared(m->getBoundingBox()); m_mappedFacetCount = m->getFacetCount(); bd.fattenFast(0.001f); m_spos = fromNvShared(bd.minimum); m_deltas = { m_resolution / bd.getDimensions().x, m_resolution / bd.getDimensions().y, m_resolution / bd.getDimensions().z }; for (int32_t i = 0; i < m_r3; ++i) m_spatialMap[i].clear(); const float ofs = 0.001f; for (uint32_t fc = 0; fc < m->getFacetCount(); ++fc) { NvcBounds3 cfc = *m->getFacetBound(fc); int32_t is = (int32_t)std::max(0.f, (cfc.minimum.x - m_spos.x - ofs) * m_deltas.x); int32_t ie = (int32_t)std::max(0.f, (cfc.maximum.x - m_spos.x + ofs) * m_deltas.x); int32_t js = (int32_t)std::max(0.f, (cfc.minimum.y - m_spos.y - ofs) * m_deltas.y); int32_t je = (int32_t)std::max(0.f, (cfc.maximum.y - m_spos.y + ofs) * m_deltas.y); int32_t ks = (int32_t)std::max(0.f, (cfc.minimum.z - 
m_spos.z - ofs) * m_deltas.z); int32_t ke = (int32_t)std::max(0.f, (cfc.maximum.z - m_spos.z + ofs) * m_deltas.z); for (int32_t i = is; i < m_resolution && i <= ie; ++i) { for (int32_t j = js; j < m_resolution && j <= je; ++j) { for (int32_t k = ks; k < m_resolution && k <= ke; ++k) { m_spatialMap[(i * m_resolution + j) * m_resolution + k].push_back(fc); } } } } } GridAccelerator::GridAccelerator(Grid* grd) { m_grid = grd; m_alreadyGotValue = 0; m_alreadyGotFlag.resize(1 << 12); m_cellList.resize(1 << 12); m_pointCmdDir = 0; } void GridAccelerator::release() { NVBLAST_DELETE(this, GridAccelerator); } void GridAccelerator::setState(const Vertex* pos, const Edge* ed, const Facet& fc) { nvidia::NvBounds3 cfc(nvidia::NvBounds3::empty()); for (uint32_t v = 0; v < fc.edgesCount; ++v) { cfc.include(toNvShared(pos[ed[fc.firstEdgeNumber + v].s].p)); cfc.include(toNvShared(pos[ed[fc.firstEdgeNumber + v].e].p)); } setState(&fromNvShared(cfc)); } void GridAccelerator::setState(const NvcBounds3* facetBounding) { m_alreadyGotValue++; m_iteratorCell = -1; m_iteratorFacet = -1; m_gotCells = 0; NvcBounds3 cfc = *facetBounding; int32_t is = (int32_t)std::max(0.f, (cfc.minimum.x - m_grid->m_spos.x - 0.001f) * m_grid->m_deltas.x); int32_t ie = (int32_t)std::max(0.f, (cfc.maximum.x - m_grid->m_spos.x + 0.001f) * m_grid->m_deltas.x); int32_t js = (int32_t)std::max(0.f, (cfc.minimum.y - m_grid->m_spos.y - 0.001f) * m_grid->m_deltas.y); int32_t je = (int32_t)std::max(0.f, (cfc.maximum.y - m_grid->m_spos.y + 0.001f) * m_grid->m_deltas.y); int32_t ks = (int32_t)std::max(0.f, (cfc.minimum.z - m_grid->m_spos.z - 0.001f) * m_grid->m_deltas.z); int32_t ke = (int32_t)std::max(0.f, (cfc.maximum.z - m_grid->m_spos.z + 0.001f) * m_grid->m_deltas.z); for (int32_t i = is; i < m_grid->m_resolution && i <= ie; ++i) { for (int32_t j = js; j < m_grid->m_resolution && j <= je; ++j) { for (int32_t k = ks; k < m_grid->m_resolution && k <= ke; ++k) { int32_t id = (i * m_grid->m_resolution + j) * 
m_grid->m_resolution + k; if (!m_grid->m_spatialMap[id].empty()) { m_cellList[m_gotCells++] = id; } } } } if (m_gotCells != 0) { m_iteratorFacet = 0; m_iteratorCell = m_cellList[m_gotCells - 1]; m_gotCells--; } } void GridAccelerator::setPointCmpDirection(int32_t d) { m_pointCmdDir = d; } void GridAccelerator::setState(const NvcVec3& point) { m_alreadyGotValue++; m_iteratorCell = -1; m_iteratorFacet = -1; m_gotCells = 0; int32_t is = (int32_t)std::max(0.f, (point.x - m_grid->m_spos.x - 0.001f) * m_grid->m_deltas.x); int32_t ie = (int32_t)std::max(0.f, (point.x - m_grid->m_spos.x + 0.001f) * m_grid->m_deltas.x); int32_t js = (int32_t)std::max(0.f, (point.y - m_grid->m_spos.y - 0.001f) * m_grid->m_deltas.y); int32_t je = (int32_t)std::max(0.f, (point.y - m_grid->m_spos.y + 0.001f) * m_grid->m_deltas.y); int32_t ks = 0; int32_t ke = m_grid->m_resolution; switch (m_pointCmdDir) { case 1: ks = (int32_t)std::max(0.f, (point.z - m_grid->m_spos.z - 0.001f) * m_grid->m_deltas.z); break; case -1: ke = (int32_t)std::max(0.f, (point.z - m_grid->m_spos.z + 0.001f) * m_grid->m_deltas.z); } for (int32_t i = is; i < m_grid->m_resolution && i <= ie; ++i) { for (int32_t j = js; j < m_grid->m_resolution && j <= je; ++j) { for (int32_t k = ks; k <= ke && k < m_grid->m_resolution; ++k) { int32_t id = (i * m_grid->m_resolution + j) * m_grid->m_resolution + k; if (!m_grid->m_spatialMap[id].empty()) { m_cellList[m_gotCells++] = id; } } } } if (m_gotCells != 0) { m_iteratorFacet = 0; m_iteratorCell = m_cellList[m_gotCells - 1]; m_gotCells--; } } int32_t GridAccelerator::getNextFacet() { int32_t facetId = -1; while (m_iteratorCell != -1) { if (m_iteratorFacet >= (int32_t)m_grid->m_spatialMap[m_iteratorCell].size()) { if (m_gotCells != 0) { m_iteratorCell = m_cellList[m_gotCells - 1]; m_gotCells--; m_iteratorFacet = 0; } else { m_iteratorCell = -1; break; } } if (m_alreadyGotFlag[m_grid->m_spatialMap[m_iteratorCell][m_iteratorFacet]] != m_alreadyGotValue) { facetId = 
m_grid->m_spatialMap[m_iteratorCell][m_iteratorFacet]; m_iteratorFacet++; break; } else { m_iteratorFacet++; } } if (facetId != -1) { m_alreadyGotFlag[facetId] = m_alreadyGotValue; } return facetId; } BBoxBasedAccelerator::BBoxBasedAccelerator(const Mesh* mesh, int32_t resolution) : m_resolution(resolution), m_alreadyGotValue(1) { m_bounds = mesh->getBoundingBox(); m_spatialMap.resize(resolution * resolution * resolution); m_cells.resize(resolution * resolution * resolution); int32_t currentCell = 0; NvcVec3 incr = (m_bounds.maximum - m_bounds.minimum) * (1.0f / m_resolution); for (int32_t z = 0; z < resolution; ++z) { for (int32_t y = 0; y < resolution; ++y) { for (int32_t x = 0; x < resolution; ++x) { m_cells[currentCell].minimum.x = m_bounds.minimum.x + x * incr.x; m_cells[currentCell].minimum.y = m_bounds.minimum.y + y * incr.y; m_cells[currentCell].minimum.z = m_bounds.minimum.z + z * incr.z; m_cells[currentCell].maximum.x = m_bounds.minimum.x + (x + 1) * incr.x; m_cells[currentCell].maximum.y = m_bounds.minimum.y + (y + 1) * incr.y; m_cells[currentCell].maximum.z = m_bounds.minimum.z + (z + 1) * incr.z; ++currentCell; } } } m_cellList.resize(1 << 16); m_gotCells = 0; buildAccelStructure(mesh->getVertices(), mesh->getEdges(), mesh->getFacetsBuffer(), mesh->getFacetCount()); } void BBoxBasedAccelerator::release() { NVBLAST_DELETE(this, BBoxBasedAccelerator); } BBoxBasedAccelerator::~BBoxBasedAccelerator() { m_resolution = 0; toNvShared(m_bounds).setEmpty(); m_spatialMap.clear(); m_cells.clear(); m_cellList.clear(); } int32_t BBoxBasedAccelerator::getNextFacet() { int32_t facetId = -1; while (m_iteratorCell != -1) { if (m_iteratorFacet >= (int32_t)m_spatialMap[m_iteratorCell].size()) { if (m_gotCells != 0) { m_iteratorCell = m_cellList[m_gotCells - 1]; m_gotCells--; m_iteratorFacet = 0; } else { m_iteratorCell = -1; break; } } if (m_alreadyGotFlag[m_spatialMap[m_iteratorCell][m_iteratorFacet]] != m_alreadyGotValue) { facetId = 
m_spatialMap[m_iteratorCell][m_iteratorFacet]; m_iteratorFacet++; break; } else { m_iteratorFacet++; } } if (facetId != -1) { m_alreadyGotFlag[facetId] = m_alreadyGotValue; } return facetId; } void BBoxBasedAccelerator::setState(const Vertex* pos, const Edge* ed, const Facet& fc) { nvidia::NvBounds3 cfc(nvidia::NvBounds3::empty()); for (uint32_t v = 0; v < fc.edgesCount; ++v) { cfc.include(toNvShared(pos[ed[fc.firstEdgeNumber + v].s].p)); cfc.include(toNvShared(pos[ed[fc.firstEdgeNumber + v].e].p)); } setState(&fromNvShared(cfc)); } void BBoxBasedAccelerator::setState(const NvcBounds3* facetBox) { m_alreadyGotValue++; m_iteratorCell = -1; m_iteratorFacet = -1; m_gotCells = 0; for (uint32_t i = 0; i < m_cells.size(); ++i) { if (weakBoundingBoxIntersection(toNvShared(m_cells[i]), *toNvShared(facetBox))) { if (!m_spatialMap[i].empty()) m_cellList[m_gotCells++] = i; } } if (m_gotCells != 0) { m_iteratorFacet = 0; m_iteratorCell = m_cellList[m_gotCells - 1]; m_gotCells--; } } void BBoxBasedAccelerator::setState(const NvcVec3& p) { m_alreadyGotValue++; m_iteratorCell = -1; m_iteratorFacet = -1; m_gotCells = 0; int32_t perSlice = m_resolution * m_resolution; for (uint32_t i = 0; i < m_cells.size(); ++i) { if (toNvShared(m_cells[i]).contains(toNvShared(p))) { int32_t xyCellId = i % perSlice; for (int32_t zCell = 0; zCell < m_resolution; ++zCell) { int32_t cell = zCell * perSlice + xyCellId; if (!m_spatialMap[cell].empty()) m_cellList[m_gotCells++] = cell; } } } if (m_gotCells != 0) { m_iteratorFacet = 0; m_iteratorCell = m_cellList[m_gotCells - 1]; m_gotCells--; } } void BBoxBasedAccelerator::buildAccelStructure(const Vertex* pos, const Edge* edges, const Facet* fc, int32_t facetCount) { for (int32_t facet = 0; facet < facetCount; ++facet) { nvidia::NvBounds3 bBox; bBox.setEmpty(); const Edge* edge = &edges[0] + fc->firstEdgeNumber; int32_t count = fc->edgesCount; for (int32_t ec = 0; ec < count; ++ec) { bBox.include(toNvShared(pos[edge->s].p)); 
bBox.include(toNvShared(pos[edge->e].p)); edge++; } for (uint32_t i = 0; i < m_cells.size(); ++i) { if (weakBoundingBoxIntersection(toNvShared(m_cells[i]), bBox)) { m_spatialMap[i].push_back(facet); } } fc++; } m_alreadyGotFlag.resize(facetCount, 0); } #define SWEEP_RESOLUTION 2048 void buildIndex(std::vector<SegmentToIndex>& segm, float offset, float mlt, std::vector<std::vector<uint32_t>>& blocks) { std::set<uint32_t> currentEnabled; uint32_t lastBlock = 0; for (uint32_t i = 0; i < segm.size(); ++i) { uint32_t currentBlock = (uint32_t)((segm[i].coord - offset) * mlt); if (currentBlock >= SWEEP_RESOLUTION) break; if (currentBlock != lastBlock) { for (uint32_t j = lastBlock + 1; j <= currentBlock; ++j) { for (auto id : currentEnabled) blocks[j].push_back(id); } lastBlock = currentBlock; } if (segm[i].end == false) { blocks[lastBlock].push_back(segm[i].index); currentEnabled.insert(segm[i].index); } else { currentEnabled.erase(segm[i].index); } } } SweepingAccelerator::SweepingAccelerator(const Nv::Blast::Mesh* in) { nvidia::NvBounds3 bnd; const Vertex* verts = in->getVertices(); const Edge* edges = in->getEdges(); m_facetCount = in->getFacetCount(); m_foundx.resize(m_facetCount, 0); m_foundy.resize(m_facetCount, 0); std::vector<SegmentToIndex> xevs; std::vector<SegmentToIndex> yevs; std::vector<SegmentToIndex> zevs; for (uint32_t i = 0; i < in->getFacetCount(); ++i) { const Facet* fc = in->getFacet(i); bnd.setEmpty(); for (uint32_t v = 0; v < fc->edgesCount; ++v) { bnd.include(toNvShared(verts[edges[v + fc->firstEdgeNumber].s].p)); } bnd.scaleFast(1.1f); xevs.push_back(SegmentToIndex(bnd.minimum.x, i, false)); xevs.push_back(SegmentToIndex(bnd.maximum.x, i, true)); yevs.push_back(SegmentToIndex(bnd.minimum.y, i, false)); yevs.push_back(SegmentToIndex(bnd.maximum.y, i, true)); zevs.push_back(SegmentToIndex(bnd.minimum.z, i, false)); zevs.push_back(SegmentToIndex(bnd.maximum.z, i, true)); } std::sort(xevs.begin(), xevs.end()); std::sort(yevs.begin(), yevs.end()); 
std::sort(zevs.begin(), zevs.end()); m_minimal.x = xevs[0].coord; m_minimal.y = yevs[0].coord; m_minimal.z = zevs[0].coord; m_maximal.x = xevs.back().coord; m_maximal.y = yevs.back().coord; m_maximal.z = zevs.back().coord; m_rescale = (m_maximal - m_minimal) * 1.01f; m_rescale.x = 1.0f / m_rescale.x * SWEEP_RESOLUTION; m_rescale.y = 1.0f / m_rescale.y * SWEEP_RESOLUTION; m_rescale.z = 1.0f / m_rescale.z * SWEEP_RESOLUTION; m_xSegm.resize(SWEEP_RESOLUTION); m_ySegm.resize(SWEEP_RESOLUTION); m_zSegm.resize(SWEEP_RESOLUTION); buildIndex(xevs, m_minimal.x, m_rescale.x, m_xSegm); buildIndex(yevs, m_minimal.y, m_rescale.y, m_ySegm); buildIndex(zevs, m_minimal.z, m_rescale.z, m_zSegm); m_iterId = 1; m_current = 0; } void SweepingAccelerator::release() { NVBLAST_DELETE(this, SweepingAccelerator); } void SweepingAccelerator::setState(const NvcBounds3* facetBounds) { m_current = 0; m_indices.clear(); nvidia::NvBounds3 bnd = *toNvShared(facetBounds); bnd.scaleFast(1.1f); uint32_t start = (uint32_t)((std::max(0.0f, bnd.minimum.x - m_minimal.x)) * m_rescale.x); uint32_t end = (uint32_t)((std::max(0.0f, bnd.maximum.x - m_minimal.x)) * m_rescale.x); for (uint32_t i = start; i <= end && i < SWEEP_RESOLUTION; ++i) { for (auto id : m_xSegm[i]) { m_foundx[id] = m_iterId; } } start = (uint32_t)((std::max(0.0f, bnd.minimum.y - m_minimal.y)) * m_rescale.y); end = (uint32_t)((std::max(0.0f, bnd.maximum.y - m_minimal.y)) * m_rescale.y); for (uint32_t i = start; i <= end && i < SWEEP_RESOLUTION; ++i) { for (auto id : m_ySegm[i]) { m_foundy[id] = m_iterId; } } start = (uint32_t)((std::max(0.0f, bnd.minimum.z - m_minimal.z)) * m_rescale.z); end = (uint32_t)((std::max(0.0f, bnd.maximum.z - m_minimal.z)) * m_rescale.z); for (uint32_t i = start; i <= end && i < SWEEP_RESOLUTION; ++i) { for (auto id : m_zSegm[i]) { if (m_foundy[id] == m_iterId && m_foundx[id] == m_iterId) { m_foundx[id] = m_iterId + 1; m_foundy[id] = m_iterId + 1; m_indices.push_back(id); } } } m_iterId += 2; } void 
SweepingAccelerator::setState(const Vertex* pos, const Edge* ed, const Facet& fc) { nvidia::NvBounds3 cfc(nvidia::NvBounds3::empty()); for (uint32_t v = 0; v < fc.edgesCount; ++v) { cfc.include(toNvShared(pos[ed[fc.firstEdgeNumber + v].s].p)); cfc.include(toNvShared(pos[ed[fc.firstEdgeNumber + v].e].p)); } setState(&fromNvShared(cfc)); } void SweepingAccelerator::setState(const NvcVec3& point) { m_indices.clear(); /*for (uint32_t i = 0; i < facetCount; ++i) { indices.push_back(i); }*/ uint32_t yIndex = (uint32_t)((point.y - m_minimal.y) * m_rescale.y); uint32_t xIndex = (uint32_t)((point.x - m_minimal.x) * m_rescale.x); for (uint32_t i = 0; i < m_xSegm[xIndex].size(); ++i) { m_foundx[m_xSegm[xIndex][i]] = m_iterId; } for (uint32_t i = 0; i < m_ySegm[yIndex].size(); ++i) { if (m_foundx[m_ySegm[yIndex][i]] == m_iterId) { m_indices.push_back(m_ySegm[yIndex][i]); } } m_iterId++; m_current = 0; NV_UNUSED(point); } int32_t SweepingAccelerator::getNextFacet() { if (static_cast<uint32_t>(m_current) < m_indices.size()) { ++m_current; return m_indices[m_current - 1]; } else return -1; } } // namespace Blast } // namespace Nv
19,981
C++
28.602963
129
0.563285
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoringCommon/NvBlastExtAuthoringAcceleratorImpl.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #ifndef NVBLASTEXTAUTHORINGACCELERATORIMPL_H #define NVBLASTEXTAUTHORINGACCELERATORIMPL_H #include <set> #include <vector> #include "NvBlastExtAuthoringAccelerator.h" namespace Nv { namespace Blast { class Mesh; /** Dummy accelerator iterates through all facets of mesh. */ class DummyAccelerator : public SpatialAccelerator { public: /** \param[in] count Mesh facets count for which accelerator should be built. 
*/ DummyAccelerator(int32_t count); virtual void release() override; virtual void setState(const NvcBounds3* bounds) override; virtual void setState(const Vertex* pos, const Edge* ed, const Facet& fc) override; virtual void setState(const NvcVec3& point) override; virtual int32_t getNextFacet() override; virtual void setPointCmpDirection(int32_t dir) override { NV_UNUSED(dir); } private: int32_t m_count; int32_t m_current; }; struct SegmentToIndex { float coord; uint32_t index; bool end; SegmentToIndex(float c, uint32_t i, bool end) : coord(c), index(i), end(end) {} bool operator<(const SegmentToIndex& in) const { if (coord < in.coord) return true; if (coord > in.coord) return false; return end < in.end; } }; class Grid : public SpatialGrid { public: friend class GridAccelerator; Grid(int32_t resolution); virtual void release() override; virtual void setMesh(const Nv::Blast::Mesh* m) override; private: int32_t m_resolution; int32_t m_r3; int32_t m_mappedFacetCount; NvcVec3 m_spos; NvcVec3 m_deltas; std::vector< std::vector<int32_t> > m_spatialMap; }; class GridAccelerator : public SpatialAccelerator // Iterator to traverse the grid { public: GridAccelerator(Grid* grd); virtual void release() override; virtual void setState(const NvcBounds3* bounds) override; virtual void setState(const Vertex* pos, const Edge* ed, const Facet& fc) override; virtual void setState(const NvcVec3& point) override; virtual int32_t getNextFacet() override; virtual void setPointCmpDirection(int32_t dir) override; private: Grid* m_grid; // Iterator data std::vector<uint32_t> m_alreadyGotFlag; uint32_t m_alreadyGotValue; std::vector<int32_t> m_cellList; int32_t m_gotCells; int32_t m_iteratorCell; int32_t m_iteratorFacet; int32_t m_pointCmdDir; }; class SweepingAccelerator : public SpatialAccelerator { public: /** \param[in] count Mesh facets count for which accelerator should be built. 
*/ SweepingAccelerator(const Nv::Blast::Mesh* in); virtual void release() override; virtual void setState(const Vertex* pos, const Edge* ed, const Facet& fc) override; virtual void setState(const NvcBounds3* bounds) override; virtual void setState(const NvcVec3& point) override; virtual int32_t getNextFacet() override; virtual void setPointCmpDirection(int32_t dir) override { NV_UNUSED(dir); } private: /* For fast point test. */ std::vector<std::vector<uint32_t> > m_xSegm; std::vector<std::vector<uint32_t> > m_ySegm; std::vector<std::vector<uint32_t> > m_zSegm; std::vector<uint32_t> m_indices; std::vector<uint32_t> m_foundx; std::vector<uint32_t> m_foundy; uint32_t m_iterId; int32_t m_current; uint32_t m_facetCount; NvcVec3 m_minimal; NvcVec3 m_maximal; NvcVec3 m_rescale; }; /** Accelerator which builds map from 3d grid to initial mesh facets. To find all facets which possibly intersect given one, it return all facets which are pointed by grid cells, which intersects with bounding box of given facet. To find all facets which possibly cover given point, all facets which are pointed by cells in column which contains given point are returned. */ class BBoxBasedAccelerator : public SpatialAccelerator { public: /** \param[in] mesh Mesh for which acceleration structure should be built. \param[in] resolution Resolution on 3d grid. 
*/ BBoxBasedAccelerator(const Mesh* mesh, int32_t resolution); virtual ~BBoxBasedAccelerator(); virtual void release() override; virtual int32_t getNextFacet() override; virtual void setState(const Vertex* pos, const Edge* ed, const Facet& fc) override; virtual void setState(const NvcBounds3* bounds) override; virtual void setState(const NvcVec3& p) override; virtual void setPointCmpDirection(int32_t dir) override { NV_UNUSED(dir); } private: void buildAccelStructure(const Vertex* pos, const Edge* edges, const Facet* fc, int32_t facetCount); int32_t m_resolution; NvcBounds3 m_bounds; std::vector< std::vector<int32_t> > m_spatialMap; std::vector<NvcBounds3> m_cells; // Iterator data std::vector<uint32_t> m_alreadyGotFlag; uint32_t m_alreadyGotValue; std::vector<int32_t> m_cellList; int32_t m_gotCells; int32_t m_iteratorCell; int32_t m_iteratorFacet; }; } // namespace Blast } // namsepace Nv #endif // ifndef NVBLASTEXTAUTHORINGACCELERATORIMPL_H
7,801
C
35.12037
171
0.610563
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoringCommon/NvBlastExtAuthoringMeshImpl.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #ifndef NVBLASTAUTHORINGMESHIMPL_H #define NVBLASTAUTHORINGMESHIMPL_H #include "NvBlastExtAuthoringMesh.h" #include "NvBounds3.h" #include <vector> #include <map> #include <set> namespace Nv { namespace Blast { /** Class for internal mesh representation */ class MeshImpl : public Mesh { public: /** Constructs mesh object from array of triangles. 
\param[in] position Array of vertex positions \param[in] normals Array of vertex normals \param[in] uv Array of vertex uv coordinates \param[in] verticesCount Vertices count \param[in] indices Array of vertex indices. Indices contain vertex index triplets which form a mesh triangle. \param[in] indicesCount Indices count (should be equal to numberOfTriangles * 3) */ MeshImpl(const NvcVec3* position, const NvcVec3* normals, const NvcVec2* uv, uint32_t verticesCount, const uint32_t* indices, uint32_t indicesCount); /** Constructs mesh object from array of facets. \param[in] vertices Array of vertices \param[in] edges Array of edges \param[in] facets Array of facets \param[in] posCount Vertices count \param[in] edgesCount Edges count \param[in] facetsCount Facets count */ MeshImpl(const Vertex* vertices, const Edge* edges, const Facet* facets, uint32_t posCount, uint32_t edgesCount, uint32_t facetsCount); MeshImpl(const Vertex* vertices, uint32_t count); MeshImpl(const Vertex* vertices, uint32_t count, uint32_t* indices, uint32_t indexCount, void* materials, uint32_t materialStride); ~MeshImpl(); virtual void release() override; /** Return true if mesh is valid */ bool isValid() const override; /** Return pointer on vertices array */ Vertex* getVerticesWritable() override; /** Return pointer on edges array */ Edge* getEdgesWritable() override; /** Return pointer on facets array */ Facet* getFacetsBufferWritable() override; /** Return pointer on vertices array */ const Vertex* getVertices() const override; /** Return pointer on edges array */ const Edge* getEdges() const override; /** Return pointer on facets array */ const Facet* getFacetsBuffer() const override; /** Return writable pointer on specified facet */ Facet* getFacetWritable(int32_t facet) override; /** Return writable pointer on specified facet */ const Facet* getFacet(int32_t facet) const override; /** Return edges count */ uint32_t getEdgesCount() const override; /** Return vertices count */ uint32_t 
getVerticesCount() const override; /** Return facet count */ uint32_t getFacetCount() const override; /** Return reference on mesh bounding box. */ const NvcBounds3& getBoundingBox() const override; /** Return writable reference on mesh bounding box. */ NvcBounds3& getBoundingBoxWritable() override; /** Recalculate bounding box */ void recalculateBoundingBox() override; /** Compute mesh volume and centroid. Assumes mesh has outward normals and no holes. */ float getMeshVolumeAndCentroid(NvcVec3& centroid) const override; /** Set per-facet material id. */ void setMaterialId(const int32_t* materialIds) override; /** Replaces an material id on faces with a new one */ void replaceMaterialId(int32_t oldMaterialId, int32_t newMaterialId) override; /** Set per-facet smoothing group. */ void setSmoothingGroup(const int32_t* smoothingGroups) override; /** Calculate per-facet bounding boxes. */ virtual void calcPerFacetBounds() override; /** Get pointer on facet bounding box, if not calculated return nullptr. */ virtual const NvcBounds3* getFacetBound(uint32_t index) const override; private: std::vector<Vertex> mVertices; std::vector<Edge> mEdges; std::vector<Facet> mFacets; nvidia::NvBounds3 mBounds; std::vector<nvidia::NvBounds3> mPerFacetBounds; }; } // namespace Blast } // namespace Nv #endif // ifndef NVBLASTAUTHORINGMESHIMPL_H
6,150
C
30.22335
153
0.657724
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoringCommon/NvBlastExtAuthoringMeshImpl.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. 
#define _CRT_SECURE_NO_WARNINGS #include "NvBlastExtAuthoringMeshImpl.h" #include "NvBlastExtAuthoringTypes.h" #include <NvBlastAssert.h> #include "NvMath.h" #include <NvBlastNvSharedHelpers.h> #include <NvBlastVolumeIntegrals.h> #include <cmath> #include <string.h> #include <vector> #include <algorithm> namespace Nv { namespace Blast { MeshImpl::MeshImpl(const NvcVec3* position, const NvcVec3* normals, const NvcVec2* uv, uint32_t verticesCount, const uint32_t* indices, uint32_t indicesCount) { mVertices.resize(verticesCount); for (uint32_t i = 0; i < mVertices.size(); ++i) { mVertices[i].p = position[i]; } if (normals != 0) { for (uint32_t i = 0; i < mVertices.size(); ++i) { mVertices[i].n = normals[i]; } } else { for (uint32_t i = 0; i < mVertices.size(); ++i) { mVertices[i].n = {0, 0, 0}; } } if (uv != 0) { for (uint32_t i = 0; i < mVertices.size(); ++i) { mVertices[i].uv[0] = uv[i]; } } else { for (uint32_t i = 0; i < mVertices.size(); ++i) { mVertices[i].uv[0] = {0, 0}; } } mEdges.resize(indicesCount); mFacets.resize(indicesCount / 3); int32_t facetId = 0; for (uint32_t i = 0; i < indicesCount; i += 3) { mEdges[i].s = indices[i]; mEdges[i].e = indices[i + 1]; mEdges[i + 1].s = indices[i + 1]; mEdges[i + 1].e = indices[i + 2]; mEdges[i + 2].s = indices[i + 2]; mEdges[i + 2].e = indices[i]; mFacets[facetId].firstEdgeNumber = i; mFacets[facetId].edgesCount = 3; mFacets[facetId].materialId = 0; //Unassigned for now mFacets[facetId].smoothingGroup = -1; facetId++; } recalculateBoundingBox(); } MeshImpl::MeshImpl(const Vertex* vertices, const Edge* edges, const Facet* facets, uint32_t posCount, uint32_t edgesCount, uint32_t facetsCount) { mVertices.resize(posCount); mEdges.resize(edgesCount); mFacets.resize(facetsCount); memcpy(mVertices.data(), vertices, sizeof(Vertex) * posCount); memcpy(mEdges.data(), edges, sizeof(Edge) * edgesCount); memcpy(mFacets.data(), facets, sizeof(Facet) * facetsCount); recalculateBoundingBox(); } MeshImpl::MeshImpl(const Vertex* 
vertices, uint32_t count) { mVertices = std::vector<Vertex>(vertices, vertices + count); mEdges.resize(count); mFacets.resize(count / 3); uint32_t vp = 0; for (uint32_t i = 0; i < count; i += 3) { mEdges[i].s = vp; mEdges[i].e = vp + 1; mEdges[i + 1].s = vp + 1; mEdges[i + 1].e = vp + 2; mEdges[i + 2].s = vp + 2; mEdges[i + 2].e = vp; vp += 3; } for (uint32_t i = 0; i < count / 3; ++i) { mFacets[i].edgesCount = 3; mFacets[i].firstEdgeNumber = i * 3; } recalculateBoundingBox(); } MeshImpl::MeshImpl(const Vertex* vertices, uint32_t count, uint32_t* indices, uint32_t indexCount, void* materials, uint32_t materialStride) { mVertices = std::vector<Vertex>(vertices, vertices + count); mEdges.resize(indexCount); mFacets.resize(indexCount / 3); for (uint32_t i = 0; i < indexCount; i += 3) { mEdges[i].s = indices[i]; mEdges[i].e = indices[i + 1]; mEdges[i + 1].s = indices[i + 1]; mEdges[i + 1].e = indices[i + 2]; mEdges[i + 2].s = indices[i + 2]; mEdges[i + 2].e = indices[i]; } for (uint32_t i = 0; i < indexCount / 3; ++i) { mFacets[i].edgesCount = 3; mFacets[i].firstEdgeNumber = i * 3; mFacets[i].userData = 0; if (materials != nullptr) { mFacets[i].materialId = *(uint32_t*)((uint8_t*)materials + i * materialStride); } } recalculateBoundingBox(); } float MeshImpl::getMeshVolumeAndCentroid(NvcVec3& centroid) const { class MeshImplQuery { public: MeshImplQuery(const MeshImpl& mesh) : m_mesh(mesh) {} size_t faceCount() const { return (size_t)m_mesh.getFacetCount(); } size_t vertexCount(size_t faceIndex) const { return (size_t)m_mesh.getFacet((int32_t)faceIndex)->edgesCount; } NvcVec3 vertex(size_t faceIndex, size_t vertexIndex) const { const Nv::Blast::Facet* facet = m_mesh.getFacet(faceIndex); return m_mesh.getVertices()[m_mesh.getEdges()[facet->firstEdgeNumber + vertexIndex].s].p; } const MeshImpl& m_mesh; }; return calculateMeshVolumeAndCentroid<MeshImplQuery>(centroid, *this); } uint32_t MeshImpl::getFacetCount() const { return static_cast<uint32_t>(mFacets.size()); } 
Vertex* MeshImpl::getVerticesWritable() { return mVertices.data(); } Edge* MeshImpl::getEdgesWritable() { return mEdges.data(); } const Vertex* MeshImpl::getVertices() const { return mVertices.data(); } const Edge* MeshImpl::getEdges() const { return mEdges.data(); } uint32_t MeshImpl::getEdgesCount() const { return static_cast<uint32_t>(mEdges.size()); } uint32_t MeshImpl::getVerticesCount() const { return static_cast<uint32_t>(mVertices.size()); } Facet* MeshImpl::getFacetsBufferWritable() { return mFacets.data(); } const Facet* MeshImpl::getFacetsBuffer() const { return mFacets.data(); } Facet* MeshImpl::getFacetWritable(int32_t facet) { return &mFacets[facet]; } const Facet* MeshImpl::getFacet(int32_t facet) const { return &mFacets[facet]; } MeshImpl::~MeshImpl() { } void MeshImpl::release() { delete this; } const NvcBounds3& MeshImpl::getBoundingBox() const { return fromNvShared(mBounds); } NvcBounds3& MeshImpl::getBoundingBoxWritable() { return fromNvShared(mBounds); } void MeshImpl::recalculateBoundingBox() { mBounds.setEmpty(); for (uint32_t i = 0; i < mVertices.size(); ++i) { mBounds.include(toNvShared(mVertices[i].p)); } calcPerFacetBounds(); } const NvcBounds3* MeshImpl::getFacetBound(uint32_t index) const { if (mPerFacetBounds.empty()) { return nullptr; } return &fromNvShared(mPerFacetBounds[index]); } void MeshImpl::calcPerFacetBounds() { mPerFacetBounds.resize(mFacets.size()); for (uint32_t i = 0; i < mFacets.size(); ++i) { auto& fb = mPerFacetBounds[i]; fb.setEmpty(); for (uint32_t v = 0; v < mFacets[i].edgesCount; ++v) { fb.include(toNvShared(mVertices[mEdges[mFacets[i].firstEdgeNumber + v].s].p)); fb.include(toNvShared(mVertices[mEdges[mFacets[i].firstEdgeNumber + v].e].p)); } } } void MeshImpl::setMaterialId(const int32_t* materialId) { if (materialId != nullptr) { for (uint32_t i = 0; i < mFacets.size(); ++i) { mFacets[i].materialId = *materialId; ++materialId; } } } bool MeshImpl::isValid() const { return mVertices.size() > 0 && mEdges.size() > 
0 && mFacets.size() > 0; } void MeshImpl::replaceMaterialId(int32_t oldMaterialId, int32_t newMaterialId) { for (uint32_t i = 0; i < mFacets.size(); ++i) { if (mFacets[i].materialId == oldMaterialId) { mFacets[i].materialId = newMaterialId; } } } void MeshImpl::setSmoothingGroup(const int32_t* smoothingGroups) { if (smoothingGroups != nullptr) { for (uint32_t i = 0; i < mFacets.size(); ++i) { mFacets[i].smoothingGroup = *smoothingGroups; ++smoothingGroups; } } } } // namespace Blast } // namespace Nv
9,218
C++
25.34
144
0.622152
NVIDIA-Omniverse/PhysX/blast/source/sdk/toolkit/NvBlastTkActorImpl.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. 
#include "NvPreprocessor.h"
#include "NvBlastTkFrameworkImpl.h"
#include "NvBlastTkActorImpl.h"
#include "NvBlastTkGroupImpl.h"
#include "NvBlastTkAssetImpl.h"
#include "NvBlastTkFamilyImpl.h"
#include "NvBlastTkJointImpl.h"

#include "NvBlast.h"
#include "NvBlastAssert.h"
#include "NvBlastMemory.h"

namespace Nv
{
namespace Blast
{

// Creates the first actor of a new family from the given descriptor.
// Allocates the family, creates the low-level first actor, wires up any internal
// joints declared by the asset, and marks the actor damaged so the first split runs.
// Returns nullptr if the low-level actor could not be created.
TkActorImpl* TkActorImpl::create(const TkActorDesc& desc)
{
    const TkAssetImpl* asset = static_cast<const TkAssetImpl*>(desc.asset);
    TkFamilyImpl* family = TkFamilyImpl::create(asset);
    NvBlastFamily* familyLL = family->getFamilyLLInternal();
    Array<char>::type scratch((uint32_t)NvBlastFamilyGetRequiredScratchForCreateFirstActor(familyLL, logLL));
    NvBlastActor* actorLL = NvBlastFamilyCreateFirstActor(familyLL, &desc, scratch.begin(), logLL);
    if (actorLL == nullptr)
    {
        NVBLAST_LOG_ERROR("TkActorImpl::create: low-level actor could not be created.");
        return nullptr;
    }
    TkActorImpl* actor = family->addActor(actorLL);
    if (actor != nullptr)
    {
        // Add internal joints
        const uint32_t internalJointCount = asset->getJointDescCountInternal();
        const TkAssetJointDesc* jointDescs = asset->getJointDescsInternal();
        const NvBlastSupportGraph graph = asset->getGraph();
        TkJointImpl* joints = family->getInternalJoints();
        for (uint32_t jointNum = 0; jointNum < internalJointCount; ++jointNum)
        {
            const TkAssetJointDesc& assetJointDesc = jointDescs[jointNum];
            NVBLAST_ASSERT(assetJointDesc.nodeIndices[0] < graph.nodeCount && assetJointDesc.nodeIndices[1] < graph.nodeCount);
            TkJointDesc jointDesc;
            jointDesc.families[0] = jointDesc.families[1] = family;
            jointDesc.chunkIndices[0] = graph.chunkIndices[assetJointDesc.nodeIndices[0]];
            jointDesc.chunkIndices[1] = graph.chunkIndices[assetJointDesc.nodeIndices[1]];
            jointDesc.attachPositions[0] = assetJointDesc.attachPositions[0];
            jointDesc.attachPositions[1] = assetJointDesc.attachPositions[1];
            // Internal joint storage is preallocated by the family; construct in place.
            TkJointImpl* joint = new (joints + jointNum) TkJointImpl(jointDesc, family);
            actor->addJoint(joint->m_links[0]);
        }
        // Mark as damaged to trigger first split call. It could be the case that asset is already split into few actors initially.
        actor->markAsDamaged();
    }
    return actor;
}

//////// Member functions ////////

TkActorImpl::TkActorImpl()
    : m_actorLL(nullptr)
    , m_family(nullptr)
    , m_group(nullptr)
    , m_groupJobIndex(invalidIndex<uint32_t>())
    , m_flags(0)
    , m_jointCount(0)
{
#if NV_PROFILE
    NvBlastTimersReset(&m_timers);
#endif
}

TkActorImpl::~TkActorImpl()
{
}

// Tears the actor down: detaches all joints, removes it from its group and family,
// deactivates the low-level actor, and flushes pending family events if this was
// the last actor in its family.
void TkActorImpl::release()
{
    // Disassociate all joints
    // Copy joint array for safety against implementation of joint->setActor
    TkJointImpl** joints = reinterpret_cast<TkJointImpl**>(NvBlastAlloca(sizeof(TkJointImpl*)*getJointCountInternal()));
    TkJointImpl** stop = joints + getJointCountInternal();
    TkJointImpl** jointHandle = joints;
    for (JointIt j(*this); (bool)j; ++j)
    {
        *jointHandle++ = *j;
    }
    jointHandle = joints;
    while (jointHandle < stop)
    {
        NVBLAST_ASSERT(*jointHandle != nullptr);
        NVBLAST_ASSERT((*jointHandle)->getDataInternal().actors[0] == this || (*jointHandle)->getDataInternal().actors[1] == this);
        // Detaching from both actors removes the joint from this actor's list.
        (*jointHandle++)->setActors(nullptr, nullptr);
    }
    NVBLAST_ASSERT(getJointCountInternal() == 0);
    if (m_group != nullptr)
    {
        m_group->removeActor(*this);
    }
    if (m_actorLL != nullptr)
    {
        NvBlastActorDeactivate(m_actorLL, logLL);
    }
    if (m_family != nullptr)
    {
        m_family->removeActor(this);
        // Make sure we dispatch any remaining events when this family is emptied, since it will no longer be done by any group
        if (m_family->getActorCountInternal() == 0)
        {
            m_family->getQueue().dispatch();
        }
    }
}

const NvBlastActor* TkActorImpl::getActorLL() const
{
    return m_actorLL;
}

TkFamily& TkActorImpl::getFamily() const
{
    return getFamilyImpl();
}

uint32_t TkActorImpl::getIndex() const
{
    return getIndexInternal();
}

TkGroup* TkActorImpl::getGroup() const
{
    return getGroupImpl();
}

// Removes this actor from its group.  Returns the former group on success,
// nullptr when the actor is not in a group or the group is mid-processing.
TkGroup* TkActorImpl::removeFromGroup()
{
    if (m_group == nullptr)
    {
        NVBLAST_LOG_WARNING("TkActorImpl::removeFromGroup: actor not in a group.");
        return nullptr;
    }
    if (m_group->isProcessing())
    {
        NVBLAST_LOG_ERROR("TkActorImpl::removeFromGroup: cannot alter Group while processing.");
        return nullptr;
    }
    TkGroup* group = m_group;
    return m_group->removeActor(*this) ? group : nullptr;
}

NvBlastFamily* TkActorImpl::getFamilyLL() const
{
    return m_family->getFamilyLLInternal();
}

const TkAsset* TkActorImpl::getAsset() const
{
    return m_family->getAssetImpl();
}

// The following queries forward directly to the low-level NvBlast actor API.

uint32_t TkActorImpl::getVisibleChunkCount() const
{
    return NvBlastActorGetVisibleChunkCount(m_actorLL, logLL);
}

uint32_t TkActorImpl::getVisibleChunkIndices(uint32_t* visibleChunkIndices, uint32_t visibleChunkIndicesSize) const
{
    return NvBlastActorGetVisibleChunkIndices(visibleChunkIndices, visibleChunkIndicesSize, m_actorLL, logLL);
}

uint32_t TkActorImpl::getGraphNodeCount() const
{
    return NvBlastActorGetGraphNodeCount(m_actorLL, logLL);
}

uint32_t TkActorImpl::getGraphNodeIndices(uint32_t* graphNodeIndices, uint32_t graphNodeIndicesSize) const
{
    return NvBlastActorGetGraphNodeIndices(graphNodeIndices, graphNodeIndicesSize, m_actorLL, logLL);
}

const float* TkActorImpl::getBondHealths() const
{
    return NvBlastActorGetBondHealths(m_actorLL, logLL);
}

uint32_t TkActorImpl::getSplitMaxActorCount() const
{
    return NvBlastActorGetMaxActorCountForSplit(m_actorLL, logLL);
}

// Invariant: DAMAGED implies PENDING (a damaged actor is always queued for processing).
bool TkActorImpl::isDamaged() const
{
    NVBLAST_ASSERT(!m_flags.isSet(TkActorFlag::DAMAGED) || (m_flags.isSet(TkActorFlag::DAMAGED) && m_flags.isSet(TkActorFlag::PENDING)));
    return m_flags.isSet(TkActorFlag::DAMAGED);
}

void TkActorImpl::markAsDamaged()
{
    m_flags |= TkActorFlag::DAMAGED;
    makePending();
}

// Enqueues this actor with its group (once) and raises the PENDING flag.
void TkActorImpl::makePending()
{
    if (m_group != nullptr && !isPending())
    {
        m_group->enqueue(this);
    }
    m_flags |= TkActorFlag::PENDING;
}

TkActorImpl::operator Nv::Blast::TkActorData() const
{
    TkActorData data = { m_family, userData, getIndex() };
    return data;
}

// Queues a damage program for deferred processing by the group.
// Requires the actor to be in a group that is not currently processing.
void TkActorImpl::damage(const NvBlastDamageProgram& program, const void* programParams)
{
    BLAST_PROFILE_SCOPE_L("TkActor::damage");
    if (m_group == nullptr)
    {
        NVBLAST_LOG_WARNING("TkActor::damage: actor is not in a group, cannot fracture.");
        return;
    }
    if (m_group->isProcessing())
    {
        NVBLAST_LOG_WARNING("TkActor::damage: group is being processed, cannot fracture this actor.");
        return;
    }
    if (NvBlastActorCanFracture(m_actorLL, logLL))
    {
        m_damageBuffer.pushBack(DamageData{ program, programParams});
        makePending();
    }
}

// Immediately generates fracture commands (does not apply them).
void TkActorImpl::generateFracture(NvBlastFractureBuffers* commands, const NvBlastDamageProgram& program, const void* programParams) const
{
    BLAST_PROFILE_SCOPE_L("TkActor::generateFracture");
    if (m_group && m_group->isProcessing())
    {
        NVBLAST_LOG_WARNING("TkActor::generateFracture: group is being processed, cannot fracture this actor.");
        return;
    }
    // const context, must make m_timers mutable otherwise
    NvBlastActorGenerateFracture(commands, m_actorLL, program, programParams, logLL, const_cast<NvBlastTimers*>(&m_timers));
}

// Applies previously generated fracture commands, marks the actor damaged,
// and dispatches a TkFractureCommands event to the family queue.
void TkActorImpl::applyFracture(NvBlastFractureBuffers* eventBuffers, const NvBlastFractureBuffers* commands)
{
    BLAST_PROFILE_SCOPE_L("TkActor::applyFracture");
    if (m_group && m_group->isProcessing())
    {
        NVBLAST_LOG_WARNING("TkActor::applyFracture: group is being processed, cannot fracture this actor.");
        return;
    }
    NvBlastActorApplyFracture(eventBuffers, m_actorLL, commands, logLL, &m_timers);
    if (commands->chunkFractureCount > 0 || commands->bondFractureCount > 0)
    {
        markAsDamaged();
        TkFractureCommands* fevt = getFamilyImpl().getQueue().allocData<TkFractureCommands>();
        fevt->tkActorData = *this;
        fevt->buffers = *commands;
        getFamilyImpl().getQueue().addEvent(fevt);
        getFamilyImpl().getQueue().dispatch();
    }
}

uint32_t TkActorImpl::getJointCount() const
{
    return getJointCountInternal();
}

// Writes up to jointsSize joint pointers into the caller's buffer; returns the count written.
uint32_t TkActorImpl::getJoints(TkJoint** joints, uint32_t jointsSize) const
{
    uint32_t jointsWritten = 0;
    for (JointIt j(*this); (bool)j && jointsWritten < jointsSize; ++j)
    {
        joints[jointsWritten++] = *j;
    }
    return jointsWritten;
}

bool TkActorImpl::hasExternalBonds() const
{
    return NvBlastActorHasExternalBonds(m_actorLL, logLL);
}

} // namespace Blast
} // namespace Nv
10,590
C++
27.394102
138
0.694523
NVIDIA-Omniverse/PhysX/blast/source/sdk/toolkit/NvBlastTkGUID.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. 
#ifndef NVBLASTTKGUID_H #define NVBLASTTKGUID_H #include "NvPreprocessor.h" #if NV_WINDOWS_FAMILY #include <rpc.h> #else //#include <uuid/uuid.h> #include "NvBlastTime.h" #endif #include "NsHash.h" namespace Nv { namespace Blast { #if NV_WINDOWS_FAMILY NV_INLINE NvBlastID TkGenerateGUID(void* ptr) { NV_UNUSED(ptr); NV_COMPILE_TIME_ASSERT(sizeof(UUID) == sizeof(NvBlastID)); NvBlastID guid; UuidCreate(reinterpret_cast<UUID*>(&guid)); return guid; } #else NV_INLINE NvBlastID TkGenerateGUID(void* ptr) { // NV_COMPILE_TIME_ASSERT(sizeof(uuid_t) == sizeof(NvBlastID)); Time time; NvBlastID guid; // uuid_generate_random(reinterpret_cast<uuid_t&>(guid)); *reinterpret_cast<uint64_t*>(guid.data) = reinterpret_cast<uintptr_t>(ptr); *reinterpret_cast<int64_t*>(guid.data + 8) = time.getLastTickCount(); return guid; } #endif /** Compares two NvBlastIDs. \param[in] id1 A pointer to the first id to compare. \param[in] id2 A pointer to the second id to compare. \return true iff ids are equal. */ NV_INLINE bool TkGUIDsEqual(const NvBlastID* id1, const NvBlastID* id2) { return !memcmp(id1, id2, sizeof(NvBlastID)); } /** Clears an NvBlastID (sets all of its fields to zero). \param[out] id A pointer to the ID to clear. */ NV_INLINE void TkGUIDReset(NvBlastID* id) { memset(id, 0, sizeof(NvBlastID)); } /** Tests an NvBlastID to determine if it's zeroed. After calling TkGUIDReset on an ID, passing it to this function will return a value of true. \param[in] id A pointer to the ID to test. 
*/ NV_INLINE bool TkGUIDIsZero(const NvBlastID* id) { return *reinterpret_cast<const uint64_t*>(&id->data[0]) == 0 && *reinterpret_cast<const uint64_t*>(&id->data[8]) == 0; } } // namespace Blast } // namespace Nv namespace nvidia { namespace shdfnd { // hash specialization for NvBlastID template <> struct Hash<NvBlastID> { uint32_t operator()(const NvBlastID& k) const { // "DJB" string hash uint32_t h = 5381; for (uint32_t i = 0; i < sizeof(k.data) / sizeof(k.data[0]); ++i) h = ((h << 5) + h) ^ uint32_t(k.data[i]); return h; } bool equal(const NvBlastID& k0, const NvBlastID& k1) const { return Nv::Blast::TkGUIDsEqual(&k0, &k1); } }; } // namespace shdfnd } // namespace nvidia #endif // #ifndef NVBLASTTKGUID_H
3,944
C
25.655405
122
0.704615
NVIDIA-Omniverse/PhysX/blast/source/sdk/toolkit/NvBlastTkTaskImpl.h
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//  * Redistributions of source code must retain the above copyright
//    notice, this list of conditions and the following disclaimer.
//  * Redistributions in binary form must reproduce the above copyright
//    notice, this list of conditions and the following disclaimer in the
//    documentation and/or other materials provided with the distribution.
//  * Neither the name of NVIDIA CORPORATION nor the names of its
//    contributors may be used to endorse or promote products derived
//    from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.

#ifndef NVBLASTTKTASKIMPL_H
#define NVBLASTTKTASKIMPL_H

#include "NvBlast.h"
#include "NvBlastTkFrameworkImpl.h"

#include "NvBlastTkEventQueue.h"
#include "NvBlastArray.h"

#include <atomic>
#include <mutex>
#include <condition_variable>

#include "NvBlastAssert.h"

#include "NvBlastTkGroup.h" // TkGroupStats

namespace Nv
{
namespace Blast
{

class TkGroupImpl;
class TkActorImpl;
class TkFamilyImpl;

/**
Transient structure describing a job and its results.
*/
struct TkWorkerJob
{
    TkActorImpl*    m_tkActor;          //!< the actor to process
    TkActorImpl**   m_newActors;        //!< list of child actors created by splitting
    uint32_t        m_newActorsCount;   //!< the number of child actors created
};

/**
A list of equally sized memory blocks sharable between tasks.
*/
template<typename T>
class SharedBlock
{
public:
    SharedBlock() : m_numElementsPerBlock(0), m_numBlocks(0), m_buffer(nullptr) {}

    /**
    Allocates one large memory block of elementsPerBlock*numBlocks elements.
    */
    void allocate(uint32_t elementsPerBlock, uint32_t numBlocks)
    {
        NVBLAST_ASSERT(elementsPerBlock > 0 && numBlocks > 0);
        m_buffer = reinterpret_cast<T*>(NVBLAST_ALLOC_NAMED(elementsPerBlock*numBlocks*sizeof(T), "SharedBlock"));
        m_numElementsPerBlock = elementsPerBlock;
        m_numBlocks = numBlocks;
    }

    /**
    Returns the pointer to the first element of a block of numElementsPerBlock() elements.
    */
    T* getBlock(uint32_t id)
    {
        // The second clause permits calling this on an unallocated (empty) block set.
        NVBLAST_ASSERT(id < m_numBlocks || 0 == m_numElementsPerBlock);
        return &m_buffer[id*m_numElementsPerBlock];
    }

    /**
    The number of elements available per block.
    */
    uint32_t numElementsPerBlock() const
    {
        return m_numElementsPerBlock;
    }

    /**
    Frees the whole memory block.
    */
    void release()
    {
        m_numBlocks = 0;
        m_numElementsPerBlock = 0;
        NVBLAST_FREE(m_buffer);
        m_buffer = nullptr;
    }

private:
    uint32_t    m_numElementsPerBlock;  //!< elements available in one block
    uint32_t    m_numBlocks;            //!< number of virtual blocks available
    T*          m_buffer;               //!< contiguous memory for all blocks
};

/**
A preallocated, shared array from which can be allocated from in tasks.
Intended to be used when the maximum amount of data (e.g. for a family) is known in advance.
No further allocations take place on exhaustion.
Exhaustion asserts in debug builds and overflows otherwise.
*/
template<typename T>
class SharedBuffer
{
public:
    SharedBuffer() : m_capacity(0), m_used(0), m_buffer(nullptr) {}

    /**
    Atomically gets a pointer to the first element of an array of n elements.
    */
    T* reserve(size_t n)
    {
        NVBLAST_ASSERT(m_used + n <= m_capacity);
        // fetch_add makes concurrent reservations from multiple tasks safe.
        size_t start = m_used.fetch_add(n);
        return &m_buffer[start];
    }

    /**
    Preallocates memory for capacity elements.
    */
    void allocate(size_t capacity)
    {
        NVBLAST_ASSERT(m_buffer == nullptr);
        m_buffer = reinterpret_cast<T*>(NVBLAST_ALLOC_NAMED(capacity*sizeof(T), "SplitMemory"));
        m_capacity = capacity;
    }

    /**
    Preserves the memory allocated but resets to reserve from the beginning of the array.
    */
    void reset()
    {
        m_used = 0;
    }

    /**
    Frees the preallocated array.
    */
    void release()
    {
        NVBLAST_ASSERT(m_buffer != nullptr);
        NVBLAST_FREE(m_buffer);
        m_buffer = nullptr;
        m_capacity = m_used = 0;
    }

private:
    size_t              m_capacity; //!< available elements in the buffer
    std::atomic<size_t> m_used;     //!< used elements in the buffer
    T*                  m_buffer;   //!< the memory containing T's
};

/**
Allocates from a preallocated, externally owned memory block initialized with.
When blocks run out of space, new ones are allocated and owned by this class.
*/
template<typename T>
class LocalBuffer
{
public:
    /**
    Returns the pointer to the first element of an array of n elements.
    Allocates a new block of memory when exhausted, its size being the larger
    of n and capacity set with initialize().
    */
    T* allocate(size_t n)
    {
        if (m_used + n > m_capacity)
        {
            allocateNewBlock(n > m_capacity ? n : m_capacity);
        }
        size_t index = m_used;
        m_used += n;
        return &m_currentBlock[index];
    }

    /**
    Release the additionally allocated memory blocks.
    The externally owned memory block remains untouched.
    */
    void clear()
    {
        for (void* block : m_memoryBlocks)
        {
            NVBLAST_FREE(block);
        }
        m_memoryBlocks.clear();
    }

    /**
    Set the externally owned memory block to start allocating from,
    with a size of capacity elements.
    */
    void initialize(T* block, size_t capacity)
    {
        m_currentBlock = block;
        m_capacity = capacity;
        m_used = 0;
    }

private:
    /**
    Allocates space for capacity elements.
    */
    void allocateNewBlock(size_t capacity)
    {
        BLAST_PROFILE_SCOPE_L("Local Buffer allocation");
        m_capacity = capacity;
        m_currentBlock = static_cast<T*>(NVBLAST_ALLOC_NAMED(capacity*sizeof(T), "Blast LocalBuffer"));
        // Track the block so clear() can free it; the initial external block is never tracked.
        m_memoryBlocks.pushBack(m_currentBlock);
        m_used = 0;
    }

    InlineArray<void*, 4>::type m_memoryBlocks;     //!< storage for memory blocks
    T*                          m_currentBlock;     //!< memory block used to allocate from
    size_t                      m_used;             //!< elements used in current block
    size_t                      m_capacity;         //!< elements available in current block
};

/**
Holds the memory used by TkWorker for each family in each group.
*/
class SharedMemory
{
public:
    SharedMemory() : m_eventsMemory(0), m_eventsCount(0), m_refCount(0) {}

    /**
    Reserves n entries from preallocated memory.
    */
    NvBlastActor** reserveNewActors(size_t n)
    {
        return m_newActorBuffers.reserve(n);
    }

    /**
    Reserves n entries from preallocated memory.
    */
    TkActor** reserveNewTkActors(size_t n)
    {
        return m_newTkActorBuffers.reserve(n);
    }

    /**
    Allocates buffers to hold
    */
    void allocate(TkFamilyImpl&);

    /**
    Resets the internal buffers to reserve from their beginning.
    Preserves the allocated memory.
    */
    void reset()
    {
        m_newActorBuffers.reset();
        m_newTkActorBuffers.reset();
    }

    /**
    Increments the reference count.
    */
    void addReference() { m_refCount++; }

    /**
    Increments the reference count by n.
    */
    void addReference(size_t n) { m_refCount += n; }

    /**
    Decrements the reference count.
    Returns true if the count reached zero.
    */
    bool removeReference()
    {
        m_refCount--;
        return !isUsed();
    }

    /**
    Checks if the reference count is not zero.
    */
    bool isUsed()
    {
        return m_refCount > 0;
    }

    /**
    Release the internal buffers' memory.
    */
    void release()
    {
        m_newActorBuffers.release();
        m_newTkActorBuffers.release();
    }

    TkEventQueue    m_events;       //!< event queue shared across a group's actors of the same family
    uint32_t        m_eventsMemory; //!< expected memory size for event data
    uint32_t        m_eventsCount;  //!< expected number of events

private:
    size_t                      m_refCount;         //!< helper for usage and releasing memory
    SharedBuffer<NvBlastActor*> m_newActorBuffers;  //!< memory for splitting
    SharedBuffer<TkActor*>      m_newTkActorBuffers;//!< memory for split events
};

/**
Thread worker fracturing and splitting actors sequentially.
The list of actual jobs is provided by the group owning this worker.
*/
class TkWorker final : public TkGroupWorker
{
public:
    TkWorker() : m_id(~(uint32_t)0), m_group(nullptr), m_isBusy(false) {}

    // TkGroupWorker interface: processes the group job with the given id.
    void process(uint32_t jobID);
    // Prepares per-worker buffers before a processing run.
    void initialize();

    // Fractures and splits the single actor described by job.
    void process(TkWorkerJob& job);

    uint32_t                                m_id;           //!< this worker's id
    TkGroupImpl*                            m_group;        //!< the group owning this worker

    LocalBuffer<NvBlastChunkFractureData>   m_chunkBuffer;  //!< memory manager for chunk event data
    LocalBuffer<NvBlastBondFractureData>    m_bondBuffer;   //!< memory manager for bonds event data

    void*                                   m_splitScratch;
    NvBlastFractureBuffers                  m_tempBuffer;

    bool                                    m_isBusy;

#if NV_PROFILE
    TkGroupStats m_stats;
#endif
};

}
}

#endif // NVBLASTTKTASKIMPL_H
10,422
C
26.9437
122
0.627807
NVIDIA-Omniverse/PhysX/blast/source/sdk/toolkit/NvBlastTkFamilyImpl.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. 
#include "NvBlastTkFrameworkImpl.h" #include "NvBlastTkFamilyImpl.h" #include "NvBlastTkGroupImpl.h" #include "NvBlastTkAssetImpl.h" #include "NvBlastTkActorImpl.h" #include "NvBlastTkJointImpl.h" #include "NvBlastIndexFns.h" #include "NvBlastMemory.h" namespace Nv { namespace Blast { //////// Static data //////// NVBLASTTK_DEFINE_TYPE_IDENTIFIABLE(Family); //////// Member functions //////// TkFamilyImpl::TkFamilyImpl() : m_familyLL(nullptr), m_internalJointCount(0), m_asset(nullptr) { } TkFamilyImpl::TkFamilyImpl(const NvBlastID& id) : TkFamilyType(id), m_familyLL(nullptr), m_internalJointCount(0), m_asset(nullptr) { } TkFamilyImpl::~TkFamilyImpl() { if (m_familyLL != nullptr) { uint32_t familyActorCount = NvBlastFamilyGetActorCount(m_familyLL, logLL); if (familyActorCount != 0) { NVBLAST_LOG_WARNING("TkFamilyImpl::~TkFamilyImpl(): family actor count is not 0."); } NVBLAST_FREE(m_familyLL); } } void TkFamilyImpl::release() { for (TkActorImpl& actor : m_actors) { if (actor.isActive()) { actor.release(); } } m_actors.clear(); NVBLAST_DELETE(this, TkFamilyImpl); } const NvBlastFamily* TkFamilyImpl::getFamilyLL() const { return m_familyLL; } TkActorImpl* TkFamilyImpl::addActor(NvBlastActor* actorLL) { TkActorImpl* actor = getActorByActorLL(actorLL); NVBLAST_ASSERT(actor); actor->m_actorLL = actorLL; actor->m_family = this; return actor; } void TkFamilyImpl::removeActor(TkActorImpl* actor) { NVBLAST_ASSERT(actor != nullptr && actor->m_family == this); //actor->m_family = nullptr; actor->m_actorLL = nullptr; } uint32_t TkFamilyImpl::getActorCount() const { return getActorCountInternal(); } uint32_t TkFamilyImpl::getActors(TkActor** buffer, uint32_t bufferSize, uint32_t indexStart /*= 0*/) const { uint32_t actorCount = getActorCount(); if (actorCount <= indexStart) { NVBLAST_LOG_WARNING("TkFamilyImpl::getActors: indexStart beyond end of actor list."); return 0; } actorCount -= indexStart; if (actorCount > bufferSize) { actorCount = static_cast<uint32_t>(bufferSize); } 
uint32_t index = 0; for (const TkActorImpl& actor : m_actors) { if (actor.isActive()) { if (index >= indexStart) { if ((index - indexStart) >= actorCount) { break; } else { *buffer++ = const_cast<TkActorImpl*>(&actor); } } index++; } } return actorCount; } NV_INLINE bool areLLActorsEqual(const NvBlastActor* actor0, const NvBlastActor* actor1, Array<uint32_t>::type& scratch) { if (NvBlastActorGetGraphNodeCount(actor0, logLL) != NvBlastActorGetGraphNodeCount(actor1, logLL)) { return false; } const uint32_t chunkCount = NvBlastActorGetVisibleChunkCount(actor0, logLL); if (chunkCount != NvBlastActorGetVisibleChunkCount(actor1, logLL)) { return false; } scratch.resize(chunkCount * 2); NvBlastActorGetVisibleChunkIndices(scratch.begin(), chunkCount, actor0, logLL); NvBlastActorGetVisibleChunkIndices(scratch.begin() + chunkCount, chunkCount, actor1, logLL); return memcmp(scratch.begin(), scratch.begin() + chunkCount, chunkCount * sizeof(uint32_t)) == 0; } void TkFamilyImpl::reinitialize(const NvBlastFamily* newFamily, TkGroup* group) { NVBLAST_ASSERT(newFamily); #if NV_ENABLE_ASSERTS NvBlastID id0 = NvBlastFamilyGetAssetID(m_familyLL, logLL); NvBlastID id1 = NvBlastFamilyGetAssetID(newFamily, logLL); NVBLAST_ASSERT(TkGUIDsEqual(&id0, &id1)); #endif NVBLAST_ASSERT(NvBlastFamilyGetSize(m_familyLL, logLL) == NvBlastFamilyGetSize(newFamily, logLL)); // alloc and init new family const uint32_t blockSize = NvBlastFamilyGetSize(newFamily, logLL); NvBlastFamily* newFamilyCopy = (NvBlastFamily*)NVBLAST_ALLOC_NAMED(blockSize, "TkFamilyImpl::reinitialize"); memcpy(newFamilyCopy, newFamily, blockSize); NvBlastFamilySetAsset(newFamilyCopy, m_asset->getAssetLL(), logLL); // get actors from new family Array<NvBlastActor*>::type newLLActors(NvBlastFamilyGetActorCount(newFamilyCopy, logLL)); uint32_t actorCount = NvBlastFamilyGetActors(newLLActors.begin(), newLLActors.size(), newFamilyCopy, logLL); // reset actor families to nullptr (we use it as a flag later) for (TkActorImpl& actor : 
m_actors) { if (actor.isActive()) { actor.m_family = nullptr; } } // prepare split event with new actors auto newActorsSplitEvent = getQueue().allocData<TkSplitEvent>(); Array<TkActor*>::type children(actorCount); children.resizeUninitialized(0); newActorsSplitEvent->children = children.begin(); // scratch Array<uint32_t>::type scratch(m_asset->getChunkCount()); for (uint32_t i = 0; i < actorCount; ++i) { NvBlastActor* newLLActor = newLLActors[i]; uint32_t actorIndex = NvBlastActorGetIndex(newLLActor, logLL); TkActorImpl& tkActor = *getActorByIndex(actorIndex); tkActor.m_family = this; if (!tkActor.isActive() || !areLLActorsEqual(newLLActor, tkActor.m_actorLL, scratch)) { if (tkActor.isActive()) { auto removeSplitEvent = getQueue().allocData<TkSplitEvent>(); removeSplitEvent->parentData.family = this; removeSplitEvent->numChildren = 0; removeSplitEvent->parentData.userData = tkActor.userData; removeSplitEvent->parentData.index = tkActor.getIndex(); getQueue().addEvent(removeSplitEvent); } tkActor.m_actorLL = newLLActor; // switch groups TkGroupImpl* prevGroup = tkActor.m_group; if (prevGroup != group) { if (prevGroup) { prevGroup->removeActor(tkActor); } if (group) { group->addActor(tkActor); } } children.pushBack(&tkActor); } else { tkActor.m_actorLL = newLLActor; } } // if m_family is still nullptr for an active actor -> remove it. It doesn't exist in new family. 
for (TkActorImpl& tkActor : m_actors) { if (tkActor.isActive() && tkActor.m_family == nullptr) { tkActor.m_family = this; if (tkActor.m_group) { tkActor.m_group->removeActor(tkActor); } auto removeSplitEvent = getQueue().allocData<TkSplitEvent>(); removeSplitEvent->parentData.family = this; removeSplitEvent->numChildren = 0; removeSplitEvent->parentData.userData = tkActor.userData; removeSplitEvent->parentData.index = tkActor.getIndex(); getQueue().addEvent(removeSplitEvent); tkActor.m_actorLL = nullptr; } } // add split event with all new actors newActorsSplitEvent->parentData.family = this; newActorsSplitEvent->parentData.userData = 0; newActorsSplitEvent->parentData.index = invalidIndex<uint32_t>(); newActorsSplitEvent->numChildren = children.size(); if (newActorsSplitEvent->numChildren > 0) { getQueue().addEvent(newActorsSplitEvent); } // replace family NVBLAST_FREE(m_familyLL); m_familyLL = newFamilyCopy; // update joints for (TkActorImpl& tkActor : m_actors) { if (!tkActor.m_jointList.isEmpty()) { updateJoints(&tkActor); } } getQueue().dispatch(); } TkActorImpl* TkFamilyImpl::getActorByChunk(uint32_t chunk) { if (chunk >= NvBlastAssetGetChunkCount(m_asset->getAssetLLInternal(), logLL)) { NVBLAST_LOG_WARNING("TkFamilyImpl::getActorByChunk: invalid chunk index. Returning NULL."); return nullptr; } NvBlastActor* actorLL = NvBlastFamilyGetChunkActor(m_familyLL, chunk, logLL); return actorLL ? 
getActorByActorLL(actorLL) : nullptr; } void TkFamilyImpl::applyFractureInternal(const NvBlastFractureBuffers* commands) { NvBlastSupportGraph graph = getAsset()->getGraph(); // apply bond fracture commands on relevant actors { TkActorImpl* currActor = nullptr; NvBlastBondFractureData* bondFractures = commands->bondFractures; uint32_t bondFracturesCount = 0; auto applyFracture = [&]() { if (bondFracturesCount > 0) { if (currActor != nullptr && currActor->isActive()) { NvBlastFractureBuffers newCommands; newCommands.bondFractures = bondFractures; newCommands.bondFractureCount = bondFracturesCount; newCommands.chunkFractures = nullptr; newCommands.chunkFractureCount = 0; currActor->applyFracture(nullptr, &newCommands); } bondFractures += bondFracturesCount; bondFracturesCount = 0; } }; for (uint32_t i = 0; i < commands->bondFractureCount; ++i, ++bondFracturesCount) { const NvBlastBondFractureData& command = commands->bondFractures[i]; uint32_t chunk0 = graph.chunkIndices[command.nodeIndex0]; uint32_t chunk1 = graph.chunkIndices[command.nodeIndex1]; TkActorImpl* actor0 = getActorByChunk(chunk0); TkActorImpl* actor1 = getActorByChunk(chunk1); if (actor0 != actor1) { // skipping this event, bond already broken actor0 = nullptr; } if (actor0 != currActor) { applyFracture(); currActor = actor0; } } if (bondFracturesCount > 0) { applyFracture(); } } // apply chunk fracture commands on relevant actors { TkActorImpl* currActor = nullptr; NvBlastChunkFractureData* chunkFractures = commands->chunkFractures; uint32_t chunkFracturesCount = 0; auto applyFracture = [&]() { if (chunkFracturesCount > 0) { if (currActor != nullptr && currActor->isActive()) { NvBlastFractureBuffers newCommands; newCommands.bondFractures = nullptr; newCommands.bondFractureCount = 0; newCommands.chunkFractures = chunkFractures; newCommands.chunkFractureCount = chunkFracturesCount; currActor->applyFracture(nullptr, &newCommands); } chunkFractures += chunkFracturesCount; chunkFracturesCount = 0; } }; for 
(uint32_t i = 0; i < commands->chunkFractureCount; ++i, ++chunkFracturesCount) { const NvBlastChunkFractureData& command = commands->chunkFractures[i]; TkActorImpl* actor = getActorByChunk(command.chunkIndex); if (actor != currActor) { applyFracture(); currActor = actor; } } if (chunkFracturesCount > 0) { applyFracture(); } } } void TkFamilyImpl::updateJoints(TkActorImpl* actor, TkEventQueue* alternateQueue) { // Copy joint array for safety against implementation of joint->setActor TkJointImpl** joints = reinterpret_cast<TkJointImpl**>(NvBlastAlloca(sizeof(TkJointImpl*)*actor->getJointCountInternal())); TkJointImpl** stop = joints + actor->getJointCountInternal(); TkJointImpl** jointHandle = joints; for (TkActorImpl::JointIt j(*actor); (bool)j; ++j) { *jointHandle++ = *j; } jointHandle = joints; while (jointHandle < stop) { TkJointImpl* joint = *jointHandle++; const TkJointData& data = joint->getDataInternal(); TkActorImpl* actor0 = data.actors[0] != nullptr ? static_cast<TkActorImpl&>(*data.actors[0]).getFamilyImpl().getActorByChunk(data.chunkIndices[0]) : nullptr; TkActorImpl* actor1 = data.actors[1] != nullptr ? 
static_cast<TkActorImpl&>(*data.actors[1]).getFamilyImpl().getActorByChunk(data.chunkIndices[1]) : nullptr; joint->setActors(actor0, actor1, alternateQueue); } } const TkAsset* TkFamilyImpl::getAsset() const { return m_asset; } //////// Static functions //////// TkFamilyImpl* TkFamilyImpl::create(const TkAssetImpl* asset) { TkFamilyImpl* family = NVBLAST_NEW(TkFamilyImpl); family->m_asset = asset; void* mem = NVBLAST_ALLOC_NAMED(NvBlastAssetGetFamilyMemorySize(asset->getAssetLL(), logLL), "TkFamilyImpl::create"); family->m_familyLL = NvBlastAssetCreateFamily(mem, asset->getAssetLL(), logLL); //family->addListener(*TkFrameworkImpl::get()); if (family->m_familyLL == nullptr) { NVBLAST_LOG_ERROR("TkFamilyImpl::create: low-level family could not be created."); family->release(); return nullptr; } uint32_t maxActorCount = NvBlastFamilyGetMaxActorCount(family->m_familyLL, logLL); family->m_actors.resize(maxActorCount); family->m_internalJointBuffer.resize(asset->getJointDescCountInternal() * sizeof(TkJointImpl), 0); family->m_internalJointCount = asset->getJointDescCountInternal(); return family; } TkJointImpl** TkFamilyImpl::createExternalJointHandle(const NvBlastID& otherFamilyID, uint32_t chunkIndex0, uint32_t chunkIndex1) { JointSet* jointSet; const FamilyIDMap::Entry* jointSetIndexEntry = m_familyIDMap.find(otherFamilyID); uint32_t otherFamilyIndex; if (jointSetIndexEntry != nullptr) { otherFamilyIndex = jointSetIndexEntry->second; jointSet = m_jointSets[otherFamilyIndex]; } else { jointSet = NVBLAST_NEW(JointSet); NVBLAST_CHECK_ERROR(jointSet != nullptr, "TkFamilyImpl::addExternalJoint: failed to create joint set for other family ID.", return nullptr); jointSet->m_familyID = otherFamilyID; otherFamilyIndex = m_jointSets.size(); m_familyIDMap[otherFamilyID] = otherFamilyIndex; m_jointSets.pushBack(jointSet); } const ExternalJointKey key(chunkIndex0, chunkIndex1); const bool jointExists = jointSet->m_joints.find(key) != nullptr; NVBLAST_CHECK_WARNING(!jointExists, 
"TkFamilyImpl::addExternalJoint: joint already added.", return nullptr); return &jointSet->m_joints[key]; } bool TkFamilyImpl::deleteExternalJointHandle(TkJointImpl*& joint, const NvBlastID& otherFamilyID, uint32_t chunkIndex0, uint32_t chunkIndex1) { const FamilyIDMap::Entry* jointSetIndexEntry = m_familyIDMap.find(otherFamilyID); if (jointSetIndexEntry != nullptr) { const uint32_t jointSetIndex = jointSetIndexEntry->second; ExternalJointKey jointKey = ExternalJointKey(chunkIndex0, chunkIndex1); const HashMap<ExternalJointKey, TkJointImpl*>::type::Entry* e = m_jointSets[jointSetIndex]->m_joints.find(jointKey); if (e != nullptr) { joint = e->second; // Return value that was stored m_jointSets[jointSetIndex]->m_joints.erase(jointKey); // Delete the joint set if it is empty if (m_jointSets[jointSetIndex]->m_joints.size() == 0) { NVBLAST_DELETE(m_jointSets[jointSetIndex], JointSet); m_jointSets.replaceWithLast(jointSetIndex); m_familyIDMap.erase(otherFamilyID); if (jointSetIndex < m_jointSets.size()) { m_familyIDMap[m_jointSets[jointSetIndex]->m_familyID] = jointSetIndex; } } return true; } } return false; } TkJointImpl* TkFamilyImpl::findExternalJoint(const TkFamilyImpl* otherFamily, ExternalJointKey key) const { const FamilyIDMap::Entry* jointSetIndexEntry = m_familyIDMap.find(getFamilyID(otherFamily)); if (jointSetIndexEntry != nullptr) { const HashMap<ExternalJointKey, TkJointImpl*>::type::Entry* e = m_jointSets[jointSetIndexEntry->second]->m_joints.find(key); if (e != nullptr) { return e->second; } } return nullptr; } } // namespace Blast } // namespace Nv
18,252
C++
31.711469
148
0.633739
NVIDIA-Omniverse/PhysX/blast/source/sdk/toolkit/NvBlastTkTask.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. 
#include "NvBlastGlobals.h" #include "NvBlastTkTask.h" #include "NvCpuDispatcher.h" #include "NvBlastTkGroup.h" using namespace Nv::Blast; uint32_t TkGroupTaskManagerImpl::process(uint32_t workerCount) { NVBLAST_CHECK_WARNING(m_group != nullptr, "TkGroupTaskManager::process cannot process, no group set.", return 0); NVBLAST_CHECK_WARNING(m_sync.isDone(), "TkGroupTaskManager::process group is already being processed.", return 0); // at least one task must start, even when dispatcher has none specified uint32_t dispatcherThreads = m_taskManager.getCpuDispatcher()->getWorkerCount(); dispatcherThreads = dispatcherThreads > 0 ? dispatcherThreads : 1; // not expecting an arbitrary amount of tasks uint32_t availableTasks = TASKS_MAX_COUNT; // use workerCount tasks, unless dispatcher has less threads or less tasks are available uint32_t requestedTasks = workerCount > 0 ? workerCount : dispatcherThreads; requestedTasks = requestedTasks > dispatcherThreads ? dispatcherThreads : requestedTasks; requestedTasks = requestedTasks > availableTasks ? availableTasks : requestedTasks; // ensure the group has enough memory allocated for concurrent processing m_group->setWorkerCount(requestedTasks); // check if there is work to do uint32_t jobCount = m_group->startProcess(); if (jobCount) { // don't start more tasks than jobs are available requestedTasks = requestedTasks > jobCount ? 
jobCount : requestedTasks; // common counter for all tasks m_counter.reset(jobCount); // set to busy state m_sync.setCount(requestedTasks); // set up tasks for (uint32_t i = 0; i < requestedTasks; i++) { m_tasks[i].setup(m_group, &m_counter, &m_sync); m_tasks[i].setContinuation(m_taskManager, nullptr); m_tasks[i].removeReference(); } return requestedTasks; } // there was no work to be done return 0; } bool TkGroupTaskManagerImpl::wait(bool block) { if (block && !m_sync.isDone()) { m_sync.wait(); } if (m_sync.isDone()) { return m_group->endProcess(); } return false; } void TkGroupTaskManagerImpl::setGroup(TkGroup* group) { NVBLAST_CHECK_WARNING(m_sync.isDone(), "TkGroupTaskManager::setGroup trying to change group while processing.", return); m_group = group; } TkGroupTaskManager* TkGroupTaskManager::create(nvidia::task::NvTaskManager& taskManager, TkGroup* group) { return NVBLAST_NEW(TkGroupTaskManagerImpl) (taskManager, group); } void TkGroupTaskManagerImpl::release() { NVBLAST_CHECK_WARNING(m_sync.isDone(), "TkGroupTaskManager::release group is still being processed.", return); NVBLAST_DELETE(this, TkGroupTaskManagerImpl); }
4,386
C++
35.558333
124
0.719562
NVIDIA-Omniverse/PhysX/blast/source/sdk/toolkit/NvBlastTkGroupImpl.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. 
#include "NvPreprocessor.h" #include "NvBlastAssert.h" #include "NvBlast.h" #include "NvBlastTkFrameworkImpl.h" #include "NvBlastTkGroupImpl.h" #include "NvBlastTkActorImpl.h" #include "NvBlastTkFamilyImpl.h" #include "NvBlastTkAssetImpl.h" #include "NvBlastTkTaskImpl.h" #undef max #undef min #include <algorithm> using namespace nvidia; namespace Nv { namespace Blast { //////// Static data //////// NVBLASTTK_DEFINE_TYPE_IDENTIFIABLE(Group); //////// Member functions //////// TkGroupImpl::TkGroupImpl() : m_actorCount(0), m_isProcessing(false) { #if NV_PROFILE memset(&m_stats, 0, sizeof(TkGroupStats)); #endif } TkGroupImpl::~TkGroupImpl() { NVBLAST_ASSERT(getActorCount() == 0); NVBLAST_ASSERT(m_sharedMemory.size() == 0); } void TkGroupImpl::release() { if (isProcessing()) { // abort all processing? NVBLAST_LOG_ERROR("TkGroup::release: cannot release Group while processing."); NVBLAST_ALWAYS_ASSERT_MESSAGE("TkGroup::release: cannot release Group while processing."); return; } for (auto it = m_sharedMemory.getIterator(); !it.done(); ++it) { TkFamilyImpl* family = it->first; for (TkActorImpl& actor : family->getActorsInternal()) { if (actor.m_group == this) { removeActorInternal(actor); } } SharedMemory* mem = it->second; mem->release(); NVBLAST_DELETE(mem, SharedMemory); } m_sharedMemory.clear(); m_bondTempDataBlock.release(); m_chunkTempDataBlock.release(); m_bondEventDataBlock.release(); m_chunkEventDataBlock.release(); m_splitScratchBlock.release(); NVBLAST_DELETE(this, TkGroupImpl); } void TkGroupImpl::addActorsInternal(TkActorImpl** actors, uint32_t numActors) { for (uint32_t i = 0; i < numActors; i++) { addActorInternal(*actors[i]); } } void TkGroupImpl::addActorInternal(TkActorImpl& tkActor) { NVBLAST_ASSERT(tkActor.getGroup() == nullptr); tkActor.m_group = this; m_actorCount++; } bool TkGroupImpl::addActor(TkActor& actor) { TkActorImpl& tkActor = static_cast<TkActorImpl&>(actor); if (tkActor.getGroup() != nullptr) { NVBLAST_LOG_ERROR("TkGroup::addActor: actor 
already belongs to a Group. Remove from current group first."); return false; } if (isProcessing()) { NVBLAST_LOG_ERROR("TkGroup::addActor: cannot alter Group while processing."); return false; } // mark the actor that it now belongs to this group addActorInternal(tkActor); // actors that were fractured already or have damage requested // must be enqueued to be processed if (tkActor.isPending()) { enqueue(&tkActor); } TkFamilyImpl& family = tkActor.getFamilyImpl(); SharedMemory* mem = m_sharedMemory[&family]; if (mem == nullptr) { // the actor belongs to a family not involved in this group yet // shared memory must be allocated and temporary buffers adjusted accordingly BLAST_PROFILE_ZONE_BEGIN("family memory"); mem = NVBLAST_NEW(SharedMemory); mem->allocate(family); m_sharedMemory[&family] = mem; BLAST_PROFILE_ZONE_END("family memory"); BLAST_PROFILE_ZONE_BEGIN("group memory"); const uint32_t workerCount = m_workers.size(); NvBlastLog theLog = logLL; // this group's tasks will use one temporary buffer each, which is of max size of, for all families involved const size_t requiredScratch = NvBlastActorGetRequiredScratchForSplit(tkActor.getActorLL(), theLog); if (static_cast<size_t>(m_splitScratchBlock.numElementsPerBlock()) < requiredScratch) { m_splitScratchBlock.release(); m_splitScratchBlock.allocate(static_cast<uint32_t>(requiredScratch), workerCount); } // generate and apply fracture may create an entry for each bond const uint32_t bondCount = NvBlastAssetGetBondCount(tkActor.getAsset()->getAssetLL(), theLog); if (m_bondTempDataBlock.numElementsPerBlock() < bondCount) { m_bondTempDataBlock.release(); m_bondTempDataBlock.allocate(bondCount, workerCount); m_bondEventDataBlock.release(); m_bondEventDataBlock.allocate(bondCount, workerCount); } // apply fracture may create an entry for each lower-support chunk const uint32_t graphNodeCount = NvBlastAssetGetSupportGraph(tkActor.getAsset()->getAssetLL(), theLog).nodeCount; const uint32_t subsupportChunkCount = 
NvBlastAssetGetChunkCount(tkActor.getAsset()->getAssetLL(), theLog) - NvBlastAssetGetFirstSubsupportChunkIndex(tkActor.getAsset()->getAssetLL(), theLog); const uint32_t chunkCount = graphNodeCount + subsupportChunkCount; if (m_chunkTempDataBlock.numElementsPerBlock() < chunkCount) { m_chunkTempDataBlock.release(); m_chunkTempDataBlock.allocate(chunkCount, workerCount); m_chunkEventDataBlock.release(); m_chunkEventDataBlock.allocate(chunkCount, workerCount); } BLAST_PROFILE_ZONE_END("group memory"); } mem->addReference(); return true; } uint32_t TkGroupImpl::getActors(TkActor** buffer, uint32_t bufferSize, uint32_t indexStart /* = 0 */) const { BLAST_PROFILE_SCOPE_L("TkGroup::getActors"); uint32_t actorCount = m_actorCount; if (actorCount <= indexStart) { NVBLAST_LOG_WARNING("TkGroup::getActors: indexStart beyond end of actor list."); return 0; } actorCount -= indexStart; if (actorCount > bufferSize) { actorCount = bufferSize; } uint32_t index = 0; bool done = false; for (auto it = const_cast<TkGroupImpl*>(this)->m_sharedMemory.getIterator(); !it.done();++it) { TkFamilyImpl* fam = it->first; for (TkActorImpl& actor : fam->getActorsInternal()) { if (actor.m_group == this) { NVBLAST_ASSERT(actor.isActive()); if (index >= indexStart) { *buffer++ = &actor; } index++; done = (index - indexStart) >= actorCount; } if (done) break; } if (done) break; } return actorCount; } void TkGroupImpl::removeActorInternal(TkActorImpl& tkActor) { NVBLAST_ASSERT(tkActor.m_group == this); tkActor.m_group = nullptr; m_actorCount--; } void TkGroupImpl::releaseSharedMemory(TkFamilyImpl* fam, SharedMemory* mem) { NVBLAST_ASSERT(mem != nullptr && m_sharedMemory[fam] == mem); mem->release(); m_sharedMemory.erase(fam); NVBLAST_DELETE(mem, SharedMemory); } bool TkGroupImpl::removeActor(TkActor& actor) { TkActorImpl& tkActor = static_cast<TkActorImpl&>(actor); if (tkActor.getGroup() != this) { NVBLAST_LOG_ERROR("TkGroup::removeActor: actor does not belong to this Group."); return false; } if 
(isProcessing()) { NVBLAST_LOG_ERROR("TkGroup::removeActor: cannot alter Group while processing."); return false; } removeActorInternal(tkActor); // pending actors must be removed from the job queue as well if(tkActor.isPending()) { uint32_t index = tkActor.m_groupJobIndex; tkActor.m_groupJobIndex = invalidIndex<uint32_t>(); if (index < m_jobs.size()) { m_jobs.replaceWithLast(index); if (index < m_jobs.size()) { NVBLAST_ASSERT(m_jobs[index].m_tkActor->m_groupJobIndex == m_jobs.size()); NVBLAST_ASSERT(m_jobs[index].m_tkActor->isPending()); m_jobs[index].m_tkActor->m_groupJobIndex = index; } } } // if the actor is the last of its family in this group // the group-family memory can be released TkFamilyImpl* family = &tkActor.getFamilyImpl(); SharedMemory* mem = getSharedMemory(family); if (mem->removeReference()) { releaseSharedMemory(family, mem); } return true; } TkGroupImpl* TkGroupImpl::create(const TkGroupDesc& desc) { TkGroupImpl* group = NVBLAST_NEW(TkGroupImpl); group->setWorkerCount(desc.workerCount); return group; } void TkGroupImpl::setWorkerCount(uint32_t workerCount) { if (isProcessing()) { NVBLAST_LOG_WARNING("TkGroup::setWorkerCount: Group is still processing, call TkGroup::endProcess first."); return; } if (workerCount == 0) { NVBLAST_LOG_WARNING("TkGroup: attempting to create a Group with 0 workers. 
Forced to 1."); workerCount = 1; } if (workerCount != m_workers.size()) { m_workers.resize(workerCount); uint32_t workerId = 0; for (auto& worker : m_workers) { worker.m_id = workerId++; worker.m_group = this; } const uint32_t bondCount = m_bondTempDataBlock.numElementsPerBlock(); if (bondCount > 0) { m_bondTempDataBlock.release(); m_bondTempDataBlock.allocate(bondCount, workerCount); m_bondEventDataBlock.release(); m_bondEventDataBlock.allocate(bondCount, workerCount); } const uint32_t chunkCount = m_chunkTempDataBlock.numElementsPerBlock(); if (chunkCount > 0) { m_chunkTempDataBlock.release(); m_chunkTempDataBlock.allocate(chunkCount, workerCount); m_chunkEventDataBlock.release(); m_chunkEventDataBlock.allocate(chunkCount, workerCount); } const uint32_t scratchSize = m_splitScratchBlock.numElementsPerBlock(); if (scratchSize > 0) { m_splitScratchBlock.release(); m_splitScratchBlock.allocate(scratchSize, workerCount); } } } NV_INLINE uint32_t TkGroupImpl::getWorkerCount() const { return m_workers.size(); } uint32_t TkGroupImpl::startProcess() { BLAST_PROFILE_SCOPE_L("TkGroup::startProcess"); if (!setProcessing(true)) { NVBLAST_LOG_WARNING("TkGroup::process: Group is still processing, call TkGroup::endProcess first."); return 0; } if (m_jobs.size() > 0) { BLAST_PROFILE_ZONE_BEGIN("task setup"); BLAST_PROFILE_ZONE_BEGIN("setup job queue"); for (const auto& job : m_jobs) { const TkActorImpl* a = job.m_tkActor; SharedMemory* mem = getSharedMemory(&a->getFamilyImpl()); const uint32_t damageCount = a->m_damageBuffer.size(); // applyFracture'd actor do not necessarily have damage queued NVBLAST_ASSERT(damageCount > 0 || a->m_flags.isSet(TkActorFlag::DAMAGED)); // no reason to be here without these NVBLAST_ASSERT(a->m_flags.isSet(TkActorFlag::PENDING)); NVBLAST_ASSERT(a->m_group == this); // collect the amount of event payload memory to preallocate for TkWorkers mem->m_eventsMemory += damageCount * (sizeof(TkFractureCommands) + sizeof(TkFractureEvents)) + 
sizeof(TkSplitEvent); // collect the amount of event entries to preallocate for TkWorkers // (two TkFracture* events per damage plus one TkSplitEvent) mem->m_eventsCount += 2 * damageCount + 1; } BLAST_PROFILE_ZONE_END("setup job queue"); BLAST_PROFILE_ZONE_BEGIN("memory protect"); for (auto it = m_sharedMemory.getIterator(); !it.done(); ++it) { // preallocate the event memory for TkWorkers SharedMemory* mem = it->second; mem->m_events.reserveData(mem->m_eventsMemory); mem->m_events.reserveEvents(mem->m_eventsCount); // these counters are not used anymore // reset them immediately for next time mem->m_eventsCount = 0; mem->m_eventsMemory = 0; // switch to parallel mode mem->m_events.protect(true); } BLAST_PROFILE_ZONE_END("memory protect"); BLAST_PROFILE_ZONE_END("task setup"); for (auto&worker : m_workers) { worker.initialize(); } return m_jobs.size(); } else { bool success = setProcessing(false); NVBLAST_ASSERT(success); NV_UNUSED(success); return 0; } } bool TkGroupImpl::endProcess() { if (isProcessing()) { BLAST_PROFILE_SCOPE_L("TkGroupImpl::endProcess"); if (m_jobs.size() > 0) { #if NV_PROFILE BLAST_PROFILE_ZONE_BEGIN("accumulate timers"); NvBlastTimers accumulated; NvBlastTimersReset(&accumulated); uint32_t jobCount = 0; int64_t workerTime = 0; for (TkWorker& worker : m_workers) { accumulated += worker.m_stats.timers; jobCount += worker.m_stats.processedActorsCount; workerTime += worker.m_stats.workerTime; } m_stats.timers = accumulated; m_stats.processedActorsCount = jobCount; m_stats.workerTime = workerTime; BLAST_PROFILE_ZONE_END("accumulate timers"); #endif BLAST_PROFILE_ZONE_BEGIN("job update"); for (auto& j : m_jobs) { if (j.m_newActorsCount) { TkFamilyImpl* fam = &j.m_tkActor->getFamilyImpl(); SharedMemory* mem = getSharedMemory(fam); // as LL is implemented, where newActorsCount the parent is always deleted removeActorInternal(*j.m_tkActor); mem->removeReference(); addActorsInternal(j.m_newActors, j.m_newActorsCount); 
mem->addReference(j.m_newActorsCount); // Update joints mem->m_events.protect(false); // allow allocations again BLAST_PROFILE_ZONE_BEGIN("updateJoints"); fam->updateJoints(j.m_tkActor, &mem->m_events); BLAST_PROFILE_ZONE_END("updateJoints"); } // virtually dequeue the actor // the queue itself is cleared right after this loop j.m_tkActor->m_flags.clear(TkActorFlag::PENDING); j.m_tkActor->m_groupJobIndex = invalidIndex<uint32_t>(); BLAST_PROFILE_ZONE_BEGIN("damageBuffer.clear"); j.m_tkActor->m_damageBuffer.clear(); BLAST_PROFILE_ZONE_END("damageBuffer.clear"); } m_jobs.clear(); BLAST_PROFILE_ZONE_END("job update"); BLAST_PROFILE_ZONE_BEGIN("event dispatch"); for (auto it = m_sharedMemory.getIterator(); !it.done(); ++it) { BLAST_PROFILE_SCOPE_L("event dispatch"); TkFamilyImpl* family = it->first; SharedMemory* mem = it->second; NVBLAST_ASSERT(family != nullptr); NVBLAST_ASSERT(mem != nullptr && mem->isUsed()); // where no actor of a family has split, // its group/family event queue has not been // unprotected in the jobs loop above mem->m_events.protect(false); family->getQueue().dispatch(mem->m_events); mem->m_events.reset(); mem->reset(); } BLAST_PROFILE_ZONE_END("event dispatch"); BLAST_PROFILE_ZONE_BEGIN("event memory release"); for (auto& worker : m_workers) { worker.m_bondBuffer.clear(); worker.m_chunkBuffer.clear(); } BLAST_PROFILE_ZONE_END("event memory release"); } bool success = setProcessing(false); NVBLAST_ASSERT(success); return success; } return false; } bool TkGroupImpl::setProcessing(bool value) { bool expected = !value; return m_isProcessing.compare_exchange_strong(expected, value); } void TkGroupImpl::enqueue(TkActorImpl* tkActor) { NVBLAST_ASSERT(tkActor->getGroupImpl() != nullptr); NVBLAST_ASSERT(tkActor->getGroupImpl() == this); NVBLAST_ASSERT(isInvalidIndex(tkActor->m_groupJobIndex)); NVBLAST_ASSERT(isProcessing() == false); #if NV_DEBUG for (TkWorkerJob& j : m_jobs) { NVBLAST_ASSERT(j.m_tkActor != tkActor); } #endif tkActor->m_groupJobIndex = 
m_jobs.size(); TkWorkerJob& j = m_jobs.insert(); j.m_tkActor = tkActor; } TkGroupWorker* TkGroupImpl::acquireWorker() { BLAST_PROFILE_SCOPE_L("TkGroupImpl::acquireWorker"); std::unique_lock<std::mutex> lk(m_workerMtx); for (auto& worker:m_workers) { if (!worker.m_isBusy) { worker.m_isBusy = true; return &worker; } } return nullptr; } void TkGroupImpl::returnWorker(TkGroupWorker* worker) { BLAST_PROFILE_SCOPE_L("TkGroupImpl::returnWorker"); std::unique_lock<std::mutex> lk(m_workerMtx); auto w = static_cast<TkWorker*>(worker); NVBLAST_CHECK_WARNING(w->m_group == this, "TkGroup::returnWorker worker does not belong to this group.", return); w->m_isBusy = false; } } // namespace Blast } // namespace Nv
19,115
C++
30.082927
128
0.611405
NVIDIA-Omniverse/PhysX/blast/source/sdk/toolkit/NvBlastTkTaskImpl.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. 
#include "NvBlastTime.h" #include "NvBlastTkTaskImpl.h" #include "NvBlastTkFamilyImpl.h" #include "NvBlastTkAssetImpl.h" #include "NvBlastTkGroupImpl.h" using namespace Nv::Blast; void SharedMemory::allocate(TkFamilyImpl& tkFamily) { NVBLAST_ASSERT(m_refCount == 0); const NvBlastAsset* assetLL = tkFamily.getAsset()->getAssetLL(); // at most leafChunkCount actors can be created within a family // tasks will grab their portion out of these memory blocks uint32_t leafChunkCount = NvBlastAssetGetLeafChunkCount(assetLL, logLL); m_newActorBuffers.allocate(2 * leafChunkCount); // GWD-167 workaround (2*) m_newTkActorBuffers.allocate(leafChunkCount); } /** Creates a TkEvent::FractureCommand according to the input buffer for tkActor into events queue using the LocalBuffers to store the actual event data. */ NV_FORCE_INLINE void reportFractureCommands( const NvBlastFractureBuffers& buffer, LocalBuffer<NvBlastBondFractureData>& bondBuffer, LocalBuffer<NvBlastChunkFractureData>& chunkBuffer, TkEventQueue& events, const TkActorImpl* tkActor) { NvBlastBondFractureData* bdata = nullptr; if (buffer.bondFractureCount > 0) { bdata = bondBuffer.allocate(buffer.bondFractureCount); memcpy(bdata, buffer.bondFractures, sizeof(NvBlastBondFractureData)*buffer.bondFractureCount); } NvBlastChunkFractureData* cdata = nullptr; if (buffer.chunkFractureCount > 0) { cdata = chunkBuffer.allocate(buffer.chunkFractureCount); memcpy(cdata, buffer.chunkFractures, sizeof(NvBlastChunkFractureData)*buffer.chunkFractureCount); } TkFractureCommands* fevt = events.allocData<TkFractureCommands>(); fevt->tkActorData = *tkActor; fevt->buffers = { buffer.bondFractureCount, buffer.chunkFractureCount, bdata, cdata }; events.addEvent(fevt); } /** Creates a TkEvent::FractureEvent according to the input buffer for tkActor into events queue using the LocalBuffers to store the actual event data. 
*/ NV_FORCE_INLINE void reportFractureEvents( const NvBlastFractureBuffers& buffer, LocalBuffer<NvBlastBondFractureData>& bondBuffer, LocalBuffer<NvBlastChunkFractureData>& chunkBuffer, TkEventQueue& events, const TkActorImpl* tkActor) { uint32_t result[4] = { 0,0,0,0 }; NvBlastBondFractureData* bdata = nullptr; if (buffer.bondFractureCount > 0) { bdata = bondBuffer.allocate(buffer.bondFractureCount); for (uint32_t b = 0; b < buffer.bondFractureCount; ++b) { bdata[b] = buffer.bondFractures[b]; result[buffer.bondFractures[b].health > 0 ? 0 : 1]++; } } NvBlastChunkFractureData* cdata = nullptr; if (buffer.chunkFractureCount > 0) { cdata = chunkBuffer.allocate(buffer.chunkFractureCount); for (uint32_t c = 0; c < buffer.chunkFractureCount; ++c) { cdata[c] = buffer.chunkFractures[c]; result[buffer.chunkFractures[c].health > 0 ? 2 : 3]++; } } TkFractureEvents* fevt = events.allocData<TkFractureEvents>(); fevt->tkActorData = *tkActor; fevt->buffers = { buffer.bondFractureCount, buffer.chunkFractureCount, bdata, cdata }; fevt->bondsDamaged = result[0]; fevt->bondsBroken = result[1]; fevt->chunksDamaged = result[2]; fevt->chunksBroken = result[3]; events.addEvent(fevt); } void TkWorker::initialize() { // temporary memory used to generate and apply fractures // it must fit for the largest family involved in the group that owns this worker NvBlastBondFractureData* bondFractureData = m_group->m_bondTempDataBlock.getBlock(m_id); uint32_t bondFractureCount = m_group->m_bondTempDataBlock.numElementsPerBlock(); NvBlastChunkFractureData* chunkFractureData = m_group->m_chunkTempDataBlock.getBlock(m_id); uint32_t chunkFractureCount = m_group->m_chunkTempDataBlock.numElementsPerBlock(); m_tempBuffer = { bondFractureCount, chunkFractureCount, bondFractureData, chunkFractureData }; // temporary memory used to split the actor // large enough for the largest family involved m_splitScratch = m_group->m_splitScratchBlock.getBlock(m_id); // to avoid unnecessary allocations, preallocated 
memory exists to fit all chunks and bonds taking damage once // where multiple damage occurs, more memory will be allocated on demand (this may thwart other threads doing the same) m_bondBuffer.initialize(m_group->m_bondEventDataBlock.getBlock(m_id), m_group->m_bondEventDataBlock.numElementsPerBlock()); m_chunkBuffer.initialize(m_group->m_chunkEventDataBlock.getBlock(m_id), m_group->m_chunkEventDataBlock.numElementsPerBlock()); #if NV_PROFILE NvBlastTimersReset(&m_stats.timers); m_stats.processedActorsCount = 0; #endif } void TkWorker::process(TkWorkerJob& j) { NvBlastTimers* timers = nullptr; BLAST_PROFILE_SCOPE_M("TkActor"); TkActorImpl* tkActor = j.m_tkActor; const uint32_t tkActorIndex = tkActor->getIndex(); NvBlastActor* actorLL = tkActor->getActorLLInternal(); TkFamilyImpl& family = tkActor->getFamilyImpl(); SharedMemory* mem = m_group->getSharedMemory(&family); TkEventQueue& events = mem->m_events; NVBLAST_ASSERT(tkActor->getGroupImpl() == m_group); NVBLAST_ASSERT(tkActor->m_flags.isSet(TkActorFlag::PENDING)); #if NV_PROFILE timers = &m_stats.timers; *timers += tkActor->m_timers; NvBlastTimersReset(&tkActor->m_timers); m_stats.processedActorsCount++; #endif // generate and apply fracture for all damage requested on this actor // and queue events accordingly for (const auto& damage : tkActor->m_damageBuffer) { NvBlastFractureBuffers commandBuffer = m_tempBuffer; BLAST_PROFILE_ZONE_BEGIN("Material"); NvBlastActorGenerateFracture(&commandBuffer, actorLL, damage.program, damage.programParams, logLL, timers); BLAST_PROFILE_ZONE_END("Material"); if (commandBuffer.chunkFractureCount > 0 || commandBuffer.bondFractureCount > 0) { BLAST_PROFILE_SCOPE_M("Fill Command Events"); reportFractureCommands(commandBuffer, m_bondBuffer, m_chunkBuffer, events, tkActor); } NvBlastFractureBuffers eventBuffer = m_tempBuffer; BLAST_PROFILE_ZONE_BEGIN("Fracture"); NvBlastActorApplyFracture(&eventBuffer, actorLL, &commandBuffer, logLL, timers); BLAST_PROFILE_ZONE_END("Fracture"); if 
(eventBuffer.chunkFractureCount > 0 || eventBuffer.bondFractureCount > 0) { BLAST_PROFILE_SCOPE_M("Fill Fracture Events"); tkActor->m_flags |= (TkActorFlag::DAMAGED); reportFractureEvents(eventBuffer, m_bondBuffer, m_chunkBuffer, events, tkActor); } } // split the actor, which could have been damaged directly though the TkActor's fracture functions // i.e. it did not have damage queued for the above loop NvBlastActorSplitEvent splitEvent = { nullptr, nullptr }; if (tkActor->isDamaged()) { BLAST_PROFILE_ZONE_BEGIN("Split Memory"); uint32_t maxActorCount = NvBlastActorGetMaxActorCountForSplit(actorLL, logLL); splitEvent.newActors = mem->reserveNewActors(maxActorCount); BLAST_PROFILE_ZONE_END("Split Memory"); BLAST_PROFILE_ZONE_BEGIN("Split"); j.m_newActorsCount = NvBlastActorSplit(&splitEvent, actorLL, maxActorCount, m_splitScratch, logLL, timers); BLAST_PROFILE_ZONE_END("Split"); tkActor->m_flags.clear(TkActorFlag::DAMAGED); } else { j.m_newActorsCount = 0; } // update the TkActor according to the LL split results and queue events accordingly if (j.m_newActorsCount > 0) { NVBLAST_ASSERT(splitEvent.deletedActor == tkActor->getActorLL()); BLAST_PROFILE_ZONE_BEGIN("memory new actors"); auto tkSplitEvent = events.allocData<TkSplitEvent>(); tkSplitEvent->children = mem->reserveNewTkActors(j.m_newActorsCount); tkSplitEvent->numChildren = j.m_newActorsCount; tkSplitEvent->parentData.family = &family; tkSplitEvent->parentData.userData = tkActor->userData; tkSplitEvent->parentData.index = tkActorIndex; family.removeActor(tkActor); BLAST_PROFILE_ZONE_END("memory new actors"); BLAST_PROFILE_ZONE_BEGIN("create new actors"); for (uint32_t i = 0; i < j.m_newActorsCount; ++i) { TkActorImpl* newActor = family.addActor(splitEvent.newActors[i]); tkSplitEvent->children[i] = newActor; } j.m_newActors = reinterpret_cast<TkActorImpl**>(tkSplitEvent->children); BLAST_PROFILE_ZONE_END("create new actors"); BLAST_PROFILE_ZONE_BEGIN("split event"); events.addEvent(tkSplitEvent); 
BLAST_PROFILE_ZONE_END("split event"); } j.m_tkActor->m_flags.clear(TkActorFlag::PENDING); } void TkWorker::process(uint32_t jobID) { TkWorkerJob& j = m_group->m_jobs[jobID]; process(j); }
10,597
C++
38.107011
130
0.70888
NVIDIA-Omniverse/PhysX/blast/source/sdk/toolkit/NvBlastTkActorImpl.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. 
#ifndef NVBLASTTKACTORIMPL_H #define NVBLASTTKACTORIMPL_H #include "NvBlastTkCommon.h" #include "NvBlastAssert.h" #include "NvBlastDLink.h" #include "NvBlastIteratorBase.h" #include "NvBlastTkJointImpl.h" #include "NvBlast.h" #include "NvBlastTkActor.h" #include "NvFlags.h" namespace Nv { namespace Blast { // Forward declarations: class TkGroupImpl; class TkFamilyImpl; class TkAssetImpl; class TkJointImpl; /** Struct-enum for actor status flags, used in TkGroup processing. */ struct TkActorFlag { enum Enum { DAMAGED = (1 << 0), //!< The actor had fractures applied successfully and will take the split step. PENDING = (1 << 1), //!< The actor will be processed when its group executes, used to update job queues when moving group. }; }; /** Implementation of TkActor. */ class TkActorImpl : public TkActor { public: TkActorImpl(); ~TkActorImpl(); // Begin TkActor virtual const NvBlastActor* getActorLL() const override; virtual TkFamily& getFamily() const override; virtual uint32_t getIndex() const override; virtual TkGroup* getGroup() const override; virtual TkGroup* removeFromGroup() override; virtual const TkAsset* getAsset() const override; virtual uint32_t getVisibleChunkCount() const override; virtual uint32_t getVisibleChunkIndices(uint32_t* visibleChunkIndices, uint32_t visibleChunkIndicesSize) const override; virtual uint32_t getGraphNodeCount() const override; virtual uint32_t getGraphNodeIndices(uint32_t* graphNodeIndices, uint32_t graphNodeIndicesSize) const override; virtual const float* getBondHealths() const override; virtual uint32_t getSplitMaxActorCount() const override; virtual void damage(const NvBlastDamageProgram& program, const void* programParams) override; virtual bool isPending() const override; virtual void generateFracture(NvBlastFractureBuffers* commands, const NvBlastDamageProgram& program, const void* programParams) const override; virtual void applyFracture(NvBlastFractureBuffers* eventBuffers, const NvBlastFractureBuffers* commands) 
override; virtual uint32_t getJointCount() const override; virtual uint32_t getJoints(TkJoint** joints, uint32_t jointsSize) const override; virtual bool hasExternalBonds() const override; // End TkActor // Begin TkObject virtual void release() override; // End TkObject // Public methods /** Factory create method. \param[in] desc Actor descriptor set by the user. \return a pointer to a new TkActorImpl object if successful, NULL otherwise. */ static TkActorImpl* create(const TkActorDesc& desc); /** TkActorImpl objects are created in an array within a TkFamilyImpl. Actors may become 'inactive' without their memory being freed. If inactive, the actor should be treated as if it has been released. \return the active status of this TkActorImpl. */ bool isActive() const; /** Utility to return the low-level family to which the low-level actor belongs. \return a pointer to the NvBlastFamily to which the low-level actor belongs. */ NvBlastFamily* getFamilyLL() const; /** Utility to access the TkFamily to which this actor belongs. \return a reference to the TkFamilyImpl to which this TkActorImpl belongs. */ TkFamilyImpl& getFamilyImpl() const; /** \return the index of this actor with its TkFamilyImpl. */ uint32_t getIndexInternal() const; /** Access to the group to which this actor belongs, if any. \return a pointer to the TkGroupImpl to which this TkActorImpl belongs, if any. If this actor is not in a group, this function returns NULL. */ TkGroupImpl* getGroupImpl() const; /** Access to the low-level actor associated with this TkActorImpl. \return a pointer to the NvBlastActor associated with this TkActorImpl. If this actor is inactive (see isActive), this function returns NULL. */ NvBlastActor* getActorLLInternal() const; /** \return the number of TkJointImpl objects that reference this actor. */ uint32_t getJointCountInternal() const; /** Joint iterator. 
Usage: Given a TkActorImpl a, for (TkActorImpl::JointIt i(a); (bool)i; ++i) { TkJointImpl* joint = (TkJointImpl*)i; // ... } */ class JointIt : public DList::It { public: /** Constructed from an actor. */ JointIt(const TkActorImpl& actor, Direction dir = Forward); /** Current joint. */ TkJointImpl* operator * () const; }; /** Implicit converter to TkActorData for events. */ operator Nv::Blast::TkActorData() const; private: /** Functions to raise or check 'damaged' state: this actor will take the split step. 'damaged' actors automatically become 'pending' also. */ void markAsDamaged(); bool isDamaged() const; /** Raise actor to 'pending' state: this actor will be processed when its group executes next. Enqueues the actor in its group's job list if a group is set. Otherwise the group will enqueue the actor when it is added. */ void makePending(); /** Functions to add or remove an internal reference to a joint. (Joints and actors mutually reference each other.) */ void addJoint(TkJointLink& jointLink); void removeJoint(TkJointLink& jointLink); struct DamageData { NvBlastDamageProgram program; const void* programParams; }; // Data NvBlastActor* m_actorLL; //!< The low-level actor associated with this actor TkFamilyImpl* m_family; //!< The TkFamilyImpl to which this actor belongs TkGroupImpl* m_group; //!< The TkGroupImpl (if any) to which this actor belongs uint32_t m_groupJobIndex; //!< The index of this actor's job within its group's job list nvidia::NvFlags<TkActorFlag::Enum, char> m_flags; //!< Status flags for this actor Array<DamageData>::type m_damageBuffer; //!< Buffered damage input uint32_t m_jointCount; //!< The number of joints referenced in m_jointList DList m_jointList; //!< A doubly-linked list of joint references //#if NV_PROFILE NvBlastTimers m_timers; //!< If profiling, each actor stores timing data //#endif friend class TkWorker; // m_damageBuffer and m_flags friend class TkGroupImpl; friend class TkFamilyImpl; friend class TkJointImpl; friend class 
TkFrameworkImpl; }; //////// TkActorImpl inline methods //////// NV_INLINE TkFamilyImpl& TkActorImpl::getFamilyImpl() const { NVBLAST_ASSERT(m_family != nullptr); return *m_family; } NV_INLINE uint32_t TkActorImpl::getIndexInternal() const { NVBLAST_ASSERT(isActive()); return NvBlastActorGetIndex(m_actorLL, logLL); } NV_INLINE NvBlastActor* TkActorImpl::getActorLLInternal() const { return m_actorLL; } NV_INLINE uint32_t TkActorImpl::getJointCountInternal() const { return m_jointCount; } NV_INLINE TkGroupImpl* TkActorImpl::getGroupImpl() const { return m_group; } NV_INLINE bool TkActorImpl::isActive() const { return m_actorLL != nullptr; } NV_INLINE bool TkActorImpl::isPending() const { return m_flags.isSet(TkActorFlag::PENDING); } NV_INLINE void TkActorImpl::addJoint(TkJointLink& jointLink) { NVBLAST_ASSERT(m_jointList.isSolitary(jointLink)); m_jointList.insertHead(jointLink); ++m_jointCount; } NV_INLINE void TkActorImpl::removeJoint(TkJointLink& jointLink) { NVBLAST_ASSERT(!m_jointList.isSolitary(jointLink)); NVBLAST_ASSERT(m_jointCount > 0); if (m_jointCount > 0) { --m_jointCount; m_jointList.remove(jointLink); } } //////// TkActorImpl::JointIt methods //////// NV_INLINE TkActorImpl::JointIt::JointIt(const TkActorImpl& actor, Direction dir) : DList::It(actor.m_jointList, dir) {} NV_INLINE TkJointImpl* TkActorImpl::JointIt::operator * () const { const DLink* link = (const DLink*)(*this); return reinterpret_cast<const TkJointLink*>(link)->m_joint; } } // namespace Blast } // namespace Nv #endif // ifndef NVBLASTTKACTORIMPL_H
10,565
C
29.894737
162
0.654614
NVIDIA-Omniverse/PhysX/blast/source/sdk/toolkit/NvBlastTkCommon.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #ifndef NVBLASTTKCOMMON_H #define NVBLASTTKCOMMON_H #include "NvBlastGlobals.h" #include "NvBlastTkGUID.h" // Macro to define standard object classes. An intermediate class is defined which holds common implementations. 
#define NVBLASTTK_IMPL_DECLARE(_name) \ class Tk##_name##Type : public Tk##_name \ { \ public: \ /* Blank constructor generates a new NvBlastID and informs framework */ \ Tk##_name##Type() \ { \ memset(&m_ID, 0, sizeof(NvBlastID)); \ setID(TkGenerateGUID(this)); \ TkFrameworkImpl::get()->onCreate(*this); \ } \ \ /* This constructor takes an existing NvBlastID and informs framework */ \ Tk##_name##Type(const NvBlastID& id) \ { \ memset(&m_ID, 0, sizeof(NvBlastID)); \ setID(id); \ TkFrameworkImpl::get()->onCreate(*this); \ } \ \ /* Destructor informs framework */ \ ~Tk##_name##Type() { TkFrameworkImpl::get()->onDestroy(*this); } \ \ /* Begin TkIdentifiable */ \ virtual void setID(const NvBlastID& id) override \ { \ /* Inform framework of ID change */ \ TkFrameworkImpl::get()->onIDChange(*this, m_ID, id); \ m_ID = id; \ } \ virtual const NvBlastID& getID() const override { return getIDInternal(); } \ virtual const TkType& getType() const override { return s_type; } \ /* End TkIdentifiable */ \ \ /* Begin public API */ \ \ /* Inline method for internal access to NvBlastID */ \ const NvBlastID& getIDInternal() const { return m_ID; } \ \ /* End public API */ \ \ /* Static type information */ \ static TkTypeImpl s_type; \ \ private: \ NvBlastID m_ID; /* NvBlastID for a TkIdentifiable object */ \ }; \ \ /* Derive object implementation from common implementation class above */ \ class Tk##_name##Impl final : public Tk##_name##Type // Macro to declare standard object interfaces, enums, etc. #define NVBLASTTK_IMPL_DEFINE_IDENTIFIABLE(_id0, _id1, _id2, _id3) \ /* Begin TkObject */ \ virtual void release() override; \ /* End TkObject */ \ \ /* Enums */ \ \ /* Generate a ClassID enum used to identify this TkIdentifiable. */ \ enum { ClassID = NVBLAST_FOURCC(_id0, _id1, _id2, _id3) } // Macro to define class type data #define NVBLASTTK_DEFINE_TYPE_IDENTIFIABLE(_name) \ TkTypeImpl Tk##_name##Type::s_type("Tk" #_name, Tk##_name##Impl::ClassID, 0) #endif // ifndef NVBLASTTKCOMMON_H
6,979
C
64.233644
113
0.391317
NVIDIA-Omniverse/PhysX/blast/source/sdk/toolkit/NvBlastTkAssetImpl.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. 
#include "NvBlastTkFrameworkImpl.h" #include "NvBlastTkAssetImpl.h" #include "NvBlastTkFamilyImpl.h" #include "NvBlast.h" #include "NvBlastMemory.h" namespace Nv { namespace Blast { //////// Static data //////// NVBLASTTK_DEFINE_TYPE_IDENTIFIABLE(Asset); //////// Member functions //////// TkAssetImpl::TkAssetImpl() : m_assetLL(nullptr), m_ownsAsset(false) { } TkAssetImpl::TkAssetImpl(const NvBlastID& id) : TkAssetType(id), m_assetLL(nullptr), m_ownsAsset(false) { } TkAssetImpl::~TkAssetImpl() { if (m_assetLL != nullptr && m_ownsAsset) { NVBLAST_FREE(m_assetLL); } } const NvBlastAsset* TkAssetImpl::getAssetLL() const { return getAssetLLInternal(); } uint32_t TkAssetImpl::getChunkCount() const { return NvBlastAssetGetChunkCount(m_assetLL, logLL); } uint32_t TkAssetImpl::getLeafChunkCount() const { return NvBlastAssetGetLeafChunkCount(m_assetLL, logLL); } uint32_t TkAssetImpl::getBondCount() const { return NvBlastAssetGetBondCount(m_assetLL, logLL); } const NvBlastChunk* TkAssetImpl::getChunks() const { return NvBlastAssetGetChunks(m_assetLL, logLL); } const NvBlastBond* TkAssetImpl::getBonds() const { return NvBlastAssetGetBonds(m_assetLL, logLL); } const NvBlastSupportGraph TkAssetImpl::getGraph() const { return NvBlastAssetGetSupportGraph(m_assetLL, logLL); } uint32_t TkAssetImpl::getDataSize() const { return NvBlastAssetGetSize(m_assetLL, logLL); } uint32_t TkAssetImpl::getJointDescCount() const { return getJointDescCountInternal(); } const TkAssetJointDesc* TkAssetImpl::getJointDescs() const { return getJointDescsInternal(); } void TkAssetImpl::release() { const TkType& tkType = TkFamilyImpl::s_type; const uint32_t num = TkFrameworkImpl::get()->getObjectCount(tkType); if (num) { Array<TkIdentifiable*>::type dependents(num); TkFrameworkImpl::get()->getObjects(dependents.begin(), dependents.size(), tkType); for (TkObject* o : dependents) { TkFamilyImpl* f = static_cast<TkFamilyImpl*>(o); if (f->getAssetImpl() == this) { f->release(); } } } NVBLAST_DELETE(this, 
TkAssetImpl); } //////// Static functions //////// TkAssetImpl* TkAssetImpl::create(const TkAssetDesc& desc) { TkAssetImpl* asset = NVBLAST_NEW(TkAssetImpl); Array<char>::type scratch((uint32_t)NvBlastGetRequiredScratchForCreateAsset(&desc, logLL)); void* mem = NVBLAST_ALLOC_NAMED(NvBlastGetAssetMemorySize(&desc, logLL), "TkAssetImpl::create"); asset->m_assetLL = NvBlastCreateAsset(mem, &desc, scratch.begin(), logLL); if (asset->m_assetLL == nullptr) { NVBLAST_LOG_ERROR("TkAssetImpl::create: low-level asset could not be created."); asset->release(); return nullptr; } if (desc.bondFlags != nullptr) { for (uint32_t bondN = 0; bondN < desc.bondCount; ++bondN) { if (0 != (desc.bondFlags[bondN] & TkAssetDesc::BondJointed)) { const NvBlastBondDesc& bondDesc = desc.bondDescs[bondN]; const uint32_t c0 = bondDesc.chunkIndices[0]; const uint32_t c1 = bondDesc.chunkIndices[1]; if (c0 >= desc.chunkCount || c1 >= desc.chunkCount) { NVBLAST_LOG_WARNING("TkAssetImpl::create: joint flag set for badly described bond. No joint descriptor created."); continue; } if (!asset->addJointDesc(c0, c1)) { NVBLAST_LOG_WARNING("TkAssetImpl::create: no bond corresponds to the user-described bond indices. No joint descriptor created."); } } } } asset->m_ownsAsset = true; // asset->setID(NvBlastAssetGetID(asset->m_assetLL, logLL)); // Keeping LL and Tk IDs distinct return asset; } TkAssetImpl* TkAssetImpl::create(const NvBlastAsset* assetLL, Nv::Blast::TkAssetJointDesc* jointDescs, uint32_t jointDescCount, bool ownsAsset) { TkAssetImpl* asset = NVBLAST_NEW(TkAssetImpl); //NOTE: Why are we passing in a const NvBlastAsset* and then discarding the const? 
asset->m_assetLL = const_cast<NvBlastAsset*>(assetLL); if (asset->m_assetLL == nullptr) { NVBLAST_LOG_ERROR("TkAssetImpl::create: low-level asset could not be created."); asset->release(); return nullptr; } asset->m_ownsAsset = ownsAsset; asset->setID(NvBlastAssetGetID(asset->m_assetLL, logLL)); asset->m_jointDescs.resize(jointDescCount); for (uint32_t i = 0; i < asset->m_jointDescs.size(); ++i) { asset->m_jointDescs[i] = jointDescs[i]; } return asset; } bool TkAssetImpl::addJointDesc(uint32_t chunkIndex0, uint32_t chunkIndex1) { if (m_assetLL == nullptr) { return false; } const uint32_t upperSupportChunkCount = NvBlastAssetGetFirstSubsupportChunkIndex(m_assetLL, logLL); if (chunkIndex0 >= upperSupportChunkCount || chunkIndex1 >= upperSupportChunkCount) { return false; } const uint32_t* chunkToGraphNodeMap = NvBlastAssetGetChunkToGraphNodeMap(m_assetLL, logLL); const uint32_t node0 = chunkToGraphNodeMap[chunkIndex0]; const uint32_t node1 = chunkToGraphNodeMap[chunkIndex1]; const NvBlastSupportGraph graph = NvBlastAssetGetSupportGraph(m_assetLL, logLL); if (node0 >= graph.nodeCount || node1 >= graph.nodeCount) { return false; } // Find bond index // Iterate through all neighbors of node0 chunk uint32_t bondIndex = 0xFFFFFFFF; for (uint32_t i = graph.adjacencyPartition[node0]; i < graph.adjacencyPartition[node0 + 1]; i++) { if (graph.adjacentNodeIndices[i] == node1) { bondIndex = graph.adjacentBondIndices[i]; break; } } if (bondIndex >= NvBlastAssetGetBondCount(m_assetLL, logLL)) { return false; } const NvBlastBond& bond = NvBlastAssetGetBonds(m_assetLL, logLL)[bondIndex]; TkAssetJointDesc jointDesc; jointDesc.attachPositions[0] = jointDesc.attachPositions[1] = nvidia::NvVec3(bond.centroid[0], bond.centroid[1], bond.centroid[2]); jointDesc.nodeIndices[0] = node0; jointDesc.nodeIndices[1] = node1; m_jointDescs.pushBack(jointDesc); return true; } } // namespace Blast } // namespace Nv
8,002
C++
27.996377
150
0.673957
NVIDIA-Omniverse/PhysX/blast/source/sdk/toolkit/NvBlastTkTypeImpl.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #ifndef NVBLASTTKTYPEIMPL_H #define NVBLASTTKTYPEIMPL_H #include "NvPreprocessor.h" #include "NvBlastTkType.h" namespace Nv { namespace Blast { /** Implementation of TkType, storing class information for TkIdentifiable-derived classes. 
*/
class TkTypeImpl : public TkType
{
public:
    TkTypeImpl(const char* typeName, uint32_t typeID, uint32_t version);

    // Begin TkType
    virtual const char* getName() const override
    {
        return getNameInternal();
    }

    virtual uint32_t getVersion() const override
    {
        return getVersionInternal();
    }
    // End TkType

    // Public methods

    /**
    Access to the class name.

    \return a C string pointer to the class name.
    */
    const char* getNameInternal() const;

    /**
    Access to the data format version for the class.

    \return the data format version.
    */
    uint32_t getVersionInternal() const;

    /**
    Access to a unique identifier for the class (set using the NVBLASTTK_IMPL_DEFINE_IDENTIFIABLE macro).

    \return the class's unique identifier.
    */
    uint32_t getID() const;

    /**
    Access to a runtime-unique small index for the class.

    \return the index for the class.
    */
    uint32_t getIndex() const;

    /**
    \return whether or not the index has been set (see setIndex) to a valid value.
    */
    bool indexIsValid() const;

private:
    enum { InvalidIndex = 0xFFFFFFFF };     // sentinel value for an unset m_index

    /**
    Sets the type index.

    \param[in] index The index to set.
    */
    void setIndex(uint32_t index);

    const char* m_name;     //!< The name of the class, set by the constructor.
    uint32_t    m_ID;       //!< The unique identifier for the class, set by the constructor.
    uint32_t    m_version;  //!< The data format version for the class, set by the constructor.
    uint32_t    m_index;    //!< The index set for this class, set using setIndex().

    // TkFrameworkImpl assigns the runtime index via the private setIndex()
    friend class TkFrameworkImpl;
};


//////// TkTypeImpl inline methods ////////

NV_INLINE TkTypeImpl::TkTypeImpl(const char* typeName, uint32_t typeID, uint32_t version)
    : m_name(typeName)
    , m_ID(typeID)
    , m_version(version)
    , m_index((uint32_t)InvalidIndex)
{
}


NV_INLINE const char* TkTypeImpl::getNameInternal() const
{
    return m_name;
}


NV_INLINE uint32_t TkTypeImpl::getVersionInternal() const
{
    return m_version;
}


NV_INLINE uint32_t TkTypeImpl::getID() const
{
    return m_ID;
}


NV_INLINE uint32_t TkTypeImpl::getIndex() const
{
    return m_index;
}


NV_INLINE bool TkTypeImpl::indexIsValid() const
{
    return m_index != (uint32_t)InvalidIndex;
}


NV_INLINE void TkTypeImpl::setIndex(uint32_t index)
{
    m_index = index;
}

} // namespace Blast
} // namespace Nv


#endif // ifndef NVBLASTTKTYPEIMPL_H
4,415
C
26.428571
110
0.68154
NVIDIA-Omniverse/PhysX/blast/source/sdk/toolkit/NvBlastTkFrameworkImpl.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. 
#ifndef NVBLASTTKFRAMEWORKIMPL_H
#define NVBLASTTKFRAMEWORKIMPL_H

#include "NvBlastTkFramework.h"
#include "NvBlastInternalProfiler.h"

#include "NvBlastTkCommon.h"

#include "NvBlastArray.h"
#include "NvBlastHashMap.h"
#include "NvBlastHashSet.h"


namespace Nv
{
namespace Blast
{

// Forward declarations
class TkTypeImpl;
class TkJointImpl;


/**
Implementation of TkFramework.  Singleton object (see get()/set() below) that tracks
all TkIdentifiable objects and joints created through the toolkit.
*/
class TkFrameworkImpl : public TkFramework
{
public:
                            TkFrameworkImpl();
                            ~TkFrameworkImpl();

    // Begin TkFramework
    virtual void            release() override;

    virtual const TkType*   getType(TkTypeIndex::Enum typeIndex) const override;

    virtual TkIdentifiable* findObjectByID(const NvBlastID& id) const override;

    virtual uint32_t        getObjectCount(const TkType& type) const override;

    virtual uint32_t        getObjects(TkIdentifiable** buffer, uint32_t bufferSize, const TkType& type, uint32_t indexStart = 0) const override;

    virtual bool            reorderAssetDescChunks(NvBlastChunkDesc* chunkDescs, uint32_t chunkCount, NvBlastBondDesc* bondDescs, uint32_t bondCount, uint32_t* chunkReorderMap = nullptr, bool keepBondNormalChunkOrder = false) const override;

    virtual bool            ensureAssetExactSupportCoverage(NvBlastChunkDesc* chunkDescs, uint32_t chunkCount) const override;

    virtual TkAsset*        createAsset(const TkAssetDesc& desc) override;

    virtual TkAsset*        createAsset(const NvBlastAsset* assetLL, Nv::Blast::TkAssetJointDesc* jointDescs = nullptr, uint32_t jointDescCount = 0, bool ownsAsset = false) override;

    virtual TkGroup*        createGroup(const TkGroupDesc& desc) override;

    virtual TkActor*        createActor(const TkActorDesc& desc) override;

    virtual TkJoint*        createJoint(const TkJointDesc& desc) override;
    // End TkFramework

    // Public methods

    /**
    To be called by any TkIdentifiable object when it is created, so the framework can track it.
    */
    void                    onCreate(TkIdentifiable& object);

    /**
    To be called by any TkIdentifiable object when it is deleted, so the framework can stop tracking it.
    */
    void                    onDestroy(TkIdentifiable& object);

    /**
    Special onCreate method for joints, since they are not TkIdentifiable.
    */
    void                    onCreate(TkJointImpl& joint);

    /**
    Special onDestroy method for joints, since they are not TkIdentifiable.
    */
    void                    onDestroy(TkJointImpl& joint);

    /**
    Must be called whenever a TkIdentifiable object's ID is changed, so that the framework can associate the new ID with it.
    */
    void                    onIDChange(TkIdentifiable& object, const NvBlastID& IDPrev, const NvBlastID& IDCurr);

    /**
    Internal (non-virtual) method to find a TkIdentifiable object based upon its NvBlastID.
    */
    TkIdentifiable*         findObjectByIDInternal(const NvBlastID& id) const;

    // Access to singleton

    /**
    Retrieve the global singleton.
    */
    static TkFrameworkImpl* get();

    /**
    Set the global singleton, if it's not already set, or set it to NULL.  Returns true iff successful.
    */
    static bool             set(TkFrameworkImpl* framework);

private:
    // Enums
    enum { ClassID = NVBLAST_FOURCC('T', 'K', 'F', 'W') };  //!< TkFramework identifier token, used in serialization

    // Static data
    static TkFrameworkImpl*                                                 s_framework;        //!< Global (singleton) object pointer

    // Types
    InlineArray<const TkTypeImpl*, TkTypeIndex::TypeCount>::type            m_types;            //!< TkIdentifiable static type data
    HashMap<uint32_t, uint32_t>::type                                       m_typeIDToIndex;    //!< Map to type data keyed by ClassID

    // Objects and object names
    HashMap<NvBlastID, TkIdentifiable*>::type                               m_IDToObject;       //!< Map to all TkIdentifiable objects, keyed by NvBlastID
    InlineArray<Array<TkIdentifiable*>::type, TkTypeIndex::TypeCount>::type m_objects;          //!< Catalog of all TkIdentifiable objects, grouped by type.  (Revisit implementation.)

    // Track external joints (to do: make this a pool)
    HashSet<TkJointImpl*>::type                                             m_joints;           //!< All internal joints
};


//////// TkFrameworkImpl inline methods ////////

NV_INLINE TkIdentifiable* TkFrameworkImpl::findObjectByIDInternal(const NvBlastID& id) const
{
    // Non-virtual lookup; returns nullptr (without logging) when the ID is unknown
    const auto entry = m_IDToObject.find(id);
    if (entry == nullptr)
    {
        return nullptr;
    }

    return entry->second;
}

} // namespace Blast
} // namespace Nv


#endif // ifndef NVBLASTTKFRAMEWORKIMPL_H
6,653
C
40.074074
253
0.650534
NVIDIA-Omniverse/PhysX/blast/source/sdk/toolkit/NvBlastTkTaskManager.cpp
// // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. 
#include "NvTask.h"
#include "NvTaskDefine.h"
#include "NvCpuDispatcher.h"
#include "NvGpuDispatcher.h"
#include "NvErrorCallback.h"

#include "NvBlastGlobals.h"
#include "NvBlastAssert.h"
#include "NvBlastAtomic.h"
#include "NvBlastAllocator.h"
#include "NvBlastArray.h"
#include "NvBlastHashMap.h"

#include <mutex>

using namespace nvidia;
using namespace nvidia::task;

namespace Nv
{
namespace Blast
{

// RAII lock guard for a std::mutex (pre-C++11-style local helper; see LOCK() below)
class MutexScopedLock
{
    std::mutex& mMutex;
    NV_NOCOPY(MutexScopedLock)

public:
    NV_INLINE MutexScopedLock(std::mutex& mutex) : mMutex(mutex) { mMutex.lock(); }
    NV_INLINE ~MutexScopedLock() { mMutex.unlock(); }
};

// Locks the enclosing object's mMutex for the remainder of the current scope
#define LOCK() MutexScopedLock __lock__(mMutex)

// End-of-list sentinel for the dependency-table linked lists
constexpr int EOL = -1;
typedef HashMap<const char *, NvTaskID>::type NvBlastTkTaskNameToIDMap;

// One link in a task's singly-linked dependency list (index-linked, stored in NvBlastTkTaskDepTable)
struct NvBlastTkTaskDepTableRow
{
    NvTaskID    mTaskID;
    int         mNextDep;
};
typedef Array<NvBlastTkTaskDepTableRow>::type NvBlastTkTaskDepTable;

// Accessor structs: expose protected NvTask/NvLightCpuTask members via reinterpret_cast (see ACCESS below)
struct NvTaskAccess : public NvTask
{
    void setTaskID(NvTaskID taskID) { mTaskID = taskID; }
    void setTm(NvTaskManager* tm) { mTm = tm; }
};
NvTaskAccess& ACCESS(NvTask& task) { return reinterpret_cast<NvTaskAccess&>(task); }
NvTaskAccess* ACCESS(NvTask* task) { return reinterpret_cast<NvTaskAccess*>(task); }

struct NvLightCpuTaskAccess : public NvLightCpuTask
{
    bool atomicIncrementRefCount() { return Nv::Blast::atomicIncrement(&mRefCount); }
    bool atomicDecrementRefCount() { return Nv::Blast::atomicDecrement(&mRefCount); }
};
NvLightCpuTaskAccess& ACCESS(NvLightCpuTask& task) { return reinterpret_cast<NvLightCpuTaskAccess&>(task); }

// One row per submitted task: the task pointer, its reference count, and the head/tail
// of its dependency list (indices into the shared NvBlastTkTaskDepTable)
class NvBlastTkTaskTableRow
{
public:
    NvBlastTkTaskTableRow() : mRefCount( 1 ), mStartDep(EOL), mLastDep(EOL) {}

    void addDependency( NvBlastTkTaskDepTable& depTable, NvTaskID taskID )
    {
        int newDep = int(depTable.size());
        NvBlastTkTaskDepTableRow row;
        row.mTaskID = taskID;
        row.mNextDep = EOL;
        depTable.pushBack( row );

        if( mLastDep == EOL )
        {
            mStartDep = mLastDep = newDep;
        }
        else
        {
            depTable[ uint32_t(mLastDep) ].mNextDep = newDep;
            mLastDep = newDep;
        }
    }

    NvTask *            mTask;
    volatile int        mRefCount;
    NvTaskType::Enum    mType;
    int                 mStartDep;
    int                 mLastDep;
};
typedef Array<NvBlastTkTaskTableRow>::type NvTaskTable;


/* Implementation of NvTaskManager abstract API */
class NvBlastTkTaskManager : public NvTaskManager
{
    NV_NOCOPY(NvBlastTkTaskManager)

public:
    NvBlastTkTaskManager(NvErrorCallback& , NvCpuDispatcher*, NvGpuDispatcher*);
    ~NvBlastTkTaskManager();

    void     setCpuDispatcher( NvCpuDispatcher& ref ) { mCpuDispatcher = &ref; }
    NvCpuDispatcher* getCpuDispatcher() const { return mCpuDispatcher; }
    void     setGpuDispatcher( NvGpuDispatcher& ref ) { mGpuDispatcher = &ref; }
    NvGpuDispatcher* getGpuDispatcher() const { return mGpuDispatcher; }

    void     resetDependencies();
    void     startSimulation();
    void     stopSimulation();
    void     taskCompleted( NvTask& task );

    NvTaskID getNamedTask( const char *name );
    NvTaskID submitNamedTask( NvTask *task, const char *name, NvTaskType::Enum type = NvTaskType::TT_CPU );
    NvTaskID submitUnnamedTask( NvTask& task, NvTaskType::Enum type = NvTaskType::TT_CPU );
    NvTask*  getTaskFromID( NvTaskID );

    bool     dispatchTask( NvTaskID taskID, bool gpuGroupStart );
    bool     resolveRow( NvTaskID taskID, bool gpuGroupStart );

    void     release();

    void     finishBefore( NvTask& task, NvTaskID taskID );
    void     startAfter( NvTask& task, NvTaskID taskID );

    void     addReference( NvTaskID taskID );
    void     decrReference( NvTaskID taskID );
    int32_t  getReference( NvTaskID taskID ) const;

    void     decrReference( NvLightCpuTask& lighttask );
    void     addReference( NvLightCpuTask& lighttask );

    void     emitStartEvent(NvBaseTask& basetask, uint32_t threadId);
    void     emitStopEvent(NvBaseTask& basetask, uint32_t threadId);

    NvErrorCallback&            mErrorCallback;
    NvCpuDispatcher*            mCpuDispatcher;
    NvGpuDispatcher*            mGpuDispatcher;
    NvBlastTkTaskNameToIDMap    mName2IDmap;
    volatile int                mPendingTasks;          // number of submitted tasks not yet resolved
    std::mutex                  mMutex;                 // guards the tables/maps below (see LOCK())
    NvBlastTkTaskDepTable       mDepTable;
    NvTaskTable                 mTaskTable;
    Array<NvTaskID>::type       mStartDispatch;         // scratch list of ready tasks, used by startSimulation()
};

NvBlastTkTaskManager::NvBlastTkTaskManager(NvErrorCallback& errorCallback, NvCpuDispatcher* cpuDispatcher, NvGpuDispatcher* gpuDispatcher)
    : mErrorCallback (errorCallback)
    , mCpuDispatcher( cpuDispatcher )
    , mGpuDispatcher( gpuDispatcher )
    , mPendingTasks( 0 )
    , mDepTable(NV_DEBUG_EXP("NvBlastTkTaskDepTable"))
    , mTaskTable(NV_DEBUG_EXP("NvTaskTable"))
    , mStartDispatch(NV_DEBUG_EXP("StartDispatch"))
{
}

NvBlastTkTaskManager::~NvBlastTkTaskManager()
{
}

void NvBlastTkTaskManager::release()
{
    NVBLAST_DELETE(this, NvBlastTkTaskManager);
}

void NvBlastTkTaskManager::decrReference(NvLightCpuTask& lighttask)
{
    /* This does not need a lock! */
    if (!ACCESS(lighttask).atomicDecrementRefCount())
    {
        NVBLAST_ASSERT(mCpuDispatcher);
        if (mCpuDispatcher)
        {
            mCpuDispatcher->submitTask(lighttask);
        }
        else
        {
            lighttask.release();
        }
    }
}

void NvBlastTkTaskManager::addReference(NvLightCpuTask& lighttask)
{
    /* This does not need a lock! */
    ACCESS(lighttask).atomicIncrementRefCount();
}

void NvBlastTkTaskManager::emitStartEvent(NvBaseTask& basetask, uint32_t threadId)
{
    NvBaseTask* tmp = &basetask;
    NV_UNUSED(tmp);
    NV_UNUSED(threadId);

    /* This does not need a lock! */
#if NV_SUPPORT_NVTASK_PROFILING && NV_PROFILE
    //NV_COMPILE_TIME_ASSERT(sizeof(NvProfileEventId::mEventId) == sizeof(NvBaseTask::mEventID));
    if (NvBlastGlobalGetProfilerCallback())
        NvBlastGlobalGetProfilerCallback()->zoneStart(basetask.getName(), true, 0);
#endif
}

void NvBlastTkTaskManager::emitStopEvent(NvBaseTask& basetask, uint32_t threadId)
{
    NvBaseTask* tmp = &basetask;
    NV_UNUSED(tmp);
    NV_UNUSED(threadId);

    /* This does not need a lock! */
    // NOTE(review): unlike emitStartEvent, this zoneEnd call is outside the profiling #if guard — confirm intended
    if (NvBlastGlobalGetProfilerCallback())
        NvBlastGlobalGetProfilerCallback()->zoneEnd(nullptr, basetask.getName(), true, 0);
#if NV_SUPPORT_NVTASK_PROFILING && NV_PROFILE
    //NV_COMPILE_TIME_ASSERT(sizeof(NvProfileEventId::mEventId) == sizeof(NvBaseTask::mEventID));
#endif
}

/*
 * Called by the owner (Scene) at the start of every frame, before
 * asking for tasks to be submitted.
 */
void NvBlastTkTaskManager::resetDependencies()
{
    NVBLAST_ASSERT( !mPendingTasks ); // only valid if you don't resubmit named tasks, this is true for the SDK
    NVBLAST_ASSERT( mCpuDispatcher );
    mTaskTable.clear();
    mDepTable.clear();
    mName2IDmap.clear();
    mPendingTasks = 0;
}

/*
 * Called by the owner (Scene) to start simulating the task graph.
 * Dispatch all tasks with refCount == 1
 */
void NvBlastTkTaskManager::startSimulation()
{
    NVBLAST_ASSERT( mCpuDispatcher );

    if( mGpuDispatcher )
    {
        mGpuDispatcher->startSimulation();
    }

    /* Handle empty task graph */
    if( mPendingTasks == 0 )
    {
        return;
    }

    bool gpuDispatch = false;
    for( NvTaskID i = 0 ; i < mTaskTable.size() ; i++ )
    {
        if( mTaskTable[ i ].mType == NvTaskType::TT_COMPLETED )
        {
            continue;
        }
        // Initial refCount is 1; a task with no outstanding dependencies drops to 0 here and is ready
        if( !Nv::Blast::atomicDecrement( &mTaskTable[ i ].mRefCount ) )
        {
            mStartDispatch.pushBack(i);
        }
    }
    for( uint32_t i=0; i<mStartDispatch.size(); ++i)
    {
        gpuDispatch |= dispatchTask( mStartDispatch[i], gpuDispatch );
    }
    //mStartDispatch.resize(0);
    mStartDispatch.forceSize_Unsafe(0);

    if( mGpuDispatcher && gpuDispatch )
    {
        mGpuDispatcher->finishGroup();
    }
}

void NvBlastTkTaskManager::stopSimulation()
{
    if( mGpuDispatcher )
    {
        mGpuDispatcher->stopSimulation();
    }
}

NvTaskID NvBlastTkTaskManager::getNamedTask( const char *name )
{
    const NvBlastTkTaskNameToIDMap::Entry *ret;
    {
        LOCK();
        ret = mName2IDmap.find( name );
    }
    if( ret )
    {
        return ret->second;
    }
    else
    {
        // create named entry in task table, without a task
        return submitNamedTask( NULL, name, NvTaskType::TT_NOT_PRESENT );
    }
}

NvTask* NvBlastTkTaskManager::getTaskFromID( NvTaskID id )
{
    LOCK(); // todo: reader lock necessary?
    return mTaskTable[ id ].mTask;
}

/* If called at runtime, must be thread-safe */
NvTaskID NvBlastTkTaskManager::submitNamedTask( NvTask *task, const char *name, NvTaskType::Enum type )
{
    if( task )
    {
        ACCESS(task)->setTm(this);
        task->submitted();
    }

    LOCK();

    const NvBlastTkTaskNameToIDMap::Entry *ret = mName2IDmap.find( name );
    if( ret )
    {
        NvTaskID prereg = ret->second;
        if( task )
        {
            /* name was registered for us by a dependent task */
            NVBLAST_ASSERT( !mTaskTable[ prereg ].mTask );
            NVBLAST_ASSERT( mTaskTable[ prereg ].mType == NvTaskType::TT_NOT_PRESENT );
            mTaskTable[ prereg ].mTask = task;
            mTaskTable[ prereg ].mType = type;
            ACCESS(task)->setTaskID(prereg);
        }
        return prereg;
    }
    else
    {
        Nv::Blast::atomicIncrement(&mPendingTasks);
        NvTaskID id = static_cast<NvTaskID>(mTaskTable.size());
        mName2IDmap[ name ] = id;
        if( task )
        {
            ACCESS(task)->setTaskID(id);
        }
        NvBlastTkTaskTableRow r;
        r.mTask = task;
        r.mType = type;
        mTaskTable.pushBack(r);
        return id;
    }
}

/*
 * Add an unnamed task to the task table
 */
NvTaskID NvBlastTkTaskManager::submitUnnamedTask( NvTask& task, NvTaskType::Enum type )
{
    Nv::Blast::atomicIncrement(&mPendingTasks);

    ACCESS(task).setTm(this);
    task.submitted();

    LOCK();
    ACCESS(task).setTaskID(static_cast<NvTaskID>(mTaskTable.size()));
    NvBlastTkTaskTableRow r;
    r.mTask = &task;
    r.mType = type;
    mTaskTable.pushBack(r);
    return task.getTaskID();
}

/* Called by worker threads (or cooperating application threads) when a
 * NvTask has completed.  Propogate depdenencies, decrementing all
 * referenced tasks' refCounts.  If any of those reach zero, activate
 * those tasks.
 */
void NvBlastTkTaskManager::taskCompleted( NvTask& task )
{
    LOCK();
    if( resolveRow( task.getTaskID(), false ) )
    {
        mGpuDispatcher->finishGroup();
    }
}

/* ================== Private Functions ======================= */

/*
 * Add a dependency to force 'task' to complete before the
 * referenced 'taskID' is allowed to be dispatched.
 */
void NvBlastTkTaskManager::finishBefore( NvTask& task, NvTaskID taskID )
{
    LOCK();
    NVBLAST_ASSERT( mTaskTable[ taskID ].mType != NvTaskType::TT_COMPLETED );

    mTaskTable[ task.getTaskID() ].addDependency( mDepTable, taskID );
    Nv::Blast::atomicIncrement( &mTaskTable[ taskID ].mRefCount );
}

/*
 * Add a dependency to force 'task' to wait for the referenced 'taskID'
 * to complete before it is allowed to be dispatched.
 */
void NvBlastTkTaskManager::startAfter( NvTask& task, NvTaskID taskID )
{
    LOCK();
    NVBLAST_ASSERT( mTaskTable[ taskID ].mType != NvTaskType::TT_COMPLETED );

    mTaskTable[ taskID ].addDependency( mDepTable, task.getTaskID() );
    Nv::Blast::atomicIncrement( &mTaskTable[ task.getTaskID() ].mRefCount );
}

void NvBlastTkTaskManager::addReference( NvTaskID taskID )
{
    LOCK();
    Nv::Blast::atomicIncrement( &mTaskTable[ taskID ].mRefCount );
}

/*
 * Remove one reference count from a task.  Must be done here to make it thread safe.
 */
void NvBlastTkTaskManager::decrReference( NvTaskID taskID )
{
    LOCK();

    if( !Nv::Blast::atomicDecrement( &mTaskTable[ taskID ].mRefCount ) )
    {
        if( dispatchTask( taskID, false ) )
        {
            mGpuDispatcher->finishGroup();
        }
    }
}

int32_t NvBlastTkTaskManager::getReference(NvTaskID taskID) const
{
    return mTaskTable[ taskID ].mRefCount;
}

/*
 * A task has completed, decrement all dependencies and submit tasks
 * that are ready to run.  Signal simulation end if ther are no more
 * pending tasks.
 */
bool NvBlastTkTaskManager::resolveRow( NvTaskID taskID, bool gpuGroupStart )
{
    int depRow = mTaskTable[ taskID ].mStartDep;

    // Walk this task's index-linked dependency list
    while( depRow != EOL )
    {
        NvBlastTkTaskDepTableRow& row = mDepTable[ uint32_t(depRow) ];
        NvBlastTkTaskTableRow& dtt = mTaskTable[ row.mTaskID ];

        if( !Nv::Blast::atomicDecrement( &dtt.mRefCount ) )
        {
            gpuGroupStart |= dispatchTask( row.mTaskID, gpuGroupStart );
        }

        depRow = row.mNextDep;
    }

    Nv::Blast::atomicDecrement( &mPendingTasks );
    return gpuGroupStart;
}

/*
 * Submit a ready task to its appropriate dispatcher.
 */
bool NvBlastTkTaskManager::dispatchTask( NvTaskID taskID, bool gpuGroupStart )
{
    LOCK(); // todo: reader lock necessary?
    NvBlastTkTaskTableRow& tt = mTaskTable[ taskID ];

    // prevent re-submission
    if( tt.mType == NvTaskType::TT_COMPLETED )
    {
        mErrorCallback.reportError(NvErrorCode::eDEBUG_WARNING, "NvTask dispatched twice", __FILE__, __LINE__);
        return false;
    }

    switch ( tt.mType )
    {
    case NvTaskType::TT_CPU:
        mCpuDispatcher->submitTask( *tt.mTask );
        break;

    case NvTaskType::TT_GPU:
#if NV_WINDOWS_FAMILY
        if( mGpuDispatcher )
        {
            if( !gpuGroupStart )
            {
                mGpuDispatcher->startGroup();
            }
            mGpuDispatcher->submitTask( *tt.mTask );
            gpuGroupStart = true;
        }
        else
#endif
        {
            mErrorCallback.reportError(NvErrorCode::eDEBUG_WARNING, "No GPU dispatcher", __FILE__, __LINE__);
        }
        break;

    case NvTaskType::TT_NOT_PRESENT:
        /* No task registered with this taskID, resolve its dependencies */
        NVBLAST_ASSERT(!tt.mTask);
        gpuGroupStart |= resolveRow( taskID, gpuGroupStart );
        break;

    case NvTaskType::TT_COMPLETED:
    default:
        mErrorCallback.reportError(NvErrorCode::eDEBUG_WARNING, "Unknown task type", __FILE__, __LINE__);
        gpuGroupStart |= resolveRow( taskID, gpuGroupStart );
        break;
    }

    tt.mType = NvTaskType::TT_COMPLETED;
    return gpuGroupStart;
}

} // namespace Blast
} // namespace Nv

// Implement NvTaskManager factory
namespace nvidia
{
namespace task
{

NvTaskManager* NvTaskManager::createTaskManager(NvErrorCallback& errorCallback, NvCpuDispatcher* cpuDispatcher, NvGpuDispatcher* gpuDispatcher)
{
    return NVBLAST_NEW(Nv::Blast::NvBlastTkTaskManager)(errorCallback, cpuDispatcher, gpuDispatcher);
}

}
}
16,566
C++
27.51463
143
0.664433
NVIDIA-Omniverse/PhysX/blast/source/sdk/toolkit/NvBlastTkGroupImpl.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. 
#ifndef NVBLASTTKGROUPIMPL_H #define NVBLASTTKGROUPIMPL_H #include "NvBlastTkTaskImpl.h" #include "NvBlastTkGroup.h" #include "NvBlastTkTypeImpl.h" namespace Nv { namespace Blast { class TkActorImpl; class TkFamilyImpl; NVBLASTTK_IMPL_DECLARE(Group) { ~TkGroupImpl(); public: TkGroupImpl(); NVBLASTTK_IMPL_DEFINE_IDENTIFIABLE('G', 'R', 'P', '\0'); static TkGroupImpl* create(const TkGroupDesc& desc); // Begin TkGroup virtual bool addActor(TkActor& actor) override; virtual uint32_t getActorCount() const override; virtual uint32_t getActors(TkActor** buffer, uint32_t bufferSize, uint32_t indexStart = 0) const override; virtual uint32_t startProcess() override; virtual bool endProcess() override; virtual void getStats(TkGroupStats& stats) const override; virtual void setWorkerCount(uint32_t workerCount) override; virtual uint32_t getWorkerCount() const override; virtual TkGroupWorker* acquireWorker() override; virtual void returnWorker(TkGroupWorker*) override; // End TkGroup // TkGroupImpl API /** Remove the actor from this group if the actor actually belongs to it and the group is not processing. \param[in] actor The TkActor to remove. \return true if removing succeeded, false otherwise */ bool removeActor(TkActor& actor); /** Add the actor to this group's job queue. It is the caller's responsibility to add an actor only once. This condition is checked in debug builds. */ void enqueue(TkActorImpl* tkActor); /** Atomically check if this group is processing actors. @see setProcessing() \return true between startProcess() and endProcess() calls, false otherwise */ bool isProcessing() const; private: /** Atomically set the processing state. This function checks for the current state before changing it. @see isProcessing() \param[in] value the value of the new state \return true if the new state could be set, false otherwise */ bool setProcessing(bool value); /** Get the group-family shared memory for the specified family. 
To be used when the memory is expected to already exist. */ SharedMemory* getSharedMemory(TkFamilyImpl* family); void releaseSharedMemory(TkFamilyImpl* fam, SharedMemory* mem); // functions to add/remove actors _without_ group-family memory management void addActorInternal(TkActorImpl& tkActor); void addActorsInternal(TkActorImpl** actors, uint32_t numActors); void removeActorInternal(TkActorImpl& tkActor); uint32_t m_actorCount; //!< number of actors in this group HashMap<TkFamilyImpl*, SharedMemory*>::type m_sharedMemory; //!< memory sharable by actors in the same family in this group // it is assumed no more than the asset's number of bond and chunks fracture commands are produced SharedBlock<NvBlastChunkFractureData> m_chunkTempDataBlock; //!< chunk data for damage/fracture SharedBlock<NvBlastBondFractureData> m_bondTempDataBlock; //!< bond data for damage/fracture SharedBlock<NvBlastChunkFractureData> m_chunkEventDataBlock; //!< initial memory block for event data SharedBlock<NvBlastBondFractureData> m_bondEventDataBlock; //!< initial memory block for event data SharedBlock<char> m_splitScratchBlock; //!< split scratch memory std::atomic<bool> m_isProcessing; //!< true while workers are processing Array<TkWorker>::type m_workers; //!< this group's workers Array<TkWorkerJob>::type m_jobs; //!< this group's process jobs //#if NV_PROFILE TkGroupStats m_stats; //!< accumulated group's worker stats //#endif std::mutex m_workerMtx; friend class TkWorker; }; NV_INLINE bool TkGroupImpl::isProcessing() const { return m_isProcessing.load(); } NV_INLINE void TkGroupImpl::getStats(TkGroupStats& stats) const { #if NV_PROFILE memcpy(&stats, &m_stats, sizeof(TkGroupStats)); #else NV_UNUSED(stats); #endif } NV_INLINE uint32_t TkGroupImpl::getActorCount() const { return m_actorCount; } NV_INLINE SharedMemory* TkGroupImpl::getSharedMemory(TkFamilyImpl* family) { SharedMemory* mem = m_sharedMemory[family]; NVBLAST_ASSERT(mem != nullptr); return mem; } NV_FORCE_INLINE void 
operator +=(NvBlastTimers& lhs, const NvBlastTimers& rhs) { lhs.material += rhs.material; lhs.fracture += rhs.fracture; lhs.island += rhs.fracture; lhs.partition += rhs.partition; lhs.visibility += rhs.visibility; } } // namespace Blast } // namespace Nv #endif // ifndef NVBLASTTKGROUPIMPL_H
6,776
C
33.93299
139
0.664994
NVIDIA-Omniverse/PhysX/blast/source/sdk/toolkit/NvBlastTkFrameworkImpl.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. 
#include "NvBlastAssert.h" #include "NvBlastTkFrameworkImpl.h" #include "NvBlastTkAssetImpl.h" #include "NvBlastTkFamilyImpl.h" #include "NvBlastTkGroupImpl.h" #include "NvBlastTkActorImpl.h" #include "NvBlastTkJointImpl.h" #include "NvBlastTkTypeImpl.h" #include "NvBlastGlobals.h" #include <algorithm> using namespace nvidia; using namespace nvidia::shdfnd; NV_INLINE bool operator < (const NvBlastID& id1, const NvBlastID& id2) { return memcmp(&id1, &id2, sizeof(NvBlastID)) < 0; } namespace Nv { namespace Blast { //////// Local definitions //////// // Map type ID to static type data #define NVBLASTTK_REGISTER_TYPE(_name) \ if (!Tk##_name##Impl::s_type.indexIsValid()) \ { \ Tk##_name##Impl::s_type.setIndex(TkTypeIndex::_name); \ } \ m_types[TkTypeIndex::_name] = &Tk##_name##Impl::s_type; \ m_typeIDToIndex[Tk##_name##Impl::s_type.getID()] = TkTypeIndex::_name #define NVBLASTTK_RELEASE_TYPE(_name) \ { \ TkTypeImpl& type = Tk##_name##Impl::s_type; \ auto& toRelease = m_objects[type.getIndex()]; \ for (TkObject* obj : toRelease) \ { \ obj->release(); \ } \ } //////// TkFrameworkImpl static variables //////// TkFrameworkImpl* TkFrameworkImpl::s_framework = nullptr; //////// TkFrameworkImpl static function //////// TkFrameworkImpl* TkFrameworkImpl::get() { return s_framework; } bool TkFrameworkImpl::set(TkFrameworkImpl* framework) { if (s_framework != nullptr) { if (framework != nullptr) { NVBLAST_LOG_ERROR("TkFrameworkImpl::set: framework already set. 
Pass NULL to this function to destroy framework."); return false; } NVBLAST_DELETE(s_framework, TkFrameworkImpl); } s_framework = framework; return true; } //////// TkFrameworkImpl methods //////// TkFrameworkImpl::TkFrameworkImpl() : TkFramework() { // Register types m_types.resize(TkTypeIndex::TypeCount); m_objects.resize(TkTypeIndex::TypeCount); NVBLASTTK_REGISTER_TYPE(Asset); NVBLASTTK_REGISTER_TYPE(Family); NVBLASTTK_REGISTER_TYPE(Group); } TkFrameworkImpl::~TkFrameworkImpl() { } void TkFrameworkImpl::release() { // Special release of joints, which are not TkIdentifiable: Array<TkJointImpl*>::type joints; // Since the EraseIterator is not exposed joints.reserve(m_joints.size()); for (auto j = m_joints.getIterator(); !j.done(); ++j) { joints.pushBack(*j); } for (uint32_t i = 0; i < joints.size(); ++i) { joints[i]->release(); } NVBLAST_ASSERT(m_joints.size() == 0); joints.reset(); // Since we will be deleting the allocator NVBLASTTK_RELEASE_TYPE(Group); NVBLASTTK_RELEASE_TYPE(Asset); set(nullptr); } const TkType* TkFrameworkImpl::getType(TkTypeIndex::Enum typeIndex) const { if (typeIndex < 0 || typeIndex >= TkTypeIndex::TypeCount) { NVBLAST_LOG_WARNING("TkFrameworkImpl::getType: invalid typeIndex."); return nullptr; } return m_types[typeIndex]; } TkIdentifiable* TkFrameworkImpl::findObjectByID(const NvBlastID& id) const { TkIdentifiable* object = findObjectByIDInternal(id); if (object == nullptr) { NVBLAST_LOG_WARNING("TkFrameworkImpl::findObjectByID: object not found."); } return object; } uint32_t TkFrameworkImpl::getObjectCount(const TkType& type) const { const uint32_t index = static_cast<const TkTypeImpl&>(type).getIndex(); if (index >= m_objects.size()) { NVBLAST_LOG_ERROR("TkFrameworkImpl::getObjectCount: BlastTk object type unrecognized."); return 0; } return m_objects[index].size(); } uint32_t TkFrameworkImpl::getObjects(TkIdentifiable** buffer, uint32_t bufferSize, const TkType& type, uint32_t indexStart /* = 0 */) const { const uint32_t index = 
static_cast<const TkTypeImpl&>(type).getIndex(); if (index >= m_objects.size()) { NVBLAST_LOG_ERROR("TkFrameworkImpl::getObjectCount: BlastTk object type unrecognized."); return 0; } const auto& objectArray = m_objects[index]; uint32_t objectCount = objectArray.size(); if (objectCount <= indexStart) { NVBLAST_LOG_WARNING("TkFrameworkImpl::getObjects: indexStart beyond end of object list."); return 0; } objectCount -= indexStart; if (objectCount > bufferSize) { objectCount = bufferSize; } memcpy(buffer, objectArray.begin() + indexStart, objectCount * sizeof(TkObject*)); return objectCount; } bool TkFrameworkImpl::reorderAssetDescChunks(NvBlastChunkDesc* chunkDescs, uint32_t chunkCount, NvBlastBondDesc* bondDescs, uint32_t bondCount, uint32_t* chunkReorderMap /*= nullptr*/, bool keepBondNormalChunkOrder /*= false*/) const { uint32_t* map = chunkReorderMap != nullptr ? chunkReorderMap : static_cast<uint32_t*>(NVBLAST_ALLOC_NAMED(chunkCount * sizeof(uint32_t), "reorderAssetDescChunks:chunkReorderMap")); void* scratch = NVBLAST_ALLOC_NAMED(chunkCount * sizeof(NvBlastChunkDesc), "reorderAssetDescChunks:scratch"); const bool result = NvBlastReorderAssetDescChunks(chunkDescs, chunkCount, bondDescs, bondCount, map, keepBondNormalChunkOrder, scratch, logLL); NVBLAST_FREE(scratch); if (chunkReorderMap == nullptr) { NVBLAST_FREE(map); } return result; } bool TkFrameworkImpl::ensureAssetExactSupportCoverage(NvBlastChunkDesc* chunkDescs, uint32_t chunkCount) const { void* scratch = NVBLAST_ALLOC_NAMED(chunkCount, "ensureAssetExactSupportCoverage:scratch"); const bool result = NvBlastEnsureAssetExactSupportCoverage(chunkDescs, chunkCount, scratch, logLL); NVBLAST_FREE(scratch); return result; } TkAsset* TkFrameworkImpl::createAsset(const TkAssetDesc& desc) { TkAssetImpl* asset = TkAssetImpl::create(desc); if (asset == nullptr) { NVBLAST_LOG_ERROR("TkFrameworkImpl::createAsset: failed to create asset."); } return asset; } TkAsset* TkFrameworkImpl::createAsset(const NvBlastAsset* 
assetLL, Nv::Blast::TkAssetJointDesc* jointDescs, uint32_t jointDescCount, bool ownsAsset) { TkAssetImpl* asset = TkAssetImpl::create(assetLL, jointDescs, jointDescCount, ownsAsset); if (asset == nullptr) { NVBLAST_LOG_ERROR("TkFrameworkImpl::createAsset: failed to create asset."); } return asset; } TkGroup* TkFrameworkImpl::createGroup(const TkGroupDesc& desc) { TkGroupImpl* group = TkGroupImpl::create(desc); if (group == nullptr) { NVBLAST_LOG_ERROR("TkFrameworkImpl::createGroup: failed to create group."); } return group; } TkActor* TkFrameworkImpl::createActor(const TkActorDesc& desc) { TkActor* actor = TkActorImpl::create(desc); if (actor == nullptr) { NVBLAST_LOG_ERROR("TkFrameworkImpl::createActor: failed to create actor."); } return actor; } TkJoint* TkFrameworkImpl::createJoint(const TkJointDesc& desc) { TkJointImpl** handle0 = nullptr; TkJointImpl** handle1 = nullptr; TkFamilyImpl* family0 = static_cast<TkFamilyImpl*>(desc.families[0]); TkFamilyImpl* family1 = static_cast<TkFamilyImpl*>(desc.families[1]); NVBLAST_CHECK_ERROR(family0 != nullptr || family1 != nullptr, "TkFrameworkImpl::createJoint: at least one family in the TkJointDesc must be valid.", return nullptr); NVBLAST_CHECK_ERROR(family0 == nullptr || desc.chunkIndices[0] < family0->getAssetImpl()->getChunkCount(), "TkFrameworkImpl::createJoint: desc.chunkIndices[0] is invalid.", return nullptr); NVBLAST_CHECK_ERROR(family1 == nullptr || desc.chunkIndices[1] < family1->getAssetImpl()->getChunkCount(), "TkFrameworkImpl::createJoint: desc.chunkIndices[1] is invalid.", return nullptr); const bool actorsAreTheSame = family0 == family1 && family0->getActorByChunk(desc.chunkIndices[0]) == family1->getActorByChunk(desc.chunkIndices[1]); NVBLAST_CHECK_ERROR(!actorsAreTheSame, "TkFrameworkImpl::createJoint: the chunks listed in the TkJointDesc must be in different actors.", return nullptr); if (family0 != nullptr) { const bool isSupportChunk = 
!isInvalidIndex(NvBlastAssetGetChunkToGraphNodeMap(family0->getAssetImpl()->getAssetLLInternal(), logLL)[desc.chunkIndices[0]]); NVBLAST_CHECK_ERROR(isSupportChunk, "TkFrameworkImpl::createJoint: desc.chunkIndices[0] is not a support chunk in the asset for desc.families[0]. Joint not created.", return nullptr); handle0 = family0->createExternalJointHandle(getFamilyID(family1), desc.chunkIndices[0], desc.chunkIndices[1]); NVBLAST_CHECK_ERROR(handle0 != nullptr, "TkFrameworkImpl::createJoint: could not create joint handle in family[0]. Joint not created.", return nullptr); } if (family1 != nullptr) { const bool isSupportChunk = !isInvalidIndex(NvBlastAssetGetChunkToGraphNodeMap(family1->getAssetImpl()->getAssetLLInternal(), logLL)[desc.chunkIndices[1]]); NVBLAST_CHECK_ERROR(isSupportChunk, "TkFrameworkImpl::createJoint: desc.chunkIndices[1] is not a support chunk in the asset for desc.families[1]. Joint not created.", return nullptr); if (family1 != family0) { handle1 = family1->createExternalJointHandle(getFamilyID(family0), desc.chunkIndices[1], desc.chunkIndices[0]); NVBLAST_CHECK_ERROR(handle1 != nullptr, "TkFrameworkImpl::createJoint: could not create joint handle in family[1]. 
Joint not created.", return nullptr); } } TkJointImpl* joint = NVBLAST_NEW(TkJointImpl)(desc, nullptr); NVBLAST_CHECK_ERROR(joint != nullptr, "TkFrameworkImpl::createJoint: failed to create joint.", return nullptr); const TkJointData& jointData = joint->getDataInternal(); if (handle0 != nullptr) { *handle0 = joint; static_cast<TkActorImpl*>(jointData.actors[0])->addJoint(joint->m_links[0]); } if (handle1 != nullptr) { *handle1 = joint; if (jointData.actors[0] != jointData.actors[1]) { static_cast<TkActorImpl*>(jointData.actors[1])->addJoint(joint->m_links[1]); } } return joint; } void TkFrameworkImpl::onCreate(TkIdentifiable& object) { const TkTypeImpl& type = static_cast<const TkTypeImpl&>(object.getType()); const uint32_t index = type.getIndex(); if (index >= m_objects.size()) { if (!isInvalidIndex(index)) { NVBLAST_LOG_ERROR("TkFrameworkImpl::addObject: object type unrecognized."); } return; } auto& objectArray = m_objects[index]; NVBLAST_ASSERT(objectArray.find(&object) == objectArray.end()); objectArray.pushBack(&object); } void TkFrameworkImpl::onDestroy(TkIdentifiable& object) { // remove from id map if present const auto id = object.getID(); if (!TkGUIDIsZero(&id)) { m_IDToObject.erase(id); } // remove from object list const TkTypeImpl& type = static_cast<const TkTypeImpl&>(object.getType()); const uint32_t index = type.getIndex(); if (index >= m_objects.size()) { if (!isInvalidIndex(index)) { NVBLAST_LOG_ERROR("TkFrameworkImpl::removeObject: object type unrecognized."); } return; } auto& objectArray = m_objects[index]; objectArray.findAndReplaceWithLast(&object); } void TkFrameworkImpl::onCreate(TkJointImpl& joint) { NVBLAST_CHECK_ERROR(m_joints.insert(&joint), "TkFrameworkImpl::onCreate: Joint already tracked.", return); } void TkFrameworkImpl::onDestroy(TkJointImpl& joint) { NVBLAST_CHECK_ERROR(m_joints.erase(&joint), "TkFrameworkImpl::onDestroy: Joint not tracked.", return); } void TkFrameworkImpl::onIDChange(TkIdentifiable& object, const NvBlastID& 
IDPrev, const NvBlastID& IDCurr) { if (!TkGUIDIsZero(&IDPrev)) { if (!m_IDToObject.erase(IDPrev)) { NVBLAST_LOG_ERROR("TkFrameworkImpl::reportIDChanged: object with previous ID doesn't exist."); } } if (!TkGUIDIsZero(&IDCurr)) { auto& value = m_IDToObject[IDCurr]; if (value != nullptr) { NVBLAST_LOG_ERROR("TkFrameworkImpl::reportIDChanged: object with new ID already exists."); return; } value = &object; } } } // namespace Blast } // namespace Nv //////// Global API implementation //////// Nv::Blast::TkFramework* NvBlastTkFrameworkCreate() { if (Nv::Blast::TkFrameworkImpl::get() != nullptr) { NVBLAST_LOG_ERROR("TkFramework::create: framework already created. Use TkFramework::get() to access."); return nullptr; } Nv::Blast::TkFrameworkImpl* framework = NVBLAST_NEW(Nv::Blast::TkFrameworkImpl) (); Nv::Blast::TkFrameworkImpl::set(framework); return Nv::Blast::TkFrameworkImpl::get(); } Nv::Blast::TkFramework* NvBlastTkFrameworkGet() { return Nv::Blast::TkFrameworkImpl::get(); }
15,109
C++
31.634989
233
0.657886
NVIDIA-Omniverse/PhysX/blast/source/sdk/toolkit/NvBlastTkTask.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #ifndef NVBLASTTKTASK_H #define NVBLASTTKTASK_H #include "NvBlastTkGroupTaskManager.h" #include "NvTask.h" #include "NvBlastTkGroup.h" #include <atomic> #include <mutex> #include <condition_variable> namespace Nv { namespace Blast { /** Counting synchronization object for waiting on TkWorkers to finish. */ class TkTaskSync { public: /** Initializes with an expected number of notifications. 
*/ TkTaskSync(uint32_t count) : m_count(count) {} /** Blocks until the expected number of notifications happened. */ void wait() { std::unique_lock<std::mutex> lk(m_mutex); m_cv.wait(lk, [&] { return m_count == 0; }); } /** Decrement the wait() count by one. */ void notify() { //PERF_SCOPE_H("TaskSync::notify"); std::unique_lock<std::mutex> lk(m_mutex); if (m_count > 0) { m_count--; } if (m_count == 0) { lk.unlock(); m_cv.notify_one(); } } /** Peek if notifications are pending. */ bool isDone() { std::unique_lock<std::mutex> lk(m_mutex); return m_count == 0; } /** Sets the expected number of notifications for wait() to unblock. */ void setCount(uint32_t count) { m_count = count; } private: std::mutex m_mutex; std::condition_variable m_cv; uint32_t m_count; }; /** Common job counter for all tasks. */ class TkAtomicCounter { public: TkAtomicCounter() : m_current(0), m_maxCount(0) {} bool isValid(uint32_t val) { return val < m_maxCount; } uint32_t next() { return m_current.fetch_add(1); } void reset(uint32_t maxCount) { m_maxCount = maxCount; m_current = 0; } private: std::atomic<uint32_t> m_current; uint32_t m_maxCount; }; /** A task running one group job after the other until done. Synchronizes atomically with its siblings. 
*/ class TkGroupWorkerTask : public nvidia::task::NvLightCpuTask { public: TkGroupWorkerTask() : NvLightCpuTask(), m_group(nullptr), m_counter(nullptr), m_sync(nullptr) { } void setup(TkGroup* group, TkAtomicCounter* counter, TkTaskSync* sync) { m_group = group; m_counter = counter; m_sync = sync; } virtual void run() override { Nv::Blast::TkGroupWorker* worker = m_group->acquireWorker(); uint32_t jobID = m_counter->next(); while (m_counter->isValid(jobID)) { worker->process(jobID); jobID = m_counter->next(); } m_group->returnWorker(worker); } virtual void release() override { NvLightCpuTask::release(); // release the sync last m_sync->notify(); } virtual const char* getName() const override { return "BlastGroupWorkerTask"; } private: TkGroup* m_group; TkAtomicCounter* m_counter; TkTaskSync* m_sync; }; /** Implements TkGroupTaskManager */ class TkGroupTaskManagerImpl : public TkGroupTaskManager { public: TkGroupTaskManagerImpl(nvidia::task::NvTaskManager& taskManager, TkGroup* group) : m_taskManager(taskManager), m_sync(0), m_group(group) {} // TkGroupTaskManager API virtual void setGroup(TkGroup*) override; virtual uint32_t process(uint32_t) override; virtual void release() override; virtual bool wait(bool block) override; private: static const uint32_t TASKS_MAX_COUNT = 16; nvidia::task::NvTaskManager& m_taskManager; TkAtomicCounter m_counter; TkGroupWorkerTask m_tasks[TASKS_MAX_COUNT]; TkTaskSync m_sync; TkGroup* m_group; }; } // namespace Blast } // namespace Nv #endif // NVBLASTTKTASK_H
5,444
C
25.052631
99
0.641073
NVIDIA-Omniverse/PhysX/blast/source/sdk/toolkit/NvBlastTkJointImpl.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #include "NvBlastTkFrameworkImpl.h" #include "NvBlastTkJointImpl.h" #include "NvBlastTkActorImpl.h" #include "NvBlastTkAssetImpl.h" #include "NvBlastTkFamilyImpl.h" namespace Nv { namespace Blast { //////// Member functions //////// TkJointImpl::TkJointImpl(const TkJointDesc& desc, TkFamilyImpl* owner) : m_owner(owner) { userData = nullptr; // Do not fire off a creation event. Creation events will only be fired when a family-internal joint is created. 
NVBLAST_ASSERT(desc.families[0] != nullptr || desc.families[1] != nullptr); NVBLAST_ASSERT(desc.families[0] == nullptr || desc.chunkIndices[0] < static_cast<TkFamilyImpl*>(desc.families[0])->getAssetImpl()->getChunkCount()); NVBLAST_ASSERT(desc.attachPositions[0].isFinite()); NVBLAST_ASSERT(desc.families[1] == nullptr || desc.chunkIndices[1] < static_cast<TkFamilyImpl*>(desc.families[1])->getAssetImpl()->getChunkCount()); NVBLAST_ASSERT(desc.attachPositions[1].isFinite()); for (int i = 0; i < 2; ++i) { m_data.actors[i] = desc.families[i] != nullptr ? static_cast<TkFamilyImpl*>(desc.families[i])->getActorByChunk(desc.chunkIndices[i]) : nullptr; m_data.chunkIndices[i] = desc.chunkIndices[i]; m_data.attachPositions[i] = desc.attachPositions[i]; m_links[i].m_joint = this; } if (owner == nullptr) { TkFrameworkImpl::get()->onCreate(*this); } } void TkJointImpl::release() { removeReferencesInActors(); if (m_owner != nullptr) { // Internal joint m_owner->releaseJoint(*this); } else { // External joint removeReferencesInFamilies(); TkFrameworkImpl::get()->onDestroy(*this); NVBLAST_DELETE(this, TkJointImpl); } } void TkJointImpl::setActors(TkActorImpl* actor0, TkActorImpl* actor1, TkEventQueue* alternateQueue) { NVBLAST_ASSERT(m_data.actors[0] != nullptr || m_data.actors[1] != nullptr); const bool unreferenced = (actor0 == nullptr && m_data.actors[0] != nullptr) || (actor1 == nullptr && m_data.actors[1] != nullptr); removeReferencesInActors(); if (!unreferenced) { if (actor0 != nullptr) { actor0->addJoint(m_links[0]); } if (actor1 != nullptr && actor1 != actor0) // If the actors are the same, we only need one joint reference { actor1->addJoint(m_links[1]); } } // We do _not_ return if m_data.m_actors[0] == actor0 && m_data.m_actors[1] == actor1 since // this leads to a bug. This function will only be called when an actor is split. It is // possible that the two TkActors in a joint are the same as before, but in this case one // of the actors will be the split actor. 
Since will be represented by a different // physical actor, this case still needs to be reported in an event. Returning when neither // TkActor has changed will prevent that, and lead to unwanted joint disconnection. const uint32_t familyToUse = m_data.actors[0] != actor0 ? 0 : 1; TkEventQueue* q = alternateQueue == nullptr ? &static_cast<TkActorImpl*>(m_data.actors[familyToUse])->getFamilyImpl().getQueue() : alternateQueue; const bool jointWasInternal = m_data.actors[0] == m_data.actors[1]; if (unreferenced) { removeReferencesInFamilies(); actor0 = actor1 = nullptr; // Make both new actors NULL } if (!jointWasInternal || actor0 != actor1) { // The original actors were different, or they are now, signal a joint update TkJointUpdateEvent* e = q->allocData<TkJointUpdateEvent>(); e->joint = this; e->subtype = unreferenced ? TkJointUpdateEvent::Unreferenced : (jointWasInternal ? TkJointUpdateEvent::External : TkJointUpdateEvent::Changed); m_data.actors[0] = actor0; m_data.actors[1] = actor1; q->addEvent(e); } else if (jointWasInternal) { // The joint was originally created within the same actor and now it remains within the same actor. m_data.actors[0] = m_data.actors[1] = actor0; } } const TkJointData TkJointImpl::getData() const { return getDataInternal(); } void TkJointImpl::removeReferencesInActors() { TkActorImpl* actor0 = static_cast<TkActorImpl*>(m_data.actors[0]); TkActorImpl* actor1 = static_cast<TkActorImpl*>(m_data.actors[1]); if (actor0 != nullptr) { actor0->removeJoint(m_links[0]); } if (actor1 != nullptr && actor1 != actor0) // If the actors are the same, we only had one joint reference { actor1->removeJoint(m_links[1]); } } void TkJointImpl::removeReferencesInFamilies() { if (m_owner != nullptr) { return; // Only concerned with external joints } NVBLAST_ASSERT(m_data.actors[0] != m_data.actors[1] || m_data.actors[0] == nullptr); // This is enforced by the initial assumption in TkFrameworkImpl::createJoint. 
for (int i = 0; i < 2; ++i) { if (m_data.actors[i] != nullptr) { TkFamilyImpl& family = static_cast<TkActorImpl*>(m_data.actors[i])->getFamilyImpl(); TkJointImpl* joint = nullptr; const bool found = family.deleteExternalJointHandle(joint, getFamilyID(m_data.actors[i ^ 1]), m_data.chunkIndices[i], m_data.chunkIndices[i ^ 1]); NVBLAST_ASSERT((!found && m_data.actors[i ^ 1] == nullptr) || joint == this); // Might not be found if the actors in a family are in the process of being deleted NV_UNUSED(found); } } } } // namespace Blast } // namespace Nv
7,185
C++
35.851282
175
0.667223
NVIDIA-Omniverse/PhysX/blast/source/sdk/toolkit/NvBlastTkAssetImpl.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. 
#ifndef NVBLASTTKASSETIMPL_H #define NVBLASTTKASSETIMPL_H #include "NvBlastTkCommon.h" #include "NvBlastTkJoint.h" #include "NvBlastTkAsset.h" #include "NvBlastTkTypeImpl.h" #include "NvBlastArray.h" // Forward declarations struct NvBlastAsset; namespace Nv { namespace Blast { /** Implementation of TkAsset */ NVBLASTTK_IMPL_DECLARE(Asset) { public: TkAssetImpl(); TkAssetImpl(const NvBlastID& id); ~TkAssetImpl(); NVBLASTTK_IMPL_DEFINE_IDENTIFIABLE('A', 'S', 'S', 'T'); // Public methods /** Factory create method. This method creates a low-level asset and stores a reference to it. \param[in] desc Asset descriptor set by the user. \return a pointer to a new TkAssetImpl object if successful, NULL otherwise. */ static TkAssetImpl* create(const TkAssetDesc& desc); /** Static method to create an asset from an existing low-level asset. \param[in] assetLL A valid low-level asset passed in by the user. \param[in] jointDescs Optional joint descriptors to add to the new asset. \param[in] jointDescCount The number of joint descriptors in the jointDescs array. If non-zero, jointDescs cannot be NULL. \param[in] ownsAsset Whether or not to let this TkAssetImpl object release the low-level NvBlastAsset memory upon its own release. \return a pointer to a new TkAssetImpl object if successful, NULL otherwise. */ static TkAssetImpl* create(const NvBlastAsset* assetLL, Nv::Blast::TkAssetJointDesc* jointDescs = nullptr, uint32_t jointDescCount = 0, bool ownsAsset = false); /** \return a pointer to the underlying low-level NvBlastAsset associated with this asset. */ const NvBlastAsset* getAssetLLInternal() const; /** \return the number of internal joint descriptors stored with this asset. */ uint32_t getJointDescCountInternal() const; /** \return the array of internal joint descriptors stored with this asset, with size given by getJointDescCountInternal(). 
*/ const TkAssetJointDesc* getJointDescsInternal() const; // Begin TkAsset virtual const NvBlastAsset* getAssetLL() const override; virtual uint32_t getChunkCount() const override; virtual uint32_t getLeafChunkCount() const override; virtual uint32_t getBondCount() const override; virtual const NvBlastChunk* getChunks() const override; virtual const NvBlastBond* getBonds() const override; virtual const NvBlastSupportGraph getGraph() const override; virtual uint32_t getDataSize() const override; virtual uint32_t getJointDescCount() const override; virtual const TkAssetJointDesc* getJointDescs() const override; // End TkAsset private: /** Utility to add a joint descriptor between the indexed chunks. The two chunks must be support chunks, and there must exist a bond between them. The joint's attachment positions will be the bond centroid. \param[in] chunkIndex0 The first chunk index. \param[in] chunkIndex1 The second chunk index. \return true iff successful. */ bool addJointDesc(uint32_t chunkIndex0, uint32_t chunkIndex1); NvBlastAsset* m_assetLL; //!< The underlying low-level asset. Array<TkAssetJointDesc>::type m_jointDescs; //!< The array of internal joint descriptors. bool m_ownsAsset; //!< Whether or not this asset should release its low-level asset upon its own release. }; //////// TkAssetImpl inline methods //////// NV_INLINE const NvBlastAsset* TkAssetImpl::getAssetLLInternal() const { return m_assetLL; } NV_INLINE uint32_t TkAssetImpl::getJointDescCountInternal() const { return m_jointDescs.size(); } NV_INLINE const TkAssetJointDesc* TkAssetImpl::getJointDescsInternal() const { return m_jointDescs.begin(); } } // namespace Blast } // namespace Nv #endif // ifndef NVBLASTTKASSETIMPL_H
5,764
C
34.368098
180
0.695524
NVIDIA-Omniverse/PhysX/blast/source/sdk/toolkit/NvBlastTkEventQueue.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #ifndef NVBLASTTKEVENTQUEUE_H #define NVBLASTTKEVENTQUEUE_H #include <algorithm> #include <vector> #include <mutex> #include <atomic> #include "NvBlastTkFrameworkImpl.h" #include "NvBlastAssert.h" namespace Nv { namespace Blast { /** A dispatcher queue providing preallocation and thread-safe insertions therein. 
Typical usage: - preallocate space for events and payload: - reserveEvents, reserveData - enable asserts to detect undersized storage (allocations are not thread safe): - protect(true) - get pointers to payload data and events to fill in, thread safe for preallocated memory: - allocData, addEvent - back on main thread, ensure consistency: - protect(false) - continue adding events and payload on main thread if necessary like above (allocations are safe here) eventually dispatch, or reset if dispatched by proxy */ class TkEventQueue { public: TkEventQueue() : m_currentEvent(0), m_poolCapacity(0), m_pool(nullptr), m_allowAllocs(true) {} /** Peek events queue for dispatch. Do not use in protected state. */ operator const Array<TkEvent>::type&() { NVBLAST_ASSERT(m_allowAllocs); NVBLAST_ASSERT(m_currentEvent == m_events.size()); return m_events; } /** Debug help to catch (unwanted) allocations during task work. Note that this will not actually avoid allocations, but assert in debug builds. Set true before using in distributed environment. Set false to return to single-thread mode. */ void protect(bool enable) { // During parallel use, m_events.size() and m_currentEvent are allowed to diverge. // This is fine because resizeUninitialized does not alter the stored data. NVBLAST_ASSERT(m_currentEvent <= m_events.capacity()); m_events.resizeUninitialized(m_currentEvent); m_allowAllocs = !enable; } /** Restores initial state. Data memory is currently not being reused. To be improved. */ void reset() { m_events.clear(); m_currentEvent = 0; for (void* mem : m_memory) { NVBLAST_FREE(mem); } m_memory.clear(); m_currentData = 0; m_allowAllocs = true; m_poolCapacity = 0; m_pool = nullptr; } /** Queue an event with a payload. */ template<class T> void addEvent(T* payload) { uint32_t index = m_currentEvent.fetch_add(1); // Should not allocate in protected state. 
NVBLAST_ASSERT(m_allowAllocs || m_currentEvent <= m_events.capacity()); m_events.resizeUninitialized(m_currentEvent); // During parallel use, m_events.size() and m_currentEvent are allowed to diverge. // Consistency is restored in protect(). NVBLAST_ASSERT(!m_allowAllocs || m_currentEvent == m_events.size()); TkEvent& evt = m_events[index]; evt.type = TkEvent::Type(T::EVENT_TYPE); evt.payload = payload; } /** Request storage for payload. */ template<typename T> T* allocData() { uint32_t index = m_currentData.fetch_add(sizeof(T)); if (m_currentData <= m_poolCapacity) { return reinterpret_cast<T*>(&m_pool[index]); } else { // Could do larger block allocation here. reserveData(sizeof(T)); // Account for the requested size. m_currentData = sizeof(T); return reinterpret_cast<T*>(&m_pool[0]); } } /** Preallocate a memory block of size Bytes for payload data. Note that this will inevitably allocate a new memory block. Subsequent calls to allocData will use this memory piecewise. */ void reserveData(size_t size) { NVBLAST_ASSERT(m_allowAllocs); m_pool = reinterpret_cast<uint8_t*>(allocDataBySize(size)); m_poolCapacity = size; m_currentData = 0; } /** Preallocate space for events. */ void reserveEvents(uint32_t n) { NVBLAST_ASSERT(m_allowAllocs); m_events.reserve(m_events.size() + n); } /** Add a listener to dispatch to. */ void addListener(TkEventListener& l) { m_listeners.pushBack(&l); } /** Remove a listener from dispatch list. */ void removeListener(TkEventListener& l) { m_listeners.findAndReplaceWithLast(&l); } /** Dispatch the stored events to the registered listeners. After dispatch, all data is invalidated. */ void dispatch() { dispatch(*this); reset(); } /** Proxy function to dispatch events to this queue's listeners. 
*/ void dispatch(const Array<TkEvent>::type& events) const { if (events.size()) { for (TkEventListener* l : m_listeners) { BLAST_PROFILE_SCOPE_M("TkEventQueue::dispatch"); l->receive(events.begin(), events.size()); } } } private: /** Allocates and stores a block of size Bytes of payload data. */ void* allocDataBySize(size_t size) { void* memory = nullptr; if (size > 0) { memory = NVBLAST_ALLOC_NAMED(size, "TkEventQueue Data"); m_memory.pushBack(memory); } return memory; } Array<TkEvent>::type m_events; //!< holds events Array<void*>::type m_memory; //!< holds allocated data memory blocks std::atomic<uint32_t> m_currentEvent; //!< reference index for event insertion std::atomic<uint32_t> m_currentData; //!< reference index for data insertion size_t m_poolCapacity; //!< size of the currently active memory block (m_pool) uint8_t* m_pool; //!< the current memory block allocData() uses bool m_allowAllocs; //!< assert guard InlineArray<TkEventListener*,4>::type m_listeners; //!< objects to dispatch to }; } // namespace Blast } // namespace Nv #endif // ifndef NVBLASTTKEVENTQUEUE_H
7,933
C
30.991935
136
0.621329
NVIDIA-Omniverse/PhysX/blast/source/sdk/toolkit/NvBlastTkJointImpl.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #ifndef NVBLASTTKJOINTIMPL_H #define NVBLASTTKJOINTIMPL_H #include "NvBlastTkJoint.h" #include "NvBlastTkCommon.h" #include "NvBlastIndexFns.h" #include "NvBlastAssert.h" #include "NvBlastDLink.h" #include <atomic> namespace Nv { namespace Blast { // Forward declarations class TkActorImpl; class TkJointImpl; class TkFamilyImpl; class TkEventQueue; /** Double-sided link (DLink) which holds a reference back to a joint which contains it. 
*/ struct TkJointLink : public DLink { TkJointImpl* m_joint; //!< The joint containing this link. }; /** Implementation of TkJoint. */ class TkJointImpl : public TkJoint { public: /** Blank constructor only creates valid TkJointLinks (point back to this object) */ TkJointImpl(); /** This constructor sets all internal data. If the joint is defined in an asset, the family instanced from that asset will own this joint, and the 'owner' parameter is that family. Otherwise, in the case where a joint is created from TkFramwork::createJoint, the joint is not owned by a family and 'owner' will be NULL. */ TkJointImpl(const TkJointDesc& desc, TkFamilyImpl* owner); // Begin TkObject virtual void release() override; // End TkObject // Begin TkJoint virtual const TkJointData getData() const override; // End TkJoint // Public API /** Internal method to access a const reference to the joint data. \return a const reference to the joint data. */ const TkJointData& getDataInternal() const; /** Internal method to access a non-const reference to the joint data. \return a non-const reference to the joint data. */ TkJointData& getDataWritable(); /** Set the actors that this joint attaches to. When the actors are different from the joint's current actors, an event will be generated on one of the actors' families event queues to signal the change. Alternatively, if alternateQueue is not NULL then it will be used to hold the event. If a non-NULL attached actor becomes NULL, then this joint will detach its references to both actors (if they exist) and send an event of subtype Unreferenced. This signals the user that the joint may be deleted. \param[in] actor0 The new TkActor to replace the first attached actor. \param[in] actor1 The new TkActor to replace the second attached actor. \param[in] alternateQueue If not NULL, this queue will be used to hold events generated by this function. 
*/ void setActors(TkActorImpl* actor0, TkActorImpl* actor1, TkEventQueue* alternateQueue = nullptr); /** Ensures that any attached actors no longer refer to this joint. */ void removeReferencesInActors(); /** Ensures that any attached actors' families no longer refer to this joint. External joints (created using TkFramework::createJoint) are referenced by the attached actors' families. */ void removeReferencesInFamilies(); private: TkJointData m_data; //!< The data given to the user: attached actors, chunk indices, and actor-local attachment positions. TkJointLink m_links[2]; //!< One link for each actor in m_data.m_actors. If m_data.m_actors[0] == m_data.m_actors[1], then only m_links[0] is used. TkFamilyImpl* m_owner; //!< The owning family if this is an internal joint created during TkFramework::createActor() from a TkAssetDesc with joint flags. friend class TkFrameworkImpl; friend class TkFamilyImpl; friend class TkActorImpl; }; //////// TkJointImpl inline methods //////// NV_INLINE TkJointImpl::TkJointImpl() { m_links[0].m_joint = m_links[1].m_joint = this; } NV_INLINE const TkJointData& TkJointImpl::getDataInternal() const { return m_data; } NV_INLINE TkJointData& TkJointImpl::getDataWritable() { return m_data; } } // namespace Blast } // namespace Nv #endif // ifndef NVBLASTTKJOINTIMPL_H
5,637
C
33.378049
162
0.710484
NVIDIA-Omniverse/PhysX/blast/source/sdk/toolkit/NvBlastTkFamilyImpl.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. 
#ifndef NVBLASTTKFAMILYIMPL_H #define NVBLASTTKFAMILYIMPL_H #include "NvBlastTkCommon.h" #include "NvBlastTkFamily.h" #include "NvBlastTkTypeImpl.h" #include "NvBlastTkActorImpl.h" #include "NvBlastTkEventQueue.h" #include "NvBlastHashSet.h" #include "NvBlastHashMap.h" #include "NvBlast.h" #include "NvBlastAssert.h" #include "NvBlastDLink.h" // Forward declarations struct NvBlastFamily; namespace Nv { namespace Blast { // Forward declarations class TkGroupImpl; class TkAssetImpl; NVBLASTTK_IMPL_DECLARE(Family) { public: TkFamilyImpl(); TkFamilyImpl(const NvBlastID& id); ~TkFamilyImpl(); NVBLASTTK_IMPL_DEFINE_IDENTIFIABLE('A', 'C', 'T', 'F'); // Begin TkFamily virtual const NvBlastFamily* getFamilyLL() const override; virtual uint32_t getActorCount() const override; virtual uint32_t getActors(TkActor** buffer, uint32_t bufferSize, uint32_t indexStart = 0) const override; virtual void addListener(TkEventListener& l) override { m_queue.addListener(l); } virtual void removeListener(TkEventListener& l) override { m_queue.removeListener(l); } virtual void applyFracture(const NvBlastFractureBuffers* commands) override { applyFractureInternal(commands); } virtual const TkAsset* getAsset() const override; virtual void reinitialize(const NvBlastFamily* newFamily, TkGroup* group) override; // End TkFamily // Public methods static TkFamilyImpl* create(const TkAssetImpl* asset); const TkAssetImpl* getAssetImpl() const; NvBlastFamily* getFamilyLLInternal() const; uint32_t getActorCountInternal() const; TkActorImpl* addActor(NvBlastActor* actorLL); void applyFractureInternal(const NvBlastFractureBuffers* commands); void removeActor(TkActorImpl* actorLL); TkEventQueue& getQueue() { return m_queue; } TkActorImpl* getActorByActorLL(const NvBlastActor* actorLL); void updateJoints(TkActorImpl* actor, TkEventQueue* alternateQueue = nullptr); Array<TkActorImpl>::type& getActorsInternal(); uint32_t getInternalJointCount() const; TkJointImpl* getInternalJoints() const; TkJointImpl** 
createExternalJointHandle(const NvBlastID& otherFamilyID, uint32_t chunkIndex0, uint32_t chunkIndex1); bool deleteExternalJointHandle(TkJointImpl*& joint, const NvBlastID& otherFamilyID, uint32_t chunkIndex0, uint32_t chunkIndex1); void releaseJoint(TkJointImpl& joint); TkActorImpl* getActorByChunk(uint32_t chunkIndex); typedef nvidia::shdfnd::Pair<uint32_t, uint32_t> ExternalJointKey; //!< The chunk indices within the TkFamily objects joined by the joint. These chunks will be support chunks. TkJointImpl* findExternalJoint(const TkFamilyImpl* otherFamily, ExternalJointKey key) const; private: TkActorImpl* getActorByIndex(uint32_t index); struct JointSet { NvBlastID m_familyID; HashMap<ExternalJointKey, TkJointImpl*>::type m_joints; }; typedef HashMap<NvBlastID, uint32_t>::type FamilyIDMap; NvBlastFamily* m_familyLL; Array<TkActorImpl>::type m_actors; uint32_t m_internalJointCount; Array<uint8_t>::type m_internalJointBuffer; Array<JointSet*>::type m_jointSets; FamilyIDMap m_familyIDMap; const TkAssetImpl* m_asset; TkEventQueue m_queue; }; //////// TkFamilyImpl inline methods //////// NV_INLINE const TkAssetImpl* TkFamilyImpl::getAssetImpl() const { return m_asset; } NV_INLINE NvBlastFamily* TkFamilyImpl::getFamilyLLInternal() const { return m_familyLL; } NV_INLINE uint32_t TkFamilyImpl::getActorCountInternal() const { NVBLAST_ASSERT(m_familyLL != nullptr); return NvBlastFamilyGetActorCount(m_familyLL, logLL); } NV_INLINE TkActorImpl* TkFamilyImpl::getActorByIndex(uint32_t index) { NVBLAST_ASSERT(index < m_actors.size()); return &m_actors[index]; } NV_INLINE TkActorImpl* TkFamilyImpl::getActorByActorLL(const NvBlastActor* actorLL) { uint32_t index = NvBlastActorGetIndex(actorLL, logLL); return getActorByIndex(index); } NV_INLINE Array<TkActorImpl>::type& TkFamilyImpl::getActorsInternal() { return m_actors; } NV_INLINE uint32_t TkFamilyImpl::getInternalJointCount() const { return m_internalJointCount; } NV_INLINE TkJointImpl* TkFamilyImpl::getInternalJoints() const { 
return const_cast<TkJointImpl*>(reinterpret_cast<const TkJointImpl*>(m_internalJointBuffer.begin())); } NV_INLINE void TkFamilyImpl::releaseJoint(TkJointImpl& joint) { NVBLAST_ASSERT(joint.m_owner == this); NVBLAST_ASSERT(&joint >= getInternalJoints() && &joint < getInternalJoints() + getInternalJointCount() * sizeof(TkJointImpl)); joint.~TkJointImpl(); joint.m_owner = nullptr; } //////// Inline global functions //////// NV_INLINE const NvBlastID& getFamilyID(const TkActor* actor) { return actor != nullptr ? static_cast<const TkActorImpl*>(actor)->getFamilyImpl().getIDInternal() : *reinterpret_cast<const NvBlastID*>("\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0"); } NV_INLINE const NvBlastID& getFamilyID(const TkFamilyImpl* family) { return family != nullptr ? family->getIDInternal() : *reinterpret_cast<const NvBlastID*>("\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0"); } } // namespace Blast } // namespace Nv #endif // ifndef NVBLASTTKFAMILYIMPL_H
7,463
C
31.593886
182
0.679619
NVIDIA-Omniverse/PhysX/blast/source/sdk/globals/NvBlastInternalProfiler.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #ifndef NVBLASTINTERNALPROFILER_H #define NVBLASTINTERNALPROFILER_H #include "NvPreprocessor.h" #if NV_NVTX #include "nvToolsExt.h" NV_INLINE void platformZoneStart(const char* name) { nvtxRangePushA(name); } NV_INLINE void platformZoneEnd() { nvtxRangePop(); } #else NV_INLINE void platformZoneStart(const char*) { } NV_INLINE void platformZoneEnd() { } #endif namespace Nv { namespace Blast { /** Profiler detail to be reported. 
The higher setting is used, the more details are reported. */ struct InternalProfilerDetail { enum Level { LOW, MEDIUM, HIGH }; }; NV_C_API void NvBlastInternalProfilerSetPlatformEnabled(bool platformEnabled); NV_C_API void NvBlastInternalProfilerSetDetail(Nv::Blast::InternalProfilerDetail::Level); NV_C_API Nv::Blast::InternalProfilerDetail::Level NvBlastInternalProfilerGetDetail(); #if NV_PROFILE NV_C_API void NvBlastProfilerBegin(const char* name, Nv::Blast::InternalProfilerDetail::Level); NV_C_API void NvBlastProfilerEnd(const void* name, Nv::Blast::InternalProfilerDetail::Level); class ProfileScope { public: ProfileScope(const char* name, InternalProfilerDetail::Level level) :m_name(name), m_level(level) { NvBlastProfilerBegin(m_name, m_level); } ~ProfileScope() { NvBlastProfilerEnd(m_name, m_level); } private: const char* m_name; InternalProfilerDetail::Level m_level; }; #define BLAST_PROFILE_PREFIX "Blast: " #define BLAST_PROFILE_ZONE_BEGIN(name) Nv::Blast::NvBlastProfilerBegin(BLAST_PROFILE_PREFIX name, Nv::Blast::InternalProfilerDetail::HIGH) #define BLAST_PROFILE_ZONE_END(name) Nv::Blast::NvBlastProfilerEnd(BLAST_PROFILE_PREFIX name, Nv::Blast::InternalProfilerDetail::HIGH) #define BLAST_PROFILE_SCOPE(name, detail) Nv::Blast::ProfileScope NV_CONCAT(_scope,__LINE__) (BLAST_PROFILE_PREFIX name, detail) #define BLAST_PROFILE_SCOPE_L(name) BLAST_PROFILE_SCOPE(name, Nv::Blast::InternalProfilerDetail::LOW) #define BLAST_PROFILE_SCOPE_M(name) BLAST_PROFILE_SCOPE(name, Nv::Blast::InternalProfilerDetail::MEDIUM) #define BLAST_PROFILE_SCOPE_H(name) BLAST_PROFILE_SCOPE(name, Nv::Blast::InternalProfilerDetail::HIGH) #else #define BLAST_PROFILE_ZONE_BEGIN(name) #define BLAST_PROFILE_ZONE_END(name) #define BLAST_PROFILE_SCOPE_L(name) #define BLAST_PROFILE_SCOPE_M(name) #define BLAST_PROFILE_SCOPE_H(name) #endif } // namespace Blast } // namespace Nv #endif
4,037
C
35.709091
143
0.747089
NVIDIA-Omniverse/PhysX/blast/source/sdk/globals/NvBlastGlobals.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. 
#include "NvBlastGlobals.h" #include "NvBlastAssert.h" #include "NvAllocatorCallback.h" #include "NvErrorCallback.h" #include "NsGlobals.h" #include <cstdlib> #include <sstream> #include <iostream> #if NV_WINDOWS_FAMILY #include <windows.h> #endif #if NV_WINDOWS_FAMILY || NV_LINUX_FAMILY #include <malloc.h> #endif namespace Nv { namespace Blast { #if NV_WINDOWS_FAMILY // on win32 we only have 8-byte alignment guaranteed, but the CRT provides special aligned allocation fns NV_FORCE_INLINE void* platformAlignedAlloc(size_t size) { return _aligned_malloc(size, 16); } NV_FORCE_INLINE void platformAlignedFree(void* ptr) { _aligned_free(ptr); } #elif NV_LINUX_FAMILY NV_FORCE_INLINE void* platformAlignedAlloc(size_t size) { return ::memalign(16, size); } NV_FORCE_INLINE void platformAlignedFree(void* ptr) { ::free(ptr); } #else NV_FORCE_INLINE void* platformAlignedAlloc(size_t size) { const int A = 16; unsigned char* mem = (unsigned char*)malloc(size + A); const unsigned char offset = (unsigned char)((uintptr_t)A - (uintptr_t)mem % A - 1); mem += offset; *mem++ = offset; return mem; } NV_FORCE_INLINE void platformAlignedFree(void* ptr) { if (ptr != nullptr) { unsigned char* mem = (unsigned char*)ptr; const unsigned char offset = *--mem; ::free(mem - offset); } } #endif class DefaultAllocatorCallback : public nvidia::NvAllocatorCallback { public: virtual void* allocate(size_t size, const char* typeName, const char* filename, int line) override { NV_UNUSED(typeName); NV_UNUSED(filename); NV_UNUSED(line); return platformAlignedAlloc(size); } virtual void deallocate(void* ptr) override { platformAlignedFree(ptr); } }; DefaultAllocatorCallback s_defaultAllocatorCallback; class DefaultErrorCallback : public nvidia::NvErrorCallback { virtual void reportError(nvidia::NvErrorCode::Enum code, const char* msg, const char* file, int line) override { #if 1 || NV_DEBUG || NV_CHECKED std::stringstream str; str << "NvBlast "; bool critical = false; switch (code) { case 
nvidia::NvErrorCode::eNO_ERROR: str << "[Info]"; critical = false; break; case nvidia::NvErrorCode::eDEBUG_INFO: str << "[Debug Info]"; critical = false; break; case nvidia::NvErrorCode::eDEBUG_WARNING: str << "[Debug Warning]"; critical = false; break; case nvidia::NvErrorCode::eINVALID_PARAMETER: str << "[Invalid Parameter]"; critical = true; break; case nvidia::NvErrorCode::eINVALID_OPERATION: str << "[Invalid Operation]"; critical = true; break; case nvidia::NvErrorCode::eOUT_OF_MEMORY: str << "[Out of] Memory"; critical = true; break; case nvidia::NvErrorCode::eINTERNAL_ERROR: str << "[Internal Error]"; critical = true; break; case nvidia::NvErrorCode::eABORT: str << "[Abort]"; critical = true; break; case nvidia::NvErrorCode::ePERF_WARNING: str << "[Perf Warning]"; critical = false; break; default: NVBLAST_ASSERT(false); } str << file << "(" << line << "): " << msg << "\n"; std::string message = str.str(); std::cout << message; #if NV_WINDOWS_FAMILY OutputDebugStringA(message.c_str()); #endif NVBLAST_ASSERT_WITH_MESSAGE(!critical, message.c_str()); #else NV_UNUSED(code); NV_UNUSED(msg); NV_UNUSED(file); NV_UNUSED(line); #endif } }; static DefaultErrorCallback s_defaultErrorCallback; static nvidia::NvAllocatorCallback* s_allocatorCallback = &s_defaultAllocatorCallback; static nvidia::NvErrorCallback* s_errorCallback = &s_defaultErrorCallback; nvidia::NvProfilerCallback *g_profilerCallback = nullptr; } // namespace Blast } // namespace Nv //////// Global API implementation //////// nvidia::NvAllocatorCallback* NvBlastGlobalGetAllocatorCallback() { return Nv::Blast::s_allocatorCallback; } void NvBlastGlobalSetAllocatorCallback(nvidia::NvAllocatorCallback* allocator) { Nv::Blast::s_allocatorCallback = allocator ? 
allocator : &Nv::Blast::s_defaultAllocatorCallback; } nvidia::NvErrorCallback* NvBlastGlobalGetErrorCallback() { return Nv::Blast::s_errorCallback; } void NvBlastGlobalSetErrorCallback(nvidia::NvErrorCallback* errorCallback) { Nv::Blast::s_errorCallback = errorCallback ? errorCallback : &Nv::Blast::s_defaultErrorCallback; } nvidia::NvProfilerCallback* NvBlastGlobalGetProfilerCallback() { return Nv::Blast::g_profilerCallback; } void NvBlastGlobalSetProfilerCallback(nvidia::NvProfilerCallback* profilerCallback) { Nv::Blast::g_profilerCallback = profilerCallback; }
6,403
C++
32.181347
114
0.689989